Gentoo Archives: gentoo-commits

From: Thomas Deutschmann <whissi@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.7 commit in: /
Date: Wed, 05 Aug 2020 14:36:22
Message-Id: 1596638138.89d0e8ab4377428b936f4a21b215d245e94131a3.whissi@gentoo
commit: 89d0e8ab4377428b936f4a21b215d245e94131a3
Author: Thomas Deutschmann <whissi <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 5 14:33:53 2020 +0000
Commit: Thomas Deutschmann <whissi <AT> gentoo <DOT> org>
CommitDate: Wed Aug 5 14:35:38 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=89d0e8ab

Linux patch 5.7.13

Signed-off-by: Thomas Deutschmann <whissi <AT> gentoo.org>

0000_README | 4 +
1012_linux-5.7.13.patch | 3752 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 3756 insertions(+)

diff --git a/0000_README b/0000_README
index 21eff3a..a388fef 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch: 1011_linux-5.7.12.patch
From: http://www.kernel.org
Desc: Linux 5.7.12

+Patch: 1012_linux-5.7.13.patch
+From: http://www.kernel.org
+Desc: Linux 5.7.13
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1012_linux-5.7.13.patch b/1012_linux-5.7.13.patch
new file mode 100644
index 0000000..f28c06a
--- /dev/null
+++ b/1012_linux-5.7.13.patch
@@ -0,0 +1,3752 @@
+diff --git a/Makefile b/Makefile
+index 401d58b35e61..b77b4332a41a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 7
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
+index e038abc0c6b4..420ae26e846b 100644
+--- a/arch/arm/boot/dts/armada-38x.dtsi
++++ b/arch/arm/boot/dts/armada-38x.dtsi
+@@ -344,7 +344,8 @@
+
+ comphy: phy@18300 {
+ compatible = "marvell,armada-380-comphy";
+- reg = <0x18300 0x100>;
++ reg-names = "comphy", "conf";
++ reg = <0x18300 0x100>, <0x18460 4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+diff --git a/arch/arm/boot/dts/imx6qdl-icore.dtsi b/arch/arm/boot/dts/imx6qdl-icore.dtsi
+index 756f3a9f1b4f..12997dae35d9 100644
+--- a/arch/arm/boot/dts/imx6qdl-icore.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-icore.dtsi
+@@ -397,7 +397,7 @@
+
+ pinctrl_usbotg: usbotggrp {
+ fsl,pins = <
+- MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
+ >;
+ };
+
+@@ -409,6 +409,7 @@
+ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17070
+ MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17070
+ MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17070
++ MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x1b0b0
+ >;
+ };
+
+diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts
+index 825924448ab4..14fd1de52a68 100644
+--- a/arch/arm/boot/dts/imx6sx-sabreauto.dts
++++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts
+@@ -99,7 +99,7 @@
+ &fec2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet2>;
+- phy-mode = "rgmii";
++ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy0>;
+ fsl,magic-packet;
+ status = "okay";
+diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
+index 3e5fb72f21fc..c99aa273c296 100644
+--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
++++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
+@@ -213,7 +213,7 @@
+ &fec2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet2>;
+- phy-mode = "rgmii";
++ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy2>;
+ status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
+index bf531efc0610..0f95a6ef8543 100644
+--- a/arch/arm/boot/dts/sun4i-a10.dtsi
++++ b/arch/arm/boot/dts/sun4i-a10.dtsi
+@@ -198,7 +198,7 @@
+ default-pool {
+ compatible = "shared-dma-pool";
+ size = <0x6000000>;
+- alloc-ranges = <0x4a000000 0x6000000>;
++ alloc-ranges = <0x40000000 0x10000000>;
+ reusable;
+ linux,cma-default;
+ };
+diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi
+index e6b036734a64..c2b4fbf552a3 100644
+--- a/arch/arm/boot/dts/sun5i.dtsi
++++ b/arch/arm/boot/dts/sun5i.dtsi
+@@ -117,7 +117,7 @@
+ default-pool {
+ compatible = "shared-dma-pool";
+ size = <0x6000000>;
+- alloc-ranges = <0x4a000000 0x6000000>;
++ alloc-ranges = <0x40000000 0x10000000>;
+ reusable;
+ linux,cma-default;
+ };
+diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
+index ffe1d10a1a84..6d6a37940db2 100644
+--- a/arch/arm/boot/dts/sun7i-a20.dtsi
++++ b/arch/arm/boot/dts/sun7i-a20.dtsi
+@@ -181,7 +181,7 @@
+ default-pool {
+ compatible = "shared-dma-pool";
+ size = <0x6000000>;
+- alloc-ranges = <0x4a000000 0x6000000>;
++ alloc-ranges = <0x40000000 0x10000000>;
+ reusable;
+ linux,cma-default;
+ };
+diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
+index 02ca7adf5375..7fff88e61252 100644
+--- a/arch/arm/kernel/hw_breakpoint.c
++++ b/arch/arm/kernel/hw_breakpoint.c
+@@ -683,6 +683,12 @@ static void disable_single_step(struct perf_event *bp)
+ arch_install_hw_breakpoint(bp);
+ }
+
++static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
++ struct arch_hw_breakpoint *info)
++{
++ return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER;
++}
++
+ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+ {
+@@ -742,16 +748,27 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
+ }
+
+ pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
++
++ /*
++ * If we triggered a user watchpoint from a uaccess routine,
++ * then handle the stepping ourselves since userspace really
++ * can't help us with this.
++ */
++ if (watchpoint_fault_on_uaccess(regs, info))
++ goto step;
++
+ perf_bp_event(wp, regs);
+
+ /*
+- * If no overflow handler is present, insert a temporary
+- * mismatch breakpoint so we can single-step over the
+- * watchpoint trigger.
++ * Defer stepping to the overflow handler if one is installed.
++ * Otherwise, insert a temporary mismatch breakpoint so that
++ * we can single-step over the watchpoint trigger.
+ */
+- if (is_default_overflow_handler(wp))
+- enable_single_step(wp, instruction_pointer(regs));
+- if (!is_default_overflow_handler(wp))
+- goto unlock;
+
+step:
+ enable_single_step(wp, instruction_pointer(regs));
+ unlock:
+ rcu_read_unlock();
+ }
+diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
+index e0330a25e1c6..28cfe7bad1bf 100644
+--- a/arch/arm/kernel/vdso.c
++++ b/arch/arm/kernel/vdso.c
+@@ -184,6 +184,7 @@ static void __init patch_vdso(void *ehdr)
+ if (!cntvct_ok) {
+ vdso_nullpatch_one(&einfo, "__vdso_gettimeofday");
+ vdso_nullpatch_one(&einfo, "__vdso_clock_gettime");
++ vdso_nullpatch_one(&einfo, "__vdso_clock_gettime64");
+ }
+ }
+
+diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
+index 12f0eb56a1cc..619db9b4c9d5 100644
+--- a/arch/arm64/include/asm/alternative.h
++++ b/arch/arm64/include/asm/alternative.h
+@@ -77,9 +77,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
+ "663:\n\t" \
+ newinstr "\n" \
+ "664:\n\t" \
+- ".previous\n\t" \
+ ".org . - (664b-663b) + (662b-661b)\n\t" \
+- ".org . - (662b-661b) + (664b-663b)\n" \
++ ".org . - (662b-661b) + (664b-663b)\n\t" \
++ ".previous\n" \
+ ".endif\n"
+
+ #define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \
+diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h
+index b6f7bc6da5fb..93a161b3bf3f 100644
+--- a/arch/arm64/include/asm/checksum.h
++++ b/arch/arm64/include/asm/checksum.h
+@@ -24,16 +24,17 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+ {
+ __uint128_t tmp;
+ u64 sum;
++ int n = ihl; /* we want it signed */
+
+ tmp = *(const __uint128_t *)iph;
+ iph += 16;
+- ihl -= 4;
++ n -= 4;
+ tmp += ((tmp >> 64) | (tmp << 64));
+ sum = tmp >> 64;
+ do {
+ sum += *(const u32 *)iph;
+ iph += 4;
+- } while (--ihl);
++ } while (--n > 0);
+
+ sum += ((sum >> 32) | (sum << 32));
+ return csum_fold((__force u32)(sum >> 32));
+diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
+index ab5c215cf46c..068958575871 100644
+--- a/arch/parisc/include/asm/cmpxchg.h
++++ b/arch/parisc/include/asm/cmpxchg.h
+@@ -60,6 +60,7 @@ extern void __cmpxchg_called_with_bad_pointer(void);
+ extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
+ unsigned int new_);
+ extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
++extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);
+
+ /* don't worry...optimizer will get rid of most of this */
+ static inline unsigned long
+@@ -71,6 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
+ #endif
+ case 4: return __cmpxchg_u32((unsigned int *)ptr,
+ (unsigned int)old, (unsigned int)new_);
++ case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
+index 70ffbcf889b8..2e4d1f05a926 100644
+--- a/arch/parisc/lib/bitops.c
++++ b/arch/parisc/lib/bitops.c
+@@ -79,3 +79,15 @@ unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsign
+ _atomic_spin_unlock_irqrestore(ptr, flags);
+ return (unsigned long)prev;
+ }
++
++u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
++{
++ unsigned long flags;
++ u8 prev;
++
++ _atomic_spin_lock_irqsave(ptr, flags);
++ if ((prev = *ptr) == old)
++ *ptr = new;
++ _atomic_spin_unlock_irqrestore(ptr, flags);
++ return prev;
++}
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index 81493cee0a16..115fb9245f16 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -146,33 +146,36 @@ void __init setup_bootmem(void)
+ {
+ struct memblock_region *reg;
+ phys_addr_t mem_size = 0;
++ phys_addr_t total_mem = 0;
++ phys_addr_t mem_start, end = 0;
+ phys_addr_t vmlinux_end = __pa_symbol(&_end);
+ phys_addr_t vmlinux_start = __pa_symbol(&_start);
+
+ /* Find the memory region containing the kernel */
+ for_each_memblock(memory, reg) {
+- phys_addr_t end = reg->base + reg->size;
+-
+- if (reg->base <= vmlinux_start && vmlinux_end <= end) {
+- mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
+-
+- /*
+- * Remove memblock from the end of usable area to the
+- * end of region
+- */
+- if (reg->base + mem_size < end)
+- memblock_remove(reg->base + mem_size,
+- end - reg->base - mem_size);
+- }
++ end = reg->base + reg->size;
++ if (!total_mem)
++ mem_start = reg->base;
++ if (reg->base <= vmlinux_start && vmlinux_end <= end)
++ BUG_ON(reg->size == 0);
++ total_mem = total_mem + reg->size;
+ }
+- BUG_ON(mem_size == 0);
++
++ /*
++ * Remove memblock from the end of usable area to the
++ * end of region
++ */
++ mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET);
++ if (mem_start + mem_size < end)
++ memblock_remove(mem_start + mem_size,
++ end - mem_start - mem_size);
+
+ /* Reserve from the start of the kernel to the end of the kernel */
+ memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
+
+- set_max_mapnr(PFN_DOWN(mem_size));
+ max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+ max_low_pfn = max_pfn;
++ set_max_mapnr(max_low_pfn);
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+ setup_initrd();
+diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
+index ec0ca90dd900..7a580c8ad603 100644
+--- a/arch/riscv/mm/kasan_init.c
++++ b/arch/riscv/mm/kasan_init.c
+@@ -44,7 +44,7 @@ asmlinkage void __init kasan_early_init(void)
+ (__pa(((uintptr_t) kasan_early_shadow_pmd))),
+ __pgprot(_PAGE_TABLE)));
+
+- flush_tlb_all();
++ local_flush_tlb_all();
+ }
+
+ static void __init populate(void *start, void *end)
+@@ -79,7 +79,7 @@ static void __init populate(void *start, void *end)
+ pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
+ __pgprot(_PAGE_TABLE)));
+
+- flush_tlb_all();
++ local_flush_tlb_all();
+ memset(start, 0, end - start);
+ }
+
+diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
+index 22d968bfe9bb..d770da3f8b6f 100644
+--- a/arch/sh/include/asm/pgalloc.h
++++ b/arch/sh/include/asm/pgalloc.h
+@@ -12,6 +12,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+ extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
+ extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
+ extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
++#define __pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, (pmdp))
+ #endif
+
+ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+@@ -33,13 +34,4 @@ do { \
+ tlb_remove_page((tlb), (pte)); \
+ } while (0)
+
+-#if CONFIG_PGTABLE_LEVELS > 2
+-#define __pmd_free_tlb(tlb, pmdp, addr) \
+-do { \
+- struct page *page = virt_to_page(pmdp); \
+- pgtable_pmd_page_dtor(page); \
+- tlb_remove_page((tlb), page); \
+-} while (0);
+-#endif
+-
+ #endif /* __ASM_SH_PGALLOC_H */
+diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
+index 956a7a03b0c8..9bac5bbb67f3 100644
+--- a/arch/sh/kernel/entry-common.S
++++ b/arch/sh/kernel/entry-common.S
+@@ -199,7 +199,7 @@ syscall_trace_entry:
+ mov.l @(OFF_R7,r15), r7 ! arg3
+ mov.l @(OFF_R3,r15), r3 ! syscall_nr
+ !
+- mov.l 2f, r10 ! Number of syscalls
++ mov.l 6f, r10 ! Number of syscalls
+ cmp/hs r10, r3
+ bf syscall_call
+ mov #-ENOSYS, r0
+@@ -353,7 +353,7 @@ ENTRY(system_call)
+ tst r9, r8
+ bf syscall_trace_entry
+ !
+- mov.l 2f, r8 ! Number of syscalls
++ mov.l 6f, r8 ! Number of syscalls
+ cmp/hs r8, r3
+ bt syscall_badsys
+ !
+@@ -392,7 +392,7 @@ syscall_exit:
+ #if !defined(CONFIG_CPU_SH2)
+ 1: .long TRA
+ #endif
+-2: .long NR_syscalls
++6: .long NR_syscalls
+ 3: .long sys_call_table
+ 7: .long do_syscall_trace_enter
+ 8: .long do_syscall_trace_leave
+diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
+index 519649ddf100..fe522691ac71 100644
+--- a/arch/x86/kernel/i8259.c
++++ b/arch/x86/kernel/i8259.c
+@@ -207,7 +207,7 @@ spurious_8259A_irq:
+ * lets ACK and report it. [once per IRQ]
+ */
+ if (!(spurious_irq_mask & irqmask)) {
+- printk(KERN_DEBUG
++ printk_deferred(KERN_DEBUG
+ "spurious 8259A interrupt: IRQ%d.\n", irq);
+ spurious_irq_mask |= irqmask;
+ }
+diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
+index 6ad43fc44556..2fd698e28e4d 100644
+--- a/arch/x86/kernel/stacktrace.c
++++ b/arch/x86/kernel/stacktrace.c
+@@ -58,7 +58,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+ * or a page fault), which can make frame pointers
+ * unreliable.
+ */
+-
+ if (IS_ENABLED(CONFIG_FRAME_POINTER))
+ return -EINVAL;
+ }
+@@ -81,10 +80,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+ if (unwind_error(&state))
+ return -EINVAL;
+
+- /* Success path for non-user tasks, i.e. kthreads and idle tasks */
+- if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
+- return -EINVAL;
+-
+ return 0;
+ }
+
+diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
+index 7f969b2d240f..ec88bbe08a32 100644
+--- a/arch/x86/kernel/unwind_orc.c
++++ b/arch/x86/kernel/unwind_orc.c
+@@ -440,8 +440,11 @@ bool unwind_next_frame(struct unwind_state *state)
+ /*
+ * Find the orc_entry associated with the text address.
+ *
+- * Decrement call return addresses by one so they work for sibling
+- * calls and calls to noreturn functions.
++ * For a call frame (as opposed to a signal frame), state->ip points to
++ * the instruction after the call. That instruction's stack layout
++ * could be different from the call instruction's layout, for example
++ * if the call was to a noreturn function. So get the ORC data for the
++ * call instruction itself.
+ */
+ orc = orc_find(state->signal ? state->ip : state->ip - 1);
+ if (!orc) {
+@@ -662,6 +665,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
+ state->sp = task->thread.sp;
+ state->bp = READ_ONCE_NOCHECK(frame->bp);
+ state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
++ state->signal = (void *)state->ip == ret_from_fork;
+ }
+
+ if (get_stack_info((unsigned long *)state->sp, state->task,
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 8967e320a978..6b26deccedfd 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2136,7 +2136,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
+ {
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+- if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
++ if (!kvm_apic_present(vcpu) || apic_lvtt_oneshot(apic) ||
+ apic_lvtt_period(apic))
+ return;
+
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index a862c768fd54..7dbfc0bc738c 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1105,7 +1105,7 @@ static void init_vmcb(struct vcpu_svm *svm)
+ svm->nested.vmcb = 0;
+ svm->vcpu.arch.hflags = 0;
+
+- if (pause_filter_count) {
++ if (!kvm_pause_in_guest(svm->vcpu.kvm)) {
+ control->pause_filter_count = pause_filter_count;
+ if (pause_filter_thresh)
+ control->pause_filter_thresh = pause_filter_thresh;
+@@ -2682,7 +2682,7 @@ static int pause_interception(struct vcpu_svm *svm)
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+ bool in_kernel = (svm_get_cpl(vcpu) == 0);
+
+- if (pause_filter_thresh)
++ if (!kvm_pause_in_guest(vcpu->kvm))
+ grow_ple_window(vcpu);
+
+ kvm_vcpu_on_spin(vcpu, in_kernel);
+@@ -3727,7 +3727,7 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu,
+
+ static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
+ {
+- if (pause_filter_thresh)
++ if (!kvm_pause_in_guest(vcpu->kvm))
+ shrink_ple_window(vcpu);
+ }
+
+@@ -3892,6 +3892,9 @@ static void svm_vm_destroy(struct kvm *kvm)
+
+ static int svm_vm_init(struct kvm *kvm)
+ {
++ if (!pause_filter_count || !pause_filter_thresh)
++ kvm->arch.pause_in_guest = true;
++
+ if (avic) {
+ int ret = avic_vm_init(kvm);
+ if (ret)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index fd1dc3236eca..81f83ee4b12b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -692,9 +692,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ return n ? -EFAULT : 0;
+ }
+ case AMDGPU_INFO_DEV_INFO: {
+- struct drm_amdgpu_info_device dev_info = {};
++ struct drm_amdgpu_info_device dev_info;
+ uint64_t vm_size;
+
++ memset(&dev_info, 0, sizeof(dev_info));
+ dev_info.device_id = dev->pdev->device;
+ dev_info.chip_rev = adev->rev_id;
+ dev_info.external_rev = adev->external_rev_id;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index b14b0b4ffeb2..96b8feb77b15 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -775,7 +775,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+ tmp_str++;
+ while (isspace(*++tmp_str));
+
+- while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
++ while (tmp_str[0]) {
++ sub_str = strsep(&tmp_str, delimiter);
+ ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
+ if (ret)
+ return -EINVAL;
+@@ -1035,7 +1036,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
+ memcpy(buf_cpy, buf, bytes);
+ buf_cpy[bytes] = '\0';
+ tmp = buf_cpy;
+- while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
++ while (tmp[0]) {
++ sub_str = strsep(&tmp, delimiter);
+ if (strlen(sub_str)) {
+ ret = kstrtol(sub_str, 0, &level);
+ if (ret)
+@@ -1632,7 +1634,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
+ i++;
+ memcpy(buf_cpy, buf, count-i);
+ tmp_str = buf_cpy;
+- while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
++ while (tmp_str[0]) {
++ sub_str = strsep(&tmp_str, delimiter);
+ ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
+ if (ret)
+ return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 837a286469ec..d50751ae73f1 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -8489,20 +8489,38 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ * the same resource. If we have a new DC context as part of
+ * the DM atomic state from validation we need to free it and
+ * retain the existing one instead.
++ *
++ * Furthermore, since the DM atomic state only contains the DC
++ * context and can safely be annulled, we can free the state
++ * and clear the associated private object now to free
++ * some memory and avoid a possible use-after-free later.
+ */
+- struct dm_atomic_state *new_dm_state, *old_dm_state;
+
+- new_dm_state = dm_atomic_get_new_state(state);
+- old_dm_state = dm_atomic_get_old_state(state);
++ for (i = 0; i < state->num_private_objs; i++) {
++ struct drm_private_obj *obj = state->private_objs[i].ptr;
+
+- if (new_dm_state && old_dm_state) {
+- if (new_dm_state->context)
+- dc_release_state(new_dm_state->context);
++ if (obj->funcs == adev->dm.atomic_obj.funcs) {
++ int j = state->num_private_objs-1;
+
+- new_dm_state->context = old_dm_state->context;
++ dm_atomic_destroy_state(obj,
++ state->private_objs[i].state);
++
++ /* If i is not at the end of the array then the
++ * last element needs to be moved to where i was
++ * before the array can safely be truncated.
++ */
++ if (i != j)
++ state->private_objs[i] =
++ state->private_objs[j];
+
+- if (old_dm_state->context)
+- dc_retain_state(old_dm_state->context);
++ state->private_objs[j].ptr = NULL;
++ state->private_objs[j].state = NULL;
++ state->private_objs[j].old_state = NULL;
++ state->private_objs[j].new_state = NULL;
++
++ state->num_private_objs = j;
++ break;
++ }
+ }
+ }
+
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index 37627d06fb06..3087aa710e8d 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -872,9 +872,6 @@ err:
+ * @file_priv: drm file-private structure
+ *
+ * Open an object using the global name, returning a handle and the size.
+- *
+- * This handle (of course) holds a reference to the object, so the object
+- * will not go away until the handle is deleted.
+ */
+ int
+ drm_gem_open_ioctl(struct drm_device *dev, void *data,
+@@ -899,14 +896,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
+
+ /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+ ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
+- drm_gem_object_put_unlocked(obj);
+ if (ret)
+- return ret;
++ goto err;
+
+ args->handle = handle;
+ args->size = obj->size;
+
+- return 0;
++err:
++ drm_gem_object_put_unlocked(obj);
++ return ret;
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
+index 558baf989f5a..7d2211016eda 100644
+--- a/drivers/gpu/drm/drm_mipi_dbi.c
++++ b/drivers/gpu/drm/drm_mipi_dbi.c
+@@ -938,7 +938,7 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc,
+ }
+ }
+
+- tr.len = chunk;
++ tr.len = chunk * 2;
+ len -= chunk;
+
+ ret = spi_sync(spi, &m);
+diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
+index b50b44e76279..8fc3f67e3e76 100644
+--- a/drivers/gpu/drm/drm_of.c
++++ b/drivers/gpu/drm/drm_of.c
+@@ -322,10 +322,8 @@ static int drm_of_lvds_get_remote_pixels_type(
+ * configurations by passing the endpoints explicitly to
+ * drm_of_lvds_get_dual_link_pixel_order().
+ */
+- if (!current_pt || pixels_type != current_pt) {
+- of_node_put(remote_port);
++ if (!current_pt || pixels_type != current_pt)
+ return -EINVAL;
+- }
+ }
+
+ return pixels_type;
+diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
+index e59907e68854..d72ac23cd110 100644
+--- a/drivers/gpu/drm/mcde/mcde_display.c
++++ b/drivers/gpu/drm/mcde/mcde_display.c
+@@ -1060,9 +1060,14 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe,
+ */
+ if (fb) {
+ mcde_set_extsrc(mcde, drm_fb_cma_get_gem_addr(fb, pstate, 0));
+- if (!mcde->video_mode)
+- /* Send a single frame using software sync */
+- mcde_display_send_one_frame(mcde);
++ if (!mcde->video_mode) {
++ /*
++ * Send a single frame using software sync if the flow
++ * is not active yet.
++ */
++ if (mcde->flow_active == 0)
++ mcde_display_send_one_frame(mcde);
++ }
+ dev_info_once(mcde->dev, "sent first display update\n");
+ } else {
+ /*
+diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
+index 89d58f7d2a25..1efdabb5adca 100644
+--- a/drivers/i2c/busses/i2c-cadence.c
++++ b/drivers/i2c/busses/i2c-cadence.c
+@@ -230,20 +230,21 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
+ /* Read data if receive data valid is set */
+ while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
+ CDNS_I2C_SR_RXDV) {
+- /*
+- * Clear hold bit that was set for FIFO control if
+- * RX data left is less than FIFO depth, unless
+- * repeated start is selected.
+- */
+- if ((id->recv_count < CDNS_I2C_FIFO_DEPTH) &&
+- !id->bus_hold_flag)
+- cdns_i2c_clear_bus_hold(id);
+-
+ if (id->recv_count > 0) {
+ *(id->p_recv_buf)++ =
+ cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
+ id->recv_count--;
+ id->curr_recv_count--;
++
++ /*
++ * Clear hold bit that was set for FIFO control
++ * if RX data left is less than or equal to
++ * FIFO DEPTH unless repeated start is selected
++ */
++ if (id->recv_count <= CDNS_I2C_FIFO_DEPTH &&
++ !id->bus_hold_flag)
++ cdns_i2c_clear_bus_hold(id);
++
+ } else {
+ dev_err(id->adap.dev.parent,
+ "xfer_size reg rollover. xfer aborted!\n");
+@@ -382,10 +383,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
+ * Check for the message size against FIFO depth and set the
+ * 'hold bus' bit if it is greater than FIFO depth.
+ */
+- if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
++ if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
+ ctrl_reg |= CDNS_I2C_CR_HOLD;
+- else
+- ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
+
+ cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
+
+@@ -442,11 +441,8 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
+ * Check for the message size against FIFO depth and set the
+ * 'hold bus' bit if it is greater than FIFO depth.
+ */
+- if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
++ if (id->send_count > CDNS_I2C_FIFO_DEPTH)
+ ctrl_reg |= CDNS_I2C_CR_HOLD;
+- else
+- ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
+-
+ cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
+
+ /* Clear the interrupts in interrupt status register. */
+diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
+index 4f25b2400694..6bb62d04030a 100644
+--- a/drivers/infiniband/core/cq.c
++++ b/drivers/infiniband/core/cq.c
+@@ -68,6 +68,15 @@ static void rdma_dim_init(struct ib_cq *cq)
+ INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
+ }
+
++static void rdma_dim_destroy(struct ib_cq *cq)
++{
++ if (!cq->dim)
++ return;
++
++ cancel_work_sync(&cq->dim->work);
++ kfree(cq->dim);
++}
++
+ static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
+ {
+ int rc;
+@@ -261,6 +270,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
+ return cq;
+
+ out_destroy_cq:
++ rdma_dim_destroy(cq);
+ rdma_restrack_del(&cq->res);
+ cq->device->ops.destroy_cq(cq, udata);
+ out_free_wc:
+@@ -324,12 +334,10 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
+ WARN_ON_ONCE(1);
+ }
+
++ rdma_dim_destroy(cq);
+ trace_cq_free(cq);
+ rdma_restrack_del(&cq->res);
+ cq->device->ops.destroy_cq(cq, udata);
+- if (cq->dim)
+- cancel_work_sync(&cq->dim->work);
+- kfree(cq->dim);
+ kfree(cq->wc);
+ kfree(cq);
+ }
+diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
+index bdeb6500a919..b56d812b8a7b 100644
+--- a/drivers/infiniband/hw/mlx5/odp.c
++++ b/drivers/infiniband/hw/mlx5/odp.c
+@@ -1798,9 +1798,7 @@ static bool init_prefetch_work(struct ib_pd *pd,
+ work->frags[i].mr =
+ get_prefetchable_mr(pd, advice, sg_list[i].lkey);
+ if (!work->frags[i].mr) {
+- work->num_sge = i - 1;
+- if (i)
+- destroy_prefetch_work(work);
++ work->num_sge = i;
+ return false;
+ }
+
+@@ -1866,6 +1864,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
+ srcu_key = srcu_read_lock(&dev->odp_srcu);
+ if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
+ srcu_read_unlock(&dev->odp_srcu, srcu_key);
++ destroy_prefetch_work(work);
+ return -EINVAL;
+ }
+ queue_work(system_unbound_wq, &work->work);
+diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
+index ca29954a54ac..94372408cb5e 100644
+--- a/drivers/infiniband/sw/rdmavt/qp.c
++++ b/drivers/infiniband/sw/rdmavt/qp.c
+@@ -898,8 +898,6 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ qp->s_tail_ack_queue = 0;
+ qp->s_acked_ack_queue = 0;
+ qp->s_num_rd_atomic = 0;
+- if (qp->r_rq.kwq)
+- qp->r_rq.kwq->count = qp->r_rq.size;
+ qp->r_sge.num_sge = 0;
+ atomic_set(&qp->s_reserved_used, 0);
+ }
+@@ -2352,31 +2350,6 @@ bad_lkey:
+ return 0;
+ }
+
+-/**
+- * get_count - count numbers of request work queue entries
+- * in circular buffer
+- * @rq: data structure for request queue entry
+- * @tail: tail indices of the circular buffer
+- * @head: head indices of the circular buffer
+- *
+- * Return - total number of entries in the circular buffer
+- */
+-static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head)
+-{
+- u32 count;
+-
+- count = head;
+-
+- if (count >= rq->size)
+- count = 0;
+- if (count < tail)
+- count += rq->size - tail;
+- else
+- count -= tail;
+-
+- return count;
+-}
+-
+ /**
+ * get_rvt_head - get head indices of the circular buffer
+ * @rq: data structure for request queue entry
+@@ -2451,7 +2424,7 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
+
+ if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
+ head = get_rvt_head(rq, ip);
+- kwq->count = get_count(rq, tail, head);
++ kwq->count = rvt_get_rq_count(rq, head, tail);
+ }
+ if (unlikely(kwq->count == 0)) {
+ ret = 0;
+@@ -2486,7 +2459,9 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
+ * the number of remaining WQEs.
+ */
+ if (kwq->count < srq->limit) {
+- kwq->count = get_count(rq, tail, get_rvt_head(rq, ip));
++ kwq->count =
++ rvt_get_rq_count(rq,
++ get_rvt_head(rq, ip), tail);
+ if (kwq->count < srq->limit) {
+ struct ib_event ev;
+
+diff --git a/drivers/infiniband/sw/rdmavt/rc.c b/drivers/infiniband/sw/rdmavt/rc.c
+index 977906cc0d11..c58735f4c94a 100644
+--- a/drivers/infiniband/sw/rdmavt/rc.c
++++ b/drivers/infiniband/sw/rdmavt/rc.c
+@@ -127,9 +127,7 @@ __be32 rvt_compute_aeth(struct rvt_qp *qp)
+ * not atomic, which is OK, since the fuzziness is
+ * resolved as further ACKs go out.
+ */
+- credits = head - tail;
+- if ((int)credits < 0)
+- credits += qp->r_rq.size;
++ credits = rvt_get_rq_count(&qp->r_rq, head, tail);
+ }
+ /*
+ * Binary search the credit table to find the code to
+diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
+index 409276b6374d..e7c8e7473226 100644
+--- a/drivers/misc/habanalabs/command_submission.c
++++ b/drivers/misc/habanalabs/command_submission.c
+@@ -425,11 +425,19 @@ static int validate_queue_index(struct hl_device *hdev,
+ struct asic_fixed_properties *asic = &hdev->asic_prop;
+ struct hw_queue_properties *hw_queue_prop;
+
++ /* This must be checked here to prevent out-of-bounds access to
++ * hw_queues_props array
++ */
++ if (chunk->queue_index >= HL_MAX_QUEUES) {
++ dev_err(hdev->dev, "Queue index %d is invalid\n",
++ chunk->queue_index);
++ return -EINVAL;
++ }
++
+ hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
+
+- if ((chunk->queue_index >= HL_MAX_QUEUES) ||
+- (hw_queue_prop->type == QUEUE_TYPE_NA)) {
+- dev_err(hdev->dev, "Queue index %d is invalid\n",
++ if (hw_queue_prop->type == QUEUE_TYPE_NA) {
++ dev_err(hdev->dev, "Queue index %d is not applicable\n",
+ chunk->queue_index);
+ return -EINVAL;
+ }
+diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
+index 3dd46cd55114..88e7900853db 100644
+--- a/drivers/net/bareudp.c
++++ b/drivers/net/bareudp.c
+@@ -407,19 +407,34 @@ free_dst:
+ return err;
+ }
+
++static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto)
++{
++ if (bareudp->ethertype == proto)
++ return true;
++
++ if (!bareudp->multi_proto_mode)
++ return false;
++
++ if (bareudp->ethertype == htons(ETH_P_MPLS_UC) &&
++ proto == htons(ETH_P_MPLS_MC))
++ return true;
++
++ if (bareudp->ethertype == htons(ETH_P_IP) &&
++ proto == htons(ETH_P_IPV6))
++ return true;
++
++ return false;
++}
++
+ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+ struct ip_tunnel_info *info = NULL;
+ int err;
+
+- if (skb->protocol != bareudp->ethertype) {
+- if (!bareudp->multi_proto_mode ||
+- (skb->protocol != htons(ETH_P_MPLS_MC) &&
+- skb->protocol != htons(ETH_P_IPV6))) {
+- err = -EINVAL;
+- goto tx_error;
+- }
++ if (!bareudp_proto_valid(bareudp, skb->protocol)) {
++ err = -EINVAL;
++ goto tx_error;
+ }
+
+ info = skb_tunnel_info(skb);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+index 28ce9856a078..0f5ca68c9854 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+@@ -2925,6 +2925,7 @@ static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
+ txq_info = adap->sge.uld_txq_info[tx_uld_type];
+ if (unlikely(!txq_info)) {
+ WARN_ON(true);
++ kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+
+diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
+index 5bff5c2be88b..5359fb40578d 100644
+--- a/drivers/net/ethernet/cortina/gemini.c
++++ b/drivers/net/ethernet/cortina/gemini.c
+@@ -2445,6 +2445,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
+ port->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(port->reset)) {
+ dev_err(dev, "no reset\n");
++ clk_disable_unprepare(port->pclk);
+ return PTR_ERR(port->reset);
+ }
+ reset_control_reset(port->reset);
+@@ -2500,8 +2501,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
+ IRQF_SHARED,
+ port_names[port->id],
+ port);
+- if (ret)
++ if (ret) {
++ clk_disable_unprepare(port->pclk);
+ return ret;
++ }
+
+ ret = register_netdev(netdev);
+ if (!ret) {
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index df1cb0441183..6e186aea7a2f 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -1098,16 +1098,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
+ int k, sizeoflast;
+ dma_addr_t dma;
+
+- if (type == DESC_TYPE_SKB) {
+- struct sk_buff *skb = (struct sk_buff *)priv;
+- int ret;
+-
+- ret = hns3_fill_skb_desc(ring, skb, desc);
+- if (unlikely(ret < 0))
+- return ret;
+-
+- dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+- } else if (type == DESC_TYPE_FRAGLIST_SKB) {
++ if (type == DESC_TYPE_FRAGLIST_SKB ||
++ type == DESC_TYPE_SKB) {
+ struct sk_buff *skb = (struct sk_buff *)priv;
+
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+@@ -1452,6 +1444,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
+
+ next_to_use_head = ring->next_to_use;
+
++ ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]);
++ if (unlikely(ret < 0))
++ goto fill_err;
++
+ ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
+ if (unlikely(ret < 0))
+ goto fill_err;
+@@ -4174,8 +4170,8 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
+ return;
+
+ if (linkup) {
+- netif_carrier_on(netdev);
+ netif_tx_wake_all_queues(netdev);
++ netif_carrier_on(netdev);
+ if (netif_msg_link(handle))
+ netdev_info(netdev, "link up\n");
+ } else {
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index b66b93f320b4..dfe247ad8475 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -5737,9 +5737,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
+ /* to avoid rule conflict, when user configure rule by ethtool,
+ * we need to clear all arfs rules
+ */
++ spin_lock_bh(&hdev->fd_rule_lock);
+ hclge_clear_arfs_rules(handle);
+
+- spin_lock_bh(&hdev->fd_rule_lock);
+ ret = hclge_fd_config_rule(hdev, rule);
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+@@ -5782,6 +5782,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
+ return ret;
+ }
+
++/* make sure being called after lock up with fd_rule_lock */
+ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
+ bool clear_list)
+ {
+@@ -5794,7 +5795,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
+ if (!hnae3_dev_fd_supported(hdev))
+ return;
+
+- spin_lock_bh(&hdev->fd_rule_lock);
+ for_each_set_bit(location, hdev->fd_bmap,
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
+@@ -5811,8 +5811,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
+ bitmap_zero(hdev->fd_bmap,
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
+ }
+-
+- spin_unlock_bh(&hdev->fd_rule_lock);
+ }
+
+ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
+@@ -6179,7 +6177,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
+ u16 flow_id, struct flow_keys *fkeys)
+ {
+ struct hclge_vport *vport = hclge_get_vport(handle);
+- struct hclge_fd_rule_tuples new_tuples;
++ struct hclge_fd_rule_tuples new_tuples = {};
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ u16 tmp_queue_id;
+@@ -6189,20 +6187,18 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+- memset(&new_tuples, 0, sizeof(new_tuples));
+- hclge_fd_get_flow_tuples(fkeys, &new_tuples);
+-
+- spin_lock_bh(&hdev->fd_rule_lock);
+-
+ /* when there is already fd rule existed add by user,
+ * arfs should not work
+ */
++ spin_lock_bh(&hdev->fd_rule_lock);
+ if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return -EOPNOTSUPP;
+ }
+
++ hclge_fd_get_flow_tuples(fkeys, &new_tuples);
++
+ /* check is there flow director filter existed for this flow,
+ * if not, create a new filter for it;
+ * if filter exist with different queue id, modify the filter;
+@@ -6287,6 +6283,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
+ #endif
+ }
+
++/* make sure being called after lock up with fd_rule_lock */
+ static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
+ {
+ #ifdef CONFIG_RFS_ACCEL
+@@ -6331,10 +6328,14 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
+
+ hdev->fd_en = enable;
+ clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
+-
+- if (!enable)
++
++ if (!enable) {
++ spin_lock_bh(&hdev->fd_rule_lock);
+ hclge_del_all_fd_entries(handle, clear);
+- else
++ spin_unlock_bh(&hdev->fd_rule_lock);
++ } else {
+ hclge_restore_fd_entries(handle);
++ }
+ }
+
+ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
+@@ -6799,8 +6800,9 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
+ int i;
+
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+-
++ spin_lock_bh(&hdev->fd_rule_lock);
+ hclge_clear_arfs_rules(handle);
++ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ /* If it is not PF reset, the firmware will disable the MAC,
+ * so it only need to stop phy here.
+@@ -8532,11 +8534,12 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ bool writen_to_tbl = false;
+ int ret = 0;
+
+- /* When device is resetting, firmware is unable to handle
+- * mailbox. Just record the vlan id, and remove it after
++ /* When device is resetting or reset failed, firmware is unable to
++ * handle mailbox. Just record the vlan id, and remove it after
+ * reset finished.
+ */
+- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
++ if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
++ test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
+ set_bit(vlan_id, vport->vlan_del_fail_bmap);
+ return -EBUSY;
+ }
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index e6cdd06925e6..0060fa643d0e 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -1322,11 +1322,12 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
+ if (proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+- /* When device is resetting, firmware is unable to handle
+- * mailbox. Just record the vlan id, and remove it after
++ /* When device is resetting or reset failed, firmware is unable to
++ * handle mailbox. Just record the vlan id, and remove it after
+ * reset finished.
+ */
+- if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
++ if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
++ test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
+ set_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ return -EBUSY;
+ }
+@@ -3142,23 +3143,36 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
+ {
+ struct hnae3_handle *nic = &hdev->nic;
+ struct hclge_vf_to_pf_msg send_msg;
++ int ret;
+
+ rtnl_lock();
+- hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+- rtnl_unlock();
++
++ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
++ test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
++ dev_warn(&hdev->pdev->dev,
++ "is resetting when updating port based vlan info\n");
++ rtnl_unlock();
++ return;
++ }
++
++ ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
++ if (ret) {
++ rtnl_unlock();
++ return;
++ }
+
+ /* send msg to PF and wait update port based vlan info */
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_PORT_BASE_VLAN_CFG);
+ memcpy(send_msg.data, port_base_vlan_info, data_size);
+- hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+-
+- if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+- nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
+- else
+- nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
++ ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
++ if (!ret) {
++ if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
++ nic->port_base_vlan_state = state;
++ else
++ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
++ }
+
+- rtnl_lock();
+ hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+ rtnl_unlock();
+ }
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 0fd7eae25fe9..5afb3c9c52d2 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -3206,7 +3206,7 @@ req_rx_irq_failed:
+ req_tx_irq_failed:
+ for (j = 0; j < i; j++) {
+ free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
+- irq_dispose_mapping(adapter->rx_scrq[j]->irq);
++ irq_dispose_mapping(adapter->tx_scrq[j]->irq);
+ }
+ release_sub_crqs(adapter, 1);
+ return rc;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index 64786568af0d..75a8c407e815 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -1730,10 +1730,12 @@ static void otx2_reset_task(struct work_struct *work)
+ if (!netif_running(pf->netdev))
+ return;
+
++ rtnl_lock();
+ otx2_stop(pf->netdev);
+ pf->reset_count++;
+ otx2_open(pf->netdev);
+ netif_trans_update(pf->netdev);
++ rtnl_unlock();
+ }
+
+ static const struct net_device_ops otx2_netdev_ops = {
+@@ -2111,6 +2113,7 @@ static void otx2_remove(struct pci_dev *pdev)
+
+ pf = netdev_priv(netdev);
+
++ cancel_work_sync(&pf->reset_task);
+ /* Disable link notifications */
+ otx2_cgx_config_linkevents(pf, false);
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+index f4227517dc8e..92a3db69a6cd 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+@@ -617,6 +617,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
+
+ vf = netdev_priv(netdev);
+
++ cancel_work_sync(&vf->reset_task);
++ unregister_netdev(netdev);
+ otx2vf_disable_mbox_intr(vf);
+
+ otx2_detach_resources(&vf->mbox);
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 09047109d0da..b743d8b56c84 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -2882,6 +2882,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
+ eth->netdev[id]->irq = eth->irq[0];
+ eth->netdev[id]->dev.of_node = np;
+
++ eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
++
+ return 0;
+
+ free_netdev:
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index c72c4e1ea383..598e222e0b90 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -4358,12 +4358,14 @@ end:
+ static void mlx4_shutdown(struct pci_dev *pdev)
+ {
+ struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
++ struct mlx4_dev *dev = persist->dev;
+
+ mlx4_info(persist->dev, "mlx4_shutdown was called\n");
+ mutex_lock(&persist->interface_state_mutex);
+ if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
+ mlx4_unload_one(pdev);
+ mutex_unlock(&persist->interface_state_mutex);
++ mlx4_pci_disable_device(dev);
+ }
+
+ static const struct pci_error_handlers mlx4_err_handler = {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
+index 951ea26d96bc..e472ed0eacfb 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
+@@ -301,6 +301,8 @@ static int mlx5e_tc_tun_parse_geneve_params(struct mlx5e_priv *priv,
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, ETH_P_TEB);
+ }
+
++ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
++
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
+index 58b13192df23..2805416c32a3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
+@@ -80,6 +80,8 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
+ gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
+ }
+
++ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
++
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+index 37b176801bcc..038a0f1cecec 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+@@ -136,6 +136,8 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
+ MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
+ be32_to_cpu(enc_keyid.key->keyid));
+
++ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
++
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index bc54913c5861..9861c9e42c0a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -422,7 +422,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
+ err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
+ &rq->wq_ctrl);
+ if (err)
+- return err;
++ goto err_rq_wq_destroy;
+
+ rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
+
+@@ -475,7 +475,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
+ err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
+ &rq->wq_ctrl);
+ if (err)
+- return err;
++ goto err_rq_wq_destroy;
+
+ rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
+
+@@ -3041,6 +3041,25 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
+ priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
+ }
+
++static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
++ enum mlx5_port_status state)
++{
++ struct mlx5_eswitch *esw = mdev->priv.eswitch;
++ int vport_admin_state;
++
++ mlx5_set_port_admin_status(mdev, state);
++
++ if (!MLX5_ESWITCH_MANAGER(mdev) || mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS)
++ return;
++
++ if (state == MLX5_PORT_UP)
++ vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO;
++ else
++ vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN;
++
++ mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state);
++}
++
+ int mlx5e_open_locked(struct net_device *netdev)
+ {
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+@@ -3073,7 +3092,7 @@ int mlx5e_open(struct net_device *netdev)
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_open_locked(netdev);
+ if (!err)
+- mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
++ mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+@@ -3107,7 +3126,7 @@ int mlx5e_close(struct net_device *netdev)
+ return -ENODEV;
+
+ mutex_lock(&priv->state_lock);
+- mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
++ mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN);
+ err = mlx5e_close_locked(netdev);
+ mutex_unlock(&priv->state_lock);
+
+@@ -5185,7 +5204,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
+
+ /* Marking the link as currently not needed by the Driver */
+ if (!netif_running(netdev))
+- mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
++ mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);
+
+ mlx5e_set_netdev_mtu_boundaries(priv);
+ mlx5e_set_dev_port_mtu(priv);
+@@ -5395,6 +5414,8 @@ err_cleanup_tx:
+ profile->cleanup_tx(priv);
+
+ out:
++ set_bit(MLX5E_STATE_DESTROYING, &priv->state);
++ cancel_work_sync(&priv->update_stats_work);
+ return err;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index 4a8e0dfdc5f2..e93d7430c1a3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -1922,6 +1922,8 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
+ INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
+ mlx5e_tc_reoffload_flows_work);
+
++ mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
++ 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
+ mlx5_lag_add(mdev, netdev);
+ priv->events_nb.notifier_call = uplink_rep_async_event;
+ mlx5_notifier_register(mdev, &priv->events_nb);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 10f705761666..c0f54d2d4925 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -2256,6 +2256,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
+ match.key->vlan_priority);
+
+ *match_level = MLX5_MATCH_L2;
++ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+ }
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 7f618a443bfd..77a1ac1b1cc1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -2161,7 +2161,7 @@ abort:
+ mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
+ }
+-
++ esw_destroy_tsar(esw);
+ return err;
+ }
+
+@@ -2206,8 +2206,6 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
+ else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
+ esw_offloads_disable(esw);
+
+- esw_destroy_tsar(esw);
+-
+ old_mode = esw->mode;
+ esw->mode = MLX5_ESWITCH_NONE;
+
+@@ -2217,6 +2215,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
+ mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
+ }
++ esw_destroy_tsar(esw);
++
+ if (clear_vf)
+ mlx5_eswitch_clear_vf_vports_info(esw);
+ }
+@@ -2374,6 +2374,8 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
+ u16 vport, int link_state)
+ {
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
++ int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
++ int other_vport = 1;
+ int err = 0;
+
+ if (!ESW_ALLOWED(esw))
+@@ -2381,15 +2383,17 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
+ if (IS_ERR(evport))
+ return PTR_ERR(evport);
+
++ if (vport == MLX5_VPORT_UPLINK) {
++ opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
++ other_vport = 0;
++ vport = 0;
++ }
+ mutex_lock(&esw->state_lock);
+
+- err = mlx5_modify_vport_admin_state(esw->dev,
+- MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
+- vport, 1, link_state);
++ err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
+ if (err) {
+- mlx5_core_warn(esw->dev,
+- "Failed to set vport %d link state, err = %d",
+- vport, err);
++ mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
++ vport, opmod, err);
+ goto unlock;
+ }
+
+@@ -2431,8 +2435,6 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+ int err = 0;
+
+- if (!ESW_ALLOWED(esw))
+- return -EPERM;
1580 + if (IS_ERR(evport))
1581 + return PTR_ERR(evport);
1582 + if (vlan > 4095 || qos > 7)
1583 +@@ -2460,6 +2462,9 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
1584 + u8 set_flags = 0;
1585 + int err;
1586 +
1587 ++ if (!ESW_ALLOWED(esw))
1588 ++ return -EPERM;
1589 ++
1590 + if (vlan || qos)
1591 + set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
1592 +
1593 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
1594 +index c1848b57f61c..56d2a1ab9378 100644
1595 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
1596 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
1597 +@@ -684,6 +684,8 @@ static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { r
1598 + static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
1599 + static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
1600 + static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
1601 ++static inline
1602 ++int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
1603 + static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
1604 + {
1605 + return ERR_PTR(-EOPNOTSUPP);
1606 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1607 +index 5d9def18ae3a..cfc52521d775 100644
1608 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1609 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1610 +@@ -264,9 +264,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
1611 + mlx5_eswitch_get_vport_metadata_mask());
1612 +
1613 + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
1614 +- misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
1615 +- if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
1616 +- spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
1617 + } else {
1618 + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
1619 + MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
1620 +@@ -381,6 +378,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
1621 + flow_act.modify_hdr = attr->modify_hdr;
1622 +
1623 + if (split) {
1624 ++ if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
1625 ++ attr->in_rep->vport == MLX5_VPORT_UPLINK)
1626 ++ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
1627 + fdb = esw_vport_tbl_get(esw, attr);
1628 + } else {
1629 + if (attr->chain || attr->prio)
1630 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
1631 +index 43f97601b500..1d9a5117f90b 100644
1632 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
1633 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
1634 +@@ -252,17 +252,17 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
1635 + if (rq->extts.index >= clock->ptp_info.n_pins)
1636 + return -EINVAL;
1637 +
1638 ++ pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
1639 ++ if (pin < 0)
1640 ++ return -EBUSY;
1641 ++
1642 + if (on) {
1643 +- pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
1644 +- if (pin < 0)
1645 +- return -EBUSY;
1646 + pin_mode = MLX5_PIN_MODE_IN;
1647 + pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
1648 + field_select = MLX5_MTPPS_FS_PIN_MODE |
1649 + MLX5_MTPPS_FS_PATTERN |
1650 + MLX5_MTPPS_FS_ENABLE;
1651 + } else {
1652 +- pin = rq->extts.index;
1653 + field_select = MLX5_MTPPS_FS_ENABLE;
1654 + }
1655 +
1656 +@@ -310,12 +310,12 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
1657 + if (rq->perout.index >= clock->ptp_info.n_pins)
1658 + return -EINVAL;
1659 +
1660 +- if (on) {
1661 +- pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
1662 +- rq->perout.index);
1663 +- if (pin < 0)
1664 +- return -EBUSY;
1665 ++ pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
1666 ++ rq->perout.index);
1667 ++ if (pin < 0)
1668 ++ return -EBUSY;
1669 +
1670 ++ if (on) {
1671 + pin_mode = MLX5_PIN_MODE_OUT;
1672 + pattern = MLX5_OUT_PATTERN_PERIODIC;
1673 + ts.tv_sec = rq->perout.period.sec;
1674 +@@ -341,7 +341,6 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
1675 + MLX5_MTPPS_FS_ENABLE |
1676 + MLX5_MTPPS_FS_TIME_STAMP;
1677 + } else {
1678 +- pin = rq->perout.index;
1679 + field_select = MLX5_MTPPS_FS_ENABLE;
1680 + }
1681 +
1682 +@@ -388,10 +387,31 @@ static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
1683 + return 0;
1684 + }
1685 +
1686 ++enum {
1687 ++ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
1688 ++ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
1689 ++};
1690 ++
1691 + static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
1692 + enum ptp_pin_function func, unsigned int chan)
1693 + {
1694 +- return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
1695 ++ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
1696 ++ ptp_info);
1697 ++
1698 ++ switch (func) {
1699 ++ case PTP_PF_NONE:
1700 ++ return 0;
1701 ++ case PTP_PF_EXTTS:
1702 ++ return !(clock->pps_info.pin_caps[pin] &
1703 ++ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
1704 ++ case PTP_PF_PEROUT:
1705 ++ return !(clock->pps_info.pin_caps[pin] &
1706 ++ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
1707 ++ default:
1708 ++ return -EOPNOTSUPP;
1709 ++ }
1710 ++
1711 ++ return -EOPNOTSUPP;
1712 + }
1713 +
1714 + static const struct ptp_clock_info mlx5_ptp_clock_info = {
1715 +@@ -411,6 +431,38 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
1716 + .verify = NULL,
1717 + };
1718 +
1719 ++static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
1720 ++ u32 *mtpps, u32 mtpps_size)
1721 ++{
1722 ++ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};
1723 ++
1724 ++ MLX5_SET(mtpps_reg, in, pin, pin);
1725 ++
1726 ++ return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
1727 ++ mtpps_size, MLX5_REG_MTPPS, 0, 0);
1728 ++}
1729 ++
1730 ++static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
1731 ++{
1732 ++ struct mlx5_core_dev *mdev = clock->mdev;
1733 ++ u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
1734 ++ u8 mode;
1735 ++ int err;
1736 ++
1737 ++ err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
1738 ++ if (err || !MLX5_GET(mtpps_reg, out, enable))
1739 ++ return PTP_PF_NONE;
1740 ++
1741 ++ mode = MLX5_GET(mtpps_reg, out, pin_mode);
1742 ++
1743 ++ if (mode == MLX5_PIN_MODE_IN)
1744 ++ return PTP_PF_EXTTS;
1745 ++ else if (mode == MLX5_PIN_MODE_OUT)
1746 ++ return PTP_PF_PEROUT;
1747 ++
1748 ++ return PTP_PF_NONE;
1749 ++}
1750 ++
1751 + static int mlx5_init_pin_config(struct mlx5_clock *clock)
1752 + {
1753 + int i;
1754 +@@ -430,8 +482,8 @@ static int mlx5_init_pin_config(struct mlx5_clock *clock)
1755 + sizeof(clock->ptp_info.pin_config[i].name),
1756 + "mlx5_pps%d", i);
1757 + clock->ptp_info.pin_config[i].index = i;
1758 +- clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
1759 +- clock->ptp_info.pin_config[i].chan = i;
1760 ++ clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
1761 ++ clock->ptp_info.pin_config[i].chan = 0;
1762 + }
1763 +
1764 + return 0;
1765 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
1766 +index d6d6fe64887b..71b6185b4904 100644
1767 +--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
1768 ++++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
1769 +@@ -1814,7 +1814,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
1770 + err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
1771 + bulk_list, cb, cb_priv, tid);
1772 + if (err) {
1773 +- kfree(trans);
1774 ++ kfree_rcu(trans, rcu);
1775 + return err;
1776 + }
1777 + return 0;
1778 +@@ -2051,11 +2051,13 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
1779 + break;
1780 + }
1781 + }
1782 +- rcu_read_unlock();
1783 +- if (!found)
1784 ++ if (!found) {
1785 ++ rcu_read_unlock();
1786 + goto drop;
1787 ++ }
1788 +
1789 + rxl->func(skb, local_port, rxl_item->priv);
1790 ++ rcu_read_unlock();
1791 + return;
1792 +
1793 + drop:
1794 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
1795 +index 84b3d78a9dd8..ac1a63fe0899 100644
1796 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
1797 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
1798 +@@ -8072,16 +8072,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
1799 + mlxsw_sp->router = router;
1800 + router->mlxsw_sp = mlxsw_sp;
1801 +
1802 +- router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
1803 +- err = register_inetaddr_notifier(&router->inetaddr_nb);
1804 +- if (err)
1805 +- goto err_register_inetaddr_notifier;
1806 +-
1807 +- router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
1808 +- err = register_inet6addr_notifier(&router->inet6addr_nb);
1809 +- if (err)
1810 +- goto err_register_inet6addr_notifier;
1811 +-
1812 + INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
1813 + err = __mlxsw_sp_router_init(mlxsw_sp);
1814 + if (err)
1815 +@@ -8122,12 +8112,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
1816 + if (err)
1817 + goto err_neigh_init;
1818 +
1819 +- mlxsw_sp->router->netevent_nb.notifier_call =
1820 +- mlxsw_sp_router_netevent_event;
1821 +- err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
1822 +- if (err)
1823 +- goto err_register_netevent_notifier;
1824 +-
1825 + err = mlxsw_sp_mp_hash_init(mlxsw_sp);
1826 + if (err)
1827 + goto err_mp_hash_init;
1828 +@@ -8136,6 +8120,22 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
1829 + if (err)
1830 + goto err_dscp_init;
1831 +
1832 ++ router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
1833 ++ err = register_inetaddr_notifier(&router->inetaddr_nb);
1834 ++ if (err)
1835 ++ goto err_register_inetaddr_notifier;
1836 ++
1837 ++ router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
1838 ++ err = register_inet6addr_notifier(&router->inet6addr_nb);
1839 ++ if (err)
1840 ++ goto err_register_inet6addr_notifier;
1841 ++
1842 ++ mlxsw_sp->router->netevent_nb.notifier_call =
1843 ++ mlxsw_sp_router_netevent_event;
1844 ++ err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
1845 ++ if (err)
1846 ++ goto err_register_netevent_notifier;
1847 ++
1848 + mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
1849 + err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
1850 + &mlxsw_sp->router->fib_nb,
1851 +@@ -8146,10 +8146,15 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
1852 + return 0;
1853 +
1854 + err_register_fib_notifier:
1855 +-err_dscp_init:
1856 +-err_mp_hash_init:
1857 + unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
1858 + err_register_netevent_notifier:
1859 ++ unregister_inet6addr_notifier(&router->inet6addr_nb);
1860 ++err_register_inet6addr_notifier:
1861 ++ unregister_inetaddr_notifier(&router->inetaddr_nb);
1862 ++err_register_inetaddr_notifier:
1863 ++ mlxsw_core_flush_owq();
1864 ++err_dscp_init:
1865 ++err_mp_hash_init:
1866 + mlxsw_sp_neigh_fini(mlxsw_sp);
1867 + err_neigh_init:
1868 + mlxsw_sp_vrs_fini(mlxsw_sp);
1869 +@@ -8168,10 +8173,6 @@ err_ipips_init:
1870 + err_rifs_init:
1871 + __mlxsw_sp_router_fini(mlxsw_sp);
1872 + err_router_init:
1873 +- unregister_inet6addr_notifier(&router->inet6addr_nb);
1874 +-err_register_inet6addr_notifier:
1875 +- unregister_inetaddr_notifier(&router->inetaddr_nb);
1876 +-err_register_inetaddr_notifier:
1877 + mutex_destroy(&mlxsw_sp->router->lock);
1878 + kfree(mlxsw_sp->router);
1879 + return err;
1880 +@@ -8182,6 +8183,9 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
1881 + unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
1882 + &mlxsw_sp->router->fib_nb);
1883 + unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
1884 ++ unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
1885 ++ unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
1886 ++ mlxsw_core_flush_owq();
1887 + mlxsw_sp_neigh_fini(mlxsw_sp);
1888 + mlxsw_sp_vrs_fini(mlxsw_sp);
1889 + mlxsw_sp_mr_fini(mlxsw_sp);
1890 +@@ -8191,8 +8195,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
1891 + mlxsw_sp_ipips_fini(mlxsw_sp);
1892 + mlxsw_sp_rifs_fini(mlxsw_sp);
1893 + __mlxsw_sp_router_fini(mlxsw_sp);
1894 +- unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
1895 +- unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
1896 + mutex_destroy(&mlxsw_sp->router->lock);
1897 + kfree(mlxsw_sp->router);
1898 + }
1899 +diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
1900 +index 2fdd0753b3af..0e776131a3ef 100644
1901 +--- a/drivers/net/ethernet/ni/nixge.c
1902 ++++ b/drivers/net/ethernet/ni/nixge.c
1903 +@@ -1298,19 +1298,21 @@ static int nixge_probe(struct platform_device *pdev)
1904 + netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);
1905 + err = nixge_of_get_resources(pdev);
1906 + if (err)
1907 +- return err;
1908 ++ goto free_netdev;
1909 + __nixge_hw_set_mac_address(ndev);
1910 +
1911 + priv->tx_irq = platform_get_irq_byname(pdev, "tx");
1912 + if (priv->tx_irq < 0) {
1913 + netdev_err(ndev, "could not find 'tx' irq");
1914 +- return priv->tx_irq;
1915 ++ err = priv->tx_irq;
1916 ++ goto free_netdev;
1917 + }
1918 +
1919 + priv->rx_irq = platform_get_irq_byname(pdev, "rx");
1920 + if (priv->rx_irq < 0) {
1921 + netdev_err(ndev, "could not find 'rx' irq");
1922 +- return priv->rx_irq;
1923 ++ err = priv->rx_irq;
1924 ++ goto free_netdev;
1925 + }
1926 +
1927 + priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
1928 +diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
1929 +index 2c3e9ef22129..337d971ffd92 100644
1930 +--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
1931 ++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
1932 +@@ -1959,7 +1959,7 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
1933 + netif_device_detach(lif->netdev);
1934 + err = ionic_stop(lif->netdev);
1935 + if (err)
1936 +- return err;
1937 ++ goto reset_out;
1938 + }
1939 +
1940 + if (cb)
1941 +@@ -1969,6 +1969,8 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
1942 + err = ionic_open(lif->netdev);
1943 + netif_device_attach(lif->netdev);
1944 + }
1945 ++
1946 ++reset_out:
1947 + mutex_unlock(&lif->queue_lock);
1948 +
1949 + return err;
1950 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
1951 +index 8d106063e927..666e43748a5f 100644
1952 +--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
1953 ++++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
1954 +@@ -1180,7 +1180,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
1955 + index, attn_bits, attn_acks, asserted_bits,
1956 + deasserted_bits, p_sb_attn_sw->known_attn);
1957 + } else if (asserted_bits == 0x100) {
1958 +- DP_INFO(p_hwfn, "MFW indication via attention\n");
1959 ++ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1960 ++ "MFW indication via attention\n");
1961 + } else {
1962 + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1963 + "MFW indication [deassertion]\n");
1964 +diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
1965 +index 067ad25553b9..ab335f7dab82 100644
1966 +--- a/drivers/net/ethernet/renesas/ravb_main.c
1967 ++++ b/drivers/net/ethernet/renesas/ravb_main.c
1968 +@@ -1444,6 +1444,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
1969 + struct ravb_private *priv = container_of(work, struct ravb_private,
1970 + work);
1971 + struct net_device *ndev = priv->ndev;
1972 ++ int error;
1973 +
1974 + netif_tx_stop_all_queues(ndev);
1975 +
1976 +@@ -1452,15 +1453,36 @@ static void ravb_tx_timeout_work(struct work_struct *work)
1977 + ravb_ptp_stop(ndev);
1978 +
1979 + /* Wait for DMA stopping */
1980 +- ravb_stop_dma(ndev);
1981 ++ if (ravb_stop_dma(ndev)) {
1982 ++ /* If ravb_stop_dma() fails, the hardware is still operating
1983 ++ * for TX and/or RX. So, this should not call the following
1984 ++		 * functions because ravb_dmac_init() could fail too.
1985 ++ * Also, this should not retry ravb_stop_dma() again and again
1986 ++		 * here because it could wait forever. So, this just
1987 ++		 * re-enables the TX and RX and skips the following
1988 ++ * re-initialization procedure.
1989 ++ */
1990 ++ ravb_rcv_snd_enable(ndev);
1991 ++ goto out;
1992 ++ }
1993 +
1994 + ravb_ring_free(ndev, RAVB_BE);
1995 + ravb_ring_free(ndev, RAVB_NC);
1996 +
1997 + /* Device init */
1998 +- ravb_dmac_init(ndev);
1999 ++ error = ravb_dmac_init(ndev);
2000 ++ if (error) {
2001 ++ /* If ravb_dmac_init() fails, descriptors are freed. So, this
2002 ++ * should return here to avoid re-enabling the TX and RX in
2003 ++ * ravb_emac_init().
2004 ++ */
2005 ++ netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
2006 ++ __func__, error);
2007 ++ return;
2008 ++ }
2009 + ravb_emac_init(ndev);
2010 +
2011 ++out:
2012 + /* Initialise PTP Clock driver */
2013 + if (priv->chip_id == RCAR_GEN2)
2014 + ravb_ptp_init(ndev, priv->pdev);
2015 +diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
2016 +index bb8c34d746ab..5f123a8cf68e 100644
2017 +--- a/drivers/net/usb/hso.c
2018 ++++ b/drivers/net/usb/hso.c
2019 +@@ -1390,8 +1390,9 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
2020 + unsigned long flags;
2021 +
2022 + if (old)
2023 +- hso_dbg(0x16, "Termios called with: cflags new[%d] - old[%d]\n",
2024 +- tty->termios.c_cflag, old->c_cflag);
2025 ++ hso_dbg(0x16, "Termios called with: cflags new[%u] - old[%u]\n",
2026 ++ (unsigned int)tty->termios.c_cflag,
2027 ++ (unsigned int)old->c_cflag);
2028 +
2029 + /* the actual setup */
2030 + spin_lock_irqsave(&serial->serial_lock, flags);
2031 +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
2032 +index eccbf4cd7149..ee062b27cfa7 100644
2033 +--- a/drivers/net/usb/lan78xx.c
2034 ++++ b/drivers/net/usb/lan78xx.c
2035 +@@ -3759,6 +3759,11 @@ static int lan78xx_probe(struct usb_interface *intf,
2036 + netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
2037 + netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
2038 +
2039 ++ if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
2040 ++ ret = -ENODEV;
2041 ++ goto out3;
2042 ++ }
2043 ++
2044 + dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
2045 + dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
2046 + dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
2047 +@@ -3783,6 +3788,7 @@ static int lan78xx_probe(struct usb_interface *intf,
2048 + usb_fill_int_urb(dev->urb_intr, dev->udev,
2049 + dev->pipe_intr, buf, maxp,
2050 + intr_complete, dev, period);
2051 ++ dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
2052 + }
2053 + }
2054 +
2055 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
2056 +index 779e56c43d27..6e64bc8d601f 100644
2057 +--- a/drivers/net/vxlan.c
2058 ++++ b/drivers/net/vxlan.c
2059 +@@ -2863,8 +2863,10 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
2060 + if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
2061 + continue;
2062 + /* the all_zeros_mac entry is deleted at vxlan_uninit */
2063 +- if (!is_zero_ether_addr(f->eth_addr))
2064 +- vxlan_fdb_destroy(vxlan, f, true, true);
2065 ++ if (is_zero_ether_addr(f->eth_addr) &&
2066 ++ f->vni == vxlan->cfg.vni)
2067 ++ continue;
2068 ++ vxlan_fdb_destroy(vxlan, f, true, true);
2069 + }
2070 + spin_unlock_bh(&vxlan->hash_lock[h]);
2071 + }
2072 +diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
2073 +index c84536b03aa8..f70336bb6f52 100644
2074 +--- a/drivers/net/wan/hdlc_x25.c
2075 ++++ b/drivers/net/wan/hdlc_x25.c
2076 +@@ -71,8 +71,10 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
2077 + {
2078 + unsigned char *ptr;
2079 +
2080 +- if (skb_cow(skb, 1))
2081 ++ if (skb_cow(skb, 1)) {
2082 ++ kfree_skb(skb);
2083 + return NET_RX_DROP;
2084 ++ }
2085 +
2086 + skb_push(skb, 1);
2087 + skb_reset_network_header(skb);
2088 +diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
2089 +index 284832314f31..b2868433718f 100644
2090 +--- a/drivers/net/wan/lapbether.c
2091 ++++ b/drivers/net/wan/lapbether.c
2092 +@@ -128,10 +128,12 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
2093 + {
2094 + unsigned char *ptr;
2095 +
2096 +- skb_push(skb, 1);
2097 +-
2098 +- if (skb_cow(skb, 1))
2099 ++ if (skb_cow(skb, 1)) {
2100 ++ kfree_skb(skb);
2101 + return NET_RX_DROP;
2102 ++ }
2103 ++
2104 ++ skb_push(skb, 1);
2105 +
2106 + ptr = skb->data;
2107 + *ptr = X25_IFACE_DATA;
2108 +diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
2109 +index bf2f00b89214..85b132a77787 100644
2110 +--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
2111 ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
2112 +@@ -263,6 +263,8 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
2113 + {
2114 + struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
2115 + u32 tp = le32_to_cpu(trig->time_point);
2116 ++ struct iwl_ucode_tlv *dup = NULL;
2117 ++ int ret;
2118 +
2119 + if (le32_to_cpu(tlv->length) < sizeof(*trig))
2120 + return -EINVAL;
2121 +@@ -275,10 +277,20 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
2122 + return -EINVAL;
2123 + }
2124 +
2125 +- if (!le32_to_cpu(trig->occurrences))
2126 ++ if (!le32_to_cpu(trig->occurrences)) {
2127 ++ dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length),
2128 ++ GFP_KERNEL);
2129 ++ if (!dup)
2130 ++ return -ENOMEM;
2131 ++ trig = (void *)dup->data;
2132 + trig->occurrences = cpu_to_le32(-1);
2133 ++ tlv = dup;
2134 ++ }
2135 ++
2136 ++ ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
2137 ++ kfree(dup);
2138 +
2139 +- return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
2140 ++ return ret;
2141 + }
2142 +
2143 + static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
2144 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
2145 +index b4d0795154e3..a2afd1a3c51b 100644
2146 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
2147 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
2148 +@@ -206,10 +206,11 @@ mt7615_queues_acq(struct seq_file *s, void *data)
2149 + int i;
2150 +
2151 + for (i = 0; i < 16; i++) {
2152 +- int j, acs = i / 4, index = i % 4;
2153 ++ int j, wmm_idx = i % MT7615_MAX_WMM_SETS;
2154 ++ int acs = i / MT7615_MAX_WMM_SETS;
2155 + u32 ctrl, val, qlen = 0;
2156 +
2157 +- val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index));
2158 ++ val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, wmm_idx));
2159 + ctrl = BIT(31) | BIT(15) | (acs << 8);
2160 +
2161 + for (j = 0; j < 32; j++) {
2162 +@@ -217,11 +218,11 @@ mt7615_queues_acq(struct seq_file *s, void *data)
2163 + continue;
2164 +
2165 + mt76_wr(dev, MT_PLE_FL_Q0_CTRL,
2166 +- ctrl | (j + (index << 5)));
2167 ++ ctrl | (j + (wmm_idx << 5)));
2168 + qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL,
2169 + GENMASK(11, 0));
2170 + }
2171 +- seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen);
2172 ++ seq_printf(s, "AC%d%d: queued=%d\n", wmm_idx, acs, qlen);
2173 + }
2174 +
2175 + return 0;
2176 +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
2177 +index 482c6c8b0fb7..88280057e032 100644
2178 +--- a/drivers/net/xen-netfront.c
2179 ++++ b/drivers/net/xen-netfront.c
2180 +@@ -63,6 +63,8 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644);
2181 + MODULE_PARM_DESC(max_queues,
2182 + "Maximum number of queues per virtual interface");
2183 +
2184 ++#define XENNET_TIMEOUT (5 * HZ)
2185 ++
2186 + static const struct ethtool_ops xennet_ethtool_ops;
2187 +
2188 + struct netfront_cb {
2189 +@@ -1334,12 +1336,15 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
2190 +
2191 + netif_carrier_off(netdev);
2192 +
2193 +- xenbus_switch_state(dev, XenbusStateInitialising);
2194 +- wait_event(module_wq,
2195 +- xenbus_read_driver_state(dev->otherend) !=
2196 +- XenbusStateClosed &&
2197 +- xenbus_read_driver_state(dev->otherend) !=
2198 +- XenbusStateUnknown);
2199 ++ do {
2200 ++ xenbus_switch_state(dev, XenbusStateInitialising);
2201 ++ err = wait_event_timeout(module_wq,
2202 ++ xenbus_read_driver_state(dev->otherend) !=
2203 ++ XenbusStateClosed &&
2204 ++ xenbus_read_driver_state(dev->otherend) !=
2205 ++ XenbusStateUnknown, XENNET_TIMEOUT);
2206 ++ } while (!err);
2207 ++
2208 + return netdev;
2209 +
2210 + exit:
2211 +@@ -2139,28 +2144,43 @@ static const struct attribute_group xennet_dev_group = {
2212 + };
2213 + #endif /* CONFIG_SYSFS */
2214 +
2215 +-static int xennet_remove(struct xenbus_device *dev)
2216 ++static void xennet_bus_close(struct xenbus_device *dev)
2217 + {
2218 +- struct netfront_info *info = dev_get_drvdata(&dev->dev);
2219 +-
2220 +- dev_dbg(&dev->dev, "%s\n", dev->nodename);
2221 ++ int ret;
2222 +
2223 +- if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
2224 ++ if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2225 ++ return;
2226 ++ do {
2227 + xenbus_switch_state(dev, XenbusStateClosing);
2228 +- wait_event(module_wq,
2229 +- xenbus_read_driver_state(dev->otherend) ==
2230 +- XenbusStateClosing ||
2231 +- xenbus_read_driver_state(dev->otherend) ==
2232 +- XenbusStateUnknown);
2233 ++ ret = wait_event_timeout(module_wq,
2234 ++ xenbus_read_driver_state(dev->otherend) ==
2235 ++ XenbusStateClosing ||
2236 ++ xenbus_read_driver_state(dev->otherend) ==
2237 ++ XenbusStateClosed ||
2238 ++ xenbus_read_driver_state(dev->otherend) ==
2239 ++ XenbusStateUnknown,
2240 ++ XENNET_TIMEOUT);
2241 ++ } while (!ret);
2242 ++
2243 ++ if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2244 ++ return;
2245 +
2246 ++ do {
2247 + xenbus_switch_state(dev, XenbusStateClosed);
2248 +- wait_event(module_wq,
2249 +- xenbus_read_driver_state(dev->otherend) ==
2250 +- XenbusStateClosed ||
2251 +- xenbus_read_driver_state(dev->otherend) ==
2252 +- XenbusStateUnknown);
2253 +- }
2254 ++ ret = wait_event_timeout(module_wq,
2255 ++ xenbus_read_driver_state(dev->otherend) ==
2256 ++ XenbusStateClosed ||
2257 ++ xenbus_read_driver_state(dev->otherend) ==
2258 ++ XenbusStateUnknown,
2259 ++ XENNET_TIMEOUT);
2260 ++ } while (!ret);
2261 ++}
2262 ++
2263 ++static int xennet_remove(struct xenbus_device *dev)
2264 ++{
2265 ++ struct netfront_info *info = dev_get_drvdata(&dev->dev);
2266 +
2267 ++ xennet_bus_close(dev);
2268 + xennet_disconnect_backend(info);
2269 +
2270 + if (info->netdev->reg_state == NETREG_REGISTERED)
2271 +diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c
2272 +index 91d4d5b28a7d..ba6c486d6465 100644
2273 +--- a/drivers/nfc/s3fwrn5/core.c
2274 ++++ b/drivers/nfc/s3fwrn5/core.c
2275 +@@ -198,6 +198,7 @@ int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb,
2276 + case S3FWRN5_MODE_FW:
2277 + return s3fwrn5_fw_recv_frame(ndev, skb);
2278 + default:
2279 ++ kfree_skb(skb);
2280 + return -ENODEV;
2281 + }
2282 + }
2283 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
2284 +index 137d7bcc1358..f7540a9e54fd 100644
2285 +--- a/drivers/nvme/host/core.c
2286 ++++ b/drivers/nvme/host/core.c
2287 +@@ -1106,6 +1106,9 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
2288 + int pos;
2289 + int len;
2290 +
2291 ++ if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
2292 ++ return 0;
2293 ++
2294 + c.identify.opcode = nvme_admin_identify;
2295 + c.identify.nsid = cpu_to_le32(nsid);
2296 + c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
2297 +@@ -1119,18 +1122,6 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
2298 + if (status) {
2299 + dev_warn(ctrl->device,
2300 + "Identify Descriptors failed (%d)\n", status);
2301 +- /*
2302 +- * Don't treat non-retryable errors as fatal, as we potentially
2303 +- * already have a NGUID or EUI-64. If we failed with DNR set,
2304 +- * we want to silently ignore the error as we can still
2305 +- * identify the device, but if the status has DNR set, we want
2306 +- * to propagate the error back specifically for the disk
2307 +- * revalidation flow to make sure we don't abandon the
2308 +- * device just because of a temporal retry-able error (such
2309 +- * as path of transport errors).
2310 +- */
2311 +- if (status > 0 && (status & NVME_SC_DNR))
2312 +- status = 0;
2313 + goto free_data;
2314 + }
2315 +
2316 +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
2317 +index 46f965f8c9bc..8f1b0a30fd2a 100644
2318 +--- a/drivers/nvme/host/nvme.h
2319 ++++ b/drivers/nvme/host/nvme.h
2320 +@@ -126,6 +126,13 @@ enum nvme_quirks {
2321 + * Don't change the value of the temperature threshold feature
2322 + */
2323 + NVME_QUIRK_NO_TEMP_THRESH_CHANGE = (1 << 14),
2324 ++
2325 ++ /*
2326 ++ * The controller doesn't handle the Identify Namespace
2327 ++ * Identification Descriptor list subcommand despite claiming
2328 ++ * NVMe 1.3 compliance.
2329 ++ */
2330 ++ NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15),
2331 + };
2332 +
2333 + /*
2334 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
2335 +index 4ad629eb3bc6..10d65f27879f 100644
2336 +--- a/drivers/nvme/host/pci.c
2337 ++++ b/drivers/nvme/host/pci.c
2338 +@@ -3105,6 +3105,8 @@ static const struct pci_device_id nvme_id_table[] = {
2339 + { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
2340 + .driver_data = NVME_QUIRK_IDENTIFY_CNS |
2341 + NVME_QUIRK_DISABLE_WRITE_ZEROES, },
2342 ++ { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
2343 ++ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
2344 + { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
2345 + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2346 + { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
2347 +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
2348 +index 4862fa962011..26461bf3fdcc 100644
2349 +--- a/drivers/nvme/host/tcp.c
2350 ++++ b/drivers/nvme/host/tcp.c
2351 +@@ -1392,6 +1392,9 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
2352 + }
2353 + }
2354 +
2355 ++	/* Set a 10 second timeout for icresp recvmsg */
2356 ++ queue->sock->sk->sk_rcvtimeo = 10 * HZ;
2357 ++
2358 + queue->sock->sk->sk_allocation = GFP_ATOMIC;
2359 + nvme_tcp_set_queue_io_cpu(queue);
2360 + queue->request = NULL;
2361 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2362 +index 5067562924f0..cd522dd3dd58 100644
2363 +--- a/drivers/pci/quirks.c
2364 ++++ b/drivers/pci/quirks.c
2365 +@@ -2330,6 +2330,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
2366 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
2367 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
2368 +
2369 ++static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
2370 ++{
2371 ++ pci_info(dev, "Disabling ASPM L0s/L1\n");
2372 ++ pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
2373 ++}
2374 ++
2375 ++/*
2376 ++ * ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the
2377 ++ * upstream PCIe root port when ASPM is enabled. At least L0s mode is affected;
2378 ++ * disable both L0s and L1 for now to be safe.
2379 ++ */
2380 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
2381 ++
2382 + /*
2383 + * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
2384 + * Link bit cleared after starting the link retrain process to allow this
2385 +diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
2386 +index c5d4428f1f94..2a1233b41aa4 100644
2387 +--- a/drivers/pinctrl/qcom/Kconfig
2388 ++++ b/drivers/pinctrl/qcom/Kconfig
2389 +@@ -7,6 +7,8 @@ config PINCTRL_MSM
2390 + select PINCONF
2391 + select GENERIC_PINCONF
2392 + select GPIOLIB_IRQCHIP
2393 ++ select IRQ_DOMAIN_HIERARCHY
2394 ++ select IRQ_FASTEOI_HIERARCHY_HANDLERS
2395 +
2396 + config PINCTRL_APQ8064
2397 + tristate "Qualcomm APQ8064 pin controller driver"
2398 +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
2399 +index 85858c1d56d0..4ebce5b73845 100644
2400 +--- a/drivers/pinctrl/qcom/pinctrl-msm.c
2401 ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
2402 +@@ -833,6 +833,52 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
2403 + msm_gpio_irq_clear_unmask(d, false);
2404 + }
2405 +
2406 ++/**
2407 ++ * msm_gpio_update_dual_edge_parent() - Prime next edge for IRQs handled by parent.
2408 ++ * @d: The irq data.
2409 ++ *
2410 ++ * This is much like msm_gpio_update_dual_edge_pos() but for IRQs that are
2411 ++ * normally handled by the parent irqchip. The logic here is slightly
2412 ++ * different due to what's easy to do with our parent, but in principle it's
2413 ++ * the same.
2414 ++ */
2415 ++static void msm_gpio_update_dual_edge_parent(struct irq_data *d)
2416 ++{
2417 ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
2418 ++ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
2419 ++ const struct msm_pingroup *g = &pctrl->soc->groups[d->hwirq];
2420 ++ int loop_limit = 100;
2421 ++ unsigned int val;
2422 ++ unsigned int type;
2423 ++
2424 ++ /* Read the value and make a guess about what edge we need to catch */
2425 ++ val = msm_readl_io(pctrl, g) & BIT(g->in_bit);
2426 ++ type = val ? IRQ_TYPE_EDGE_FALLING : IRQ_TYPE_EDGE_RISING;
2427 ++
2428 ++ do {
2429 ++ /* Set the parent to catch the next edge */
2430 ++ irq_chip_set_type_parent(d, type);
2431 ++
2432 ++ /*
2433 ++ * Possibly the line changed between when we last read "val"
2434 ++	 * (and decided what edge we needed) and when we set the edge.
2435 ++ * If the value didn't change (or changed and then changed
2436 ++ * back) then we're done.
2437 ++ */
2438 ++ val = msm_readl_io(pctrl, g) & BIT(g->in_bit);
2439 ++ if (type == IRQ_TYPE_EDGE_RISING) {
2440 ++ if (!val)
2441 ++ return;
2442 ++ type = IRQ_TYPE_EDGE_FALLING;
2443 ++ } else if (type == IRQ_TYPE_EDGE_FALLING) {
2444 ++ if (val)
2445 ++ return;
2446 ++ type = IRQ_TYPE_EDGE_RISING;
2447 ++ }
2448 ++ } while (loop_limit-- > 0);
2449 ++ dev_warn_once(pctrl->dev, "dual-edge irq failed to stabilize\n");
2450 ++}
2451 ++
2452 + static void msm_gpio_irq_ack(struct irq_data *d)
2453 + {
2454 + struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
2455 +@@ -841,8 +887,11 @@ static void msm_gpio_irq_ack(struct irq_data *d)
2456 + unsigned long flags;
2457 + u32 val;
2458 +
2459 +- if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
2460 ++ if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) {
2461 ++ if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
2462 ++ msm_gpio_update_dual_edge_parent(d);
2463 + return;
2464 ++ }
2465 +
2466 + g = &pctrl->soc->groups[d->hwirq];
2467 +
2468 +@@ -861,6 +910,17 @@ static void msm_gpio_irq_ack(struct irq_data *d)
2469 + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
2470 + }
2471 +
2472 ++static bool msm_gpio_needs_dual_edge_parent_workaround(struct irq_data *d,
2473 ++ unsigned int type)
2474 ++{
2475 ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
2476 ++ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
2477 ++
2478 ++ return type == IRQ_TYPE_EDGE_BOTH &&
2479 ++ pctrl->soc->wakeirq_dual_edge_errata && d->parent_data &&
2480 ++ test_bit(d->hwirq, pctrl->skip_wake_irqs);
2481 ++}
2482 ++
2483 + static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
2484 + {
2485 + struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
2486 +@@ -869,11 +929,21 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
2487 + unsigned long flags;
2488 + u32 val;
2489 +
2490 ++ if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) {
2491 ++ set_bit(d->hwirq, pctrl->dual_edge_irqs);
2492 ++ irq_set_handler_locked(d, handle_fasteoi_ack_irq);
2493 ++ msm_gpio_update_dual_edge_parent(d);
2494 ++ return 0;
2495 ++ }
2496 ++
2497 + if (d->parent_data)
2498 + irq_chip_set_type_parent(d, type);
2499 +
2500 +- if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
2501 ++ if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) {
2502 ++ clear_bit(d->hwirq, pctrl->dual_edge_irqs);
2503 ++ irq_set_handler_locked(d, handle_fasteoi_irq);
2504 + return 0;
2505 ++ }
2506 +
2507 + g = &pctrl->soc->groups[d->hwirq];
2508 +
2509 +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
2510 +index 9452da18a78b..7486fe08eb9b 100644
2511 +--- a/drivers/pinctrl/qcom/pinctrl-msm.h
2512 ++++ b/drivers/pinctrl/qcom/pinctrl-msm.h
2513 +@@ -113,6 +113,9 @@ struct msm_gpio_wakeirq_map {
2514 + * @pull_no_keeper: The SoC does not support keeper bias.
2515 + * @wakeirq_map: The map of wakeup capable GPIOs and the pin at PDC/MPM
2516 + * @nwakeirq_map: The number of entries in @wakeirq_map
2517 ++ * @wakeirq_dual_edge_errata: If true then GPIOs using the wakeirq_map need
2518 ++ * to be aware that their parent can't handle dual
2519 ++ * edge interrupts.
2520 + */
2521 + struct msm_pinctrl_soc_data {
2522 + const struct pinctrl_pin_desc *pins;
2523 +@@ -128,6 +131,7 @@ struct msm_pinctrl_soc_data {
2524 + const int *reserved_gpios;
2525 + const struct msm_gpio_wakeirq_map *wakeirq_map;
2526 + unsigned int nwakeirq_map;
2527 ++ bool wakeirq_dual_edge_errata;
2528 + };
2529 +
2530 + extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
2531 +diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c
2532 +index 1b6465a882f2..1d9acad3c1ce 100644
2533 +--- a/drivers/pinctrl/qcom/pinctrl-sc7180.c
2534 ++++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c
2535 +@@ -1147,6 +1147,7 @@ static const struct msm_pinctrl_soc_data sc7180_pinctrl = {
2536 + .ntiles = ARRAY_SIZE(sc7180_tiles),
2537 + .wakeirq_map = sc7180_pdc_map,
2538 + .nwakeirq_map = ARRAY_SIZE(sc7180_pdc_map),
2539 ++ .wakeirq_dual_edge_errata = true,
2540 + };
2541 +
2542 + static int sc7180_pinctrl_probe(struct platform_device *pdev)
2543 +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
2544 +index b8b4366f1200..887b6a47f5da 100644
2545 +--- a/drivers/scsi/scsi_lib.c
2546 ++++ b/drivers/scsi/scsi_lib.c
2547 +@@ -564,6 +564,15 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
2548 + scsi_uninit_cmd(cmd);
2549 + }
2550 +
2551 ++static void scsi_run_queue_async(struct scsi_device *sdev)
2552 ++{
2553 ++ if (scsi_target(sdev)->single_lun ||
2554 ++ !list_empty(&sdev->host->starved_list))
2555 ++ kblockd_schedule_work(&sdev->requeue_work);
2556 ++ else
2557 ++ blk_mq_run_hw_queues(sdev->request_queue, true);
2558 ++}
2559 ++
2560 + /* Returns false when no more bytes to process, true if there are more */
2561 + static bool scsi_end_request(struct request *req, blk_status_t error,
2562 + unsigned int bytes)
2563 +@@ -608,11 +617,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
2564 +
2565 + __blk_mq_end_request(req, error);
2566 +
2567 +- if (scsi_target(sdev)->single_lun ||
2568 +- !list_empty(&sdev->host->starved_list))
2569 +- kblockd_schedule_work(&sdev->requeue_work);
2570 +- else
2571 +- blk_mq_run_hw_queues(q, true);
2572 ++ scsi_run_queue_async(sdev);
2573 +
2574 + percpu_ref_put(&q->q_usage_counter);
2575 + return false;
2576 +@@ -1706,6 +1711,7 @@ out_put_budget:
2577 + */
2578 + if (req->rq_flags & RQF_DONTPREP)
2579 + scsi_mq_uninit_cmd(cmd);
2580 ++ scsi_run_queue_async(sdev);
2581 + break;
2582 + }
2583 + return ret;
2584 +diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
2585 +index 8b104f76f324..675a83659c98 100644
2586 +--- a/drivers/vhost/scsi.c
2587 ++++ b/drivers/vhost/scsi.c
2588 +@@ -1215,7 +1215,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
2589 + continue;
2590 + }
2591 +
2592 +- switch (v_req.type) {
2593 ++ switch (vhost32_to_cpu(vq, v_req.type)) {
2594 + case VIRTIO_SCSI_T_TMF:
2595 + vc.req = &v_req.tmf;
2596 + vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
2597 +diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
2598 +index 1f157d2f4952..67b002ade3e7 100644
2599 +--- a/drivers/virtio/virtio_balloon.c
2600 ++++ b/drivers/virtio/virtio_balloon.c
2601 +@@ -578,10 +578,14 @@ static int init_vqs(struct virtio_balloon *vb)
2602 + static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
2603 + {
2604 + if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
2605 +- &vb->config_read_bitmap))
2606 ++ &vb->config_read_bitmap)) {
2607 + virtio_cread(vb->vdev, struct virtio_balloon_config,
2608 + free_page_hint_cmd_id,
2609 + &vb->cmd_id_received_cache);
2610 ++ /* Legacy balloon config space is LE, unlike all other devices. */
2611 ++ if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
2612 ++ vb->cmd_id_received_cache = le32_to_cpu((__force __le32)vb->cmd_id_received_cache);
2613 ++ }
2614 +
2615 + return vb->cmd_id_received_cache;
2616 + }
2617 +diff --git a/fs/io_uring.c b/fs/io_uring.c
2618 +index d0d3efaaa4d4..4e09af1d5d22 100644
2619 +--- a/fs/io_uring.c
2620 ++++ b/fs/io_uring.c
2621 +@@ -4808,7 +4808,9 @@ static int io_timeout_remove_prep(struct io_kiocb *req,
2622 + {
2623 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
2624 + return -EINVAL;
2625 +- if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
2626 ++ if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
2627 ++ return -EINVAL;
2628 ++ if (sqe->ioprio || sqe->buf_index || sqe->len)
2629 + return -EINVAL;
2630 +
2631 + req->timeout.addr = READ_ONCE(sqe->addr);
2632 +@@ -5014,8 +5016,9 @@ static int io_async_cancel_prep(struct io_kiocb *req,
2633 + {
2634 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
2635 + return -EINVAL;
2636 +- if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
2637 +- sqe->cancel_flags)
2638 ++ if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
2639 ++ return -EINVAL;
2640 ++ if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
2641 + return -EINVAL;
2642 +
2643 + req->cancel.addr = READ_ONCE(sqe->addr);
2644 +@@ -5033,7 +5036,9 @@ static int io_async_cancel(struct io_kiocb *req)
2645 + static int io_files_update_prep(struct io_kiocb *req,
2646 + const struct io_uring_sqe *sqe)
2647 + {
2648 +- if (sqe->flags || sqe->ioprio || sqe->rw_flags)
2649 ++ if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
2650 ++ return -EINVAL;
2651 ++ if (sqe->ioprio || sqe->rw_flags)
2652 + return -EINVAL;
2653 +
2654 + req->files_update.offset = READ_ONCE(sqe->off);
2655 +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
2656 +index 69b27c7dfc3e..fb7fa1fc8e01 100644
2657 +--- a/include/linux/mlx5/mlx5_ifc.h
2658 ++++ b/include/linux/mlx5/mlx5_ifc.h
2659 +@@ -4347,6 +4347,7 @@ struct mlx5_ifc_query_vport_state_out_bits {
2660 + enum {
2661 + MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0,
2662 + MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
2663 ++ MLX5_VPORT_STATE_OP_MOD_UPLINK = 0x2,
2664 + };
2665 +
2666 + struct mlx5_ifc_arm_monitor_counter_in_bits {
2667 +diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
2668 +index 70ebef866cc8..e3def7bbe932 100644
2669 +--- a/include/linux/rhashtable.h
2670 ++++ b/include/linux/rhashtable.h
2671 +@@ -349,11 +349,11 @@ static inline void rht_unlock(struct bucket_table *tbl,
2672 + local_bh_enable();
2673 + }
2674 +
2675 +-static inline struct rhash_head __rcu *__rht_ptr(
2676 +- struct rhash_lock_head *const *bkt)
2677 ++static inline struct rhash_head *__rht_ptr(
2678 ++ struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt)
2679 + {
2680 +- return (struct rhash_head __rcu *)
2681 +- ((unsigned long)*bkt & ~BIT(0) ?:
2682 ++ return (struct rhash_head *)
2683 ++ ((unsigned long)p & ~BIT(0) ?:
2684 + (unsigned long)RHT_NULLS_MARKER(bkt));
2685 + }
2686 +
2687 +@@ -365,25 +365,26 @@ static inline struct rhash_head __rcu *__rht_ptr(
2688 + * access is guaranteed, such as when destroying the table.
2689 + */
2690 + static inline struct rhash_head *rht_ptr_rcu(
2691 +- struct rhash_lock_head *const *bkt)
2692 ++ struct rhash_lock_head *const *p)
2693 + {
2694 +- struct rhash_head __rcu *p = __rht_ptr(bkt);
2695 +-
2696 +- return rcu_dereference(p);
2697 ++ struct rhash_lock_head __rcu *const *bkt = (void *)p;
2698 ++ return __rht_ptr(rcu_dereference(*bkt), bkt);
2699 + }
2700 +
2701 + static inline struct rhash_head *rht_ptr(
2702 +- struct rhash_lock_head *const *bkt,
2703 ++ struct rhash_lock_head *const *p,
2704 + struct bucket_table *tbl,
2705 + unsigned int hash)
2706 + {
2707 +- return rht_dereference_bucket(__rht_ptr(bkt), tbl, hash);
2708 ++ struct rhash_lock_head __rcu *const *bkt = (void *)p;
2709 ++ return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
2710 + }
2711 +
2712 + static inline struct rhash_head *rht_ptr_exclusive(
2713 +- struct rhash_lock_head *const *bkt)
2714 ++ struct rhash_lock_head *const *p)
2715 + {
2716 +- return rcu_dereference_protected(__rht_ptr(bkt), 1);
2717 ++ struct rhash_lock_head __rcu *const *bkt = (void *)p;
2718 ++ return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
2719 + }
2720 +
2721 + static inline void rht_assign_locked(struct rhash_lock_head **bkt,
2722 +diff --git a/include/net/xfrm.h b/include/net/xfrm.h
2723 +index 03024701c79f..7b616e45fbfc 100644
2724 +--- a/include/net/xfrm.h
2725 ++++ b/include/net/xfrm.h
2726 +@@ -946,7 +946,7 @@ struct xfrm_dst {
2727 + static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
2728 + {
2729 + #ifdef CONFIG_XFRM
2730 +- if (dst->xfrm) {
2731 ++ if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
2732 + const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
2733 +
2734 + return xdst->path;
2735 +@@ -958,7 +958,7 @@ static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
2736 + static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
2737 + {
2738 + #ifdef CONFIG_XFRM
2739 +- if (dst->xfrm) {
2740 ++ if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
2741 + struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2742 + return xdst->child;
2743 + }
2744 +@@ -1633,13 +1633,16 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
2745 + void *);
2746 + void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
2747 + int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
2748 +-struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
2749 +- u8 type, int dir,
2750 ++struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net,
2751 ++ const struct xfrm_mark *mark,
2752 ++ u32 if_id, u8 type, int dir,
2753 + struct xfrm_selector *sel,
2754 + struct xfrm_sec_ctx *ctx, int delete,
2755 + int *err);
2756 +-struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, u8,
2757 +- int dir, u32 id, int delete, int *err);
2758 ++struct xfrm_policy *xfrm_policy_byid(struct net *net,
2759 ++ const struct xfrm_mark *mark, u32 if_id,
2760 ++ u8 type, int dir, u32 id, int delete,
2761 ++ int *err);
2762 + int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
2763 + void xfrm_policy_hash_rebuild(struct net *net);
2764 + u32 xfrm_get_acqseq(void);
2765 +diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
2766 +index 5fc10108703a..4814f1771120 100644
2767 +--- a/include/rdma/rdmavt_qp.h
2768 ++++ b/include/rdma/rdmavt_qp.h
2769 +@@ -278,6 +278,25 @@ struct rvt_rq {
2770 + spinlock_t lock ____cacheline_aligned_in_smp;
2771 + };
2772 +
2773 ++/**
2774 ++ * rvt_get_rq_count - count the number of request work queue entries
2775 ++ * in circular buffer
2776 ++ * @rq: data structure for request queue entry
2777 ++ * @head: head index of the circular buffer
2778 ++ * @tail: tail index of the circular buffer
2779 ++ *
2780 ++ * Return - total number of entries in the Receive Queue
2781 ++ */
2782 ++
2783 ++static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
2784 ++{
2785 ++ u32 count = head - tail;
2786 ++
2787 ++ if ((s32)count < 0)
2788 ++ count += rq->size;
2789 ++ return count;
2790 ++}
2791 ++
2792 + /*
2793 + * This structure holds the information that the send tasklet needs
2794 + * to send a RDMA read response or atomic operation.
2795 +diff --git a/kernel/audit.c b/kernel/audit.c
2796 +index f711f424a28a..0aa0e00e4f83 100644
2797 +--- a/kernel/audit.c
2798 ++++ b/kernel/audit.c
2799 +@@ -1811,7 +1811,6 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
2800 + }
2801 +
2802 + audit_get_stamp(ab->ctx, &t, &serial);
2803 +- audit_clear_dummy(ab->ctx);
2804 + audit_log_format(ab, "audit(%llu.%03lu:%u): ",
2805 + (unsigned long long)t.tv_sec, t.tv_nsec/1000000, serial);
2806 +
2807 +diff --git a/kernel/audit.h b/kernel/audit.h
2808 +index f0233dc40b17..ddc22878433d 100644
2809 +--- a/kernel/audit.h
2810 ++++ b/kernel/audit.h
2811 +@@ -290,13 +290,6 @@ extern int audit_signal_info_syscall(struct task_struct *t);
2812 + extern void audit_filter_inodes(struct task_struct *tsk,
2813 + struct audit_context *ctx);
2814 + extern struct list_head *audit_killed_trees(void);
2815 +-
2816 +-static inline void audit_clear_dummy(struct audit_context *ctx)
2817 +-{
2818 +- if (ctx)
2819 +- ctx->dummy = 0;
2820 +-}
2821 +-
2822 + #else /* CONFIG_AUDITSYSCALL */
2823 + #define auditsc_get_stamp(c, t, s) 0
2824 + #define audit_put_watch(w) {}
2825 +@@ -330,7 +323,6 @@ static inline int audit_signal_info_syscall(struct task_struct *t)
2826 + }
2827 +
2828 + #define audit_filter_inodes(t, c) AUDIT_DISABLED
2829 +-#define audit_clear_dummy(c) {}
2830 + #endif /* CONFIG_AUDITSYSCALL */
2831 +
2832 + extern char *audit_unpack_string(void **bufp, size_t *remain, size_t len);
2833 +diff --git a/kernel/auditsc.c b/kernel/auditsc.c
2834 +index 814406a35db1..4effe01ebbe2 100644
2835 +--- a/kernel/auditsc.c
2836 ++++ b/kernel/auditsc.c
2837 +@@ -1406,6 +1406,9 @@ static void audit_log_proctitle(void)
2838 + struct audit_context *context = audit_context();
2839 + struct audit_buffer *ab;
2840 +
2841 ++ if (!context || context->dummy)
2842 ++ return;
2843 ++
2844 + ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE);
2845 + if (!ab)
2846 + return; /* audit_panic or being filtered */
2847 +diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
2848 +index d541c8486c95..5e1ac22adf7a 100644
2849 +--- a/kernel/bpf/hashtab.c
2850 ++++ b/kernel/bpf/hashtab.c
2851 +@@ -779,15 +779,20 @@ static void htab_elem_free_rcu(struct rcu_head *head)
2852 + htab_elem_free(htab, l);
2853 + }
2854 +
2855 +-static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
2856 ++static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
2857 + {
2858 + struct bpf_map *map = &htab->map;
2859 ++ void *ptr;
2860 +
2861 + if (map->ops->map_fd_put_ptr) {
2862 +- void *ptr = fd_htab_map_get_ptr(map, l);
2863 +-
2864 ++ ptr = fd_htab_map_get_ptr(map, l);
2865 + map->ops->map_fd_put_ptr(ptr);
2866 + }
2867 ++}
2868 ++
2869 ++static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
2870 ++{
2871 ++ htab_put_fd_value(htab, l);
2872 +
2873 + if (htab_is_prealloc(htab)) {
2874 + __pcpu_freelist_push(&htab->freelist, &l->fnode);
2875 +@@ -839,6 +844,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
2876 + */
2877 + pl_new = this_cpu_ptr(htab->extra_elems);
2878 + l_new = *pl_new;
2879 ++ htab_put_fd_value(htab, old_elem);
2880 + *pl_new = old_elem;
2881 + } else {
2882 + struct pcpu_freelist_node *l;
2883 +diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
2884 +index 13cd683a658a..3f67803123be 100644
2885 +--- a/net/9p/trans_fd.c
2886 ++++ b/net/9p/trans_fd.c
2887 +@@ -362,6 +362,10 @@ static void p9_read_work(struct work_struct *work)
2888 + if (m->rreq->status == REQ_STATUS_SENT) {
2889 + list_del(&m->rreq->req_list);
2890 + p9_client_cb(m->client, m->rreq, REQ_STATUS_RCVD);
2891 ++ } else if (m->rreq->status == REQ_STATUS_FLSHD) {
2892 ++ /* Ignore replies associated with a cancelled request. */
2893 ++ p9_debug(P9_DEBUG_TRANS,
2894 ++ "Ignore replies associated with a cancelled request\n");
2895 + } else {
2896 + spin_unlock(&m->client->lock);
2897 + p9_debug(P9_DEBUG_ERROR,
2898 +@@ -703,11 +707,20 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
2899 + {
2900 + p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
2901 +
2902 ++ spin_lock(&client->lock);
2903 ++	/* Ignore the cancelled request if the message has been received
2904 ++	 * before we took the lock.
2905 ++ */
2906 ++ if (req->status == REQ_STATUS_RCVD) {
2907 ++ spin_unlock(&client->lock);
2908 ++ return 0;
2909 ++ }
2910 ++
2911 + /* we haven't received a response for oldreq,
2912 + * remove it from the list.
2913 + */
2914 +- spin_lock(&client->lock);
2915 + list_del(&req->req_list);
2916 ++ req->status = REQ_STATUS_FLSHD;
2917 + spin_unlock(&client->lock);
2918 + p9_req_put(req);
2919 +
2920 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
2921 +index b11f8d391ad8..fe75f435171c 100644
2922 +--- a/net/bluetooth/hci_event.c
2923 ++++ b/net/bluetooth/hci_event.c
2924 +@@ -1305,6 +1305,9 @@ static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
2925 + {
2926 + struct discovery_state *d = &hdev->discovery;
2927 +
2928 ++ if (len > HCI_MAX_AD_LENGTH)
2929 ++ return;
2930 ++
2931 + bacpy(&d->last_adv_addr, bdaddr);
2932 + d->last_adv_addr_type = bdaddr_type;
2933 + d->last_adv_rssi = rssi;
2934 +@@ -5317,7 +5320,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
2935 +
2936 + static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
2937 + u8 bdaddr_type, bdaddr_t *direct_addr,
2938 +- u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
2939 ++ u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
2940 ++ bool ext_adv)
2941 + {
2942 + struct discovery_state *d = &hdev->discovery;
2943 + struct smp_irk *irk;
2944 +@@ -5339,6 +5343,11 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
2945 + return;
2946 + }
2947 +
2948 ++ if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
2949 ++ bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
2950 ++ return;
2951 ++ }
2952 ++
2953 + /* Find the end of the data in case the report contains padded zero
2954 + * bytes at the end causing an invalid length value.
2955 + *
2956 +@@ -5398,7 +5407,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
2957 + */
2958 + conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
2959 + direct_addr);
2960 +- if (conn && type == LE_ADV_IND) {
2961 ++ if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
2962 + /* Store report for later inclusion by
2963 + * mgmt_device_connected
2964 + */
2965 +@@ -5452,7 +5461,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
2966 + * event or send an immediate device found event if the data
2967 + * should not be stored for later.
2968 + */
2969 +- if (!has_pending_adv_report(hdev)) {
2970 ++ if (!ext_adv && !has_pending_adv_report(hdev)) {
2971 + /* If the report will trigger a SCAN_REQ store it for
2972 + * later merging.
2973 + */
2974 +@@ -5487,7 +5496,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
2975 + /* If the new report will trigger a SCAN_REQ store it for
2976 + * later merging.
2977 + */
2978 +- if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
2979 ++ if (!ext_adv && (type == LE_ADV_IND ||
2980 ++ type == LE_ADV_SCAN_IND)) {
2981 + store_pending_adv_report(hdev, bdaddr, bdaddr_type,
2982 + rssi, flags, data, len);
2983 + return;
2984 +@@ -5527,7 +5537,7 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
2985 + rssi = ev->data[ev->length];
2986 + process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
2987 + ev->bdaddr_type, NULL, 0, rssi,
2988 +- ev->data, ev->length);
2989 ++ ev->data, ev->length, false);
2990 + } else {
2991 + bt_dev_err(hdev, "Dropping invalid advertising data");
2992 + }
2993 +@@ -5599,7 +5609,8 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
2994 + if (legacy_evt_type != LE_ADV_INVALID) {
2995 + process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
2996 + ev->bdaddr_type, NULL, 0, ev->rssi,
2997 +- ev->data, ev->length);
2998 ++ ev->data, ev->length,
2999 ++ !(evt_type & LE_EXT_ADV_LEGACY_PDU));
3000 + }
3001 +
3002 + ptr += sizeof(*ev) + ev->length;
3003 +@@ -5797,7 +5808,8 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
3004 +
3005 + process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
3006 + ev->bdaddr_type, &ev->direct_addr,
3007 +- ev->direct_addr_type, ev->rssi, NULL, 0);
3008 ++ ev->direct_addr_type, ev->rssi, NULL, 0,
3009 ++ false);
3010 +
3011 + ptr += sizeof(*ev);
3012 + }
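
The hci_event changes cap legacy advertising reports at HCI_MAX_AD_LENGTH (31 bytes) and thread an ext_adv flag through process_adv_report() so oversized extended-advertising payloads are never stored in the fixed-size pending buffer. A sketch of the bound being enforced (illustrative only):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define HCI_MAX_AD_LENGTH 31

    struct pending_report {
        uint8_t data[HCI_MAX_AD_LENGTH];
        uint8_t len;
    };

    static int store_pending(struct pending_report *p,
                             const uint8_t *data, uint8_t len)
    {
        if (len > HCI_MAX_AD_LENGTH)   /* the added guard */
            return -1;
        memcpy(p->data, data, len);
        p->len = len;
        return 0;
    }

    int main(void)
    {
        struct pending_report p;
        uint8_t ext[100] = { 0 };  /* extended adv data can exceed 31 bytes */

        printf("%d\n", store_pending(&p, ext, 31));   /* 0: fits */
        printf("%d\n", store_pending(&p, ext, 100));  /* -1: rejected */
        return 0;
    }
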
3013 +diff --git a/net/key/af_key.c b/net/key/af_key.c
3014 +index b67ed3a8486c..979c579afc63 100644
3015 +--- a/net/key/af_key.c
3016 ++++ b/net/key/af_key.c
3017 +@@ -2400,7 +2400,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
3018 + return err;
3019 + }
3020 +
3021 +- xp = xfrm_policy_bysel_ctx(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN,
3022 ++ xp = xfrm_policy_bysel_ctx(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN,
3023 + pol->sadb_x_policy_dir - 1, &sel, pol_ctx,
3024 + 1, &err);
3025 + security_xfrm_policy_free(pol_ctx);
3026 +@@ -2651,7 +2651,7 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_
3027 + return -EINVAL;
3028 +
3029 + delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2);
3030 +- xp = xfrm_policy_byid(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN,
3031 ++ xp = xfrm_policy_byid(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN,
3032 + dir, pol->sadb_x_policy_id, delete, &err);
3033 + if (xp == NULL)
3034 + return -ENOENT;
3035 +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
3036 +index 0f72813fed53..4230b483168a 100644
3037 +--- a/net/mac80211/cfg.c
3038 ++++ b/net/mac80211/cfg.c
3039 +@@ -2140,6 +2140,7 @@ static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev)
3040 + ieee80211_stop_mesh(sdata);
3041 + mutex_lock(&sdata->local->mtx);
3042 + ieee80211_vif_release_channel(sdata);
3043 ++ kfree(sdata->u.mesh.ie);
3044 + mutex_unlock(&sdata->local->mtx);
3045 +
3046 + return 0;
3047 +diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
3048 +index 117519bf33d6..aca608ae313f 100644
3049 +--- a/net/mac80211/mesh_pathtbl.c
3050 ++++ b/net/mac80211/mesh_pathtbl.c
3051 +@@ -521,6 +521,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
3052 + del_timer_sync(&mpath->timer);
3053 + atomic_dec(&sdata->u.mesh.mpaths);
3054 + atomic_dec(&tbl->entries);
3055 ++ mesh_path_flush_pending(mpath);
3056 + kfree_rcu(mpath, rcu);
3057 + }
3058 +
3059 +diff --git a/net/rds/recv.c b/net/rds/recv.c
3060 +index c8404971d5ab..aba4afe4dfed 100644
3061 +--- a/net/rds/recv.c
3062 ++++ b/net/rds/recv.c
3063 +@@ -450,12 +450,13 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
3064 + int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
3065 + {
3066 + struct rds_notifier *notifier;
3067 +- struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
3068 ++ struct rds_rdma_notify cmsg;
3069 + unsigned int count = 0, max_messages = ~0U;
3070 + unsigned long flags;
3071 + LIST_HEAD(copy);
3072 + int err = 0;
3073 +
3074 ++ memset(&cmsg, 0, sizeof(cmsg)); /* fill holes with zero */
3075 +
3076 + /* put_cmsg copies to user space and thus may sleep. We can't do this
3077 + * with rs_lock held, so first grab as many notifications as we can stuff
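
The rds change matters because '= { 0 }' only guarantees zeroed members; for an automatic object the C standard leaves padding bytes unspecified, and this struct is copied wholesale to user space via put_cmsg(), so a padding hole could leak kernel stack bytes. memset() clears every byte. A small demonstration of the distinction (layout assumptions noted in the comments):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    struct notify {
        uint64_t user_token;
        int32_t  status;
        /* on LP64 targets, 4 padding bytes typically end the struct */
    };

    int main(void)
    {
        struct notify a = { 0 };   /* members zeroed; padding unspecified */
        struct notify b;

        memset(&b, 0, sizeof(b));  /* all sizeof(b) bytes, holes included */

        /* Copying 'b' to an untrusted destination cannot leak stack
         * garbage through the hole; with 'a' that is not guaranteed. */
        printf("sizeof=%zu\n", sizeof(b));
        (void)a;
        return 0;
    }
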
3078 +diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h
3079 +index 47a756503d11..f6fe2e6cd65a 100644
3080 +--- a/net/sunrpc/sunrpc.h
3081 ++++ b/net/sunrpc/sunrpc.h
3082 +@@ -52,4 +52,5 @@ static inline int sock_is_loopback(struct sock *sk)
3083 +
3084 + int rpc_clients_notifier_register(void);
3085 + void rpc_clients_notifier_unregister(void);
3086 ++void auth_domain_cleanup(void);
3087 + #endif /* _NET_SUNRPC_SUNRPC_H */
3088 +diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
3089 +index f9edaa9174a4..236fadc4a439 100644
3090 +--- a/net/sunrpc/sunrpc_syms.c
3091 ++++ b/net/sunrpc/sunrpc_syms.c
3092 +@@ -23,6 +23,7 @@
3093 + #include <linux/sunrpc/rpc_pipe_fs.h>
3094 + #include <linux/sunrpc/xprtsock.h>
3095 +
3096 ++#include "sunrpc.h"
3097 + #include "netns.h"
3098 +
3099 + unsigned int sunrpc_net_id;
3100 +@@ -131,6 +132,7 @@ cleanup_sunrpc(void)
3101 + unregister_rpc_pipefs();
3102 + rpc_destroy_mempool();
3103 + unregister_pernet_subsys(&sunrpc_net_ops);
3104 ++ auth_domain_cleanup();
3105 + #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
3106 + rpc_unregister_sysctl();
3107 + #endif
3108 +diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
3109 +index 552617e3467b..998b196b6176 100644
3110 +--- a/net/sunrpc/svcauth.c
3111 ++++ b/net/sunrpc/svcauth.c
3112 +@@ -21,6 +21,8 @@
3113 +
3114 + #include <trace/events/sunrpc.h>
3115 +
3116 ++#include "sunrpc.h"
3117 ++
3118 + #define RPCDBG_FACILITY RPCDBG_AUTH
3119 +
3120 +
3121 +@@ -205,3 +207,26 @@ struct auth_domain *auth_domain_find(char *name)
3122 + return NULL;
3123 + }
3124 + EXPORT_SYMBOL_GPL(auth_domain_find);
3125 ++
3126 ++/**
3127 ++ * auth_domain_cleanup - check that the auth_domain table is empty
3128 ++ *
3129 ++ * On module unload the auth_domain_table must be empty. To make it
3130 ++ * easier to catch bugs which don't clean up domains properly, we
3131 ++ * warn if anything remains in the table at cleanup time.
3132 ++ *
3133 ++ * Note that we cannot proactively remove the domains at this stage.
3134 ++ * The ->release() function might be in a module that has already been
3135 ++ * unloaded.
3136 ++ */
3137 ++
3138 ++void auth_domain_cleanup(void)
3139 ++{
3140 ++ int h;
3141 ++ struct auth_domain *hp;
3142 ++
3143 ++ for (h = 0; h < DN_HASHMAX; h++)
3144 ++ hlist_for_each_entry(hp, &auth_domain_table[h], hash)
3145 ++ pr_warn("svc: domain %s still present at module unload.\n",
3146 ++ hp->name);
3147 ++}
3148 +diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
3149 +index 0285aaa1e93c..3d424e80f16d 100644
3150 +--- a/net/x25/x25_subr.c
3151 ++++ b/net/x25/x25_subr.c
3152 +@@ -363,6 +363,12 @@ void x25_disconnect(struct sock *sk, int reason, unsigned char cause,
3153 + x25->neighbour = NULL;
3154 + read_unlock_bh(&x25_list_lock);
3155 + }
3156 ++ if (x25->neighbour) {
3157 ++ read_lock_bh(&x25_list_lock);
3158 ++ x25_neigh_put(x25->neighbour);
3159 ++ x25->neighbour = NULL;
3160 ++ read_unlock_bh(&x25_list_lock);
3161 ++ }
3162 + }
3163 +
3164 + /*
3165 +diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
3166 +index 5a0ff665b71a..19396f3655c0 100644
3167 +--- a/net/xfrm/espintcp.c
3168 ++++ b/net/xfrm/espintcp.c
3169 +@@ -41,9 +41,32 @@ static void espintcp_rcv(struct strparser *strp, struct sk_buff *skb)
3170 + struct espintcp_ctx *ctx = container_of(strp, struct espintcp_ctx,
3171 + strp);
3172 + struct strp_msg *rxm = strp_msg(skb);
3173 ++ int len = rxm->full_len - 2;
3174 + u32 nonesp_marker;
3175 + int err;
3176 +
3177 ++ /* keepalive packet? */
3178 ++ if (unlikely(len == 1)) {
3179 ++ u8 data;
3180 ++
3181 ++ err = skb_copy_bits(skb, rxm->offset + 2, &data, 1);
3182 ++ if (err < 0) {
3183 ++ kfree_skb(skb);
3184 ++ return;
3185 ++ }
3186 ++
3187 ++ if (data == 0xff) {
3188 ++ kfree_skb(skb);
3189 ++ return;
3190 ++ }
3191 ++ }
3192 ++
3193 ++ /* drop other short messages */
3194 ++ if (unlikely(len <= sizeof(nonesp_marker))) {
3195 ++ kfree_skb(skb);
3196 ++ return;
3197 ++ }
3198 ++
3199 + err = skb_copy_bits(skb, rxm->offset + 2, &nonesp_marker,
3200 + sizeof(nonesp_marker));
3201 + if (err < 0) {
3202 +@@ -83,7 +106,7 @@ static int espintcp_parse(struct strparser *strp, struct sk_buff *skb)
3203 + return err;
3204 +
3205 + len = be16_to_cpu(blen);
3206 +- if (len < 6)
3207 ++ if (len < 2)
3208 + return -EINVAL;
3209 +
3210 + return len;
3211 +@@ -101,8 +124,11 @@ static int espintcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
3212 + flags |= nonblock ? MSG_DONTWAIT : 0;
3213 +
3214 + skb = __skb_recv_datagram(sk, &ctx->ike_queue, flags, &off, &err);
3215 +- if (!skb)
3216 ++ if (!skb) {
3217 ++ if (err == -EAGAIN && sk->sk_shutdown & RCV_SHUTDOWN)
3218 ++ return 0;
3219 + return err;
3220 ++ }
3221 +
3222 + copied = len;
3223 + if (copied > skb->len)
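
The espintcp hunks teach the receive path about TCP-encapsulated keepalives: each message starts with a 2-byte big-endian length that counts itself, a 1-byte payload of 0xff is a NAT keepalive to be dropped silently, and anything else shorter than the 4-byte non-ESP marker is discarded; lowering the parser minimum from 6 to 2 is what lets the 3-byte keepalive frame through to that logic. A sketch of the classification (framing as described above, names hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    enum verdict { DROP_KEEPALIVE, DROP_SHORT, DELIVER };

    static enum verdict classify(const uint8_t *frame)
    {
        uint16_t blen = (uint16_t)(frame[0] << 8 | frame[1]);
        int payload = blen - 2;          /* bytes after the length field */

        if (payload == 1 && frame[2] == 0xff)
            return DROP_KEEPALIVE;
        if (payload <= 4)    /* shorter than the 4-byte non-ESP marker */
            return DROP_SHORT;
        return DELIVER;
    }

    int main(void)
    {
        uint8_t keepalive[] = { 0x00, 0x03, 0xff };
        uint8_t runt[]      = { 0x00, 0x04, 0x00, 0x00 };

        printf("%d %d\n", classify(keepalive), classify(runt)); /* 0 1 */
        return 0;
    }
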
3224 +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
3225 +index 564aa6492e7c..6847b3579f54 100644
3226 +--- a/net/xfrm/xfrm_policy.c
3227 ++++ b/net/xfrm/xfrm_policy.c
3228 +@@ -1433,14 +1433,10 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
3229 + spin_unlock_bh(&pq->hold_queue.lock);
3230 + }
3231 +
3232 +-static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
3233 +- struct xfrm_policy *pol)
3234 ++static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
3235 ++ struct xfrm_policy *pol)
3236 + {
3237 +- if (policy->mark.v == pol->mark.v &&
3238 +- policy->priority == pol->priority)
3239 +- return true;
3240 +-
3241 +- return false;
3242 ++ return mark->v == pol->mark.v && mark->m == pol->mark.m;
3243 + }
3244 +
3245 + static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
3246 +@@ -1503,7 +1499,7 @@ static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
3247 + if (pol->type == policy->type &&
3248 + pol->if_id == policy->if_id &&
3249 + !selector_cmp(&pol->selector, &policy->selector) &&
3250 +- xfrm_policy_mark_match(policy, pol) &&
3251 ++ xfrm_policy_mark_match(&policy->mark, pol) &&
3252 + xfrm_sec_ctx_match(pol->security, policy->security) &&
3253 + !WARN_ON(delpol)) {
3254 + delpol = pol;
3255 +@@ -1538,7 +1534,7 @@ static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
3256 + if (pol->type == policy->type &&
3257 + pol->if_id == policy->if_id &&
3258 + !selector_cmp(&pol->selector, &policy->selector) &&
3259 +- xfrm_policy_mark_match(policy, pol) &&
3260 ++ xfrm_policy_mark_match(&policy->mark, pol) &&
3261 + xfrm_sec_ctx_match(pol->security, policy->security) &&
3262 + !WARN_ON(delpol)) {
3263 + if (excl)
3264 +@@ -1610,9 +1606,8 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
3265 + EXPORT_SYMBOL(xfrm_policy_insert);
3266 +
3267 + static struct xfrm_policy *
3268 +-__xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
3269 +- u8 type, int dir,
3270 +- struct xfrm_selector *sel,
3271 ++__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
3272 ++ u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
3273 + struct xfrm_sec_ctx *ctx)
3274 + {
3275 + struct xfrm_policy *pol;
3276 +@@ -1623,7 +1618,7 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
3277 + hlist_for_each_entry(pol, chain, bydst) {
3278 + if (pol->type == type &&
3279 + pol->if_id == if_id &&
3280 +- (mark & pol->mark.m) == pol->mark.v &&
3281 ++ xfrm_policy_mark_match(mark, pol) &&
3282 + !selector_cmp(sel, &pol->selector) &&
3283 + xfrm_sec_ctx_match(ctx, pol->security))
3284 + return pol;
3285 +@@ -1632,11 +1627,10 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
3286 + return NULL;
3287 + }
3288 +
3289 +-struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
3290 +- u8 type, int dir,
3291 +- struct xfrm_selector *sel,
3292 +- struct xfrm_sec_ctx *ctx, int delete,
3293 +- int *err)
3294 ++struct xfrm_policy *
3295 ++xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
3296 ++ u8 type, int dir, struct xfrm_selector *sel,
3297 ++ struct xfrm_sec_ctx *ctx, int delete, int *err)
3298 + {
3299 + struct xfrm_pol_inexact_bin *bin = NULL;
3300 + struct xfrm_policy *pol, *ret = NULL;
3301 +@@ -1703,9 +1697,9 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
3302 + }
3303 + EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
3304 +
3305 +-struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
3306 +- u8 type, int dir, u32 id, int delete,
3307 +- int *err)
3308 ++struct xfrm_policy *
3309 ++xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
3310 ++ u8 type, int dir, u32 id, int delete, int *err)
3311 + {
3312 + struct xfrm_policy *pol, *ret;
3313 + struct hlist_head *chain;
3314 +@@ -1720,8 +1714,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
3315 + ret = NULL;
3316 + hlist_for_each_entry(pol, chain, byidx) {
3317 + if (pol->type == type && pol->index == id &&
3318 +- pol->if_id == if_id &&
3319 +- (mark & pol->mark.m) == pol->mark.v) {
3320 ++ pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
3321 + xfrm_pol_hold(pol);
3322 + if (delete) {
3323 + *err = security_xfrm_policy_delete(
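
The xfrm_policy rework changes what a "matching mark" means for lookup and delete: the old test masked the caller's mark value with the policy's own mask, so a request could match policies whose masks differ; the new helper requires the exact (value, mask) pair, the same criterion insertion already uses for duplicate detection, and the callers below are converted to pass the full struct. A compilable comparison of the two predicates:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    struct xfrm_mark { uint32_t v, m; };

    static bool old_match(uint32_t mark, const struct xfrm_mark *pol)
    {
        return (mark & pol->m) == pol->v;   /* pre-patch test */
    }

    static bool new_match(const struct xfrm_mark *mark,
                          const struct xfrm_mark *pol)
    {
        return mark->v == pol->v && mark->m == pol->m;
    }

    int main(void)
    {
        struct xfrm_mark pol = { .v = 0x1, .m = 0xf  };
        struct xfrm_mark req = { .v = 0x1, .m = 0xff };  /* other mask */

        /* old: 1 (could delete the wrong policy); new: 0 */
        printf("old=%d new=%d\n",
               old_match(req.v, &pol), new_match(&req, &pol));
        return 0;
    }
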
3324 +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
3325 +index e6cfaa680ef3..fbb7d9d06478 100644
3326 +--- a/net/xfrm/xfrm_user.c
3327 ++++ b/net/xfrm/xfrm_user.c
3328 +@@ -1863,7 +1863,6 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
3329 + struct km_event c;
3330 + int delete;
3331 + struct xfrm_mark m;
3332 +- u32 mark = xfrm_mark_get(attrs, &m);
3333 + u32 if_id = 0;
3334 +
3335 + p = nlmsg_data(nlh);
3336 +@@ -1880,8 +1879,11 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
3337 + if (attrs[XFRMA_IF_ID])
3338 + if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
3339 +
3340 ++ xfrm_mark_get(attrs, &m);
3341 ++
3342 + if (p->index)
3343 +- xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, delete, &err);
3344 ++ xp = xfrm_policy_byid(net, &m, if_id, type, p->dir,
3345 ++ p->index, delete, &err);
3346 + else {
3347 + struct nlattr *rt = attrs[XFRMA_SEC_CTX];
3348 + struct xfrm_sec_ctx *ctx;
3349 +@@ -1898,8 +1900,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
3350 + if (err)
3351 + return err;
3352 + }
3353 +- xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, &p->sel,
3354 +- ctx, delete, &err);
3355 ++ xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
3356 ++ &p->sel, ctx, delete, &err);
3357 + security_xfrm_policy_free(ctx);
3358 + }
3359 + if (xp == NULL)
3360 +@@ -2166,7 +2168,6 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
3361 + u8 type = XFRM_POLICY_TYPE_MAIN;
3362 + int err = -ENOENT;
3363 + struct xfrm_mark m;
3364 +- u32 mark = xfrm_mark_get(attrs, &m);
3365 + u32 if_id = 0;
3366 +
3367 + err = copy_from_user_policy_type(&type, attrs);
3368 +@@ -2180,8 +2181,11 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
3369 + if (attrs[XFRMA_IF_ID])
3370 + if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
3371 +
3372 ++ xfrm_mark_get(attrs, &m);
3373 ++
3374 + if (p->index)
3375 +- xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, 0, &err);
3376 ++ xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index,
3377 ++ 0, &err);
3378 + else {
3379 + struct nlattr *rt = attrs[XFRMA_SEC_CTX];
3380 + struct xfrm_sec_ctx *ctx;
3381 +@@ -2198,7 +2202,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
3382 + if (err)
3383 + return err;
3384 + }
3385 +- xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir,
3386 ++ xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
3387 + &p->sel, ctx, 0, &err);
3388 + security_xfrm_policy_free(ctx);
3389 + }
3390 +diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
3391 +index 82e26442724b..a356fb0e5773 100644
3392 +--- a/sound/pci/hda/hda_controller.h
3393 ++++ b/sound/pci/hda/hda_controller.h
3394 +@@ -41,7 +41,7 @@
3395 + /* 24 unused */
3396 + #define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
3397 + #define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
3398 +-/* 27 unused */
3399 ++#define AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP (1 << 27) /* Workaround for spurious wakeups after suspend */
3400 + #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */
3401 + #define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */
3402 + #define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */
3403 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
3404 +index 11ec5c56c80e..9d14c40c07ea 100644
3405 +--- a/sound/pci/hda/hda_intel.c
3406 ++++ b/sound/pci/hda/hda_intel.c
3407 +@@ -298,7 +298,8 @@ enum {
3408 + /* PCH for HSW/BDW; with runtime PM */
3409 + /* no i915 binding for this as HSW/BDW has another controller for HDMI */
3410 + #define AZX_DCAPS_INTEL_PCH \
3411 +- (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME)
3412 ++ (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
3413 ++ AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
3414 +
3415 + /* HSW HDMI */
3416 + #define AZX_DCAPS_INTEL_HASWELL \
3417 +@@ -1028,7 +1029,14 @@ static int azx_suspend(struct device *dev)
3418 + chip = card->private_data;
3419 + bus = azx_bus(chip);
3420 + snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
3421 +- pm_runtime_force_suspend(dev);
3422 ++ /* An ugly workaround: direct call of __azx_runtime_suspend() and
3423 ++ * __azx_runtime_resume() for old Intel platforms that suffer from
3424 ++ * spurious wakeups after S3 suspend
3425 ++ */
3426 ++ if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
3427 ++ __azx_runtime_suspend(chip);
3428 ++ else
3429 ++ pm_runtime_force_suspend(dev);
3430 + if (bus->irq >= 0) {
3431 + free_irq(bus->irq, chip);
3432 + bus->irq = -1;
3433 +@@ -1057,7 +1065,10 @@ static int azx_resume(struct device *dev)
3434 + if (azx_acquire_irq(chip, 1) < 0)
3435 + return -EIO;
3436 +
3437 +- pm_runtime_force_resume(dev);
3438 ++ if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
3439 ++ __azx_runtime_resume(chip, false);
3440 ++ else
3441 ++ pm_runtime_force_resume(dev);
3442 + snd_power_change_state(card, SNDRV_CTL_POWER_D0);
3443 +
3444 + trace_azx_resume(chip);
3445 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
3446 +index e821c9df8107..37391c3d2f47 100644
3447 +--- a/sound/pci/hda/patch_hdmi.c
3448 ++++ b/sound/pci/hda/patch_hdmi.c
3449 +@@ -2439,6 +2439,7 @@ static void generic_acomp_notifier_set(struct drm_audio_component *acomp,
3450 + mutex_lock(&spec->bind_lock);
3451 + spec->use_acomp_notifier = use_acomp;
3452 + spec->codec->relaxed_resume = use_acomp;
3453 ++ spec->codec->bus->keep_power = 0;
3454 + /* reprogram each jack detection logic depending on the notifier */
3455 + for (i = 0; i < spec->num_pins; i++)
3456 + reprogram_jack_detect(spec->codec,
3457 +@@ -2533,7 +2534,6 @@ static void generic_acomp_init(struct hda_codec *codec,
3458 + if (!snd_hdac_acomp_init(&codec->bus->core, &spec->drm_audio_ops,
3459 + match_bound_vga, 0)) {
3460 + spec->acomp_registered = true;
3461 +- codec->bus->keep_power = 0;
3462 + }
3463 + }
3464 +
3465 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3466 +index 27dd8945d6e6..d8d018536484 100644
3467 +--- a/sound/pci/hda/patch_realtek.c
3468 ++++ b/sound/pci/hda/patch_realtek.c
3469 +@@ -5940,6 +5940,16 @@ static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
3470 + snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
3471 + }
3472 +
3473 ++static void alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec,
3474 ++ const struct hda_fixup *fix, int action)
3475 ++{
3476 ++ if (action != HDA_FIXUP_ACT_INIT)
3477 ++ return;
3478 ++
3479 ++ msleep(100);
3480 ++ alc_write_coef_idx(codec, 0x65, 0x0);
3481 ++}
3482 ++
3483 + /* for hda_fixup_thinkpad_acpi() */
3484 + #include "thinkpad_helper.c"
3485 +
3486 +@@ -6117,8 +6127,10 @@ enum {
3487 + ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS,
3488 + ALC269VC_FIXUP_ACER_HEADSET_MIC,
3489 + ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE,
3490 +- ALC289_FIXUP_ASUS_G401,
3491 ++ ALC289_FIXUP_ASUS_GA401,
3492 ++ ALC289_FIXUP_ASUS_GA502,
3493 + ALC256_FIXUP_ACER_MIC_NO_PRESENCE,
3494 ++ ALC285_FIXUP_HP_GPIO_AMP_INIT,
3495 + };
3496 +
3497 + static const struct hda_fixup alc269_fixups[] = {
3498 +@@ -7328,7 +7340,14 @@ static const struct hda_fixup alc269_fixups[] = {
3499 + .chained = true,
3500 + .chain_id = ALC269_FIXUP_HEADSET_MIC
3501 + },
3502 +- [ALC289_FIXUP_ASUS_G401] = {
3503 ++ [ALC289_FIXUP_ASUS_GA401] = {
3504 ++ .type = HDA_FIXUP_PINS,
3505 ++ .v.pins = (const struct hda_pintbl[]) {
3506 ++ { 0x19, 0x03a11020 }, /* headset mic with jack detect */
3507 ++ { }
3508 ++ },
3509 ++ },
3510 ++ [ALC289_FIXUP_ASUS_GA502] = {
3511 + .type = HDA_FIXUP_PINS,
3512 + .v.pins = (const struct hda_pintbl[]) {
3513 + { 0x19, 0x03a11020 }, /* headset mic with jack detect */
3514 +@@ -7344,6 +7363,12 @@ static const struct hda_fixup alc269_fixups[] = {
3515 + .chained = true,
3516 + .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
3517 + },
3518 ++ [ALC285_FIXUP_HP_GPIO_AMP_INIT] = {
3519 ++ .type = HDA_FIXUP_FUNC,
3520 ++ .v.func = alc285_fixup_hp_gpio_amp_init,
3521 ++ .chained = true,
3522 ++ .chain_id = ALC285_FIXUP_HP_GPIO_LED
3523 ++ },
3524 + };
3525 +
3526 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3527 +@@ -7494,7 +7519,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3528 + SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
3529 + SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
3530 + SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
3531 +- SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED),
3532 ++ SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
3533 + SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
3534 + SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
3535 + SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
3536 +@@ -7526,7 +7551,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3537 + SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
3538 + SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3539 + SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
3540 +- SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_G401),
3541 ++ SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
3542 ++ SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
3543 + SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
3544 + SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
3545 + SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
3546 +@@ -7546,7 +7572,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3547 + SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
3548 + SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
3549 + SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
3550 +- SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC225_FIXUP_HEADSET_JACK),
3551 ++ SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
3552 + SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
3553 + SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
3554 + SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
3555 +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
3556 +index 9702c4311b91..0247162a9fbf 100644
3557 +--- a/sound/usb/pcm.c
3558 ++++ b/sound/usb/pcm.c
3559 +@@ -367,6 +367,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
3560 + ifnum = 0;
3561 + goto add_sync_ep_from_ifnum;
3562 + case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
3563 ++ case USB_ID(0x31e9, 0x0001): /* Solid State Logic SSL2 */
3564 + case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */
3565 + case USB_ID(0x0d9a, 0x00df): /* RTX6001 */
3566 + ep = 0x81;
3567 +diff --git a/tools/lib/traceevent/plugins/Makefile b/tools/lib/traceevent/plugins/Makefile
3568 +index 349bb81482ab..680d883efe05 100644
3569 +--- a/tools/lib/traceevent/plugins/Makefile
3570 ++++ b/tools/lib/traceevent/plugins/Makefile
3571 +@@ -197,7 +197,7 @@ define do_generate_dynamic_list_file
3572 + xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
3573 + if [ "$$symbol_type" = "U W" ];then \
3574 + (echo '{'; \
3575 +- $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
3576 ++ $(NM) -u -D $1 | awk 'NF>1 {sub("@.*", "", $$2); print "\t"$$2";"}' | sort -u;\
3577 + echo '};'; \
3578 + ) > $2; \
3579 + else \
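
The Makefile fix accounts for versioned symbols: 'nm -u -D' may print an undefined symbol as, say, pthread_create@GLIBC_2.2.5, and the added sub() strips everything from the '@' so the generated dynamic list contains the bare name. The same transform expressed in C, for illustration only:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char sym[] = "pthread_create@GLIBC_2.2.5";
        char *at = strchr(sym, '@');

        if (at)
            *at = '\0';          /* equivalent of awk's sub("@.*", "", $2) */
        printf("\t%s;\n", sym);  /* -> pthread_create; */
        return 0;
    }
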
3580 +diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c
3581 +index 0a6e75b8777a..28a5d0c18b1d 100644
3582 +--- a/tools/perf/arch/arm/util/auxtrace.c
3583 ++++ b/tools/perf/arch/arm/util/auxtrace.c
3584 +@@ -56,7 +56,7 @@ struct auxtrace_record
3585 + struct perf_pmu *cs_etm_pmu;
3586 + struct evsel *evsel;
3587 + bool found_etm = false;
3588 +- bool found_spe = false;
3589 ++ struct perf_pmu *found_spe = NULL;
3590 + static struct perf_pmu **arm_spe_pmus = NULL;
3591 + static int nr_spes = 0;
3592 + int i = 0;
3593 +@@ -74,12 +74,12 @@ struct auxtrace_record
3594 + evsel->core.attr.type == cs_etm_pmu->type)
3595 + found_etm = true;
3596 +
3597 +- if (!nr_spes)
3598 ++ if (!nr_spes || found_spe)
3599 + continue;
3600 +
3601 + for (i = 0; i < nr_spes; i++) {
3602 + if (evsel->core.attr.type == arm_spe_pmus[i]->type) {
3603 +- found_spe = true;
3604 ++ found_spe = arm_spe_pmus[i];
3605 + break;
3606 + }
3607 + }
3608 +@@ -96,7 +96,7 @@ struct auxtrace_record
3609 +
3610 + #if defined(__aarch64__)
3611 + if (found_spe)
3612 +- return arm_spe_recording_init(err, arm_spe_pmus[i]);
3613 ++ return arm_spe_recording_init(err, found_spe);
3614 + #endif
3615 +
3616 + /*
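
The auxtrace fix replaces a bool-plus-stale-index pattern: the old code set found_spe inside a nested loop but later dereferenced arm_spe_pmus[i], where i is whatever the last scan left behind, possibly nr_spes and out of bounds. Capturing the matching pointer removes the dependency on the loop counter. A reduced model of the pattern:

    #include <stdio.h>
    #include <stdbool.h>

    static bool matches(int ev, int i)
    {
        return ev == 0 && i == 0;   /* pretend only event 0 uses PMU 0 */
    }

    int main(void)
    {
        const char *pmus[] = { "arm_spe_0" };
        int nr = 1, i = 0;
        const char *found = NULL;   /* the patch keeps a pointer ... */

        for (int ev = 0; ev < 2; ev++) {
            if (found)
                continue;           /* ... and stops scanning once found */
            for (i = 0; i < nr; i++) {
                if (matches(ev, i)) {
                    found = pmus[i];
                    break;
                }
            }
        }

        /* With 'bool found' and a late pmus[i] read, a non-matching
         * final pass leaves i == nr, an out-of-bounds index. The
         * captured pointer cannot go stale. */
        if (found)
            printf("using %s\n", found);
        return 0;
    }
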
3617 +diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
3618 +index 8294ae3ffb3c..43c9cda199b8 100755
3619 +--- a/tools/testing/selftests/bpf/test_offload.py
3620 ++++ b/tools/testing/selftests/bpf/test_offload.py
3621 +@@ -318,6 +318,9 @@ class DebugfsDir:
3622 + continue
3623 +
3624 + if os.path.isfile(p):
3625 ++ # We need to init trap_flow_action_cookie before read it
3626 ++ if f == "trap_flow_action_cookie":
3627 ++ cmd('echo deadbeef > %s/%s' % (path, f))
3628 + _, out = cmd('cat %s/%s' % (path, f))
3629 + dfs[f] = out.strip()
3630 + elif os.path.isdir(p):
3631 +diff --git a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
3632 +index 9dc35a16e415..51df5e305855 100755
3633 +--- a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
3634 ++++ b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
3635 +@@ -144,7 +144,7 @@ setup()
3636 +
3637 + cleanup()
3638 + {
3639 +- for n in h1 r1 h2 h3 h4
3640 ++ for n in h0 r1 h1 h2 h3
3641 + do
3642 + ip netns del ${n} 2>/dev/null
3643 + done
3644 +diff --git a/tools/testing/selftests/net/forwarding/ethtool.sh b/tools/testing/selftests/net/forwarding/ethtool.sh
3645 +index eb8e2a23bbb4..43a948feed26 100755
3646 +--- a/tools/testing/selftests/net/forwarding/ethtool.sh
3647 ++++ b/tools/testing/selftests/net/forwarding/ethtool.sh
3648 +@@ -252,8 +252,6 @@ check_highest_speed_is_chosen()
3649 + fi
3650 +
3651 + local -a speeds_arr=($(common_speeds_get $h1 $h2 0 1))
3652 +- # Remove the first speed, h1 does not advertise this speed.
3653 +- unset speeds_arr[0]
3654 +
3655 + max_speed=${speeds_arr[0]}
3656 + for current in ${speeds_arr[@]}; do
3657 +diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh
3658 +index 15d3489ecd9c..ceb7ad4dbd94 100755
3659 +--- a/tools/testing/selftests/net/ip_defrag.sh
3660 ++++ b/tools/testing/selftests/net/ip_defrag.sh
3661 +@@ -6,6 +6,8 @@
3662 + set +x
3663 + set -e
3664 +
3665 ++modprobe -q nf_defrag_ipv6
3666 ++
3667 + readonly NETNS="ns-$(mktemp -u XXXXXX)"
3668 +
3669 + setup() {
3670 +diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
3671 +index 8c8c7d79c38d..2c522f7a0aec 100644
3672 +--- a/tools/testing/selftests/net/psock_fanout.c
3673 ++++ b/tools/testing/selftests/net/psock_fanout.c
3674 +@@ -350,7 +350,8 @@ static int test_datapath(uint16_t typeflags, int port_off,
3675 + int fds[2], fds_udp[2][2], ret;
3676 +
3677 + fprintf(stderr, "\ntest: datapath 0x%hx ports %hu,%hu\n",
3678 +- typeflags, PORT_BASE, PORT_BASE + port_off);
3679 ++ typeflags, (uint16_t)PORT_BASE,
3680 ++ (uint16_t)(PORT_BASE + port_off));
3681 +
3682 + fds[0] = sock_fanout_open(typeflags, 0);
3683 + fds[1] = sock_fanout_open(typeflags, 0);
3684 +diff --git a/tools/testing/selftests/net/rxtimestamp.c b/tools/testing/selftests/net/rxtimestamp.c
3685 +index 422e7761254d..bcb79ba1f214 100644
3686 +--- a/tools/testing/selftests/net/rxtimestamp.c
3687 ++++ b/tools/testing/selftests/net/rxtimestamp.c
3688 +@@ -329,8 +329,7 @@ int main(int argc, char **argv)
3689 + bool all_tests = true;
3690 + int arg_index = 0;
3691 + int failures = 0;
3692 +- int s, t;
3693 +- char opt;
3694 ++ int s, t, opt;
3695 +
3696 + while ((opt = getopt_long(argc, argv, "", long_options,
3697 + &arg_index)) != -1) {
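
Declaring opt as char is the classic getopt() portability bug this hunk removes: getopt_long() returns an int and signals end-of-options with -1, but on targets where plain char is unsigned (arm, s390, ppc) storing -1 yields 255 and the != -1 test never becomes false. A two-line demonstration:

    #include <stdio.h>

    int main(void)
    {
        unsigned char opt = (unsigned char)-1; /* plain char on arm */
        int iopt = -1;                         /* the fixed declaration */

        /* opt promotes to int 255, so the loop condition stays true */
        printf("opt != -1: %d\n", opt != -1);   /* 1 */
        printf("iopt != -1: %d\n", iopt != -1); /* 0 */
        return 0;
    }
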
3698 +diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c
3699 +index ceaad78e9667..3155fbbf644b 100644
3700 +--- a/tools/testing/selftests/net/so_txtime.c
3701 ++++ b/tools/testing/selftests/net/so_txtime.c
3702 +@@ -121,7 +121,7 @@ static bool do_recv_one(int fdr, struct timed_send *ts)
3703 + if (rbuf[0] != ts->data)
3704 + error(1, 0, "payload mismatch. expected %c", ts->data);
3705 +
3706 +- if (labs(tstop - texpect) > cfg_variance_us)
3707 ++ if (llabs(tstop - texpect) > cfg_variance_us)
3708 + error(1, 0, "exceeds variance (%d us)", cfg_variance_us);
3709 +
3710 + return false;
3711 +diff --git a/tools/testing/selftests/net/tcp_mmap.c b/tools/testing/selftests/net/tcp_mmap.c
3712 +index 4555f88252ba..a61b7b3da549 100644
3713 +--- a/tools/testing/selftests/net/tcp_mmap.c
3714 ++++ b/tools/testing/selftests/net/tcp_mmap.c
3715 +@@ -344,7 +344,7 @@ int main(int argc, char *argv[])
3716 + {
3717 + struct sockaddr_storage listenaddr, addr;
3718 + unsigned int max_pacing_rate = 0;
3719 +- size_t total = 0;
3720 ++ uint64_t total = 0;
3721 + char *host = NULL;
3722 + int fd, c, on = 1;
3723 + char *buffer;
3724 +@@ -473,12 +473,12 @@ int main(int argc, char *argv[])
3725 + zflg = 0;
3726 + }
3727 + while (total < FILE_SZ) {
3728 +- ssize_t wr = FILE_SZ - total;
3729 ++ int64_t wr = FILE_SZ - total;
3730 +
3731 + if (wr > chunk_size)
3732 + wr = chunk_size;
3733 + /* Note : we just want to fill the pipe with 0 bytes */
3734 +- wr = send(fd, buffer, wr, zflg ? MSG_ZEROCOPY : 0);
3735 ++ wr = send(fd, buffer, (size_t)wr, zflg ? MSG_ZEROCOPY : 0);
3736 + if (wr <= 0)
3737 + break;
3738 + total += wr;
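
Switching total to uint64_t guards the 32-bit build: there size_t is 32 bits, so a byte counter aiming at a multi-gigabyte transfer target truncates and the while (total < FILE_SZ) logic miscounts. A minimal illustration (an 8 GiB target is assumed here for the example, not taken from the test):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t file_sz = 8ULL << 30;           /* 8 GiB target */
        uint32_t total32 = (uint32_t)file_sz;    /* 32-bit size_t's view */

        /* prints 8589934592 vs 0: the truncated counter already
         * "reached" the target before a single byte was sent */
        printf("%llu %u\n", (unsigned long long)file_sz, total32);
        return 0;
    }
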
3739 +diff --git a/tools/testing/selftests/net/txtimestamp.sh b/tools/testing/selftests/net/txtimestamp.sh
3740 +index eea6f5193693..31637769f59f 100755
3741 +--- a/tools/testing/selftests/net/txtimestamp.sh
3742 ++++ b/tools/testing/selftests/net/txtimestamp.sh
3743 +@@ -75,7 +75,7 @@ main() {
3744 + fi
3745 + }
3746 +
3747 +-if [[ "$(ip netns identify)" == "root" ]]; then
3748 ++if [[ -z "$(ip netns identify)" ]]; then
3749 + ./in_netns.sh $0 $@
3750 + else
3751 + main $@
3752 +diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
3753 +index e3b9ee268823..8a9d13e8e904 100644
3754 +--- a/virt/kvm/arm/mmu.c
3755 ++++ b/virt/kvm/arm/mmu.c
3756 +@@ -1198,7 +1198,7 @@ static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
3757 + return true;
3758 + }
3759 +
3760 +-static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
3761 ++static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr, unsigned long sz)
3762 + {
3763 + pud_t *pudp;
3764 + pmd_t *pmdp;
3765 +@@ -1210,11 +1210,11 @@ static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
3766 + return false;
3767 +
3768 + if (pudp)
3769 +- return kvm_s2pud_exec(pudp);
3770 ++ return sz <= PUD_SIZE && kvm_s2pud_exec(pudp);
3771 + else if (pmdp)
3772 +- return kvm_s2pmd_exec(pmdp);
3773 ++ return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp);
3774 + else
3775 +- return kvm_s2pte_exec(ptep);
3776 ++ return sz == PAGE_SIZE && kvm_s2pte_exec(ptep);
3777 + }
3778 +
3779 + static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
3780 +@@ -1801,7 +1801,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
3781 + * execute permissions, and we preserve whatever we have.
3782 + */
3783 + needs_exec = exec_fault ||
3784 +- (fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
3785 ++ (fault_status == FSC_PERM &&
3786 ++ stage2_is_exec(kvm, fault_ipa, vma_pagesize));
3787 +
3788 + if (vma_pagesize == PUD_SIZE) {
3789 + pud_t new_pud = kvm_pfn_pud(pfn, mem_type);