Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Wed, 27 Feb 2019 11:23:33
Message-Id: 1551266586.bfcc3c8f1cb3df7438bb44c51fc44cd1f403e534.mpagano@gentoo
1 commit: bfcc3c8f1cb3df7438bb44c51fc44cd1f403e534
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Feb 27 11:23:06 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Feb 27 11:23:06 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bfcc3c8f
7
8 proj/linux-patches: Linux patch 4.19.26
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1025_linux-4.19.26.patch | 6610 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 6614 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index aee0edf..fa1c672 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -143,6 +143,10 @@ Patch: 1024_linux-4.19.25.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.19.25
23
24 +Patch: 1025_linux-4.19.26.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.19.26
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1025_linux-4.19.26.patch b/1025_linux-4.19.26.patch
33 new file mode 100644
34 index 0000000..b49d24c
35 --- /dev/null
36 +++ b/1025_linux-4.19.26.patch
37 @@ -0,0 +1,6610 @@
38 +diff --git a/Makefile b/Makefile
39 +index 2caa131ff306b..b71076cecba9c 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 19
46 +-SUBLEVEL = 25
47 ++SUBLEVEL = 26
48 + EXTRAVERSION =
49 + NAME = "People's Front"
50 +
51 +diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
52 +index ff7d3232764a2..db681cf4959c8 100644
53 +--- a/arch/arc/include/asm/cache.h
54 ++++ b/arch/arc/include/asm/cache.h
55 +@@ -52,6 +52,17 @@
56 + #define cache_line_size() SMP_CACHE_BYTES
57 + #define ARCH_DMA_MINALIGN SMP_CACHE_BYTES
58 +
59 ++/*
60 ++ * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses
61 ++ * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantees runtime 64-bit
62 ++ * alignment for any atomic64_t embedded in the buffer.
63 ++ * Default ARCH_SLAB_MINALIGN is __alignof__(long long), which has a relaxed
64 ++ * value of 4 (and not 8) in the ARC ABI.
65 ++ */
66 ++#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC)
67 ++#define ARCH_SLAB_MINALIGN 8
68 ++#endif
69 ++
70 + extern void arc_cache_init(void);
71 + extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
72 + extern void read_decode_cache_bcr(void);
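
The comment above turns on an ABI subtlety: the ARC ABI relaxes __alignof__(long long) to 4, so a slab minimum alignment derived from it can hand out objects whose 64-bit fields sit on 4-byte boundaries, which LLOCKD/SCONDD cannot tolerate. A hosted C sketch of the layout question (illustrative only; the struct and names are invented, and a typical x86/ARM host will already report 8):

    #include <stdalign.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for a slab object embedding an atomic64_t-like field. */
    struct obj {
        uint32_t hdr;
        uint64_t counter;  /* wants natural 8-byte alignment for LLOCKD/SCONDD */
    };

    int main(void)
    {
        /* If the allocator honors only a 4-byte minimum, an object placed
         * at base+4 leaves 'counter' on a 4-byte boundary; raising
         * ARCH_SLAB_MINALIGN to 8 makes every slab object 8-byte aligned
         * and rules that out. */
        printf("alignof(struct obj) = %zu, offsetof(counter) = %zu\n",
               alignof(struct obj), offsetof(struct obj, counter));
        return 0;
    }
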
73 +diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
74 +index 8b90d25a15cca..1f945d0f40daa 100644
75 +--- a/arch/arc/kernel/head.S
76 ++++ b/arch/arc/kernel/head.S
77 +@@ -17,6 +17,7 @@
78 + #include <asm/entry.h>
79 + #include <asm/arcregs.h>
80 + #include <asm/cache.h>
81 ++#include <asm/irqflags.h>
82 +
83 + .macro CPU_EARLY_SETUP
84 +
85 +@@ -47,6 +48,15 @@
86 + sr r5, [ARC_REG_DC_CTRL]
87 +
88 + 1:
89 ++
90 ++#ifdef CONFIG_ISA_ARCV2
91 ++	; Unaligned access is disabled at reset, so re-enable it early, as
92 ++ ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
93 ++ ; by default
94 ++ lr r5, [status32]
95 ++ bset r5, r5, STATUS_AD_BIT
96 ++ kflag r5
97 ++#endif
98 + .endm
99 +
100 + .section .init.text, "ax",@progbits
101 +@@ -93,9 +103,9 @@ ENTRY(stext)
102 + #ifdef CONFIG_ARC_UBOOT_SUPPORT
103 + ; Uboot - kernel ABI
104 + ; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
105 +- ; r1 = magic number (board identity, unused as of now
106 ++ ; r1 = magic number (always zero as of now)
107 + ; r2 = pointer to uboot provided cmdline or external DTB in mem
108 +- ; These are handled later in setup_arch()
109 ++ ; These are handled later in handle_uboot_args()
110 + st r0, [@uboot_tag]
111 + st r2, [@uboot_arg]
112 + #endif
113 +diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
114 +index b2cae79a25d71..62a30e58441c5 100644
115 +--- a/arch/arc/kernel/setup.c
116 ++++ b/arch/arc/kernel/setup.c
117 +@@ -449,43 +449,80 @@ void setup_processor(void)
118 + arc_chk_core_config();
119 + }
120 +
121 +-static inline int is_kernel(unsigned long addr)
122 ++static inline bool uboot_arg_invalid(unsigned long addr)
123 + {
124 +- if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
125 +- return 1;
126 +- return 0;
127 ++ /*
128 ++	 * Check that it is an untranslated address (although the MMU is not
129 ++	 * enabled yet, it being a high address ensures this is not a fluke)
130 ++ */
131 ++ if (addr < PAGE_OFFSET)
132 ++ return true;
133 ++
134 ++ /* Check that address doesn't clobber resident kernel image */
135 ++ return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
136 + }
137 +
138 +-void __init setup_arch(char **cmdline_p)
139 ++#define IGNORE_ARGS "Ignore U-boot args: "
140 ++
141 ++/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
142 ++#define UBOOT_TAG_NONE 0
143 ++#define UBOOT_TAG_CMDLINE 1
144 ++#define UBOOT_TAG_DTB 2
145 ++
146 ++void __init handle_uboot_args(void)
147 + {
148 ++ bool use_embedded_dtb = true;
149 ++ bool append_cmdline = false;
150 ++
151 + #ifdef CONFIG_ARC_UBOOT_SUPPORT
152 +- /* make sure that uboot passed pointer to cmdline/dtb is valid */
153 +- if (uboot_tag && is_kernel((unsigned long)uboot_arg))
154 +- panic("Invalid uboot arg\n");
155 ++ /* check that we know this tag */
156 ++ if (uboot_tag != UBOOT_TAG_NONE &&
157 ++ uboot_tag != UBOOT_TAG_CMDLINE &&
158 ++ uboot_tag != UBOOT_TAG_DTB) {
159 ++ pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
160 ++ goto ignore_uboot_args;
161 ++ }
162 ++
163 ++ if (uboot_tag != UBOOT_TAG_NONE &&
164 ++ uboot_arg_invalid((unsigned long)uboot_arg)) {
165 ++ pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
166 ++ goto ignore_uboot_args;
167 ++ }
168 ++
169 ++ /* see if U-boot passed an external Device Tree blob */
170 ++ if (uboot_tag == UBOOT_TAG_DTB) {
171 ++ machine_desc = setup_machine_fdt((void *)uboot_arg);
172 +
173 +- /* See if u-boot passed an external Device Tree blob */
174 +- machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */
175 +- if (!machine_desc)
176 ++ /* external Device Tree blob is invalid - use embedded one */
177 ++ use_embedded_dtb = !machine_desc;
178 ++ }
179 ++
180 ++ if (uboot_tag == UBOOT_TAG_CMDLINE)
181 ++ append_cmdline = true;
182 ++
183 ++ignore_uboot_args:
184 + #endif
185 +- {
186 +- /* No, so try the embedded one */
187 ++
188 ++ if (use_embedded_dtb) {
189 + machine_desc = setup_machine_fdt(__dtb_start);
190 + if (!machine_desc)
191 + panic("Embedded DT invalid\n");
192 ++ }
193 +
194 +- /*
195 +- * If we are here, it is established that @uboot_arg didn't
196 +- * point to DT blob. Instead if u-boot says it is cmdline,
197 +- * append to embedded DT cmdline.
198 +- * setup_machine_fdt() would have populated @boot_command_line
199 +- */
200 +- if (uboot_tag == 1) {
201 +- /* Ensure a whitespace between the 2 cmdlines */
202 +- strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
203 +- strlcat(boot_command_line, uboot_arg,
204 +- COMMAND_LINE_SIZE);
205 +- }
206 ++ /*
207 ++	 * NOTE: @boot_command_line is populated by setup_machine_fdt(), so this
208 ++	 * append processing can only happen after that call.
209 ++ */
210 ++ if (append_cmdline) {
211 ++ /* Ensure a whitespace between the 2 cmdlines */
212 ++ strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
213 ++ strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
214 + }
215 ++}
216 ++
217 ++void __init setup_arch(char **cmdline_p)
218 ++{
219 ++ handle_uboot_args();
220 +
221 + /* Save unparsed command line copy for /proc/cmdline */
222 + *cmdline_p = boot_command_line;
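
Taken together, the refactor replaces the old panic with a validate-then-dispatch on the tag ABI. A hosted sketch of that control flow (printf stands in for pr_warn, and the address-range check is elided; the names mirror the patch but the program itself is hypothetical):

    #include <stdio.h>

    enum { UBOOT_TAG_NONE = 0, UBOOT_TAG_CMDLINE = 1, UBOOT_TAG_DTB = 2 };

    static void handle_uboot_args(unsigned int tag, const char *arg)
    {
        if (tag != UBOOT_TAG_NONE && tag != UBOOT_TAG_CMDLINE &&
            tag != UBOOT_TAG_DTB) {
            /* unknown revision-0 tag: warn, fall back to embedded DTB */
            fprintf(stderr, "Ignore U-boot args: invalid uboot tag: '%08x'\n", tag);
            tag = UBOOT_TAG_NONE;
        }

        if (tag == UBOOT_TAG_DTB)
            printf("using external DTB\n");
        else
            printf("using embedded DTB\n");

        if (tag == UBOOT_TAG_CMDLINE)
            printf("appending cmdline: %s\n", arg);
    }

    int main(void)
    {
        handle_uboot_args(UBOOT_TAG_CMDLINE, "console=ttyS3,115200n8");
        handle_uboot_args(7, NULL);    /* ignored with a warning, no panic */
        return 0;
    }
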
223 +diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
224 +index 2c118a6ab3587..0dc23fc227ed2 100644
225 +--- a/arch/arm/probes/kprobes/opt-arm.c
226 ++++ b/arch/arm/probes/kprobes/opt-arm.c
227 +@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
228 + }
229 +
230 + /* Copy arch-dep-instance from template. */
231 +- memcpy(code, (unsigned char *)optprobe_template_entry,
232 ++ memcpy(code, (unsigned long *)&optprobe_template_entry,
233 + TMPL_END_IDX * sizeof(kprobe_opcode_t));
234 +
235 + /* Adjust buffer according to instruction. */
236 +diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
237 +index 951c4231bdb85..4c47b3fd958b6 100644
238 +--- a/arch/mips/configs/ath79_defconfig
239 ++++ b/arch/mips/configs/ath79_defconfig
240 +@@ -71,6 +71,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
241 + # CONFIG_SERIAL_8250_PCI is not set
242 + CONFIG_SERIAL_8250_NR_UARTS=1
243 + CONFIG_SERIAL_8250_RUNTIME_UARTS=1
244 ++CONFIG_SERIAL_OF_PLATFORM=y
245 + CONFIG_SERIAL_AR933X=y
246 + CONFIG_SERIAL_AR933X_CONSOLE=y
247 + # CONFIG_HW_RANDOM is not set
248 +diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
249 +index d31bc2f012088..fb2b6d0b77c36 100644
250 +--- a/arch/mips/jazz/jazzdma.c
251 ++++ b/arch/mips/jazz/jazzdma.c
252 +@@ -74,14 +74,15 @@ static int __init vdma_init(void)
253 + get_order(VDMA_PGTBL_SIZE));
254 + BUG_ON(!pgtbl);
255 + dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
256 +- pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl);
257 ++ pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
258 +
259 + /*
260 + * Clear the R4030 translation table
261 + */
262 + vdma_pgtbl_init();
263 +
264 +- r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl));
265 ++ r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
266 ++ CPHYSADDR((unsigned long)pgtbl));
267 + r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
268 + r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
269 +
270 +diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
271 +index aeb7b1b0f2024..252c00985c973 100644
272 +--- a/arch/mips/net/ebpf_jit.c
273 ++++ b/arch/mips/net/ebpf_jit.c
274 +@@ -343,12 +343,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
275 + const struct bpf_prog *prog = ctx->skf;
276 + int stack_adjust = ctx->stack_size;
277 + int store_offset = stack_adjust - 8;
278 ++ enum reg_val_type td;
279 + int r0 = MIPS_R_V0;
280 +
281 +- if (dest_reg == MIPS_R_RA &&
282 +- get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
283 ++ if (dest_reg == MIPS_R_RA) {
284 + /* Don't let zero extended value escape. */
285 +- emit_instr(ctx, sll, r0, r0, 0);
286 ++ td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
287 ++ if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
288 ++ emit_instr(ctx, sll, r0, r0, 0);
289 ++ }
290 +
291 + if (ctx->flags & EBPF_SAVE_RA) {
292 + emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
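
The emitted 'sll r0, r0, 0' relies on MIPS64 semantics: 32-bit ALU results are held in registers in sign-extended form, so a shift by zero canonicalizes a zero-extended value. A hosted C emulation of what that instruction does to v0 (the narrowing conversion is implementation-defined in ISO C but two's-complement everywhere in practice):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* A REG_32BIT_ZERO_EX value: upper half zero, bit 31 set. */
        uint64_t reg = 0x00000000ffffffffULL;

        /* Emulate 'sll reg, reg, 0': truncate to 32 bits, sign-extend to 64. */
        uint64_t canonical = (uint64_t)(int64_t)(int32_t)(uint32_t)reg;

        printf("before: 0x%016" PRIx64 "\nafter:  0x%016" PRIx64 "\n",
               reg, canonical);
        return 0;
    }
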
293 +diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
294 +index 2582df1c529bb..0964c236e3e5a 100644
295 +--- a/arch/parisc/kernel/ptrace.c
296 ++++ b/arch/parisc/kernel/ptrace.c
297 +@@ -308,15 +308,29 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
298 +
299 + long do_syscall_trace_enter(struct pt_regs *regs)
300 + {
301 +- if (test_thread_flag(TIF_SYSCALL_TRACE) &&
302 +- tracehook_report_syscall_entry(regs)) {
303 ++ if (test_thread_flag(TIF_SYSCALL_TRACE)) {
304 ++ int rc = tracehook_report_syscall_entry(regs);
305 ++
306 + /*
307 +- * Tracing decided this syscall should not happen or the
308 +- * debugger stored an invalid system call number. Skip
309 +- * the system call and the system call restart handling.
310 ++ * As tracesys_next does not set %r28 to -ENOSYS
311 ++ * when %r20 is set to -1, initialize it here.
312 + */
313 +- regs->gr[20] = -1UL;
314 +- goto out;
315 ++ regs->gr[28] = -ENOSYS;
316 ++
317 ++ if (rc) {
318 ++ /*
319 ++ * A nonzero return code from
320 ++ * tracehook_report_syscall_entry() tells us
321 ++ * to prevent the syscall execution. Skip
322 ++			 * the syscall and the syscall restart handling.
323 ++			 *
324 ++			 * Note that the tracer may also just change
325 ++			 * regs->gr[20] to an invalid syscall number,
326 ++			 * which is handled by tracesys_next.
327 ++ */
328 ++ regs->gr[20] = -1UL;
329 ++ return -1;
330 ++ }
331 + }
332 +
333 + /* Do the secure computing check after ptrace. */
334 +@@ -340,7 +354,6 @@ long do_syscall_trace_enter(struct pt_regs *regs)
335 + regs->gr[24] & 0xffffffff,
336 + regs->gr[23] & 0xffffffff);
337 +
338 +-out:
339 + /*
340 + * Sign extend the syscall number to 64bit since it may have been
341 + * modified by a compat ptrace call
342 +diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
343 +index 81d4574d1f377..9fd2ff28b8ff2 100644
344 +--- a/arch/powerpc/kernel/head_8xx.S
345 ++++ b/arch/powerpc/kernel/head_8xx.S
346 +@@ -919,11 +919,12 @@ start_here:
347 +
348 + /* set up the PTE pointers for the Abatron bdiGDB.
349 + */
350 +- tovirt(r6,r6)
351 + lis r5, abatron_pteptrs@h
352 + ori r5, r5, abatron_pteptrs@l
353 + stw r5, 0xf0(0) /* Must match your Abatron config file */
354 + tophys(r5,r5)
355 ++ lis r6, swapper_pg_dir@h
356 ++ ori r6, r6, swapper_pg_dir@l
357 + stw r6, 0(r5)
358 +
359 + /* Now turn on the MMU for real! */
360 +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
361 +index 7bcfa61375c09..98d13c6a64be0 100644
362 +--- a/arch/x86/kvm/cpuid.c
363 ++++ b/arch/x86/kvm/cpuid.c
364 +@@ -337,6 +337,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
365 + unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
366 + unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
367 + unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
368 ++ unsigned f_la57 = 0;
369 +
370 + /* cpuid 1.edx */
371 + const u32 kvm_cpuid_1_edx_x86_features =
372 +@@ -491,7 +492,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
373 + // TSC_ADJUST is emulated
374 + entry->ebx |= F(TSC_ADJUST);
375 + entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
376 ++ f_la57 = entry->ecx & F(LA57);
377 + cpuid_mask(&entry->ecx, CPUID_7_ECX);
378 ++ /* Set LA57 based on hardware capability. */
379 ++ entry->ecx |= f_la57;
380 + entry->ecx |= f_umip;
381 + /* PKU is not yet implemented for shadow paging. */
382 + if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
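
The LA57 hunk is a save/mask/restore of a single feature bit, so that cpuid_mask() cannot strip it when the host supports 5-level paging. LA57 really is CPUID.(EAX=7,ECX=0):ECX bit 16; the surrounding values in this sketch are made up:

    #include <stdint.h>
    #include <stdio.h>

    #define F_LA57 (1u << 16)    /* CPUID.(EAX=7,ECX=0):ECX.LA57 */

    int main(void)
    {
        uint32_t ecx = 0x00410004 | F_LA57;  /* pretend hardware CPUID output */
        uint32_t kvm_supported = 0x00410004; /* mask that would drop LA57 */

        uint32_t f_la57 = ecx & F_LA57;      /* save before masking */
        ecx &= kvm_supported;                /* cpuid_mask() analogue */
        ecx |= f_la57;                       /* restore from hardware capability */

        printf("LA57 %s\n", (ecx & F_LA57) ? "preserved" : "lost");
        return 0;
    }
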
383 +diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
384 +index 52a7c3faee0cc..782f98b332f05 100644
385 +--- a/arch/x86/xen/enlighten_pv.c
386 ++++ b/arch/x86/xen/enlighten_pv.c
387 +@@ -899,10 +899,7 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
388 + val = native_read_msr_safe(msr, err);
389 + switch (msr) {
390 + case MSR_IA32_APICBASE:
391 +-#ifdef CONFIG_X86_X2APIC
392 +- if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
393 +-#endif
394 +- val &= ~X2APIC_ENABLE;
395 ++ val &= ~X2APIC_ENABLE;
396 + break;
397 + }
398 + return val;
399 +diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
400 +index ea59c01ce8db0..f530d35412428 100644
401 +--- a/drivers/acpi/nfit/core.c
402 ++++ b/drivers/acpi/nfit/core.c
403 +@@ -719,6 +719,7 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
404 + struct acpi_nfit_memory_map *memdev;
405 + struct acpi_nfit_desc *acpi_desc;
406 + struct nfit_mem *nfit_mem;
407 ++ u16 physical_id;
408 +
409 + mutex_lock(&acpi_desc_lock);
410 + list_for_each_entry(acpi_desc, &acpi_descs, list) {
411 +@@ -726,10 +727,11 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
412 + list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
413 + memdev = __to_nfit_memdev(nfit_mem);
414 + if (memdev->device_handle == device_handle) {
415 ++ *flags = memdev->flags;
416 ++ physical_id = memdev->physical_id;
417 + mutex_unlock(&acpi_desc->init_mutex);
418 + mutex_unlock(&acpi_desc_lock);
419 +- *flags = memdev->flags;
420 +- return memdev->physical_id;
421 ++ return physical_id;
422 + }
423 + }
424 + mutex_unlock(&acpi_desc->init_mutex);
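
The nfit fix applies the usual rule for lock-protected data: copy every needed field out before dropping the mutexes, and never dereference the structure afterwards. A minimal pthread sketch of the corrected shape (all names hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    struct memdev { unsigned short flags, physical_id; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct memdev dev = { 0x5, 42 };

    static int lookup(unsigned short *flags)
    {
        pthread_mutex_lock(&lock);
        unsigned short physical_id = dev.physical_id; /* copy under lock */
        *flags = dev.flags;                           /* copy under lock */
        pthread_mutex_unlock(&lock);
        return physical_id;  /* no touching 'dev' past this point */
    }

    int main(void)
    {
        unsigned short flags;
        int id = lookup(&flags);
        printf("id=%d flags=%#x\n", id, flags);
        return 0;
    }
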
425 +diff --git a/drivers/atm/he.c b/drivers/atm/he.c
426 +index 29f102dcfec49..329ce9072ee9f 100644
427 +--- a/drivers/atm/he.c
428 ++++ b/drivers/atm/he.c
429 +@@ -717,7 +717,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev)
430 + instead of '/ 512', use '>> 9' to prevent a call
431 + to divdu3 on x86 platforms
432 + */
433 +- rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
434 ++ rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
435 +
436 + if (rate_cps < 10)
437 + rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
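
The he.c one-liner is a shift-width fix: with a plain int literal, '1 << exp' hits the sign bit at exp = 31, and the cast to unsigned long long then sign-extends the bogus value. The patch's 1UL sidesteps the sign extension; this hosted sketch uses 1ULL for the same effect on any host:

    #include <stdio.h>

    int main(void)
    {
        unsigned int exp = 31, man = 0;

        /* What '1 << 31' yields in practice: INT_MIN, produced here
         * without undefined behavior for the sake of the demo. */
        int as_int = (int)(1u << exp);

        unsigned long long bad  = (unsigned long long)as_int * (man + 512) >> 9;
        unsigned long long good = (1ULL << exp) * (man + 512) >> 9;

        printf("bad  = %llu\ngood = %llu\n", bad, good);
        return 0;
    }
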
438 +diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
439 +index 00e954f22bc92..74401e0adb29c 100644
440 +--- a/drivers/gpio/gpio-mt7621.c
441 ++++ b/drivers/gpio/gpio-mt7621.c
442 +@@ -30,6 +30,7 @@
443 + #define GPIO_REG_EDGE 0xA0
444 +
445 + struct mtk_gc {
446 ++ struct irq_chip irq_chip;
447 + struct gpio_chip chip;
448 + spinlock_t lock;
449 + int bank;
450 +@@ -189,13 +190,6 @@ mediatek_gpio_irq_type(struct irq_data *d, unsigned int type)
451 + return 0;
452 + }
453 +
454 +-static struct irq_chip mediatek_gpio_irq_chip = {
455 +- .irq_unmask = mediatek_gpio_irq_unmask,
456 +- .irq_mask = mediatek_gpio_irq_mask,
457 +- .irq_mask_ack = mediatek_gpio_irq_mask,
458 +- .irq_set_type = mediatek_gpio_irq_type,
459 +-};
460 +-
461 + static int
462 + mediatek_gpio_xlate(struct gpio_chip *chip,
463 + const struct of_phandle_args *spec, u32 *flags)
464 +@@ -254,6 +248,13 @@ mediatek_gpio_bank_probe(struct device *dev,
465 + return ret;
466 + }
467 +
468 ++ rg->irq_chip.name = dev_name(dev);
469 ++ rg->irq_chip.parent_device = dev;
470 ++ rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
471 ++ rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
472 ++ rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
473 ++ rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
474 ++
475 + if (mtk->gpio_irq) {
476 + /*
477 + * Manually request the irq here instead of passing
478 +@@ -270,14 +271,14 @@ mediatek_gpio_bank_probe(struct device *dev,
479 + return ret;
480 + }
481 +
482 +- ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip,
483 ++ ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip,
484 + 0, handle_simple_irq, IRQ_TYPE_NONE);
485 + if (ret) {
486 + dev_err(dev, "failed to add gpiochip_irqchip\n");
487 + return ret;
488 + }
489 +
490 +- gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip,
491 ++ gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip,
492 + mtk->gpio_irq, NULL);
493 + }
494 +
495 +@@ -310,7 +311,6 @@ mediatek_gpio_probe(struct platform_device *pdev)
496 + mtk->gpio_irq = irq_of_parse_and_map(np, 0);
497 + mtk->dev = dev;
498 + platform_set_drvdata(pdev, mtk);
499 +- mediatek_gpio_irq_chip.name = dev_name(dev);
500 +
501 + for (i = 0; i < MTK_BANK_CNT; i++) {
502 + ret = mediatek_gpio_bank_probe(dev, np, i);
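
The gpio-mt7621 change converts one mutable static irq_chip into a per-bank copy, so fields like .name set at probe time no longer clobber each other across banks. A stripped-down sketch of that ownership change (structs invented for illustration):

    #include <stdio.h>

    struct irq_chip { char name[16]; };
    struct bank { struct irq_chip chip; int id; };

    int main(void)
    {
        struct bank banks[3];

        for (int i = 0; i < 3; i++) {  /* each bank owns its descriptor */
            banks[i].id = i;
            snprintf(banks[i].chip.name, sizeof(banks[i].chip.name),
                     "gpio-bank%d", i);
        }
        for (int i = 0; i < 3; i++)    /* no last-probe-wins overwrite */
            printf("%s\n", banks[i].chip.name);
        return 0;
    }
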
503 +diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
504 +index 9f3f166f17608..eb27fa76e8fc7 100644
505 +--- a/drivers/gpio/gpio-pxa.c
506 ++++ b/drivers/gpio/gpio-pxa.c
507 +@@ -245,6 +245,7 @@ static bool pxa_gpio_has_pinctrl(void)
508 + {
509 + switch (gpio_type) {
510 + case PXA3XX_GPIO:
511 ++ case MMP2_GPIO:
512 + return false;
513 +
514 + default:
515 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
516 +index fd825d30edf13..c0396e83f3526 100644
517 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
518 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
519 +@@ -159,6 +159,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
520 + }
521 +
522 + if (amdgpu_device_is_px(dev)) {
523 ++ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
524 + pm_runtime_use_autosuspend(dev->dev);
525 + pm_runtime_set_autosuspend_delay(dev->dev, 5000);
526 + pm_runtime_set_active(dev->dev);
527 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
528 +index 80f5db4ef75fd..0805c423a5ce0 100644
529 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
530 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
531 +@@ -1072,8 +1072,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
532 + * the GPU device is not already present in the topology device
533 + * list then return NULL. This means a new topology device has to
534 + * be created for this GPU.
535 +- * TODO: Rather than assiging @gpu to first topology device withtout
536 +- * gpu attached, it will better to have more stringent check.
537 + */
538 + static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
539 + {
540 +@@ -1081,12 +1079,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
541 + struct kfd_topology_device *out_dev = NULL;
542 +
543 + down_write(&topology_lock);
544 +- list_for_each_entry(dev, &topology_device_list, list)
545 ++ list_for_each_entry(dev, &topology_device_list, list) {
546 ++ /* Discrete GPUs need their own topology device list
547 ++ * entries. Don't assign them to CPU/APU nodes.
548 ++ */
549 ++ if (!gpu->device_info->needs_iommu_device &&
550 ++ dev->node_props.cpu_cores_count)
551 ++ continue;
552 ++
553 + if (!dev->gpu && (dev->node_props.simd_count > 0)) {
554 + dev->gpu = gpu;
555 + out_dev = dev;
556 + break;
557 + }
558 ++ }
559 + up_write(&topology_lock);
560 + return out_dev;
561 + }
562 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
563 +index a851bb07443f0..c5ba9128b7361 100644
564 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
565 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
566 +@@ -624,12 +624,13 @@ static int dm_suspend(void *handle)
567 + struct amdgpu_display_manager *dm = &adev->dm;
568 + int ret = 0;
569 +
570 ++ WARN_ON(adev->dm.cached_state);
571 ++ adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
572 ++
573 + s3_handle_mst(adev->ddev, true);
574 +
575 + amdgpu_dm_irq_suspend(adev);
576 +
577 +- WARN_ON(adev->dm.cached_state);
578 +- adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
579 +
580 + dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
581 +
582 +diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
583 +index 580e7e82034fa..53ccacf99eca4 100644
584 +--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
585 ++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
586 +@@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
587 +
588 + pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
589 +
590 +- if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
591 ++ if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
592 + /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
593 + pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
594 + /* un-mute audio */
595 +@@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
596 + pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
597 + pipe_ctx->stream_res.stream_enc, true);
598 + if (pipe_ctx->stream_res.audio) {
599 ++ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
600 ++
601 + if (option != KEEP_ACQUIRED_RESOURCE ||
602 + !dc->debug.az_endpoint_mute_only) {
603 + 			/*only disable az_endpoint if power down or free*/
604 +@@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
605 + update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
606 + pipe_ctx->stream_res.audio = NULL;
607 + }
608 ++ if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
609 ++	if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
610 ++ pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
611 +
612 + /* TODO: notify audio driver for if audio modes list changed
613 + * add audio mode list change flag */
614 +diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
615 +index 2d6506c08bf72..6f91634880aa2 100644
616 +--- a/drivers/gpu/drm/i915/intel_fbdev.c
617 ++++ b/drivers/gpu/drm/i915/intel_fbdev.c
618 +@@ -334,8 +334,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
619 + bool *enabled, int width, int height)
620 + {
621 + struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
622 +- unsigned long conn_configured, conn_seq, mask;
623 + unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
624 ++ unsigned long conn_configured, conn_seq;
625 + int i, j;
626 + bool *save_enabled;
627 + bool fallback = true, ret = true;
628 +@@ -353,10 +353,9 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
629 + drm_modeset_backoff(&ctx);
630 +
631 + memcpy(save_enabled, enabled, count);
632 +- mask = GENMASK(count - 1, 0);
633 ++ conn_seq = GENMASK(count - 1, 0);
634 + conn_configured = 0;
635 + retry:
636 +- conn_seq = conn_configured;
637 + for (i = 0; i < count; i++) {
638 + struct drm_fb_helper_connector *fb_conn;
639 + struct drm_connector *connector;
640 +@@ -369,7 +368,8 @@ retry:
641 + if (conn_configured & BIT(i))
642 + continue;
643 +
644 +- if (conn_seq == 0 && !connector->has_tile)
645 ++ /* First pass, only consider tiled connectors */
646 ++ if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
647 + continue;
648 +
649 + if (connector->status == connector_status_connected)
650 +@@ -473,8 +473,10 @@ retry:
651 + conn_configured |= BIT(i);
652 + }
653 +
654 +- if ((conn_configured & mask) != mask && conn_configured != conn_seq)
655 ++ if (conn_configured != conn_seq) { /* repeat until no more are found */
656 ++ conn_seq = conn_configured;
657 + goto retry;
658 ++ }
659 +
660 + /*
661 + * If the BIOS didn't enable everything it could, fall back to have the
662 +diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
663 +index bf5f294f172fa..611ac340fb289 100644
664 +--- a/drivers/gpu/drm/meson/meson_drv.c
665 ++++ b/drivers/gpu/drm/meson/meson_drv.c
666 +@@ -368,8 +368,10 @@ static int meson_probe_remote(struct platform_device *pdev,
667 + remote_node = of_graph_get_remote_port_parent(ep);
668 + if (!remote_node ||
669 + remote_node == parent || /* Ignore parent endpoint */
670 +- !of_device_is_available(remote_node))
671 ++ !of_device_is_available(remote_node)) {
672 ++ of_node_put(remote_node);
673 + continue;
674 ++ }
675 +
676 + count += meson_probe_remote(pdev, match, remote, remote_node);
677 +
678 +@@ -388,10 +390,13 @@ static int meson_drv_probe(struct platform_device *pdev)
679 +
680 + for_each_endpoint_of_node(np, ep) {
681 + remote = of_graph_get_remote_port_parent(ep);
682 +- if (!remote || !of_device_is_available(remote))
683 ++ if (!remote || !of_device_is_available(remote)) {
684 ++ of_node_put(remote);
685 + continue;
686 ++ }
687 +
688 + count += meson_probe_remote(pdev, &match, np, remote);
689 ++ of_node_put(remote);
690 + }
691 +
692 + if (count && !match)
693 +diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
694 +index dec1e081f5295..6a8fb6fd183c3 100644
695 +--- a/drivers/gpu/drm/radeon/radeon_kms.c
696 ++++ b/drivers/gpu/drm/radeon/radeon_kms.c
697 +@@ -172,6 +172,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
698 + }
699 +
700 + if (radeon_is_px(dev)) {
701 ++ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
702 + pm_runtime_use_autosuspend(dev->dev);
703 + pm_runtime_set_autosuspend_delay(dev->dev, 5000);
704 + pm_runtime_set_active(dev->dev);
705 +diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
706 +index d7950b52a1fd9..e30b1f5b9d91a 100644
707 +--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
708 ++++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
709 +@@ -717,17 +717,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
710 + remote = of_graph_get_remote_port_parent(ep);
711 + if (!remote)
712 + continue;
713 ++ of_node_put(remote);
714 +
715 + /* does this node match any registered engines? */
716 + list_for_each_entry(frontend, &drv->frontend_list, list) {
717 + if (remote == frontend->node) {
718 +- of_node_put(remote);
719 + of_node_put(port);
720 ++ of_node_put(ep);
721 + return frontend;
722 + }
723 + }
724 + }
725 +-
726 ++ of_node_put(port);
727 + return ERR_PTR(-EINVAL);
728 + }
729 +
730 +diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
731 +index e36399213324d..ceb3db6f3fdda 100644
732 +--- a/drivers/hwmon/tmp421.c
733 ++++ b/drivers/hwmon/tmp421.c
734 +@@ -88,7 +88,7 @@ static const struct of_device_id tmp421_of_match[] = {
735 + .data = (void *)2
736 + },
737 + {
738 +- .compatible = "ti,tmp422",
739 ++ .compatible = "ti,tmp442",
740 + .data = (void *)3
741 + },
742 + { },
743 +diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
744 +index 0d3473b4596e1..21f4239022c7a 100644
745 +--- a/drivers/infiniband/hw/mthca/mthca_provider.c
746 ++++ b/drivers/infiniband/hw/mthca/mthca_provider.c
747 +@@ -533,7 +533,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
748 + {
749 + struct mthca_ucontext *context;
750 +
751 +- qp = kmalloc(sizeof *qp, GFP_KERNEL);
752 ++ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
753 + if (!qp)
754 + return ERR_PTR(-ENOMEM);
755 +
756 +@@ -599,7 +599,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
757 + if (pd->uobject)
758 + return ERR_PTR(-EINVAL);
759 +
760 +- qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
761 ++ qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
762 + if (!qp)
763 + return ERR_PTR(-ENOMEM);
764 +
765 +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
766 +index 0b34e909505f5..2c1114ee0c6da 100644
767 +--- a/drivers/infiniband/ulp/srp/ib_srp.c
768 ++++ b/drivers/infiniband/ulp/srp/ib_srp.c
769 +@@ -2951,7 +2951,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
770 + {
771 + struct srp_target_port *target = host_to_target(scmnd->device->host);
772 + struct srp_rdma_ch *ch;
773 +- int i, j;
774 + u8 status;
775 +
776 + shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
777 +@@ -2963,15 +2962,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
778 + if (status)
779 + return FAILED;
780 +
781 +- for (i = 0; i < target->ch_count; i++) {
782 +- ch = &target->ch[i];
783 +- for (j = 0; j < target->req_ring_size; ++j) {
784 +- struct srp_request *req = &ch->req_ring[j];
785 +-
786 +- srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
787 +- }
788 +- }
789 +-
790 + return SUCCESS;
791 + }
792 +
793 +diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
794 +index 4ac378e489023..40ca1e8fa09fc 100644
795 +--- a/drivers/isdn/hardware/avm/b1.c
796 ++++ b/drivers/isdn/hardware/avm/b1.c
797 +@@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo)
798 + int i, j;
799 +
800 + for (j = 0; j < AVM_MAXVERSION; j++)
801 +- cinfo->version[j] = "\0\0" + 1;
802 ++ cinfo->version[j] = "";
803 + for (i = 0, j = 0;
804 + j < AVM_MAXVERSION && i < cinfo->versionlen;
805 + j++, i += cinfo->versionbuf[i] + 1)
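
The b1.c hunk de-obfuscates a pointer expression: '"\0\0" + 1' points one byte into a two-NUL literal, which is simply the empty string. A two-line demonstration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *odd   = "\0\0" + 1;  /* one byte into {0, 0, 0} */
        const char *plain = "";
        printf("equal? %d (len %zu)\n", strcmp(odd, plain) == 0, strlen(odd));
        return 0;
    }
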
806 +diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
807 +index b730037a0e2d3..9cff667b2d245 100644
808 +--- a/drivers/isdn/i4l/isdn_tty.c
809 ++++ b/drivers/isdn/i4l/isdn_tty.c
810 +@@ -1456,15 +1456,19 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
811 + {
812 + modem_info *info = (modem_info *) tty->driver_data;
813 +
814 ++ mutex_lock(&modem_info_mutex);
815 + if (!old_termios)
816 + isdn_tty_change_speed(info);
817 + else {
818 + if (tty->termios.c_cflag == old_termios->c_cflag &&
819 + tty->termios.c_ispeed == old_termios->c_ispeed &&
820 +- tty->termios.c_ospeed == old_termios->c_ospeed)
821 ++ tty->termios.c_ospeed == old_termios->c_ospeed) {
822 ++ mutex_unlock(&modem_info_mutex);
823 + return;
824 ++ }
825 + isdn_tty_change_speed(info);
826 + }
827 ++ mutex_unlock(&modem_info_mutex);
828 + }
829 +
830 + /*
831 +diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
832 +index a2e74feee2b2f..fd64df5a57a5e 100644
833 +--- a/drivers/leds/leds-lp5523.c
834 ++++ b/drivers/leds/leds-lp5523.c
835 +@@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
836 +
837 + 	/* Let the programs run for a couple of ms and check the engine status */
838 + usleep_range(3000, 6000);
839 +- lp55xx_read(chip, LP5523_REG_STATUS, &status);
840 ++ ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
841 ++ if (ret)
842 ++ return ret;
843 + status &= LP5523_ENG_STATUS_MASK;
844 +
845 + if (status != LP5523_ENG_STATUS_MASK) {
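
The lp5523 fix is the standard pattern of not consuming an output parameter when the producing call failed. A hosted sketch with a fake register read (every name here is hypothetical; 0x07 stands in for LP5523_ENG_STATUS_MASK):

    #include <errno.h>
    #include <stdio.h>

    static int fake_read(int fail, unsigned char *out)
    {
        if (fail)
            return -EIO;
        *out = 0x07;
        return 0;
    }

    static int check_engine(int fail)
    {
        unsigned char status;
        int ret = fake_read(fail, &status);

        if (ret)
            return ret;  /* propagate; never inspect uninitialized 'status' */
        return (status & 0x07) == 0x07 ? 0 : -1;
    }

    int main(void)
    {
        printf("%d %d\n", check_engine(0), check_engine(1));
        return 0;
    }
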
846 +diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
847 +index 30d09d1771717..11ab17f64c649 100644
848 +--- a/drivers/mfd/ab8500-core.c
849 ++++ b/drivers/mfd/ab8500-core.c
850 +@@ -261,7 +261,7 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
851 + mutex_unlock(&ab8500->lock);
852 + dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret);
853 +
854 +- return ret;
855 ++ return (ret < 0) ? ret : 0;
856 + }
857 +
858 + static int ab8500_get_register(struct device *dev, u8 bank,
859 +diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
860 +index 0be511dd93d01..f8e0fa97bb31e 100644
861 +--- a/drivers/mfd/axp20x.c
862 ++++ b/drivers/mfd/axp20x.c
863 +@@ -640,9 +640,9 @@ static const struct mfd_cell axp221_cells[] = {
864 +
865 + static const struct mfd_cell axp223_cells[] = {
866 + {
867 +- .name = "axp221-pek",
868 +- .num_resources = ARRAY_SIZE(axp22x_pek_resources),
869 +- .resources = axp22x_pek_resources,
870 ++ .name = "axp221-pek",
871 ++ .num_resources = ARRAY_SIZE(axp22x_pek_resources),
872 ++ .resources = axp22x_pek_resources,
873 + }, {
874 + .name = "axp22x-adc",
875 + .of_compatible = "x-powers,axp221-adc",
876 +@@ -650,7 +650,7 @@ static const struct mfd_cell axp223_cells[] = {
877 + .name = "axp20x-battery-power-supply",
878 + .of_compatible = "x-powers,axp221-battery-power-supply",
879 + }, {
880 +- .name = "axp20x-regulator",
881 ++ .name = "axp20x-regulator",
882 + }, {
883 + .name = "axp20x-ac-power-supply",
884 + .of_compatible = "x-powers,axp221-ac-power-supply",
885 +@@ -666,9 +666,9 @@ static const struct mfd_cell axp223_cells[] = {
886 +
887 + static const struct mfd_cell axp152_cells[] = {
888 + {
889 +- .name = "axp20x-pek",
890 +- .num_resources = ARRAY_SIZE(axp152_pek_resources),
891 +- .resources = axp152_pek_resources,
892 ++ .name = "axp20x-pek",
893 ++ .num_resources = ARRAY_SIZE(axp152_pek_resources),
894 ++ .resources = axp152_pek_resources,
895 + },
896 + };
897 +
898 +@@ -697,87 +697,101 @@ static const struct resource axp288_charger_resources[] = {
899 +
900 + static const struct mfd_cell axp288_cells[] = {
901 + {
902 +- .name = "axp288_adc",
903 +- .num_resources = ARRAY_SIZE(axp288_adc_resources),
904 +- .resources = axp288_adc_resources,
905 +- },
906 +- {
907 +- .name = "axp288_extcon",
908 +- .num_resources = ARRAY_SIZE(axp288_extcon_resources),
909 +- .resources = axp288_extcon_resources,
910 +- },
911 +- {
912 +- .name = "axp288_charger",
913 +- .num_resources = ARRAY_SIZE(axp288_charger_resources),
914 +- .resources = axp288_charger_resources,
915 +- },
916 +- {
917 +- .name = "axp288_fuel_gauge",
918 +- .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
919 +- .resources = axp288_fuel_gauge_resources,
920 +- },
921 +- {
922 +- .name = "axp221-pek",
923 +- .num_resources = ARRAY_SIZE(axp288_power_button_resources),
924 +- .resources = axp288_power_button_resources,
925 +- },
926 +- {
927 +- .name = "axp288_pmic_acpi",
928 ++ .name = "axp288_adc",
929 ++ .num_resources = ARRAY_SIZE(axp288_adc_resources),
930 ++ .resources = axp288_adc_resources,
931 ++ }, {
932 ++ .name = "axp288_extcon",
933 ++ .num_resources = ARRAY_SIZE(axp288_extcon_resources),
934 ++ .resources = axp288_extcon_resources,
935 ++ }, {
936 ++ .name = "axp288_charger",
937 ++ .num_resources = ARRAY_SIZE(axp288_charger_resources),
938 ++ .resources = axp288_charger_resources,
939 ++ }, {
940 ++ .name = "axp288_fuel_gauge",
941 ++ .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
942 ++ .resources = axp288_fuel_gauge_resources,
943 ++ }, {
944 ++ .name = "axp221-pek",
945 ++ .num_resources = ARRAY_SIZE(axp288_power_button_resources),
946 ++ .resources = axp288_power_button_resources,
947 ++ }, {
948 ++ .name = "axp288_pmic_acpi",
949 + },
950 + };
951 +
952 + static const struct mfd_cell axp803_cells[] = {
953 + {
954 +- .name = "axp221-pek",
955 +- .num_resources = ARRAY_SIZE(axp803_pek_resources),
956 +- .resources = axp803_pek_resources,
957 ++ .name = "axp221-pek",
958 ++ .num_resources = ARRAY_SIZE(axp803_pek_resources),
959 ++ .resources = axp803_pek_resources,
960 ++ }, {
961 ++ .name = "axp20x-gpio",
962 ++ .of_compatible = "x-powers,axp813-gpio",
963 ++ }, {
964 ++ .name = "axp813-adc",
965 ++ .of_compatible = "x-powers,axp813-adc",
966 ++ }, {
967 ++ .name = "axp20x-battery-power-supply",
968 ++ .of_compatible = "x-powers,axp813-battery-power-supply",
969 ++ }, {
970 ++ .name = "axp20x-ac-power-supply",
971 ++ .of_compatible = "x-powers,axp813-ac-power-supply",
972 ++ .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
973 ++ .resources = axp20x_ac_power_supply_resources,
974 + },
975 +- { .name = "axp20x-regulator" },
976 ++ { .name = "axp20x-regulator" },
977 + };
978 +
979 + static const struct mfd_cell axp806_self_working_cells[] = {
980 + {
981 +- .name = "axp221-pek",
982 +- .num_resources = ARRAY_SIZE(axp806_pek_resources),
983 +- .resources = axp806_pek_resources,
984 ++ .name = "axp221-pek",
985 ++ .num_resources = ARRAY_SIZE(axp806_pek_resources),
986 ++ .resources = axp806_pek_resources,
987 + },
988 +- { .name = "axp20x-regulator" },
989 ++ { .name = "axp20x-regulator" },
990 + };
991 +
992 + static const struct mfd_cell axp806_cells[] = {
993 + {
994 +- .id = 2,
995 +- .name = "axp20x-regulator",
996 ++ .id = 2,
997 ++ .name = "axp20x-regulator",
998 + },
999 + };
1000 +
1001 + static const struct mfd_cell axp809_cells[] = {
1002 + {
1003 +- .name = "axp221-pek",
1004 +- .num_resources = ARRAY_SIZE(axp809_pek_resources),
1005 +- .resources = axp809_pek_resources,
1006 ++ .name = "axp221-pek",
1007 ++ .num_resources = ARRAY_SIZE(axp809_pek_resources),
1008 ++ .resources = axp809_pek_resources,
1009 + }, {
1010 +- .id = 1,
1011 +- .name = "axp20x-regulator",
1012 ++ .id = 1,
1013 ++ .name = "axp20x-regulator",
1014 + },
1015 + };
1016 +
1017 + static const struct mfd_cell axp813_cells[] = {
1018 + {
1019 +- .name = "axp221-pek",
1020 +- .num_resources = ARRAY_SIZE(axp803_pek_resources),
1021 +- .resources = axp803_pek_resources,
1022 ++ .name = "axp221-pek",
1023 ++ .num_resources = ARRAY_SIZE(axp803_pek_resources),
1024 ++ .resources = axp803_pek_resources,
1025 + }, {
1026 +- .name = "axp20x-regulator",
1027 ++ .name = "axp20x-regulator",
1028 + }, {
1029 +- .name = "axp20x-gpio",
1030 +- .of_compatible = "x-powers,axp813-gpio",
1031 ++ .name = "axp20x-gpio",
1032 ++ .of_compatible = "x-powers,axp813-gpio",
1033 + }, {
1034 +- .name = "axp813-adc",
1035 +- .of_compatible = "x-powers,axp813-adc",
1036 ++ .name = "axp813-adc",
1037 ++ .of_compatible = "x-powers,axp813-adc",
1038 + }, {
1039 + .name = "axp20x-battery-power-supply",
1040 + .of_compatible = "x-powers,axp813-battery-power-supply",
1041 ++ }, {
1042 ++ .name = "axp20x-ac-power-supply",
1043 ++ .of_compatible = "x-powers,axp813-ac-power-supply",
1044 ++ .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
1045 ++ .resources = axp20x_ac_power_supply_resources,
1046 + },
1047 + };
1048 +
1049 +diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
1050 +index 503979c81dae1..fab3cdc27ed64 100644
1051 +--- a/drivers/mfd/bd9571mwv.c
1052 ++++ b/drivers/mfd/bd9571mwv.c
1053 +@@ -59,6 +59,7 @@ static const struct regmap_access_table bd9571mwv_writable_table = {
1054 + };
1055 +
1056 + static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = {
1057 ++ regmap_reg_range(BD9571MWV_DVFS_MONIVDAC, BD9571MWV_DVFS_MONIVDAC),
1058 + regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN),
1059 + regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT),
1060 + regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ),
1061 +diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
1062 +index 6b22d54a540d1..bccde3eac92ca 100644
1063 +--- a/drivers/mfd/cros_ec_dev.c
1064 ++++ b/drivers/mfd/cros_ec_dev.c
1065 +@@ -499,6 +499,7 @@ static int ec_device_remove(struct platform_device *pdev)
1066 +
1067 + cros_ec_debugfs_remove(ec);
1068 +
1069 ++ mfd_remove_devices(ec->dev);
1070 + cdev_del(&ec->cdev);
1071 + device_unregister(&ec->class_dev);
1072 + return 0;
1073 +diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
1074 +index 5970b8def5487..aec20e1c7d3d5 100644
1075 +--- a/drivers/mfd/db8500-prcmu.c
1076 ++++ b/drivers/mfd/db8500-prcmu.c
1077 +@@ -2584,7 +2584,7 @@ static struct irq_chip prcmu_irq_chip = {
1078 + .irq_unmask = prcmu_irq_unmask,
1079 + };
1080 +
1081 +-static __init char *fw_project_name(u32 project)
1082 ++static char *fw_project_name(u32 project)
1083 + {
1084 + switch (project) {
1085 + case PRCMU_FW_PROJECT_U8500:
1086 +@@ -2732,7 +2732,7 @@ void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
1087 + INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
1088 + }
1089 +
1090 +-static void __init init_prcm_registers(void)
1091 ++static void init_prcm_registers(void)
1092 + {
1093 + u32 val;
1094 +
1095 +diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
1096 +index c63e331738c17..234febfe6398b 100644
1097 +--- a/drivers/mfd/mc13xxx-core.c
1098 ++++ b/drivers/mfd/mc13xxx-core.c
1099 +@@ -274,7 +274,9 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
1100 +
1101 + mc13xxx->adcflags |= MC13XXX_ADC_WORKING;
1102 +
1103 +- mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
1104 ++ ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
1105 ++ if (ret)
1106 ++ goto out;
1107 +
1108 + adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2;
1109 + adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC;
1110 +diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
1111 +index 77b64bd64df36..ab24e176ef448 100644
1112 +--- a/drivers/mfd/mt6397-core.c
1113 ++++ b/drivers/mfd/mt6397-core.c
1114 +@@ -329,8 +329,7 @@ static int mt6397_probe(struct platform_device *pdev)
1115 +
1116 + default:
1117 + dev_err(&pdev->dev, "unsupported chip: %d\n", id);
1118 +- ret = -ENODEV;
1119 +- break;
1120 ++ return -ENODEV;
1121 + }
1122 +
1123 + if (ret) {
1124 +diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
1125 +index 52fafea06067e..8d420c37b2a61 100644
1126 +--- a/drivers/mfd/qcom_rpm.c
1127 ++++ b/drivers/mfd/qcom_rpm.c
1128 +@@ -638,6 +638,10 @@ static int qcom_rpm_probe(struct platform_device *pdev)
1129 + return -EFAULT;
1130 + }
1131 +
1132 ++ writel(fw_version[0], RPM_CTRL_REG(rpm, 0));
1133 ++ writel(fw_version[1], RPM_CTRL_REG(rpm, 1));
1134 ++ writel(fw_version[2], RPM_CTRL_REG(rpm, 2));
1135 ++
1136 + dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0],
1137 + fw_version[1],
1138 + fw_version[2]);
1139 +diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
1140 +index 7a30546880a42..fe8d335a4d74d 100644
1141 +--- a/drivers/mfd/ti_am335x_tscadc.c
1142 ++++ b/drivers/mfd/ti_am335x_tscadc.c
1143 +@@ -264,8 +264,9 @@ static int ti_tscadc_probe(struct platform_device *pdev)
1144 + cell->pdata_size = sizeof(tscadc);
1145 + }
1146 +
1147 +- err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells,
1148 +- tscadc->used_cells, NULL, 0, NULL);
1149 ++ err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
1150 ++ tscadc->cells, tscadc->used_cells, NULL,
1151 ++ 0, NULL);
1152 + if (err < 0)
1153 + goto err_disable_clk;
1154 +
1155 +diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
1156 +index 910f569ff77c1..8bcdecf494d05 100644
1157 +--- a/drivers/mfd/tps65218.c
1158 ++++ b/drivers/mfd/tps65218.c
1159 +@@ -235,9 +235,9 @@ static int tps65218_probe(struct i2c_client *client,
1160 +
1161 + mutex_init(&tps->tps_lock);
1162 +
1163 +- ret = regmap_add_irq_chip(tps->regmap, tps->irq,
1164 +- IRQF_ONESHOT, 0, &tps65218_irq_chip,
1165 +- &tps->irq_data);
1166 ++ ret = devm_regmap_add_irq_chip(&client->dev, tps->regmap, tps->irq,
1167 ++ IRQF_ONESHOT, 0, &tps65218_irq_chip,
1168 ++ &tps->irq_data);
1169 + if (ret < 0)
1170 + return ret;
1171 +
1172 +@@ -253,26 +253,9 @@ static int tps65218_probe(struct i2c_client *client,
1173 + ARRAY_SIZE(tps65218_cells), NULL, 0,
1174 + regmap_irq_get_domain(tps->irq_data));
1175 +
1176 +- if (ret < 0)
1177 +- goto err_irq;
1178 +-
1179 +- return 0;
1180 +-
1181 +-err_irq:
1182 +- regmap_del_irq_chip(tps->irq, tps->irq_data);
1183 +-
1184 + return ret;
1185 + }
1186 +
1187 +-static int tps65218_remove(struct i2c_client *client)
1188 +-{
1189 +- struct tps65218 *tps = i2c_get_clientdata(client);
1190 +-
1191 +- regmap_del_irq_chip(tps->irq, tps->irq_data);
1192 +-
1193 +- return 0;
1194 +-}
1195 +-
1196 + static const struct i2c_device_id tps65218_id_table[] = {
1197 + { "tps65218", TPS65218 },
1198 + { },
1199 +@@ -285,7 +268,6 @@ static struct i2c_driver tps65218_driver = {
1200 + .of_match_table = of_tps65218_match_table,
1201 + },
1202 + .probe = tps65218_probe,
1203 +- .remove = tps65218_remove,
1204 + .id_table = tps65218_id_table,
1205 + };
1206 +
1207 +diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
1208 +index 4be3d239da9ec..299016bc46d90 100644
1209 +--- a/drivers/mfd/twl-core.c
1210 ++++ b/drivers/mfd/twl-core.c
1211 +@@ -979,7 +979,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
1212 + * letting it generate the right frequencies for USB, MADC, and
1213 + * other purposes.
1214 + */
1215 +-static inline int __init protect_pm_master(void)
1216 ++static inline int protect_pm_master(void)
1217 + {
1218 + int e = 0;
1219 +
1220 +@@ -988,7 +988,7 @@ static inline int __init protect_pm_master(void)
1221 + return e;
1222 + }
1223 +
1224 +-static inline int __init unprotect_pm_master(void)
1225 ++static inline int unprotect_pm_master(void)
1226 + {
1227 + int e = 0;
1228 +
1229 +diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
1230 +index 1ee68bd440fbc..16c6e2accfaa5 100644
1231 +--- a/drivers/mfd/wm5110-tables.c
1232 ++++ b/drivers/mfd/wm5110-tables.c
1233 +@@ -1618,6 +1618,7 @@ static const struct reg_default wm5110_reg_default[] = {
1234 + { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
1235 + { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
1236 + { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
1237 ++ { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
1238 + { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
1239 + { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
1240 + { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
1241 +@@ -2869,6 +2870,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
1242 + case ARIZONA_ASRC_ENABLE:
1243 + case ARIZONA_ASRC_STATUS:
1244 + case ARIZONA_ASRC_RATE1:
1245 ++ case ARIZONA_ASRC_RATE2:
1246 + case ARIZONA_ISRC_1_CTRL_1:
1247 + case ARIZONA_ISRC_1_CTRL_2:
1248 + case ARIZONA_ISRC_1_CTRL_3:
1249 +diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1250 +index 4b73131a0f206..1b5f591cf0a23 100644
1251 +--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
1252 ++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1253 +@@ -2595,11 +2595,6 @@ static int ena_restore_device(struct ena_adapter *adapter)
1254 + goto err_device_destroy;
1255 + }
1256 +
1257 +- clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
1258 +- /* Make sure we don't have a race with AENQ Links state handler */
1259 +- if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
1260 +- netif_carrier_on(adapter->netdev);
1261 +-
1262 + rc = ena_enable_msix_and_set_admin_interrupts(adapter,
1263 + adapter->num_queues);
1264 + if (rc) {
1265 +@@ -2616,6 +2611,11 @@ static int ena_restore_device(struct ena_adapter *adapter)
1266 + }
1267 +
1268 + set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
1269 ++
1270 ++ clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
1271 ++ if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
1272 ++ netif_carrier_on(adapter->netdev);
1273 ++
1274 + mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
1275 + dev_err(&pdev->dev, "Device reset completed successfully\n");
1276 +
1277 +diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1278 +index 65a22cd9aef26..029730bbe7db1 100644
1279 +--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1280 ++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1281 +@@ -2052,6 +2052,7 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1282 + bool nonlinear = skb_is_nonlinear(skb);
1283 + struct rtnl_link_stats64 *percpu_stats;
1284 + struct dpaa_percpu_priv *percpu_priv;
1285 ++ struct netdev_queue *txq;
1286 + struct dpaa_priv *priv;
1287 + struct qm_fd fd;
1288 + int offset = 0;
1289 +@@ -2101,6 +2102,11 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1290 + if (unlikely(err < 0))
1291 + goto skb_to_fd_failed;
1292 +
1293 ++ txq = netdev_get_tx_queue(net_dev, queue_mapping);
1294 ++
1295 ++	/* LLTX requires us to do our own update of trans_start */
1296 ++ txq->trans_start = jiffies;
1297 ++
1298 + if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1299 + fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
1300 + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1301 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
1302 +index ad1779fc410e6..a78bfafd212c8 100644
1303 +--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
1304 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
1305 +@@ -147,12 +147,10 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
1306 + struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
1307 + int i;
1308 +
1309 +- vf_cb->mac_cb = NULL;
1310 +-
1311 +- kfree(vf_cb);
1312 +-
1313 + for (i = 0; i < handle->q_num; i++)
1314 + hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
1315 ++
1316 ++ kfree(vf_cb);
1317 + }
1318 +
1319 + static int hns_ae_wait_flow_down(struct hnae_handle *handle)
1320 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1321 +index a1aeeb8094c37..f5cd9539980f8 100644
1322 +--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1323 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1324 +@@ -620,6 +620,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
1325 + }
1326 + #endif
1327 +
1328 ++#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
1329 ++
1330 + /* We reach this function only after checking that any of
1331 + * the (IPv4 | IPv6) bits are set in cqe->status.
1332 + */
1333 +@@ -627,9 +629,20 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
1334 + netdev_features_t dev_features)
1335 + {
1336 + __wsum hw_checksum = 0;
1337 ++ void *hdr;
1338 ++
1339 ++	/* CQE csum doesn't cover padding octets in short Ethernet
1340 ++	 * frames, and the pad field is appended prior to calculating
1341 ++	 * and appending the FCS field.
1342 ++	 *
1343 ++	 * Detecting these padded frames requires verifying and parsing
1344 ++	 * IP headers, so we simply force all those small frames to skip
1345 ++	 * checksum complete.
1346 ++ */
1347 ++ if (short_frame(skb->len))
1348 ++ return -EINVAL;
1349 +
1350 +- void *hdr = (u8 *)va + sizeof(struct ethhdr);
1351 +-
1352 ++ hdr = (u8 *)va + sizeof(struct ethhdr);
1353 + hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
1354 +
1355 + if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
1356 +@@ -822,6 +835,11 @@ xdp_drop_no_cnt:
1357 + skb_record_rx_queue(skb, cq_ring);
1358 +
1359 + if (likely(dev->features & NETIF_F_RXCSUM)) {
1360 ++		/* TODO: For IP non-TCP/UDP packets, when csum complete is
1361 ++		 * not an option (not supported or for any other reason), we can
1362 ++		 * actually check the cqe IPOK status bit and report
1363 ++ * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
1364 ++ */
1365 + if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
1366 + MLX4_CQE_STATUS_UDP)) &&
1367 + (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
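
The short_frame() threshold above comes straight from Ethernet framing: frames carry at least ETH_ZLEN (60) octets before the 4-octet FCS, so anything at or below 64 octets may include pad bytes that the CQE checksum already summed. A quick check of the boundary:

    #include <stdio.h>

    #define ETH_ZLEN     60  /* minimum octets in a frame, sans FCS */
    #define ETH_FCS_LEN   4  /* frame check sequence length */
    #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)

    int main(void)
    {
        for (int len = 60; len <= 66; len += 2)
            printf("len %d: %s checksum complete\n", len,
                   short_frame(len) ? "skip" : "allow");
        return 0;
    }
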
1368 +diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
1369 +index 7262c6310650e..288fca826a55c 100644
1370 +--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
1371 ++++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
1372 +@@ -57,12 +57,12 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
1373 + int i;
1374 +
1375 + if (chunk->nsg > 0)
1376 +- pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
1377 ++ pci_unmap_sg(dev->persist->pdev, chunk->sg, chunk->npages,
1378 + PCI_DMA_BIDIRECTIONAL);
1379 +
1380 + for (i = 0; i < chunk->npages; ++i)
1381 +- __free_pages(sg_page(&chunk->mem[i]),
1382 +- get_order(chunk->mem[i].length));
1383 ++ __free_pages(sg_page(&chunk->sg[i]),
1384 ++ get_order(chunk->sg[i].length));
1385 + }
1386 +
1387 + static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
1388 +@@ -71,9 +71,9 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
1389 +
1390 + for (i = 0; i < chunk->npages; ++i)
1391 + dma_free_coherent(&dev->persist->pdev->dev,
1392 +- chunk->mem[i].length,
1393 +- lowmem_page_address(sg_page(&chunk->mem[i])),
1394 +- sg_dma_address(&chunk->mem[i]));
1395 ++ chunk->buf[i].size,
1396 ++ chunk->buf[i].addr,
1397 ++ chunk->buf[i].dma_addr);
1398 + }
1399 +
1400 + void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
1401 +@@ -111,22 +111,21 @@ static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
1402 + return 0;
1403 + }
1404 +
1405 +-static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
1406 +- int order, gfp_t gfp_mask)
1407 ++static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf,
1408 ++ int order, gfp_t gfp_mask)
1409 + {
1410 +- void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
1411 +- &sg_dma_address(mem), gfp_mask);
1412 +- if (!buf)
1413 ++ buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order,
1414 ++ &buf->dma_addr, gfp_mask);
1415 ++ if (!buf->addr)
1416 + return -ENOMEM;
1417 +
1418 +- if (offset_in_page(buf)) {
1419 +- dma_free_coherent(dev, PAGE_SIZE << order,
1420 +- buf, sg_dma_address(mem));
1421 ++ if (offset_in_page(buf->addr)) {
1422 ++ dma_free_coherent(dev, PAGE_SIZE << order, buf->addr,
1423 ++ buf->dma_addr);
1424 + return -ENOMEM;
1425 + }
1426 +
1427 +- sg_set_buf(mem, buf, PAGE_SIZE << order);
1428 +- sg_dma_len(mem) = PAGE_SIZE << order;
1429 ++ buf->size = PAGE_SIZE << order;
1430 + return 0;
1431 + }
1432 +
1433 +@@ -159,21 +158,21 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
1434 +
1435 + while (npages > 0) {
1436 + if (!chunk) {
1437 +- chunk = kmalloc_node(sizeof(*chunk),
1438 ++ chunk = kzalloc_node(sizeof(*chunk),
1439 + gfp_mask & ~(__GFP_HIGHMEM |
1440 + __GFP_NOWARN),
1441 + dev->numa_node);
1442 + if (!chunk) {
1443 +- chunk = kmalloc(sizeof(*chunk),
1444 ++ chunk = kzalloc(sizeof(*chunk),
1445 + gfp_mask & ~(__GFP_HIGHMEM |
1446 + __GFP_NOWARN));
1447 + if (!chunk)
1448 + goto fail;
1449 + }
1450 ++ chunk->coherent = coherent;
1451 +
1452 +- sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
1453 +- chunk->npages = 0;
1454 +- chunk->nsg = 0;
1455 ++ if (!coherent)
1456 ++ sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN);
1457 + list_add_tail(&chunk->list, &icm->chunk_list);
1458 + }
1459 +
1460 +@@ -186,10 +185,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
1461 +
1462 + if (coherent)
1463 + ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
1464 +- &chunk->mem[chunk->npages],
1465 +- cur_order, mask);
1466 ++ &chunk->buf[chunk->npages],
1467 ++ cur_order, mask);
1468 + else
1469 +- ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
1470 ++ ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],
1471 + cur_order, mask,
1472 + dev->numa_node);
1473 +
1474 +@@ -205,7 +204,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
1475 + if (coherent)
1476 + ++chunk->nsg;
1477 + else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
1478 +- chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
1479 ++ chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
1480 + chunk->npages,
1481 + PCI_DMA_BIDIRECTIONAL);
1482 +
1483 +@@ -220,7 +219,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
1484 + }
1485 +
1486 + if (!coherent && chunk) {
1487 +- chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
1488 ++ chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
1489 + chunk->npages,
1490 + PCI_DMA_BIDIRECTIONAL);
1491 +
1492 +@@ -320,7 +319,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
1493 + u64 idx;
1494 + struct mlx4_icm_chunk *chunk;
1495 + struct mlx4_icm *icm;
1496 +- struct page *page = NULL;
1497 ++ void *addr = NULL;
1498 +
1499 + if (!table->lowmem)
1500 + return NULL;
1501 +@@ -336,28 +335,49 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
1502 +
1503 + list_for_each_entry(chunk, &icm->chunk_list, list) {
1504 + for (i = 0; i < chunk->npages; ++i) {
1505 ++ dma_addr_t dma_addr;
1506 ++ size_t len;
1507 ++
1508 ++ if (table->coherent) {
1509 ++ len = chunk->buf[i].size;
1510 ++ dma_addr = chunk->buf[i].dma_addr;
1511 ++ addr = chunk->buf[i].addr;
1512 ++ } else {
1513 ++ struct page *page;
1514 ++
1515 ++ len = sg_dma_len(&chunk->sg[i]);
1516 ++ dma_addr = sg_dma_address(&chunk->sg[i]);
1517 ++
1518 ++ /* XXX: we should never do this for highmem
1519 ++ * allocation. This function either needs
1520 ++ * to be split, or the kernel virtual address
1521 ++ * return needs to be made optional.
1522 ++ */
1523 ++ page = sg_page(&chunk->sg[i]);
1524 ++ addr = lowmem_page_address(page);
1525 ++ }
1526 ++
1527 + if (dma_handle && dma_offset >= 0) {
1528 +- if (sg_dma_len(&chunk->mem[i]) > dma_offset)
1529 +- *dma_handle = sg_dma_address(&chunk->mem[i]) +
1530 +- dma_offset;
1531 +- dma_offset -= sg_dma_len(&chunk->mem[i]);
1532 ++ if (len > dma_offset)
1533 ++ *dma_handle = dma_addr + dma_offset;
1534 ++ dma_offset -= len;
1535 + }
1536 ++
1537 + /*
1538 + * DMA mapping can merge pages but not split them,
1539 + * so if we found the page, dma_handle has already
1540 + * been assigned to.
1541 + */
1542 +- if (chunk->mem[i].length > offset) {
1543 +- page = sg_page(&chunk->mem[i]);
1544 ++ if (len > offset)
1545 + goto out;
1546 +- }
1547 +- offset -= chunk->mem[i].length;
1548 ++ offset -= len;
1549 + }
1550 + }
1551 +
1552 ++ addr = NULL;
1553 + out:
1554 + mutex_unlock(&table->mutex);
1555 +- return page ? lowmem_page_address(page) + offset : NULL;
1556 ++ return addr ? addr + offset : NULL;
1557 + }
1558 +
1559 + int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
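
The rewritten lookup above walks every page of every chunk, subtracting each entry's length from the requested offset until the offset falls inside one entry; only then is that entry's address (coherent kernel address or lowmem page address) combined with the residual offset. A rough standalone C model of the walk, with hypothetical names:

#include <stddef.h>

/* One table entry: a base address plus a length, standing in for either
 * a coherent buffer or a mapped scatterlist entry. */
struct seg {
    char *addr;
    size_t len;
};

/* Walk variable-length entries until 'offset' lands inside one, the same
 * loop structure as the rewritten mlx4_table_find(). */
char *find_addr(const struct seg *segs, int n, size_t offset)
{
    for (int i = 0; i < n; i++) {
        if (segs[i].len > offset)
            return segs[i].addr + offset;  /* found the owning entry */
        offset -= segs[i].len;             /* skip past this entry */
    }
    return NULL;                           /* offset beyond the table */
}
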
1560 +diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
1561 +index c9169a490557c..d199874b1c074 100644
1562 +--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
1563 ++++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
1564 +@@ -47,11 +47,21 @@ enum {
1565 + MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT,
1566 + };
1567 +
1568 ++struct mlx4_icm_buf {
1569 ++ void *addr;
1570 ++ size_t size;
1571 ++ dma_addr_t dma_addr;
1572 ++};
1573 ++
1574 + struct mlx4_icm_chunk {
1575 + struct list_head list;
1576 + int npages;
1577 + int nsg;
1578 +- struct scatterlist mem[MLX4_ICM_CHUNK_LEN];
1579 ++ bool coherent;
1580 ++ union {
1581 ++ struct scatterlist sg[MLX4_ICM_CHUNK_LEN];
1582 ++ struct mlx4_icm_buf buf[MLX4_ICM_CHUNK_LEN];
1583 ++ };
1584 + };
1585 +
1586 + struct mlx4_icm {
1587 +@@ -114,12 +124,18 @@ static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
1588 +
1589 + static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
1590 + {
1591 +- return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
1592 ++ if (iter->chunk->coherent)
1593 ++ return iter->chunk->buf[iter->page_idx].dma_addr;
1594 ++ else
1595 ++ return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
1596 + }
1597 +
1598 + static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
1599 + {
1600 +- return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
1601 ++ if (iter->chunk->coherent)
1602 ++ return iter->chunk->buf[iter->page_idx].size;
1603 ++ else
1604 ++ return sg_dma_len(&iter->chunk->sg[iter->page_idx]);
1605 + }
1606 +
1607 + int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
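
The icm.h hunk above turns the per-chunk storage into a discriminated union: exactly one of the two arrays is live, selected by the new 'coherent' flag, and the iterator helpers now branch on that flag. A compact standalone C model of the pattern (array size and field names simplified):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct buf_ent { void *addr; size_t size; uint64_t dma_addr; };
struct sg_ent  { uint64_t dma_address; unsigned int dma_length; };

/* Exactly one array is live per chunk; the flag is the discriminant,
 * and the union keeps the struct no larger than its biggest member. */
struct chunk {
    bool coherent;
    union {
        struct sg_ent  sg[8];
        struct buf_ent buf[8];
    };
};

/* Every accessor tests the discriminant first, as the reworked
 * mlx4_icm_addr()/mlx4_icm_size() helpers do. */
uint64_t chunk_dma_addr(const struct chunk *c, int i)
{
    return c->coherent ? c->buf[i].dma_addr : c->sg[i].dma_address;
}
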
1608 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
1609 +index 16ceeb1b2c9d8..da52e60d4437c 100644
1610 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
1611 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
1612 +@@ -633,6 +633,7 @@ enum {
1613 + MLX5E_STATE_ASYNC_EVENTS_ENABLED,
1614 + MLX5E_STATE_OPENED,
1615 + MLX5E_STATE_DESTROYING,
1616 ++ MLX5E_STATE_XDP_TX_ENABLED,
1617 + };
1618 +
1619 + struct mlx5e_rqt {
1620 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
1621 +index ad6d471d00dd4..4a33c9a7cac7e 100644
1622 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
1623 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
1624 +@@ -262,7 +262,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1625 + int sq_num;
1626 + int i;
1627 +
1628 +- if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state)))
1629 ++ /* this flag is sufficient, no need to test internal sq state */
1630 ++ if (unlikely(!mlx5e_xdp_tx_is_enabled(priv)))
1631 + return -ENETDOWN;
1632 +
1633 + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1634 +@@ -275,9 +276,6 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1635 +
1636 + sq = &priv->channels.c[sq_num]->xdpsq;
1637 +
1638 +- if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
1639 +- return -ENETDOWN;
1640 +-
1641 + for (i = 0; i < n; i++) {
1642 + struct xdp_frame *xdpf = frames[i];
1643 + struct mlx5e_xdp_info xdpi;
1644 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
1645 +index 6dfab045925f0..4d096623178b9 100644
1646 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
1647 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
1648 +@@ -49,6 +49,23 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi);
1649 + int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1650 + u32 flags);
1651 +
1652 ++static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
1653 ++{
1654 ++ set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
1655 ++}
1656 ++
1657 ++static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
1658 ++{
1659 ++ clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
1660 ++ /* let other devices' NAPI(s) see our new state */

1661 ++ synchronize_rcu();
1662 ++}
1663 ++
1664 ++static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
1665 ++{
1666 ++ return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
1667 ++}
1668 ++
1669 + static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
1670 + {
1671 + struct mlx5_wq_cyc *wq = &sq->wq;
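
The helpers above implement a common quiesce pattern: the datapath checks a single state bit, and the teardown path clears the bit and then waits for all concurrent readers to drain before the queues are torn down. A condensed kernel-style sketch of that shape (the bit index 0 is a placeholder for MLX5E_STATE_XDP_TX_ENABLED):

#include <linux/bitops.h>
#include <linux/rcupdate.h>

/* Teardown side: clear the gate, then wait for every reader that might
 * already have seen the old value to finish before freeing resources. */
static void xdp_tx_disable(unsigned long *state)
{
    clear_bit(0, state);    /* 0 stands in for the XDP_TX_ENABLED bit */
    synchronize_rcu();      /* drain in-flight datapath readers */
}

/* Datapath side: a single test is enough, per the comment in the
 * mlx5e_xdp_xmit() hunk above. */
static bool xdp_tx_allowed(const unsigned long *state)
{
    return test_bit(0, state);
}
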
1672 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1673 +index 944f21f99d437..637d59c01fe5c 100644
1674 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1675 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1676 +@@ -2890,6 +2890,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
1677 +
1678 + mlx5e_build_tx2sq_maps(priv);
1679 + mlx5e_activate_channels(&priv->channels);
1680 ++ mlx5e_xdp_tx_enable(priv);
1681 + netif_tx_start_all_queues(priv->netdev);
1682 +
1683 + if (MLX5_ESWITCH_MANAGER(priv->mdev))
1684 +@@ -2911,6 +2912,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
1685 + */
1686 + netif_tx_stop_all_queues(priv->netdev);
1687 + netif_tx_disable(priv->netdev);
1688 ++ mlx5e_xdp_tx_disable(priv);
1689 + mlx5e_deactivate_channels(&priv->channels);
1690 + }
1691 +
1692 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1693 +index c9cc9747d21d1..701624a63d2f4 100644
1694 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1695 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1696 +@@ -144,6 +144,7 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
1697 +
1698 + s->tx_packets += sq_stats->packets;
1699 + s->tx_bytes += sq_stats->bytes;
1700 ++ s->tx_queue_dropped += sq_stats->dropped;
1701 + }
1702 + }
1703 + }
1704 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1705 +index 3092c59c0dc71..9f7f8425f6767 100644
1706 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1707 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1708 +@@ -96,6 +96,7 @@ struct mlx5e_tc_flow_parse_attr {
1709 + struct ip_tunnel_info tun_info;
1710 + struct mlx5_flow_spec spec;
1711 + int num_mod_hdr_actions;
1712 ++ int max_mod_hdr_actions;
1713 + void *mod_hdr_actions;
1714 + int mirred_ifindex;
1715 + };
1716 +@@ -1742,9 +1743,9 @@ static struct mlx5_fields fields[] = {
1717 + OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
1718 + };
1719 +
1720 +-/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
1721 +- * max from the SW pedit action. On success, it says how many HW actions were
1722 +- * actually parsed.
1723 ++/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
1724 ++ * max from the SW pedit action. On success, attr->num_mod_hdr_actions
1725 ++ * says how many HW actions were actually parsed.
1726 + */
1727 + static int offload_pedit_fields(struct pedit_headers *masks,
1728 + struct pedit_headers *vals,
1729 +@@ -1767,9 +1768,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
1730 + add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
1731 +
1732 + action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
1733 +- action = parse_attr->mod_hdr_actions;
1734 +- max_actions = parse_attr->num_mod_hdr_actions;
1735 +- nactions = 0;
1736 ++ action = parse_attr->mod_hdr_actions +
1737 ++ parse_attr->num_mod_hdr_actions * action_size;
1738 ++
1739 ++ max_actions = parse_attr->max_mod_hdr_actions;
1740 ++ nactions = parse_attr->num_mod_hdr_actions;
1741 +
1742 + for (i = 0; i < ARRAY_SIZE(fields); i++) {
1743 + f = &fields[i];
1744 +@@ -1874,7 +1877,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
1745 + if (!parse_attr->mod_hdr_actions)
1746 + return -ENOMEM;
1747 +
1748 +- parse_attr->num_mod_hdr_actions = max_actions;
1749 ++ parse_attr->max_mod_hdr_actions = max_actions;
1750 + return 0;
1751 + }
1752 +
1753 +@@ -1918,9 +1921,11 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
1754 + goto out_err;
1755 + }
1756 +
1757 +- err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
1758 +- if (err)
1759 +- goto out_err;
1760 ++ if (!parse_attr->mod_hdr_actions) {
1761 ++ err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
1762 ++ if (err)
1763 ++ goto out_err;
1764 ++ }
1765 +
1766 + err = offload_pedit_fields(masks, vals, parse_attr);
1767 + if (err < 0)
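
The fix above separates buffer capacity (max_mod_hdr_actions) from the fill level (num_mod_hdr_actions), so a second pedit action appends after the actions already parsed instead of overwriting the buffer from index zero. A minimal model of the append arithmetic (struct and names hypothetical):

#include <stddef.h>

/* Capacity vs. fill level, kept as two separate counters. */
struct mod_hdr {
    char  *buf;          /* flat buffer of hardware actions */
    size_t action_size;  /* bytes per hardware action */
    int    num;          /* actions already written (fill level) */
    int    max;          /* total capacity in actions */
};

/* Each parsed field claims the next free slot; a later pedit action
 * therefore appends instead of restarting at the buffer base. */
char *next_action_slot(struct mod_hdr *m)
{
    if (m->num >= m->max)
        return NULL;     /* out of room: the caller errors out */
    return m->buf + m->num++ * m->action_size;
}
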
1768 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
1769 +index c7901a3f2a794..a903e97793f9a 100644
1770 +--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
1771 ++++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
1772 +@@ -1367,10 +1367,10 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
1773 + u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
1774 +
1775 + if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
1776 +- break;
1777 ++ return 0;
1778 + cond_resched();
1779 + } while (time_before(jiffies, end));
1780 +- return 0;
1781 ++ return -EBUSY;
1782 + }
1783 +
1784 + static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
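
The mlxsw_pci_sw_reset() fix above changes the poll loop so success is reported from inside the loop and running off the end is unambiguously a timeout; previously the function returned 0 even when FW_READY never showed the magic value. The shape of the corrected loop as a kernel-style sketch (mlxsw_ready() is a hypothetical stand-in for the FW_READY register test):

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/types.h>

static bool mlxsw_ready(void);  /* hypothetical FW_READY register test */

static int wait_fw_ready(unsigned int timeout_ms)
{
    unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

    do {
        if (mlxsw_ready())
            return 0;   /* success reported from inside the loop */
        cond_resched();
    } while (time_before(jiffies, end));

    return -EBUSY;      /* falling out now clearly means timeout */
}
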
1785 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
1786 +index e3c6fe8b1d406..1dcf152b28138 100644
1787 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
1788 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
1789 +@@ -75,7 +75,15 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
1790 + act_set = mlxsw_afa_block_first_set(rulei->act_block);
1791 + mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
1792 +
1793 +- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
1794 ++ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
1795 ++ if (err)
1796 ++ goto err_ptce2_write;
1797 ++
1798 ++ return 0;
1799 ++
1800 ++err_ptce2_write:
1801 ++ cregion->ops->entry_remove(cregion, centry);
1802 ++ return err;
1803 + }
1804 +
1805 + static void
1806 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1807 +index cdec48bcc6ad5..af673abdb4823 100644
1808 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1809 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1810 +@@ -1209,7 +1209,7 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1811 + static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1812 + {
1813 + return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1814 +- MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
1815 ++ MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
1816 + }
1817 +
1818 + static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1819 +@@ -1221,7 +1221,7 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1820 + static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1821 + const char *mac, u16 fid, bool adding,
1822 + enum mlxsw_reg_sfd_rec_action action,
1823 +- bool dynamic)
1824 ++ enum mlxsw_reg_sfd_rec_policy policy)
1825 + {
1826 + char *sfd_pl;
1827 + u8 num_rec;
1828 +@@ -1232,8 +1232,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1829 + return -ENOMEM;
1830 +
1831 + mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1832 +- mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1833 +- mac, fid, action, local_port);
1834 ++ mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
1835 + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1836 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1837 + if (err)
1838 +@@ -1252,7 +1251,8 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1839 + bool dynamic)
1840 + {
1841 + return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
1842 +- MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
1843 ++ MLXSW_REG_SFD_REC_ACTION_NOP,
1844 ++ mlxsw_sp_sfd_rec_policy(dynamic));
1845 + }
1846 +
1847 + int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1848 +@@ -1260,7 +1260,7 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1849 + {
1850 + return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
1851 + MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
1852 +- false);
1853 ++ MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
1854 + }
1855 +
1856 + static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
1857 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
1858 +index 2fa1c050a14b4..92cd8abeb41d7 100644
1859 +--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
1860 ++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
1861 +@@ -1592,6 +1592,10 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
1862 + cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
1863 + rx_prod.bd_prod = cpu_to_le16(bd_prod);
1864 + rx_prod.cqe_prod = cpu_to_le16(cq_prod);
1865 ++
1866 ++ /* Make sure chain element is updated before ringing the doorbell */
1867 ++ dma_wmb();
1868 ++
1869 + DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
1870 + }
1871 +
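
The one-line qed_ll2 fix above inserts dma_wmb() so the updated chain element is observable by the device before the doorbell write that tells it to fetch. In isolation, the ordering requirement looks like this (a hedged sketch, not the driver's actual structures):

#include <asm/barrier.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/types.h>

/* Writes to DMA-coherent memory must be visible to the device before
 * the MMIO doorbell write that makes it look; dma_wmb() provides
 * exactly that ordering. */
static void ring_doorbell(void __iomem *db, u32 val, __le16 *prod, u16 idx)
{
    *prod = cpu_to_le16(idx);  /* update chain element in coherent memory */
    dma_wmb();                 /* order it before the doorbell MMIO */
    writel(val, db);           /* device may fetch from here on */
}
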
1872 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
1873 +index 20909036e0028..1c39305274440 100644
1874 +--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
1875 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
1876 +@@ -260,6 +260,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
1877 + struct stmmac_extra_stats *x, u32 chan)
1878 + {
1879 + u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
1880 ++ u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
1881 + int ret = 0;
1882 +
1883 + /* ABNORMAL interrupts */
1884 +@@ -279,8 +280,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
1885 + x->normal_irq_n++;
1886 +
1887 + if (likely(intr_status & XGMAC_RI)) {
1888 +- u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
1889 +- if (likely(value & XGMAC_RIE)) {
1890 ++ if (likely(intr_en & XGMAC_RIE)) {
1891 + x->rx_normal_irq_n++;
1892 + ret |= handle_rx;
1893 + }
1894 +@@ -292,7 +292,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
1895 + }
1896 +
1897 + /* Clear interrupts */
1898 +- writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan));
1899 ++ writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));
1900 +
1901 + return ret;
1902 + }
1903 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1904 +index 2103b865726ac..123b74e25ed81 100644
1905 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1906 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1907 +@@ -3522,27 +3522,28 @@ static int stmmac_napi_poll(struct napi_struct *napi, int budget)
1908 + struct stmmac_channel *ch =
1909 + container_of(napi, struct stmmac_channel, napi);
1910 + struct stmmac_priv *priv = ch->priv_data;
1911 +- int work_done = 0, work_rem = budget;
1912 ++ int work_done, rx_done = 0, tx_done = 0;
1913 + u32 chan = ch->index;
1914 +
1915 + priv->xstats.napi_poll++;
1916 +
1917 +- if (ch->has_tx) {
1918 +- int done = stmmac_tx_clean(priv, work_rem, chan);
1919 ++ if (ch->has_tx)
1920 ++ tx_done = stmmac_tx_clean(priv, budget, chan);
1921 ++ if (ch->has_rx)
1922 ++ rx_done = stmmac_rx(priv, budget, chan);
1923 +
1924 +- work_done += done;
1925 +- work_rem -= done;
1926 +- }
1927 +-
1928 +- if (ch->has_rx) {
1929 +- int done = stmmac_rx(priv, work_rem, chan);
1930 ++ work_done = max(rx_done, tx_done);
1931 ++ work_done = min(work_done, budget);
1932 +
1933 +- work_done += done;
1934 +- work_rem -= done;
1935 +- }
1936 ++ if (work_done < budget && napi_complete_done(napi, work_done)) {
1937 ++ int stat;
1938 +
1939 +- if (work_done < budget && napi_complete_done(napi, work_done))
1940 + stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
1941 ++ stat = stmmac_dma_interrupt_status(priv, priv->ioaddr,
1942 ++ &priv->xstats, chan);
1943 ++ if (stat && napi_reschedule(napi))
1944 ++ stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
1945 ++ }
1946 +
1947 + return work_done;
1948 + }
1949 +@@ -4191,6 +4192,18 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
1950 + return ret;
1951 + }
1952 +
1953 ++	/* Rx Watchdog is available in the COREs newer than 3.40.
1954 ++	 * In some cases, for example on buggy HW, this feature
1955 ++	 * has to be disabled, and this can be done by passing the
1956 ++	 * riwt_off field from the platform.
1957 ++ */
1958 ++ if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
1959 ++ (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
1960 ++ priv->use_riwt = 1;
1961 ++ dev_info(priv->device,
1962 ++ "Enable RX Mitigation via HW Watchdog Timer\n");
1963 ++ }
1964 ++
1965 + return 0;
1966 + }
1967 +
1968 +@@ -4323,18 +4336,6 @@ int stmmac_dvr_probe(struct device *device,
1969 + if (flow_ctrl)
1970 + priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
1971 +
1972 +- /* Rx Watchdog is available in the COREs newer than the 3.40.
1973 +- * In some case, for example on bugged HW this feature
1974 +- * has to be disable and this can be done by passing the
1975 +- * riwt_off field from the platform.
1976 +- */
1977 +- if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
1978 +- (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
1979 +- priv->use_riwt = 1;
1980 +- dev_info(priv->device,
1981 +- "Enable RX Mitigation via HW Watchdog Timer\n");
1982 +- }
1983 +-
1984 + /* Setup channels NAPI */
1985 + maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
1986 +
1987 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
1988 +index c54a50dbd5ac2..d819e8eaba122 100644
1989 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
1990 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
1991 +@@ -299,7 +299,17 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
1992 + */
1993 + static void stmmac_pci_remove(struct pci_dev *pdev)
1994 + {
1995 ++ int i;
1996 ++
1997 + stmmac_dvr_remove(&pdev->dev);
1998 ++
1999 ++ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
2000 ++ if (pci_resource_len(pdev, i) == 0)
2001 ++ continue;
2002 ++ pcim_iounmap_regions(pdev, BIT(i));
2003 ++ break;
2004 ++ }
2005 ++
2006 + pci_disable_device(pdev);
2007 + }
2008 +
2009 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
2010 +index 531294f4978bc..58ea18af9813a 100644
2011 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
2012 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
2013 +@@ -301,6 +301,8 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
2014 + /* Queue 0 is not AVB capable */
2015 + if (queue <= 0 || queue >= tx_queues_count)
2016 + return -EINVAL;
2017 ++ if (!priv->dma_cap.av)
2018 ++ return -EOPNOTSUPP;
2019 + if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
2020 + return -EOPNOTSUPP;
2021 +
2022 +diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
2023 +index 493cd382b8aa0..01711e6e9a394 100644
2024 +--- a/drivers/net/geneve.c
2025 ++++ b/drivers/net/geneve.c
2026 +@@ -1406,9 +1406,13 @@ static void geneve_link_config(struct net_device *dev,
2027 + }
2028 + #if IS_ENABLED(CONFIG_IPV6)
2029 + case AF_INET6: {
2030 +- struct rt6_info *rt = rt6_lookup(geneve->net,
2031 +- &info->key.u.ipv6.dst, NULL, 0,
2032 +- NULL, 0);
2033 ++ struct rt6_info *rt;
2034 ++
2035 ++ if (!__in6_dev_get(dev))
2036 ++ break;
2037 ++
2038 ++ rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0,
2039 ++ NULL, 0);
2040 +
2041 + if (rt && rt->dst.dev)
2042 + ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;
2043 +diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
2044 +index 70f3f90c2ed69..2787e8b1d668a 100644
2045 +--- a/drivers/net/phy/phylink.c
2046 ++++ b/drivers/net/phy/phylink.c
2047 +@@ -502,6 +502,17 @@ static void phylink_run_resolve(struct phylink *pl)
2048 + queue_work(system_power_efficient_wq, &pl->resolve);
2049 + }
2050 +
2051 ++static void phylink_run_resolve_and_disable(struct phylink *pl, int bit)
2052 ++{
2053 ++ unsigned long state = pl->phylink_disable_state;
2054 ++
2055 ++ set_bit(bit, &pl->phylink_disable_state);
2056 ++ if (state == 0) {
2057 ++ queue_work(system_power_efficient_wq, &pl->resolve);
2058 ++ flush_work(&pl->resolve);
2059 ++ }
2060 ++}
2061 ++
2062 + static void phylink_fixed_poll(struct timer_list *t)
2063 + {
2064 + struct phylink *pl = container_of(t, struct phylink, link_poll);
2065 +@@ -955,9 +966,7 @@ void phylink_stop(struct phylink *pl)
2066 + if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
2067 + del_timer_sync(&pl->link_poll);
2068 +
2069 +- set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
2070 +- queue_work(system_power_efficient_wq, &pl->resolve);
2071 +- flush_work(&pl->resolve);
2072 ++ phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED);
2073 + }
2074 + EXPORT_SYMBOL_GPL(phylink_stop);
2075 +
2076 +@@ -1664,9 +1673,7 @@ static void phylink_sfp_link_down(void *upstream)
2077 +
2078 + ASSERT_RTNL();
2079 +
2080 +- set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
2081 +- queue_work(system_power_efficient_wq, &pl->resolve);
2082 +- flush_work(&pl->resolve);
2083 ++ phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK);
2084 + }
2085 +
2086 + static void phylink_sfp_link_up(void *upstream)
2087 +diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
2088 +index ad9db652874dc..fef701bfad62e 100644
2089 +--- a/drivers/net/phy/sfp-bus.c
2090 ++++ b/drivers/net/phy/sfp-bus.c
2091 +@@ -347,6 +347,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
2092 + return ret;
2093 + }
2094 + }
2095 ++ bus->socket_ops->attach(bus->sfp);
2096 + if (bus->started)
2097 + bus->socket_ops->start(bus->sfp);
2098 + bus->netdev->sfp_bus = bus;
2099 +@@ -362,6 +363,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
2100 + if (bus->registered) {
2101 + if (bus->started)
2102 + bus->socket_ops->stop(bus->sfp);
2103 ++ bus->socket_ops->detach(bus->sfp);
2104 + if (bus->phydev && ops && ops->disconnect_phy)
2105 + ops->disconnect_phy(bus->upstream);
2106 + }
2107 +diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
2108 +index fd8bb998ae52d..68c8fbf099f87 100644
2109 +--- a/drivers/net/phy/sfp.c
2110 ++++ b/drivers/net/phy/sfp.c
2111 +@@ -184,6 +184,7 @@ struct sfp {
2112 +
2113 + struct gpio_desc *gpio[GPIO_MAX];
2114 +
2115 ++ bool attached;
2116 + unsigned int state;
2117 + struct delayed_work poll;
2118 + struct delayed_work timeout;
2119 +@@ -1475,7 +1476,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
2120 + */
2121 + switch (sfp->sm_mod_state) {
2122 + default:
2123 +- if (event == SFP_E_INSERT) {
2124 ++ if (event == SFP_E_INSERT && sfp->attached) {
2125 + sfp_module_tx_disable(sfp);
2126 + sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT);
2127 + }
2128 +@@ -1607,6 +1608,19 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
2129 + mutex_unlock(&sfp->sm_mutex);
2130 + }
2131 +
2132 ++static void sfp_attach(struct sfp *sfp)
2133 ++{
2134 ++ sfp->attached = true;
2135 ++ if (sfp->state & SFP_F_PRESENT)
2136 ++ sfp_sm_event(sfp, SFP_E_INSERT);
2137 ++}
2138 ++
2139 ++static void sfp_detach(struct sfp *sfp)
2140 ++{
2141 ++ sfp->attached = false;
2142 ++ sfp_sm_event(sfp, SFP_E_REMOVE);
2143 ++}
2144 ++
2145 + static void sfp_start(struct sfp *sfp)
2146 + {
2147 + sfp_sm_event(sfp, SFP_E_DEV_UP);
2148 +@@ -1667,6 +1681,8 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
2149 + }
2150 +
2151 + static const struct sfp_socket_ops sfp_module_ops = {
2152 ++ .attach = sfp_attach,
2153 ++ .detach = sfp_detach,
2154 + .start = sfp_start,
2155 + .stop = sfp_stop,
2156 + .module_info = sfp_module_info,
2157 +@@ -1834,10 +1850,6 @@ static int sfp_probe(struct platform_device *pdev)
2158 + dev_info(sfp->dev, "Host maximum power %u.%uW\n",
2159 + sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10);
2160 +
2161 +- sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
2162 +- if (!sfp->sfp_bus)
2163 +- return -ENOMEM;
2164 +-
2165 + /* Get the initial state, and always signal TX disable,
2166 + * since the network interface will not be up.
2167 + */
2168 +@@ -1848,10 +1860,6 @@ static int sfp_probe(struct platform_device *pdev)
2169 + sfp->state |= SFP_F_RATE_SELECT;
2170 + sfp_set_state(sfp, sfp->state);
2171 + sfp_module_tx_disable(sfp);
2172 +- rtnl_lock();
2173 +- if (sfp->state & SFP_F_PRESENT)
2174 +- sfp_sm_event(sfp, SFP_E_INSERT);
2175 +- rtnl_unlock();
2176 +
2177 + for (i = 0; i < GPIO_MAX; i++) {
2178 + if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i])
2179 +@@ -1884,6 +1892,10 @@ static int sfp_probe(struct platform_device *pdev)
2180 + dev_warn(sfp->dev,
2181 + "No tx_disable pin: SFP modules will always be emitting.\n");
2182 +
2183 ++ sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
2184 ++ if (!sfp->sfp_bus)
2185 ++ return -ENOMEM;
2186 ++
2187 + return 0;
2188 + }
2189 +
2190 +diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
2191 +index 31b0acf337e27..64f54b0bbd8c4 100644
2192 +--- a/drivers/net/phy/sfp.h
2193 ++++ b/drivers/net/phy/sfp.h
2194 +@@ -7,6 +7,8 @@
2195 + struct sfp;
2196 +
2197 + struct sfp_socket_ops {
2198 ++ void (*attach)(struct sfp *sfp);
2199 ++ void (*detach)(struct sfp *sfp);
2200 + void (*start)(struct sfp *sfp);
2201 + void (*stop)(struct sfp *sfp);
2202 + int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo);
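
The sfp changes above add attach()/detach() socket ops so the probe path no longer fires SFP_E_INSERT before any upstream consumer exists; instead, the insertion is remembered and replayed when sfp-bus attaches. A small standalone C model of the gating (handle_insert() is a stand-in for delivering the state-machine event):

#include <stdbool.h>

struct sock_model {
    bool attached;        /* an upstream consumer is bound */
    bool module_present;  /* module-detect signal currently asserted */
};

/* Stand-in for delivering SFP_E_INSERT to the state machine. */
void handle_insert(struct sock_model *s)
{
    (void)s;
}

/* An insertion seen with nobody attached is remembered, not delivered. */
void model_insert_event(struct sock_model *s)
{
    s->module_present = true;
    if (s->attached)
        handle_insert(s);
}

/* attach() replays the current presence state, so the deferred
 * insertion is not lost. */
void model_attach(struct sock_model *s)
{
    s->attached = true;
    if (s->module_present)
        handle_insert(s);
}
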
2203 +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
2204 +index 4b6572f0188a7..723814d84b7d8 100644
2205 +--- a/drivers/net/team/team.c
2206 ++++ b/drivers/net/team/team.c
2207 +@@ -256,17 +256,6 @@ static void __team_option_inst_mark_removed_port(struct team *team,
2208 + }
2209 + }
2210 +
2211 +-static bool __team_option_inst_tmp_find(const struct list_head *opts,
2212 +- const struct team_option_inst *needle)
2213 +-{
2214 +- struct team_option_inst *opt_inst;
2215 +-
2216 +- list_for_each_entry(opt_inst, opts, tmp_list)
2217 +- if (opt_inst == needle)
2218 +- return true;
2219 +- return false;
2220 +-}
2221 +-
2222 + static int __team_options_register(struct team *team,
2223 + const struct team_option *option,
2224 + size_t option_count)
2225 +@@ -2463,7 +2452,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2226 + int err = 0;
2227 + int i;
2228 + struct nlattr *nl_option;
2229 +- LIST_HEAD(opt_inst_list);
2230 +
2231 + rtnl_lock();
2232 +
2233 +@@ -2483,6 +2471,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2234 + struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
2235 + struct nlattr *attr;
2236 + struct nlattr *attr_data;
2237 ++ LIST_HEAD(opt_inst_list);
2238 + enum team_option_type opt_type;
2239 + int opt_port_ifindex = 0; /* != 0 for per-port options */
2240 + u32 opt_array_index = 0;
2241 +@@ -2587,23 +2576,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2242 + if (err)
2243 + goto team_put;
2244 + opt_inst->changed = true;
2245 +-
2246 +- /* dumb/evil user-space can send us duplicate opt,
2247 +- * keep only the last one
2248 +- */
2249 +- if (__team_option_inst_tmp_find(&opt_inst_list,
2250 +- opt_inst))
2251 +- continue;
2252 +-
2253 + list_add(&opt_inst->tmp_list, &opt_inst_list);
2254 + }
2255 + if (!opt_found) {
2256 + err = -ENOENT;
2257 + goto team_put;
2258 + }
2259 +- }
2260 +
2261 +- err = team_nl_send_event_options_get(team, &opt_inst_list);
2262 ++ err = team_nl_send_event_options_get(team, &opt_inst_list);
2263 ++ if (err)
2264 ++ break;
2265 ++ }
2266 +
2267 + team_put:
2268 + team_nl_team_put(team);
2269 +diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c
2270 +index a7f37063518ec..3d05bc1937d40 100644
2271 +--- a/drivers/pinctrl/pinctrl-max77620.c
2272 ++++ b/drivers/pinctrl/pinctrl-max77620.c
2273 +@@ -34,14 +34,12 @@ enum max77620_pin_ppdrv {
2274 + MAX77620_PIN_PP_DRV,
2275 + };
2276 +
2277 +-enum max77620_pinconf_param {
2278 +- MAX77620_ACTIVE_FPS_SOURCE = PIN_CONFIG_END + 1,
2279 +- MAX77620_ACTIVE_FPS_POWER_ON_SLOTS,
2280 +- MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS,
2281 +- MAX77620_SUSPEND_FPS_SOURCE,
2282 +- MAX77620_SUSPEND_FPS_POWER_ON_SLOTS,
2283 +- MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS,
2284 +-};
2285 ++#define MAX77620_ACTIVE_FPS_SOURCE (PIN_CONFIG_END + 1)
2286 ++#define MAX77620_ACTIVE_FPS_POWER_ON_SLOTS (PIN_CONFIG_END + 2)
2287 ++#define MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS (PIN_CONFIG_END + 3)
2288 ++#define MAX77620_SUSPEND_FPS_SOURCE (PIN_CONFIG_END + 4)
2289 ++#define MAX77620_SUSPEND_FPS_POWER_ON_SLOTS (PIN_CONFIG_END + 5)
2290 ++#define MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS (PIN_CONFIG_END + 6)
2291 +
2292 + struct max77620_pin_function {
2293 + const char *name;
2294 +diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
2295 +index bf07735275a49..0fc382cb977bf 100644
2296 +--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
2297 ++++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
2298 +@@ -1144,7 +1144,7 @@ static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm,
2299 + }
2300 +
2301 + static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
2302 +- unsigned int tid, int pg_idx, bool reply)
2303 ++ unsigned int tid, int pg_idx)
2304 + {
2305 + struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
2306 + GFP_KERNEL);
2307 +@@ -1160,7 +1160,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
2308 + req = (struct cpl_set_tcb_field *)skb->head;
2309 + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
2310 + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
2311 +- req->reply = V_NO_REPLY(reply ? 0 : 1);
2312 ++ req->reply = V_NO_REPLY(1);
2313 + req->cpu_idx = 0;
2314 + req->word = htons(31);
2315 + req->mask = cpu_to_be64(0xF0000000);
2316 +@@ -1177,11 +1177,10 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
2317 + * @tid: connection id
2318 + * @hcrc: header digest enabled
2319 + * @dcrc: data digest enabled
2320 +- * @reply: request reply from h/w
2321 + * set up the iscsi digest settings for a connection identified by tid
2322 + */
2323 + static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2324 +- int hcrc, int dcrc, int reply)
2325 ++ int hcrc, int dcrc)
2326 + {
2327 + struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
2328 + GFP_KERNEL);
2329 +@@ -1197,7 +1196,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2330 + req = (struct cpl_set_tcb_field *)skb->head;
2331 + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
2332 + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
2333 +- req->reply = V_NO_REPLY(reply ? 0 : 1);
2334 ++ req->reply = V_NO_REPLY(1);
2335 + req->cpu_idx = 0;
2336 + req->word = htons(31);
2337 + req->mask = cpu_to_be64(0x0F000000);
2338 +diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
2339 +index 211da1d5a8699..689d6c813a50d 100644
2340 +--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
2341 ++++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
2342 +@@ -1517,16 +1517,22 @@ static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
2343 + struct cxgbi_sock *csk;
2344 +
2345 + csk = lookup_tid(t, tid);
2346 +- if (!csk)
2347 ++ if (!csk) {
2348 + pr_err("can't find conn. for tid %u.\n", tid);
2349 ++ return;
2350 ++ }
2351 +
2352 + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
2353 + "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
2354 + csk, csk->state, csk->flags, csk->tid, rpl->status);
2355 +
2356 +- if (rpl->status != CPL_ERR_NONE)
2357 ++ if (rpl->status != CPL_ERR_NONE) {
2358 + pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
2359 + csk, tid, rpl->status);
2360 ++ csk->err = -EINVAL;
2361 ++ }
2362 ++
2363 ++ complete(&csk->cmpl);
2364 +
2365 + __kfree_skb(skb);
2366 + }
2367 +@@ -1903,7 +1909,7 @@ static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
2368 + }
2369 +
2370 + static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
2371 +- int pg_idx, bool reply)
2372 ++ int pg_idx)
2373 + {
2374 + struct sk_buff *skb;
2375 + struct cpl_set_tcb_field *req;
2376 +@@ -1919,7 +1925,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
2377 + req = (struct cpl_set_tcb_field *)skb->head;
2378 + INIT_TP_WR(req, csk->tid);
2379 + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
2380 +- req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
2381 ++ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
2382 + req->word_cookie = htons(0);
2383 + req->mask = cpu_to_be64(0x3 << 8);
2384 + req->val = cpu_to_be64(pg_idx << 8);
2385 +@@ -1928,12 +1934,15 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
2386 + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
2387 + "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
2388 +
2389 ++ reinit_completion(&csk->cmpl);
2390 + cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2391 +- return 0;
2392 ++ wait_for_completion(&csk->cmpl);
2393 ++
2394 ++ return csk->err;
2395 + }
2396 +
2397 + static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2398 +- int hcrc, int dcrc, int reply)
2399 ++ int hcrc, int dcrc)
2400 + {
2401 + struct sk_buff *skb;
2402 + struct cpl_set_tcb_field *req;
2403 +@@ -1951,7 +1960,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2404 + req = (struct cpl_set_tcb_field *)skb->head;
2405 + INIT_TP_WR(req, tid);
2406 + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
2407 +- req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
2408 ++ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
2409 + req->word_cookie = htons(0);
2410 + req->mask = cpu_to_be64(0x3 << 4);
2411 + req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
2412 +@@ -1961,8 +1970,11 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2413 + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
2414 + "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
2415 +
2416 ++ reinit_completion(&csk->cmpl);
2417 + cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2418 +- return 0;
2419 ++ wait_for_completion(&csk->cmpl);
2420 ++
2421 ++ return csk->err;
2422 + }
2423 +
2424 + static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
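
The cxgb4i change above converts the SET_TCB_FIELD requests from fire-and-forget (NO_REPLY) to a synchronous handshake: the sender now requests a reply, sleeps on the new csk->cmpl completion, and do_set_tcb_rpl() records the status and wakes it, so setup errors finally propagate to the caller. The pattern in miniature (send_wr() stands in for cxgb4_ofld_send(); the completion is assumed to have been set up with init_completion(), as the libcxgbi.c hunk below does):

#include <linux/completion.h>
#include <linux/errno.h>

struct req_ctx {
    struct completion cmpl;  /* init_completion() done at socket create */
    int err;
};

static void send_wr(struct req_ctx *ctx);  /* stand-in for cxgb4_ofld_send() */

static int send_and_wait(struct req_ctx *ctx)
{
    ctx->err = 0;
    reinit_completion(&ctx->cmpl);  /* arm before posting the request */
    send_wr(ctx);
    wait_for_completion(&ctx->cmpl);
    return ctx->err;                /* filled in by the reply handler */
}

/* Runs from the CPL reply path, like do_set_tcb_rpl() above. */
static void on_reply(struct req_ctx *ctx, int status)
{
    if (status)
        ctx->err = -EINVAL;
    complete(&ctx->cmpl);           /* wake the sleeping sender */
}
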
2425 +diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
2426 +index 3f3af5e74a07d..f2c561ca731a3 100644
2427 +--- a/drivers/scsi/cxgbi/libcxgbi.c
2428 ++++ b/drivers/scsi/cxgbi/libcxgbi.c
2429 +@@ -573,6 +573,7 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
2430 + skb_queue_head_init(&csk->receive_queue);
2431 + skb_queue_head_init(&csk->write_queue);
2432 + timer_setup(&csk->retry_timer, NULL, 0);
2433 ++ init_completion(&csk->cmpl);
2434 + rwlock_init(&csk->callback_lock);
2435 + csk->cdev = cdev;
2436 + csk->flags = 0;
2437 +@@ -2252,14 +2253,14 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
2438 + if (!err && conn->hdrdgst_en)
2439 + err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2440 + conn->hdrdgst_en,
2441 +- conn->datadgst_en, 0);
2442 ++ conn->datadgst_en);
2443 + break;
2444 + case ISCSI_PARAM_DATADGST_EN:
2445 + err = iscsi_set_param(cls_conn, param, buf, buflen);
2446 + if (!err && conn->datadgst_en)
2447 + err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2448 + conn->hdrdgst_en,
2449 +- conn->datadgst_en, 0);
2450 ++ conn->datadgst_en);
2451 + break;
2452 + case ISCSI_PARAM_MAX_R2T:
2453 + return iscsi_tcp_set_max_r2t(conn, buf);
2454 +@@ -2385,7 +2386,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
2455 +
2456 + ppm = csk->cdev->cdev2ppm(csk->cdev);
2457 + err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
2458 +- ppm->tformat.pgsz_idx_dflt, 0);
2459 ++ ppm->tformat.pgsz_idx_dflt);
2460 + if (err < 0)
2461 + return err;
2462 +
2463 +diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
2464 +index dcb190e753434..3bf7414a75e5e 100644
2465 +--- a/drivers/scsi/cxgbi/libcxgbi.h
2466 ++++ b/drivers/scsi/cxgbi/libcxgbi.h
2467 +@@ -146,6 +146,7 @@ struct cxgbi_sock {
2468 + struct sk_buff_head receive_queue;
2469 + struct sk_buff_head write_queue;
2470 + struct timer_list retry_timer;
2471 ++ struct completion cmpl;
2472 + int err;
2473 + rwlock_t callback_lock;
2474 + void *user_data;
2475 +@@ -487,9 +488,9 @@ struct cxgbi_device {
2476 + struct cxgbi_ppm *,
2477 + struct cxgbi_task_tag_info *);
2478 + int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
2479 +- unsigned int, int, int, int);
2480 ++ unsigned int, int, int);
2481 + int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
2482 +- unsigned int, int, bool);
2483 ++ unsigned int, int);
2484 +
2485 + void (*csk_release_offload_resources)(struct cxgbi_sock *);
2486 + int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
2487 +diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
2488 +index 08c7b1e25fe48..dde84f7443136 100644
2489 +--- a/drivers/scsi/isci/init.c
2490 ++++ b/drivers/scsi/isci/init.c
2491 +@@ -588,6 +588,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
2492 + shost->max_lun = ~0;
2493 + shost->max_cmd_len = MAX_COMMAND_SIZE;
2494 +
2495 ++ /* turn on DIF support */
2496 ++ scsi_host_set_prot(shost,
2497 ++ SHOST_DIF_TYPE1_PROTECTION |
2498 ++ SHOST_DIF_TYPE2_PROTECTION |
2499 ++ SHOST_DIF_TYPE3_PROTECTION);
2500 ++ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
2501 ++
2502 + err = scsi_add_host(shost, &pdev->dev);
2503 + if (err)
2504 + goto err_shost;
2505 +@@ -675,13 +682,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2506 + goto err_host_alloc;
2507 + }
2508 + pci_info->hosts[i] = h;
2509 +-
2510 +- /* turn on DIF support */
2511 +- scsi_host_set_prot(to_shost(h),
2512 +- SHOST_DIF_TYPE1_PROTECTION |
2513 +- SHOST_DIF_TYPE2_PROTECTION |
2514 +- SHOST_DIF_TYPE3_PROTECTION);
2515 +- scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
2516 + }
2517 +
2518 + err = isci_setup_interrupts(pdev);
2519 +diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
2520 +index 2f0a4f2c5ff80..d4821b9dea45d 100644
2521 +--- a/drivers/scsi/qedi/qedi_iscsi.c
2522 ++++ b/drivers/scsi/qedi/qedi_iscsi.c
2523 +@@ -954,6 +954,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
2524 +
2525 + qedi_ep = ep->dd_data;
2526 + if (qedi_ep->state == EP_STATE_IDLE ||
2527 ++ qedi_ep->state == EP_STATE_OFLDCONN_NONE ||
2528 + qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
2529 + return -1;
2530 +
2531 +@@ -1036,6 +1037,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
2532 +
2533 + switch (qedi_ep->state) {
2534 + case EP_STATE_OFLDCONN_START:
2535 ++ case EP_STATE_OFLDCONN_NONE:
2536 + goto ep_release_conn;
2537 + case EP_STATE_OFLDCONN_FAILED:
2538 + break;
2539 +@@ -1226,6 +1228,7 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
2540 +
2541 + if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
2542 + QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
2543 ++ qedi_ep->state = EP_STATE_OFLDCONN_NONE;
2544 + ret = -EIO;
2545 + goto set_path_exit;
2546 + }
2547 +diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
2548 +index 11260776212fa..892d70d545537 100644
2549 +--- a/drivers/scsi/qedi/qedi_iscsi.h
2550 ++++ b/drivers/scsi/qedi/qedi_iscsi.h
2551 +@@ -59,6 +59,7 @@ enum {
2552 + EP_STATE_OFLDCONN_FAILED = 0x2000,
2553 + EP_STATE_CONNECT_FAILED = 0x4000,
2554 + EP_STATE_DISCONN_TIMEDOUT = 0x8000,
2555 ++ EP_STATE_OFLDCONN_NONE = 0x10000,
2556 + };
2557 +
2558 + struct qedi_conn;
2559 +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
2560 +index 0e13349dce570..575445c761b48 100644
2561 +--- a/drivers/scsi/qla4xxx/ql4_os.c
2562 ++++ b/drivers/scsi/qla4xxx/ql4_os.c
2563 +@@ -7237,6 +7237,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
2564 +
2565 + rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
2566 + fw_ddb_entry);
2567 ++ if (rc)
2568 ++ goto free_sess;
2569 +
2570 + ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
2571 + __func__, fnode_sess->dev.kobj.name);
2572 +diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
2573 +index 14e5bf7af0bb1..c3bcaaec0fc5c 100644
2574 +--- a/drivers/scsi/ufs/ufs.h
2575 ++++ b/drivers/scsi/ufs/ufs.h
2576 +@@ -195,7 +195,7 @@ enum ufs_desc_def_size {
2577 + QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
2578 + QUERY_DESC_UNIT_DEF_SIZE = 0x23,
2579 + QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
2580 +- QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44,
2581 ++ QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48,
2582 + QUERY_DESC_POWER_DEF_SIZE = 0x62,
2583 + QUERY_DESC_HEALTH_DEF_SIZE = 0x25,
2584 + };
2585 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
2586 +index 12ddb5928a738..6e80dfe4fa979 100644
2587 +--- a/drivers/scsi/ufs/ufshcd.c
2588 ++++ b/drivers/scsi/ufs/ufshcd.c
2589 +@@ -7768,6 +7768,8 @@ out:
2590 + trace_ufshcd_system_resume(dev_name(hba->dev), ret,
2591 + ktime_to_us(ktime_sub(ktime_get(), start)),
2592 + hba->curr_dev_pwr_mode, hba->uic_link_state);
2593 ++ if (!ret)
2594 ++ hba->is_sys_suspended = false;
2595 + return ret;
2596 + }
2597 + EXPORT_SYMBOL(ufshcd_system_resume);
2598 +diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
2599 +index ac263a180253e..894e60ecebe20 100644
2600 +--- a/drivers/staging/erofs/data.c
2601 ++++ b/drivers/staging/erofs/data.c
2602 +@@ -25,7 +25,7 @@ static inline void read_endio(struct bio *bio)
2603 + struct page *page = bvec->bv_page;
2604 +
2605 + /* page is already locked */
2606 +- BUG_ON(PageUptodate(page));
2607 ++ DBG_BUGON(PageUptodate(page));
2608 +
2609 + if (unlikely(err))
2610 + SetPageError(page);
2611 +@@ -91,12 +91,12 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
2612 + struct erofs_map_blocks *map,
2613 + int flags)
2614 + {
2615 ++ int err = 0;
2616 + erofs_blk_t nblocks, lastblk;
2617 + u64 offset = map->m_la;
2618 + struct erofs_vnode *vi = EROFS_V(inode);
2619 +
2620 + trace_erofs_map_blocks_flatmode_enter(inode, map, flags);
2621 +- BUG_ON(is_inode_layout_compression(inode));
2622 +
2623 + nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
2624 + lastblk = nblocks - is_inode_layout_inline(inode);
2625 +@@ -123,18 +123,27 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
2626 + map->m_plen = inode->i_size - offset;
2627 +
2628 + /* inline data should locate in one meta block */
2629 +- BUG_ON(erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE);
2630 ++ if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
2631 ++ DBG_BUGON(1);
2632 ++ err = -EIO;
2633 ++ goto err_out;
2634 ++ }
2635 ++
2636 + map->m_flags |= EROFS_MAP_META;
2637 + } else {
2638 + errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
2639 + vi->nid, inode->i_size, map->m_la);
2640 +- BUG();
2641 ++ DBG_BUGON(1);
2642 ++ err = -EIO;
2643 ++ goto err_out;
2644 + }
2645 +
2646 + out:
2647 + map->m_llen = map->m_plen;
2648 ++
2649 ++err_out:
2650 + trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
2651 +- return 0;
2652 ++ return err;
2653 + }
2654 +
2655 + #ifdef CONFIG_EROFS_FS_ZIP
2656 +@@ -190,7 +199,7 @@ static inline struct bio *erofs_read_raw_page(
2657 + erofs_off_t current_block = (erofs_off_t)page->index;
2658 + int err;
2659 +
2660 +- BUG_ON(!nblocks);
2661 ++ DBG_BUGON(!nblocks);
2662 +
2663 + if (PageUptodate(page)) {
2664 + err = 0;
2665 +@@ -233,7 +242,7 @@ submit_bio_retry:
2666 + }
2667 +
2668 + /* for RAW access mode, m_plen must be equal to m_llen */
2669 +- BUG_ON(map.m_plen != map.m_llen);
2670 ++ DBG_BUGON(map.m_plen != map.m_llen);
2671 +
2672 + blknr = erofs_blknr(map.m_pa);
2673 + blkoff = erofs_blkoff(map.m_pa);
2674 +@@ -243,7 +252,7 @@ submit_bio_retry:
2675 + void *vsrc, *vto;
2676 + struct page *ipage;
2677 +
2678 +- BUG_ON(map.m_plen > PAGE_SIZE);
2679 ++ DBG_BUGON(map.m_plen > PAGE_SIZE);
2680 +
2681 + ipage = erofs_get_meta_page(inode->i_sb, blknr, 0);
2682 +
2683 +@@ -270,7 +279,7 @@ submit_bio_retry:
2684 + }
2685 +
2686 + /* pa must be block-aligned for raw reading */
2687 +- BUG_ON(erofs_blkoff(map.m_pa) != 0);
2688 ++ DBG_BUGON(erofs_blkoff(map.m_pa));
2689 +
2690 + /* max # of continuous pages */
2691 + if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
2692 +@@ -331,7 +340,7 @@ static int erofs_raw_access_readpage(struct file *file, struct page *page)
2693 + if (IS_ERR(bio))
2694 + return PTR_ERR(bio);
2695 +
2696 +- BUG_ON(bio != NULL); /* since we have only one bio -- must be NULL */
2697 ++ DBG_BUGON(bio); /* since we have only one bio -- must be NULL */
2698 + return 0;
2699 + }
2700 +
2701 +@@ -369,7 +378,7 @@ static int erofs_raw_access_readpages(struct file *filp,
2702 + /* pages could still be locked */
2703 + put_page(page);
2704 + }
2705 +- BUG_ON(!list_empty(pages));
2706 ++ DBG_BUGON(!list_empty(pages));
2707 +
2708 + /* the rare case (end in gaps) */
2709 + if (unlikely(bio != NULL))
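
The erofs hunks here and below systematically replace BUG_ON() on corruptible on-disk state with DBG_BUGON() plus a real error return, so a damaged image yields -EIO instead of halting the kernel, while debug builds still trap. Approximately, the conversion has this shape (DBG_BUGON reconstructed from the staging driver's convention; the example mirrors the dir.c hunk just below):

#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/errno.h>

#ifdef CONFIG_EROFS_FS_DEBUG
#define DBG_BUGON(x)	BUG_ON(x)	/* debug builds still trap hard */
#else
#define DBG_BUGON(x)	((void)(x))	/* production builds keep running */
#endif

/* Corruption becomes -EIO for the caller, not a kernel halt. */
static int check_namelen(int de_namelen)
{
    if (unlikely(de_namelen < 0)) {
        DBG_BUGON(1);
        return -EIO;
    }
    return 0;
}
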
2710 +diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
2711 +index be6ae3b1bdbe1..04b84ff31d036 100644
2712 +--- a/drivers/staging/erofs/dir.c
2713 ++++ b/drivers/staging/erofs/dir.c
2714 +@@ -53,8 +53,11 @@ static int erofs_fill_dentries(struct dir_context *ctx,
2715 + strnlen(de_name, maxsize - nameoff) :
2716 + le16_to_cpu(de[1].nameoff) - nameoff;
2717 +
2718 +- /* the corrupted directory found */
2719 +- BUG_ON(de_namelen < 0);
2720 ++ /* a corrupted entry is found */
2721 ++ if (unlikely(de_namelen < 0)) {
2722 ++ DBG_BUGON(1);
2723 ++ return -EIO;
2724 ++ }
2725 +
2726 + #ifdef CONFIG_EROFS_FS_DEBUG
2727 + dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
2728 +diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
2729 +index fbf6ff25cd1bd..9e7815f55a17c 100644
2730 +--- a/drivers/staging/erofs/inode.c
2731 ++++ b/drivers/staging/erofs/inode.c
2732 +@@ -132,7 +132,13 @@ static int fill_inline_data(struct inode *inode, void *data, unsigned m_pofs)
2733 + return -ENOMEM;
2734 +
2735 + m_pofs += vi->inode_isize + vi->xattr_isize;
2736 +- BUG_ON(m_pofs + inode->i_size > PAGE_SIZE);
2737 ++
2738 ++	/* inline symlink data shouldn't cross a page boundary either */
2739 ++ if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) {
2740 ++ DBG_BUGON(1);
2741 ++ kfree(lnk);
2742 ++ return -EIO;
2743 ++ }
2744 +
2745 + /* get in-page inline data */
2746 + memcpy(lnk, data + m_pofs, inode->i_size);
2747 +@@ -170,7 +176,7 @@ static int fill_inode(struct inode *inode, int isdir)
2748 + return PTR_ERR(page);
2749 + }
2750 +
2751 +- BUG_ON(!PageUptodate(page));
2752 ++ DBG_BUGON(!PageUptodate(page));
2753 + data = page_address(page);
2754 +
2755 + err = read_inode(inode, data + ofs);
2756 +diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
2757 +index e6313c54e3ad6..9f44ed8f00239 100644
2758 +--- a/drivers/staging/erofs/internal.h
2759 ++++ b/drivers/staging/erofs/internal.h
2760 +@@ -184,50 +184,70 @@ struct erofs_workgroup {
2761 +
2762 + #define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
2763 +
2764 +-static inline bool erofs_workgroup_try_to_freeze(
2765 +- struct erofs_workgroup *grp, int v)
2766 ++#if defined(CONFIG_SMP)
2767 ++static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
2768 ++ int val)
2769 + {
2770 +-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
2771 +- if (v != atomic_cmpxchg(&grp->refcount,
2772 +- v, EROFS_LOCKED_MAGIC))
2773 +- return false;
2774 + preempt_disable();
2775 ++ if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
2776 ++ preempt_enable();
2777 ++ return false;
2778 ++ }
2779 ++ return true;
2780 ++}
2781 ++
2782 ++static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
2783 ++ int orig_val)
2784 ++{
2785 ++ /*
2786 ++ * other observers should notice all modifications
2787 ++ * in the freezing period.
2788 ++ */
2789 ++ smp_mb();
2790 ++ atomic_set(&grp->refcount, orig_val);
2791 ++ preempt_enable();
2792 ++}
2793 ++
2794 ++static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
2795 ++{
2796 ++ return atomic_cond_read_relaxed(&grp->refcount,
2797 ++ VAL != EROFS_LOCKED_MAGIC);
2798 ++}
2799 + #else
2800 ++static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
2801 ++ int val)
2802 ++{
2803 + preempt_disable();
2804 +- if (atomic_read(&grp->refcount) != v) {
2805 ++ /* no need to spin on UP platforms, let's just disable preemption. */
2806 ++ if (val != atomic_read(&grp->refcount)) {
2807 + preempt_enable();
2808 + return false;
2809 + }
2810 +-#endif
2811 + return true;
2812 + }
2813 +
2814 +-static inline void erofs_workgroup_unfreeze(
2815 +- struct erofs_workgroup *grp, int v)
2816 ++static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
2817 ++ int orig_val)
2818 + {
2819 +-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
2820 +- atomic_set(&grp->refcount, v);
2821 +-#endif
2822 + preempt_enable();
2823 + }
2824 +
2825 ++static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
2826 ++{
2827 ++ int v = atomic_read(&grp->refcount);
2828 ++
2829 ++	/* a workgroup is never frozen on uniprocessor systems */
2830 ++ DBG_BUGON(v == EROFS_LOCKED_MAGIC);
2831 ++ return v;
2832 ++}
2833 ++#endif
2834 ++
2835 + static inline bool erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt)
2836 + {
2837 +- const int locked = (int)EROFS_LOCKED_MAGIC;
2838 + int o;
2839 +
2840 + repeat:
2841 +- o = atomic_read(&grp->refcount);
2842 +-
2843 +- /* spin if it is temporarily locked at the reclaim path */
2844 +- if (unlikely(o == locked)) {
2845 +-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
2846 +- do
2847 +- cpu_relax();
2848 +- while (atomic_read(&grp->refcount) == locked);
2849 +-#endif
2850 +- goto repeat;
2851 +- }
2852 ++ o = erofs_wait_on_workgroup_freezed(grp);
2853 +
2854 + if (unlikely(o <= 0))
2855 + return -1;
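
On SMP, the reworked freeze protocol above uses the workgroup refcount itself as a lock word: cmpxchg swings it from the caller's expected value to a sentinel, everything modified during the frozen window is published with a barrier on unfreeze, and waiters now use atomic_cond_read_relaxed() instead of an open-coded cpu_relax() loop. A condensed sketch of the two ends:

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/preempt.h>

#define LOCKED_MAGIC	(INT_MIN | 0xE0F510CCL)  /* sentinel lock value */

/* Freeze: atomically swing the refcount from the expected value to the
 * sentinel; preemption stays off so the frozen window is short. */
static bool try_freeze(atomic_t *ref, int expected)
{
    preempt_disable();
    if (atomic_cmpxchg(ref, expected, LOCKED_MAGIC) != expected) {
        preempt_enable();
        return false;
    }
    return true;
}

/* Unfreeze: publish everything modified while frozen, then restore the
 * count; waiters block in atomic_cond_read_relaxed() until this runs. */
static void unfreeze(atomic_t *ref, int orig)
{
    smp_mb();
    atomic_set(ref, orig);
    preempt_enable();
}
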
2856 +diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
2857 +index 2df9768edac96..b0583cdb079ae 100644
2858 +--- a/drivers/staging/erofs/super.c
2859 ++++ b/drivers/staging/erofs/super.c
2860 +@@ -40,7 +40,6 @@ static int erofs_init_inode_cache(void)
2861 +
2862 + static void erofs_exit_inode_cache(void)
2863 + {
2864 +- BUG_ON(erofs_inode_cachep == NULL);
2865 + kmem_cache_destroy(erofs_inode_cachep);
2866 + }
2867 +
2868 +@@ -265,8 +264,8 @@ static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
2869 + int ret = 1; /* 0 - busy */
2870 + struct address_space *const mapping = page->mapping;
2871 +
2872 +- BUG_ON(!PageLocked(page));
2873 +- BUG_ON(mapping->a_ops != &managed_cache_aops);
2874 ++ DBG_BUGON(!PageLocked(page));
2875 ++ DBG_BUGON(mapping->a_ops != &managed_cache_aops);
2876 +
2877 + if (PagePrivate(page))
2878 + ret = erofs_try_to_free_cached_page(mapping, page);
2879 +@@ -279,10 +278,10 @@ static void managed_cache_invalidatepage(struct page *page,
2880 + {
2881 + const unsigned int stop = length + offset;
2882 +
2883 +- BUG_ON(!PageLocked(page));
2884 ++ DBG_BUGON(!PageLocked(page));
2885 +
2886 +- /* Check for overflow */
2887 +- BUG_ON(stop > PAGE_SIZE || stop < length);
2888 ++ /* Check for potential overflow in debug mode */
2889 ++ DBG_BUGON(stop > PAGE_SIZE || stop < length);
2890 +
2891 + if (offset == 0 && stop == PAGE_SIZE)
2892 + while (!managed_cache_releasepage(page, GFP_NOFS))
2893 +@@ -404,12 +403,6 @@ static int erofs_read_super(struct super_block *sb,
2894 +
2895 + erofs_register_super(sb);
2896 +
2897 +- /*
2898 +- * We already have a positive dentry, which was instantiated
2899 +- * by d_make_root. Just need to d_rehash it.
2900 +- */
2901 +- d_rehash(sb->s_root);
2902 +-
2903 + if (!silent)
2904 + infoln("mounted on %s with opts: %s.", dev_name,
2905 + (char *)data);
2906 +@@ -625,7 +618,7 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
2907 +
2908 + static int erofs_remount(struct super_block *sb, int *flags, char *data)
2909 + {
2910 +- BUG_ON(!sb_rdonly(sb));
2911 ++ DBG_BUGON(!sb_rdonly(sb));
2912 +
2913 + *flags |= SB_RDONLY;
2914 + return 0;
2915 +diff --git a/drivers/staging/erofs/unzip_pagevec.h b/drivers/staging/erofs/unzip_pagevec.h
2916 +index 0956615b86f72..23856ba2742d8 100644
2917 +--- a/drivers/staging/erofs/unzip_pagevec.h
2918 ++++ b/drivers/staging/erofs/unzip_pagevec.h
2919 +@@ -150,7 +150,7 @@ z_erofs_pagevec_ctor_dequeue(struct z_erofs_pagevec_ctor *ctor,
2920 + erofs_vtptr_t t;
2921 +
2922 + if (unlikely(ctor->index >= ctor->nr)) {
2923 +- BUG_ON(ctor->next == NULL);
2924 ++ DBG_BUGON(!ctor->next);
2925 + z_erofs_pagevec_ctor_pagedown(ctor, true);
2926 + }
2927 +
2928 +diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
2929 +index 0346630b67c8c..1279241449f4b 100644
2930 +--- a/drivers/staging/erofs/unzip_vle.c
2931 ++++ b/drivers/staging/erofs/unzip_vle.c
2932 +@@ -18,9 +18,6 @@ static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
2933 +
2934 + void z_erofs_exit_zip_subsystem(void)
2935 + {
2936 +- BUG_ON(z_erofs_workqueue == NULL);
2937 +- BUG_ON(z_erofs_workgroup_cachep == NULL);
2938 +-
2939 + destroy_workqueue(z_erofs_workqueue);
2940 + kmem_cache_destroy(z_erofs_workgroup_cachep);
2941 + }
2942 +@@ -293,12 +290,9 @@ z_erofs_vle_work_lookup(struct super_block *sb,
2943 + *grp_ret = grp = container_of(egrp,
2944 + struct z_erofs_vle_workgroup, obj);
2945 +
2946 +-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
2947 + work = z_erofs_vle_grab_work(grp, pageofs);
2948 ++ /* if multiref is disabled, `primary' is always true */
2949 + primary = true;
2950 +-#else
2951 +- BUG();
2952 +-#endif
2953 +
2954 + DBG_BUGON(work->pageofs != pageofs);
2955 +
2956 +@@ -365,12 +359,12 @@ z_erofs_vle_work_register(struct super_block *sb,
2957 + struct z_erofs_vle_workgroup *grp = *grp_ret;
2958 + struct z_erofs_vle_work *work;
2959 +
2960 +-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
2961 +- BUG_ON(grp != NULL);
2962 +-#else
2963 +- if (grp != NULL)
2964 +- goto skip;
2965 +-#endif
2966 ++	/* if multiref is disabled, grp should always be NULL here */
2967 ++ if (unlikely(grp)) {
2968 ++ DBG_BUGON(1);
2969 ++ return ERR_PTR(-EINVAL);
2970 ++ }
2971 ++
2972 + /* no available workgroup, let's allocate one */
2973 + grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
2974 + if (unlikely(grp == NULL))
2975 +@@ -393,13 +387,7 @@ z_erofs_vle_work_register(struct super_block *sb,
2976 + *hosted = true;
2977 +
2978 + newgrp = true;
2979 +-#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
2980 +-skip:
2981 +- /* currently unimplemented */
2982 +- BUG();
2983 +-#else
2984 + work = z_erofs_vle_grab_primary_work(grp);
2985 +-#endif
2986 + work->pageofs = pageofs;
2987 +
2988 + mutex_init(&work->lock);
2989 +@@ -606,7 +594,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
2990 +
2991 + enum z_erofs_page_type page_type;
2992 + unsigned cur, end, spiltted, index;
2993 +- int err;
2994 ++ int err = 0;
2995 +
2996 + /* register locked file pages as online pages in pack */
2997 + z_erofs_onlinepage_init(page);
2998 +@@ -624,7 +612,7 @@ repeat:
2999 + /* go ahead the next map_blocks */
3000 + debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
3001 +
3002 +- if (!z_erofs_vle_work_iter_end(builder))
3003 ++ if (z_erofs_vle_work_iter_end(builder))
3004 + fe->initial = false;
3005 +
3006 + map->m_la = offset + cur;
3007 +@@ -633,12 +621,11 @@ repeat:
3008 + if (unlikely(err))
3009 + goto err_out;
3010 +
3011 +- /* deal with hole (FIXME! broken now) */
3012 + if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
3013 + goto hitted;
3014 +
3015 + DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
3016 +- BUG_ON(erofs_blkoff(map->m_pa));
3017 ++ DBG_BUGON(erofs_blkoff(map->m_pa));
3018 +
3019 + err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
3020 + if (unlikely(err))
3021 +@@ -683,7 +670,7 @@ retry:
3022 +
3023 + err = z_erofs_vle_work_add_page(builder,
3024 + newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
3025 +- if (!err)
3026 ++ if (likely(!err))
3027 + goto retry;
3028 + }
3029 +
3030 +@@ -694,9 +681,10 @@ retry:
3031 +
3032 + 	/* FIXME! avoid the last redundant fixup & endio */
3033 + z_erofs_onlinepage_fixup(page, index, true);
3034 +- ++spiltted;
3035 +
3036 +- /* also update nr_pages and increase queued_pages */
3037 ++ /* bump up the number of spiltted parts of a page */
3038 ++ ++spiltted;
3039 ++ /* also update nr_pages */
3040 + work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
3041 + next_part:
3042 + /* can be used for verification */
3043 +@@ -706,16 +694,18 @@ next_part:
3044 + if (end > 0)
3045 + goto repeat;
3046 +
3047 ++out:
3048 + 	/* FIXME! avoid the last redundant fixup & endio */
3049 + z_erofs_onlinepage_endio(page);
3050 +
3051 + debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
3052 + __func__, page, spiltted, map->m_llen);
3053 +- return 0;
3054 ++ return err;
3055 +
3056 ++ /* if some error occurred while processing this page */
3057 + err_out:
3058 +- /* TODO: the missing error handing cases */
3059 +- return err;
3060 ++ SetPageError(page);
3061 ++ goto out;
3062 + }
3063 +
3064 + static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
3065 +@@ -752,7 +742,7 @@ static inline void z_erofs_vle_read_endio(struct bio *bio)
3066 + bool cachemngd = false;
3067 +
3068 + DBG_BUGON(PageUptodate(page));
3069 +- BUG_ON(page->mapping == NULL);
3070 ++ DBG_BUGON(!page->mapping);
3071 +
3072 + #ifdef EROFS_FS_HAS_MANAGED_CACHE
3073 + if (unlikely(mngda == NULL && !z_erofs_is_stagingpage(page))) {
3074 +@@ -796,10 +786,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
3075 + const unsigned clusterpages = erofs_clusterpages(sbi);
3076 +
3077 + struct z_erofs_pagevec_ctor ctor;
3078 +- unsigned nr_pages;
3079 +-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
3080 +- unsigned sparsemem_pages = 0;
3081 +-#endif
3082 ++ unsigned int nr_pages;
3083 ++ unsigned int sparsemem_pages = 0;
3084 + struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
3085 + struct page **pages, **compressed_pages, *page;
3086 + unsigned i, llen;
3087 +@@ -811,12 +799,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
3088 + int err;
3089 +
3090 + might_sleep();
3091 +-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
3092 + work = z_erofs_vle_grab_primary_work(grp);
3093 +-#else
3094 +- BUG();
3095 +-#endif
3096 +- BUG_ON(!READ_ONCE(work->nr_pages));
3097 ++ DBG_BUGON(!READ_ONCE(work->nr_pages));
3098 +
3099 + mutex_lock(&work->lock);
3100 + nr_pages = work->nr_pages;
3101 +@@ -865,14 +849,12 @@ repeat:
3102 + else
3103 + pagenr = z_erofs_onlinepage_index(page);
3104 +
3105 +- BUG_ON(pagenr >= nr_pages);
3106 ++ DBG_BUGON(pagenr >= nr_pages);
3107 ++ DBG_BUGON(pages[pagenr]);
3108 +
3109 +-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
3110 +- BUG_ON(pages[pagenr] != NULL);
3111 +- ++sparsemem_pages;
3112 +-#endif
3113 + pages[pagenr] = page;
3114 + }
3115 ++ sparsemem_pages = i;
3116 +
3117 + z_erofs_pagevec_ctor_exit(&ctor, true);
3118 +
3119 +@@ -891,9 +873,8 @@ repeat:
3120 + if (z_erofs_is_stagingpage(page))
3121 + continue;
3122 + #ifdef EROFS_FS_HAS_MANAGED_CACHE
3123 +- else if (page->mapping == mngda) {
3124 +- BUG_ON(PageLocked(page));
3125 +- BUG_ON(!PageUptodate(page));
3126 ++ if (page->mapping == mngda) {
3127 ++ DBG_BUGON(!PageUptodate(page));
3128 + continue;
3129 + }
3130 + #endif
3131 +@@ -901,11 +882,9 @@ repeat:
3132 + /* only non-head page could be reused as a compressed page */
3133 + pagenr = z_erofs_onlinepage_index(page);
3134 +
3135 +- BUG_ON(pagenr >= nr_pages);
3136 +-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
3137 +- BUG_ON(pages[pagenr] != NULL);
3138 ++ DBG_BUGON(pagenr >= nr_pages);
3139 ++ DBG_BUGON(pages[pagenr]);
3140 + ++sparsemem_pages;
3141 +-#endif
3142 + pages[pagenr] = page;
3143 +
3144 + overlapped = true;
3145 +@@ -914,9 +893,6 @@ repeat:
3146 + llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
3147 +
3148 + if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
3149 +- /* FIXME! this should be fixed in the future */
3150 +- BUG_ON(grp->llen != llen);
3151 +-
3152 + err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
3153 + pages, nr_pages, work->pageofs);
3154 + goto out;
3155 +@@ -931,12 +907,8 @@ repeat:
3156 + if (err != -ENOTSUPP)
3157 + goto out_percpu;
3158 +
3159 +-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
3160 +- if (sparsemem_pages >= nr_pages) {
3161 +- BUG_ON(sparsemem_pages > nr_pages);
3162 ++ if (sparsemem_pages >= nr_pages)
3163 + goto skip_allocpage;
3164 +- }
3165 +-#endif
3166 +
3167 + for (i = 0; i < nr_pages; ++i) {
3168 + if (pages[i] != NULL)
3169 +@@ -945,9 +917,7 @@ repeat:
3170 + pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
3171 + }
3172 +
3173 +-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
3174 + skip_allocpage:
3175 +-#endif
3176 + vout = erofs_vmap(pages, nr_pages);
3177 +
3178 + err = z_erofs_vle_unzip_vmap(compressed_pages,
3179 +@@ -1031,7 +1001,7 @@ static void z_erofs_vle_unzip_wq(struct work_struct *work)
3180 + struct z_erofs_vle_unzip_io_sb, io.u.work);
3181 + LIST_HEAD(page_pool);
3182 +
3183 +- BUG_ON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
3184 ++ DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
3185 + z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
3186 +
3187 + put_pages_list(&page_pool);
3188 +@@ -1360,7 +1330,6 @@ static inline int __z_erofs_vle_normalaccess_readpages(
3189 + continue;
3190 + }
3191 +
3192 +- BUG_ON(PagePrivate(page));
3193 + set_page_private(page, (unsigned long)head);
3194 + head = page;
3195 + }
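
The z_erofs_do_read_page() rework above replaces the bare `return err` with a shared exit: an error now marks the page before the same endio path completes it, so the page is finished exactly once either way. A minimal sketch of that control flow, with hypothetical helper names standing in for the driver's real work:

    /* Single-exit error handling: the page is completed exactly once,
     * but a failure is recorded on it first (helpers are illustrative). */
    static int read_one_page(struct page *page)
    {
            int err = process_page(page);   /* hypothetical */

            if (unlikely(err))
                    goto err_out;
    out:
            complete_page(page);            /* endio-style, hypothetical */
            return err;

    err_out:
            SetPageError(page);
            goto out;
    }
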
3196 +diff --git a/drivers/staging/erofs/unzip_vle.h b/drivers/staging/erofs/unzip_vle.h
3197 +index 3939985008652..3316bc36965d4 100644
3198 +--- a/drivers/staging/erofs/unzip_vle.h
3199 ++++ b/drivers/staging/erofs/unzip_vle.h
3200 +@@ -47,13 +47,6 @@ static inline bool z_erofs_gather_if_stagingpage(struct list_head *page_pool,
3201 + #define Z_EROFS_VLE_INLINE_PAGEVECS 3
3202 +
3203 + struct z_erofs_vle_work {
3204 +- /* struct z_erofs_vle_work *left, *right; */
3205 +-
3206 +-#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
3207 +- struct list_head list;
3208 +-
3209 +- atomic_t refcount;
3210 +-#endif
3211 + struct mutex lock;
3212 +
3213 + /* I: decompression offset in page */
3214 +@@ -107,10 +100,8 @@ static inline void z_erofs_vle_set_workgrp_fmt(
3215 + grp->flags = fmt | (grp->flags & ~Z_EROFS_VLE_WORKGRP_FMT_MASK);
3216 + }
3217 +
3218 +-#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
3219 +-#error multiref decompression is unimplemented yet
3220 +-#else
3221 +
3222 ++/* definitions if multiref is disabled */
3223 + #define z_erofs_vle_grab_primary_work(grp) (&(grp)->work)
3224 + #define z_erofs_vle_grab_work(grp, pageofs) (&(grp)->work)
3225 + #define z_erofs_vle_work_workgroup(wrk, primary) \
3226 +@@ -118,7 +109,6 @@ static inline void z_erofs_vle_set_workgrp_fmt(
3227 + struct z_erofs_vle_workgroup, work) : \
3228 + ({ BUG(); (void *)NULL; }))
3229 +
3230 +-#endif
3231 +
3232 + #define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_vle_workgroup)
3233 +
3234 +diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
3235 +index f5b665f15be52..9cb35cd33365a 100644
3236 +--- a/drivers/staging/erofs/unzip_vle_lz4.c
3237 ++++ b/drivers/staging/erofs/unzip_vle_lz4.c
3238 +@@ -57,7 +57,7 @@ int z_erofs_vle_plain_copy(struct page **compressed_pages,
3239 + if (compressed_pages[j] != page)
3240 + continue;
3241 +
3242 +- BUG_ON(mirrored[j]);
3243 ++ DBG_BUGON(mirrored[j]);
3244 + memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
3245 + mirrored[j] = true;
3246 + break;
3247 +diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
3248 +index 595cf90af9bb2..dd2ac9dbc4b47 100644
3249 +--- a/drivers/staging/erofs/utils.c
3250 ++++ b/drivers/staging/erofs/utils.c
3251 +@@ -23,9 +23,6 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
3252 + list_del(&page->lru);
3253 + } else {
3254 + page = alloc_pages(gfp | __GFP_NOFAIL, 0);
3255 +-
3256 +- BUG_ON(page == NULL);
3257 +- BUG_ON(page->mapping != NULL);
3258 + }
3259 + return page;
3260 + }
3261 +@@ -60,7 +57,7 @@ repeat:
3262 + /* decrease refcount added by erofs_workgroup_put */
3263 + if (unlikely(oldcount == 1))
3264 + atomic_long_dec(&erofs_global_shrink_cnt);
3265 +- BUG_ON(index != grp->index);
3266 ++ DBG_BUGON(index != grp->index);
3267 + }
3268 + rcu_read_unlock();
3269 + return grp;
3270 +@@ -73,8 +70,11 @@ int erofs_register_workgroup(struct super_block *sb,
3271 + struct erofs_sb_info *sbi;
3272 + int err;
3273 +
3274 +- /* grp->refcount should not < 1 */
3275 +- BUG_ON(!atomic_read(&grp->refcount));
3276 ++	/* grp must be neither broken nor already in use */
3277 ++ if (unlikely(atomic_read(&grp->refcount) != 1)) {
3278 ++ DBG_BUGON(1);
3279 ++ return -EINVAL;
3280 ++ }
3281 +
3282 + err = radix_tree_preload(GFP_NOFS);
3283 + if (err)
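
All of the erofs hunks above downgrade hard BUG_ON() assertions to DBG_BUGON(), which fires only on debug builds and costs nothing otherwise. Roughly how such a macro is wired up, keyed off a debug Kconfig switch (a sketch; the real definition lives in the erofs internal header):

    #include <linux/bug.h>

    #ifdef CONFIG_EROFS_FS_DEBUG
    #define DBG_BUGON(condition)    BUG_ON(condition)
    #else
    #define DBG_BUGON(condition)    ((void)(condition))
    #endif
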
3284 +diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
3285 +index 9cd404acdb82b..ac7620120491b 100644
3286 +--- a/drivers/target/target_core_user.c
3287 ++++ b/drivers/target/target_core_user.c
3288 +@@ -148,7 +148,7 @@ struct tcmu_dev {
3289 + size_t ring_size;
3290 +
3291 + struct mutex cmdr_lock;
3292 +- struct list_head cmdr_queue;
3293 ++ struct list_head qfull_queue;
3294 +
3295 + uint32_t dbi_max;
3296 + uint32_t dbi_thresh;
3297 +@@ -159,6 +159,7 @@ struct tcmu_dev {
3298 +
3299 + struct timer_list cmd_timer;
3300 + unsigned int cmd_time_out;
3301 ++ struct list_head inflight_queue;
3302 +
3303 + struct timer_list qfull_timer;
3304 + int qfull_time_out;
3305 +@@ -179,7 +180,7 @@ struct tcmu_dev {
3306 + struct tcmu_cmd {
3307 + struct se_cmd *se_cmd;
3308 + struct tcmu_dev *tcmu_dev;
3309 +- struct list_head cmdr_queue_entry;
3310 ++ struct list_head queue_entry;
3311 +
3312 + uint16_t cmd_id;
3313 +
3314 +@@ -192,6 +193,7 @@ struct tcmu_cmd {
3315 + unsigned long deadline;
3316 +
3317 + #define TCMU_CMD_BIT_EXPIRED 0
3318 ++#define TCMU_CMD_BIT_INFLIGHT 1
3319 + unsigned long flags;
3320 + };
3321 + /*
3322 +@@ -586,7 +588,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
3323 + if (!tcmu_cmd)
3324 + return NULL;
3325 +
3326 +- INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry);
3327 ++ INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
3328 + tcmu_cmd->se_cmd = se_cmd;
3329 + tcmu_cmd->tcmu_dev = udev;
3330 +
3331 +@@ -915,11 +917,13 @@ setup_timer:
3332 + return 0;
3333 +
3334 + tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
3335 +- mod_timer(timer, tcmu_cmd->deadline);
3336 ++ if (!timer_pending(timer))
3337 ++ mod_timer(timer, tcmu_cmd->deadline);
3338 ++
3339 + return 0;
3340 + }
3341 +
3342 +-static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
3343 ++static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
3344 + {
3345 + struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
3346 + unsigned int tmo;
3347 +@@ -942,7 +946,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
3348 + if (ret)
3349 + return ret;
3350 +
3351 +- list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue);
3352 ++ list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
3353 + pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
3354 + tcmu_cmd->cmd_id, udev->name);
3355 + return 0;
3356 +@@ -999,7 +1003,7 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
3357 + base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
3358 + command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
3359 +
3360 +- if (!list_empty(&udev->cmdr_queue))
3361 ++ if (!list_empty(&udev->qfull_queue))
3362 + goto queue;
3363 +
3364 + mb = udev->mb_addr;
3365 +@@ -1096,13 +1100,16 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
3366 + UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
3367 + tcmu_flush_dcache_range(mb, sizeof(*mb));
3368 +
3369 ++ list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
3370 ++ set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags);
3371 ++
3372 + /* TODO: only if FLUSH and FUA? */
3373 + uio_event_notify(&udev->uio_info);
3374 +
3375 + return 0;
3376 +
3377 + queue:
3378 +- if (add_to_cmdr_queue(tcmu_cmd)) {
3379 ++ if (add_to_qfull_queue(tcmu_cmd)) {
3380 + *scsi_err = TCM_OUT_OF_RESOURCES;
3381 + return -1;
3382 + }
3383 +@@ -1145,6 +1152,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
3384 + if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
3385 + goto out;
3386 +
3387 ++ list_del_init(&cmd->queue_entry);
3388 ++
3389 + tcmu_cmd_reset_dbi_cur(cmd);
3390 +
3391 + if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
3392 +@@ -1194,9 +1203,29 @@ out:
3393 + tcmu_free_cmd(cmd);
3394 + }
3395 +
3396 ++static void tcmu_set_next_deadline(struct list_head *queue,
3397 ++ struct timer_list *timer)
3398 ++{
3399 ++ struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
3400 ++ unsigned long deadline = 0;
3401 ++
3402 ++ list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
3403 ++ if (!time_after(jiffies, tcmu_cmd->deadline)) {
3404 ++ deadline = tcmu_cmd->deadline;
3405 ++ break;
3406 ++ }
3407 ++ }
3408 ++
3409 ++ if (deadline)
3410 ++ mod_timer(timer, deadline);
3411 ++ else
3412 ++ del_timer(timer);
3413 ++}
3414 ++
3415 + static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
3416 + {
3417 + struct tcmu_mailbox *mb;
3418 ++ struct tcmu_cmd *cmd;
3419 + int handled = 0;
3420 +
3421 + if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
3422 +@@ -1210,7 +1239,6 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
3423 + while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
3424 +
3425 + struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
3426 +- struct tcmu_cmd *cmd;
3427 +
3428 + tcmu_flush_dcache_range(entry, sizeof(*entry));
3429 +
3430 +@@ -1243,7 +1271,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
3431 + /* no more pending commands */
3432 + del_timer(&udev->cmd_timer);
3433 +
3434 +- if (list_empty(&udev->cmdr_queue)) {
3435 ++ if (list_empty(&udev->qfull_queue)) {
3436 + /*
3437 + * no more pending or waiting commands so try to
3438 + * reclaim blocks if needed.
3439 +@@ -1252,6 +1280,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
3440 + tcmu_global_max_blocks)
3441 + schedule_delayed_work(&tcmu_unmap_work, 0);
3442 + }
3443 ++ } else if (udev->cmd_time_out) {
3444 ++ tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
3445 + }
3446 +
3447 + return handled;
3448 +@@ -1271,7 +1301,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
3449 + if (!time_after(jiffies, cmd->deadline))
3450 + return 0;
3451 +
3452 +- is_running = list_empty(&cmd->cmdr_queue_entry);
3453 ++ is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
3454 + se_cmd = cmd->se_cmd;
3455 +
3456 + if (is_running) {
3457 +@@ -1288,12 +1318,11 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
3458 + */
3459 + scsi_status = SAM_STAT_CHECK_CONDITION;
3460 + } else {
3461 +- list_del_init(&cmd->cmdr_queue_entry);
3462 +-
3463 + idr_remove(&udev->commands, id);
3464 + tcmu_free_cmd(cmd);
3465 + scsi_status = SAM_STAT_TASK_SET_FULL;
3466 + }
3467 ++ list_del_init(&cmd->queue_entry);
3468 +
3469 + pr_debug("Timing out cmd %u on dev %s that is %s.\n",
3470 + id, udev->name, is_running ? "inflight" : "queued");
3471 +@@ -1372,7 +1401,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
3472 +
3473 + INIT_LIST_HEAD(&udev->node);
3474 + INIT_LIST_HEAD(&udev->timedout_entry);
3475 +- INIT_LIST_HEAD(&udev->cmdr_queue);
3476 ++ INIT_LIST_HEAD(&udev->qfull_queue);
3477 ++ INIT_LIST_HEAD(&udev->inflight_queue);
3478 + idr_init(&udev->commands);
3479 +
3480 + timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
3481 +@@ -1383,7 +1413,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
3482 + return &udev->se_dev;
3483 + }
3484 +
3485 +-static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
3486 ++static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
3487 + {
3488 + struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
3489 + LIST_HEAD(cmds);
3490 +@@ -1391,15 +1421,15 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
3491 + sense_reason_t scsi_ret;
3492 + int ret;
3493 +
3494 +- if (list_empty(&udev->cmdr_queue))
3495 ++ if (list_empty(&udev->qfull_queue))
3496 + return true;
3497 +
3498 + pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
3499 +
3500 +- list_splice_init(&udev->cmdr_queue, &cmds);
3501 ++ list_splice_init(&udev->qfull_queue, &cmds);
3502 +
3503 +- list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) {
3504 +- list_del_init(&tcmu_cmd->cmdr_queue_entry);
3505 ++ list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
3506 ++ list_del_init(&tcmu_cmd->queue_entry);
3507 +
3508 + pr_debug("removing cmd %u on dev %s from queue\n",
3509 + tcmu_cmd->cmd_id, udev->name);
3510 +@@ -1437,14 +1467,13 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
3511 + * cmd was requeued, so just put all cmds back in
3512 + * the queue
3513 + */
3514 +- list_splice_tail(&cmds, &udev->cmdr_queue);
3515 ++ list_splice_tail(&cmds, &udev->qfull_queue);
3516 + drained = false;
3517 +- goto done;
3518 ++ break;
3519 + }
3520 + }
3521 +- if (list_empty(&udev->cmdr_queue))
3522 +- del_timer(&udev->qfull_timer);
3523 +-done:
3524 ++
3525 ++ tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
3526 + return drained;
3527 + }
3528 +
3529 +@@ -1454,7 +1483,7 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
3530 +
3531 + mutex_lock(&udev->cmdr_lock);
3532 + tcmu_handle_completions(udev);
3533 +- run_cmdr_queue(udev, false);
3534 ++ run_qfull_queue(udev, false);
3535 + mutex_unlock(&udev->cmdr_lock);
3536 +
3537 + return 0;
3538 +@@ -1982,7 +2011,7 @@ static void tcmu_block_dev(struct tcmu_dev *udev)
3539 + /* complete IO that has executed successfully */
3540 + tcmu_handle_completions(udev);
3541 + /* fail IO waiting to be queued */
3542 +- run_cmdr_queue(udev, true);
3543 ++ run_qfull_queue(udev, true);
3544 +
3545 + unlock:
3546 + mutex_unlock(&udev->cmdr_lock);
3547 +@@ -1997,7 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
3548 + mutex_lock(&udev->cmdr_lock);
3549 +
3550 + idr_for_each_entry(&udev->commands, cmd, i) {
3551 +- if (!list_empty(&cmd->cmdr_queue_entry))
3552 ++ if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
3553 + continue;
3554 +
3555 + pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
3556 +@@ -2006,6 +2035,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
3557 +
3558 + idr_remove(&udev->commands, i);
3559 + if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
3560 ++ list_del_init(&cmd->queue_entry);
3561 + if (err_level == 1) {
3562 + /*
3563 + * Userspace was not able to start the
3564 +@@ -2666,6 +2696,10 @@ static void check_timedout_devices(void)
3565 +
3566 + mutex_lock(&udev->cmdr_lock);
3567 + idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
3568 ++
3569 ++ tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
3570 ++ tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
3571 ++
3572 + mutex_unlock(&udev->cmdr_lock);
3573 +
3574 + spin_lock_bh(&timed_out_udevs_lock);
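
Two details in the tcmu changes are easy to miss. Commands join each queue in deadline order, so queue_cmd_ring() arms the timer only when it is idle: an unconditional mod_timer() would keep pushing the expiry past the oldest command. And tcmu_set_next_deadline() re-arms from the first unexpired entry once completions or timeouts have pruned the list. The arming rule in isolation:

    #include <linux/timer.h>

    /* Entries arrive in deadline order, so a pending timer already
     * points at an earlier-or-equal deadline; only arm when idle. */
    static void arm_cmd_timer(struct timer_list *timer, unsigned long deadline)
    {
            if (!timer_pending(timer))
                    mod_timer(timer, deadline);
    }
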
3575 +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
3576 +index 5eaeca805c95c..b214a72d5caad 100644
3577 +--- a/drivers/vhost/vhost.c
3578 ++++ b/drivers/vhost/vhost.c
3579 +@@ -1035,8 +1035,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
3580 + int type, ret;
3581 +
3582 + ret = copy_from_iter(&type, sizeof(type), from);
3583 +- if (ret != sizeof(type))
3584 ++ if (ret != sizeof(type)) {
3585 ++ ret = -EINVAL;
3586 + goto done;
3587 ++ }
3588 +
3589 + switch (type) {
3590 + case VHOST_IOTLB_MSG:
3591 +@@ -1055,8 +1057,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
3592 +
3593 + iov_iter_advance(from, offset);
3594 + ret = copy_from_iter(&msg, sizeof(msg), from);
3595 +- if (ret != sizeof(msg))
3596 ++ if (ret != sizeof(msg)) {
3597 ++ ret = -EINVAL;
3598 + goto done;
3599 ++ }
3600 + if (vhost_process_iotlb_msg(dev, &msg)) {
3601 + ret = -EFAULT;
3602 + goto done;
3603 +diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
3604 +index bdfcc0a71db14..6bde543452f25 100644
3605 +--- a/drivers/video/backlight/pwm_bl.c
3606 ++++ b/drivers/video/backlight/pwm_bl.c
3607 +@@ -262,6 +262,16 @@ static int pwm_backlight_parse_dt(struct device *dev,
3608 +
3609 + memset(data, 0, sizeof(*data));
3610 +
3611 ++ /*
3612 ++	 * These values are optional and default to 0; the out values are
3613 ++	 * modified only if a valid u32 value can be decoded.
3614 ++ */
3615 ++ of_property_read_u32(node, "post-pwm-on-delay-ms",
3616 ++ &data->post_pwm_on_delay);
3617 ++ of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
3618 ++
3619 ++ data->enable_gpio = -EINVAL;
3620 ++
3621 + /*
3622 + * Determine the number of brightness levels, if this property is not
3623 + * set a default table of brightness levels will be used.
3624 +@@ -374,15 +384,6 @@ static int pwm_backlight_parse_dt(struct device *dev,
3625 + data->max_brightness--;
3626 + }
3627 +
3628 +- /*
3629 +- * These values are optional and set as 0 by default, the out values
3630 +- * are modified only if a valid u32 value can be decoded.
3631 +- */
3632 +- of_property_read_u32(node, "post-pwm-on-delay-ms",
3633 +- &data->post_pwm_on_delay);
3634 +- of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
3635 +-
3636 +- data->enable_gpio = -EINVAL;
3637 + return 0;
3638 + }
3639 +
3640 +diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
3641 +index afbd6101c78eb..070026a7e55a5 100644
3642 +--- a/drivers/video/fbdev/udlfb.c
3643 ++++ b/drivers/video/fbdev/udlfb.c
3644 +@@ -916,8 +916,6 @@ static int dlfb_ops_open(struct fb_info *info, int user)
3645 +
3646 + dlfb->fb_count++;
3647 +
3648 +- kref_get(&dlfb->kref);
3649 +-
3650 + if (fb_defio && (info->fbdefio == NULL)) {
3651 + /* enable defio at last moment if not disabled by client */
3652 +
3653 +@@ -940,14 +938,17 @@ static int dlfb_ops_open(struct fb_info *info, int user)
3654 + return 0;
3655 + }
3656 +
3657 +-/*
3658 +- * Called when all client interfaces to start transactions have been disabled,
3659 +- * and all references to our device instance (dlfb_data) are released.
3660 +- * Every transaction must have a reference, so we know are fully spun down
3661 +- */
3662 +-static void dlfb_free(struct kref *kref)
3663 ++static void dlfb_ops_destroy(struct fb_info *info)
3664 + {
3665 +- struct dlfb_data *dlfb = container_of(kref, struct dlfb_data, kref);
3666 ++ struct dlfb_data *dlfb = info->par;
3667 ++
3668 ++ if (info->cmap.len != 0)
3669 ++ fb_dealloc_cmap(&info->cmap);
3670 ++ if (info->monspecs.modedb)
3671 ++ fb_destroy_modedb(info->monspecs.modedb);
3672 ++ vfree(info->screen_base);
3673 ++
3674 ++ fb_destroy_modelist(&info->modelist);
3675 +
3676 + while (!list_empty(&dlfb->deferred_free)) {
3677 + struct dlfb_deferred_free *d = list_entry(dlfb->deferred_free.next, struct dlfb_deferred_free, list);
3678 +@@ -957,40 +958,13 @@ static void dlfb_free(struct kref *kref)
3679 + }
3680 + vfree(dlfb->backing_buffer);
3681 + kfree(dlfb->edid);
3682 ++ usb_put_dev(dlfb->udev);
3683 + kfree(dlfb);
3684 +-}
3685 +-
3686 +-static void dlfb_free_framebuffer(struct dlfb_data *dlfb)
3687 +-{
3688 +- struct fb_info *info = dlfb->info;
3689 +-
3690 +- if (info) {
3691 +- unregister_framebuffer(info);
3692 +-
3693 +- if (info->cmap.len != 0)
3694 +- fb_dealloc_cmap(&info->cmap);
3695 +- if (info->monspecs.modedb)
3696 +- fb_destroy_modedb(info->monspecs.modedb);
3697 +- vfree(info->screen_base);
3698 +-
3699 +- fb_destroy_modelist(&info->modelist);
3700 +-
3701 +- dlfb->info = NULL;
3702 +-
3703 +- /* Assume info structure is freed after this point */
3704 +- framebuffer_release(info);
3705 +- }
3706 +
3707 +- /* ref taken in probe() as part of registering framebfufer */
3708 +- kref_put(&dlfb->kref, dlfb_free);
3709 ++ /* Assume info structure is freed after this point */
3710 ++ framebuffer_release(info);
3711 + }
3712 +
3713 +-static void dlfb_free_framebuffer_work(struct work_struct *work)
3714 +-{
3715 +- struct dlfb_data *dlfb = container_of(work, struct dlfb_data,
3716 +- free_framebuffer_work.work);
3717 +- dlfb_free_framebuffer(dlfb);
3718 +-}
3719 + /*
3720 + * Assumes caller is holding info->lock mutex (for open and release at least)
3721 + */
3722 +@@ -1000,10 +974,6 @@ static int dlfb_ops_release(struct fb_info *info, int user)
3723 +
3724 + dlfb->fb_count--;
3725 +
3726 +- /* We can't free fb_info here - fbmem will touch it when we return */
3727 +- if (dlfb->virtualized && (dlfb->fb_count == 0))
3728 +- schedule_delayed_work(&dlfb->free_framebuffer_work, HZ);
3729 +-
3730 + if ((dlfb->fb_count == 0) && (info->fbdefio)) {
3731 + fb_deferred_io_cleanup(info);
3732 + kfree(info->fbdefio);
3733 +@@ -1013,8 +983,6 @@ static int dlfb_ops_release(struct fb_info *info, int user)
3734 +
3735 + dev_dbg(info->dev, "release, user=%d count=%d\n", user, dlfb->fb_count);
3736 +
3737 +- kref_put(&dlfb->kref, dlfb_free);
3738 +-
3739 + return 0;
3740 + }
3741 +
3742 +@@ -1172,6 +1140,7 @@ static struct fb_ops dlfb_ops = {
3743 + .fb_blank = dlfb_ops_blank,
3744 + .fb_check_var = dlfb_ops_check_var,
3745 + .fb_set_par = dlfb_ops_set_par,
3746 ++ .fb_destroy = dlfb_ops_destroy,
3747 + };
3748 +
3749 +
3750 +@@ -1615,12 +1584,13 @@ success:
3751 + return true;
3752 + }
3753 +
3754 +-static void dlfb_init_framebuffer_work(struct work_struct *work);
3755 +-
3756 + static int dlfb_usb_probe(struct usb_interface *intf,
3757 + const struct usb_device_id *id)
3758 + {
3759 ++ int i;
3760 ++ const struct device_attribute *attr;
3761 + struct dlfb_data *dlfb;
3762 ++ struct fb_info *info;
3763 + int retval = -ENOMEM;
3764 + struct usb_device *usbdev = interface_to_usbdev(intf);
3765 +
3766 +@@ -1631,10 +1601,9 @@ static int dlfb_usb_probe(struct usb_interface *intf,
3767 + goto error;
3768 + }
3769 +
3770 +- kref_init(&dlfb->kref); /* matching kref_put in usb .disconnect fn */
3771 + INIT_LIST_HEAD(&dlfb->deferred_free);
3772 +
3773 +- dlfb->udev = usbdev;
3774 ++ dlfb->udev = usb_get_dev(usbdev);
3775 + usb_set_intfdata(intf, dlfb);
3776 +
3777 + dev_dbg(&intf->dev, "console enable=%d\n", console);
3778 +@@ -1657,42 +1626,6 @@ static int dlfb_usb_probe(struct usb_interface *intf,
3779 + }
3780 +
3781 +
3782 +- if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
3783 +- retval = -ENOMEM;
3784 +- dev_err(&intf->dev, "unable to allocate urb list\n");
3785 +- goto error;
3786 +- }
3787 +-
3788 +- kref_get(&dlfb->kref); /* matching kref_put in free_framebuffer_work */
3789 +-
3790 +- /* We don't register a new USB class. Our client interface is dlfbev */
3791 +-
3792 +- /* Workitem keep things fast & simple during USB enumeration */
3793 +- INIT_DELAYED_WORK(&dlfb->init_framebuffer_work,
3794 +- dlfb_init_framebuffer_work);
3795 +- schedule_delayed_work(&dlfb->init_framebuffer_work, 0);
3796 +-
3797 +- return 0;
3798 +-
3799 +-error:
3800 +- if (dlfb) {
3801 +-
3802 +- kref_put(&dlfb->kref, dlfb_free); /* last ref from kref_init */
3803 +-
3804 +- /* dev has been deallocated. Do not dereference */
3805 +- }
3806 +-
3807 +- return retval;
3808 +-}
3809 +-
3810 +-static void dlfb_init_framebuffer_work(struct work_struct *work)
3811 +-{
3812 +- int i, retval;
3813 +- struct fb_info *info;
3814 +- const struct device_attribute *attr;
3815 +- struct dlfb_data *dlfb = container_of(work, struct dlfb_data,
3816 +- init_framebuffer_work.work);
3817 +-
3818 + /* allocates framebuffer driver structure, not framebuffer memory */
3819 + info = framebuffer_alloc(0, &dlfb->udev->dev);
3820 + if (!info) {
3821 +@@ -1706,17 +1639,22 @@ static void dlfb_init_framebuffer_work(struct work_struct *work)
3822 + dlfb->ops = dlfb_ops;
3823 + info->fbops = &dlfb->ops;
3824 +
3825 ++ INIT_LIST_HEAD(&info->modelist);
3826 ++
3827 ++ if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
3828 ++ retval = -ENOMEM;
3829 ++ dev_err(&intf->dev, "unable to allocate urb list\n");
3830 ++ goto error;
3831 ++ }
3832 ++
3833 ++ /* We don't register a new USB class. Our client interface is dlfbev */
3834 ++
3835 + retval = fb_alloc_cmap(&info->cmap, 256, 0);
3836 + if (retval < 0) {
3837 + dev_err(info->device, "cmap allocation failed: %d\n", retval);
3838 + goto error;
3839 + }
3840 +
3841 +- INIT_DELAYED_WORK(&dlfb->free_framebuffer_work,
3842 +- dlfb_free_framebuffer_work);
3843 +-
3844 +- INIT_LIST_HEAD(&info->modelist);
3845 +-
3846 + retval = dlfb_setup_modes(dlfb, info, NULL, 0);
3847 + if (retval != 0) {
3848 + dev_err(info->device,
3849 +@@ -1760,10 +1698,16 @@ static void dlfb_init_framebuffer_work(struct work_struct *work)
3850 + dev_name(info->dev), info->var.xres, info->var.yres,
3851 + ((dlfb->backing_buffer) ?
3852 + info->fix.smem_len * 2 : info->fix.smem_len) >> 10);
3853 +- return;
3854 ++ return 0;
3855 +
3856 + error:
3857 +- dlfb_free_framebuffer(dlfb);
3858 ++ if (dlfb->info) {
3859 ++ dlfb_ops_destroy(dlfb->info);
3860 ++ } else if (dlfb) {
3861 ++ usb_put_dev(dlfb->udev);
3862 ++ kfree(dlfb);
3863 ++ }
3864 ++ return retval;
3865 + }
3866 +
3867 + static void dlfb_usb_disconnect(struct usb_interface *intf)
3868 +@@ -1791,20 +1735,9 @@ static void dlfb_usb_disconnect(struct usb_interface *intf)
3869 + for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
3870 + device_remove_file(info->dev, &fb_device_attrs[i]);
3871 + device_remove_bin_file(info->dev, &edid_attr);
3872 +- unlink_framebuffer(info);
3873 + }
3874 +
3875 +- usb_set_intfdata(intf, NULL);
3876 +- dlfb->udev = NULL;
3877 +-
3878 +- /* if clients still have us open, will be freed on last close */
3879 +- if (dlfb->fb_count == 0)
3880 +- schedule_delayed_work(&dlfb->free_framebuffer_work, 0);
3881 +-
3882 +- /* release reference taken by kref_init in probe() */
3883 +- kref_put(&dlfb->kref, dlfb_free);
3884 +-
3885 +- /* consider dlfb_data freed */
3886 ++ unregister_framebuffer(info);
3887 + }
3888 +
3889 + static struct usb_driver dlfb_driver = {
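
The udlfb rework retires the hand-rolled kref plus delayed-work teardown and leans on the fbdev core's own lifetime hook: the .fb_destroy method of struct fb_ops runs once the last reference to the fb_info is dropped, so disconnect only has to call unregister_framebuffer(). Schematically (names are illustrative):

    #include <linux/fb.h>

    static void my_fb_destroy(struct fb_info *info)
    {
            /* free per-device state; the core calls this exactly once,
             * after the final fb_info reference goes away */
    }

    static struct fb_ops my_fb_ops = {
            .owner      = THIS_MODULE,
            .fb_destroy = my_fb_destroy,
    };
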
3890 +diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c
3891 +index 5c4a764717c4d..81208cd3f4ecb 100644
3892 +--- a/drivers/watchdog/mt7621_wdt.c
3893 ++++ b/drivers/watchdog/mt7621_wdt.c
3894 +@@ -17,6 +17,7 @@
3895 + #include <linux/watchdog.h>
3896 + #include <linux/moduleparam.h>
3897 + #include <linux/platform_device.h>
3898 ++#include <linux/mod_devicetable.h>
3899 +
3900 + #include <asm/mach-ralink/ralink_regs.h>
3901 +
3902 +diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
3903 +index 98967f0a7d10e..db7c57d82cfdc 100644
3904 +--- a/drivers/watchdog/rt2880_wdt.c
3905 ++++ b/drivers/watchdog/rt2880_wdt.c
3906 +@@ -18,6 +18,7 @@
3907 + #include <linux/watchdog.h>
3908 + #include <linux/moduleparam.h>
3909 + #include <linux/platform_device.h>
3910 ++#include <linux/mod_devicetable.h>
3911 +
3912 + #include <asm/mach-ralink/ralink_regs.h>
3913 +
3914 +diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
3915 +index b1092fbefa630..d4ea33581ac26 100644
3916 +--- a/drivers/xen/pvcalls-back.c
3917 ++++ b/drivers/xen/pvcalls-back.c
3918 +@@ -160,9 +160,10 @@ static void pvcalls_conn_back_read(void *opaque)
3919 +
3920 + /* write the data, then modify the indexes */
3921 + virt_wmb();
3922 +- if (ret < 0)
3923 ++ if (ret < 0) {
3924 ++ atomic_set(&map->read, 0);
3925 + intf->in_error = ret;
3926 +- else
3927 ++ } else
3928 + intf->in_prod = prod + ret;
3929 + /* update the indexes, then notify the other end */
3930 + virt_wmb();
3931 +@@ -282,13 +283,11 @@ static int pvcalls_back_socket(struct xenbus_device *dev,
3932 + static void pvcalls_sk_state_change(struct sock *sock)
3933 + {
3934 + struct sock_mapping *map = sock->sk_user_data;
3935 +- struct pvcalls_data_intf *intf;
3936 +
3937 + if (map == NULL)
3938 + return;
3939 +
3940 +- intf = map->ring;
3941 +- intf->in_error = -ENOTCONN;
3942 ++ atomic_inc(&map->read);
3943 + notify_remote_via_irq(map->irq);
3944 + }
3945 +
3946 +diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
3947 +index 77224d8f3e6fe..91da7e44d5d4f 100644
3948 +--- a/drivers/xen/pvcalls-front.c
3949 ++++ b/drivers/xen/pvcalls-front.c
3950 +@@ -31,6 +31,12 @@
3951 + #define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
3952 + #define PVCALLS_FRONT_MAX_SPIN 5000
3953 +
3954 ++static struct proto pvcalls_proto = {
3955 ++ .name = "PVCalls",
3956 ++ .owner = THIS_MODULE,
3957 ++ .obj_size = sizeof(struct sock),
3958 ++};
3959 ++
3960 + struct pvcalls_bedata {
3961 + struct xen_pvcalls_front_ring ring;
3962 + grant_ref_t ref;
3963 +@@ -335,6 +341,42 @@ int pvcalls_front_socket(struct socket *sock)
3964 + return ret;
3965 + }
3966 +
3967 ++static void free_active_ring(struct sock_mapping *map)
3968 ++{
3969 ++ if (!map->active.ring)
3970 ++ return;
3971 ++
3972 ++ free_pages((unsigned long)map->active.data.in,
3973 ++ map->active.ring->ring_order);
3974 ++ free_page((unsigned long)map->active.ring);
3975 ++}
3976 ++
3977 ++static int alloc_active_ring(struct sock_mapping *map)
3978 ++{
3979 ++ void *bytes;
3980 ++
3981 ++ map->active.ring = (struct pvcalls_data_intf *)
3982 ++ get_zeroed_page(GFP_KERNEL);
3983 ++ if (!map->active.ring)
3984 ++ goto out;
3985 ++
3986 ++ map->active.ring->ring_order = PVCALLS_RING_ORDER;
3987 ++ bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
3988 ++ PVCALLS_RING_ORDER);
3989 ++ if (!bytes)
3990 ++ goto out;
3991 ++
3992 ++ map->active.data.in = bytes;
3993 ++ map->active.data.out = bytes +
3994 ++ XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
3995 ++
3996 ++ return 0;
3997 ++
3998 ++out:
3999 ++ free_active_ring(map);
4000 ++ return -ENOMEM;
4001 ++}
4002 ++
4003 + static int create_active(struct sock_mapping *map, int *evtchn)
4004 + {
4005 + void *bytes;
4006 +@@ -343,15 +385,7 @@ static int create_active(struct sock_mapping *map, int *evtchn)
4007 + *evtchn = -1;
4008 + init_waitqueue_head(&map->active.inflight_conn_req);
4009 +
4010 +- map->active.ring = (struct pvcalls_data_intf *)
4011 +- __get_free_page(GFP_KERNEL | __GFP_ZERO);
4012 +- if (map->active.ring == NULL)
4013 +- goto out_error;
4014 +- map->active.ring->ring_order = PVCALLS_RING_ORDER;
4015 +- bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
4016 +- PVCALLS_RING_ORDER);
4017 +- if (bytes == NULL)
4018 +- goto out_error;
4019 ++ bytes = map->active.data.in;
4020 + for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
4021 + map->active.ring->ref[i] = gnttab_grant_foreign_access(
4022 + pvcalls_front_dev->otherend_id,
4023 +@@ -361,10 +395,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
4024 + pvcalls_front_dev->otherend_id,
4025 + pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
4026 +
4027 +- map->active.data.in = bytes;
4028 +- map->active.data.out = bytes +
4029 +- XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
4030 +-
4031 + ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
4032 + if (ret)
4033 + goto out_error;
4034 +@@ -385,8 +415,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
4035 + out_error:
4036 + if (*evtchn >= 0)
4037 + xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
4038 +- free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
4039 +- free_page((unsigned long)map->active.ring);
4040 + return ret;
4041 + }
4042 +
4043 +@@ -406,17 +434,24 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
4044 + return PTR_ERR(map);
4045 +
4046 + bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
4047 ++ ret = alloc_active_ring(map);
4048 ++ if (ret < 0) {
4049 ++ pvcalls_exit_sock(sock);
4050 ++ return ret;
4051 ++ }
4052 +
4053 + spin_lock(&bedata->socket_lock);
4054 + ret = get_request(bedata, &req_id);
4055 + if (ret < 0) {
4056 + spin_unlock(&bedata->socket_lock);
4057 ++ free_active_ring(map);
4058 + pvcalls_exit_sock(sock);
4059 + return ret;
4060 + }
4061 + ret = create_active(map, &evtchn);
4062 + if (ret < 0) {
4063 + spin_unlock(&bedata->socket_lock);
4064 ++ free_active_ring(map);
4065 + pvcalls_exit_sock(sock);
4066 + return ret;
4067 + }
4068 +@@ -560,15 +595,13 @@ static int __read_ring(struct pvcalls_data_intf *intf,
4069 + error = intf->in_error;
4070 + /* get pointers before reading from the ring */
4071 + virt_rmb();
4072 +- if (error < 0)
4073 +- return error;
4074 +
4075 + size = pvcalls_queued(prod, cons, array_size);
4076 + masked_prod = pvcalls_mask(prod, array_size);
4077 + masked_cons = pvcalls_mask(cons, array_size);
4078 +
4079 + if (size == 0)
4080 +- return 0;
4081 ++ return error ?: size;
4082 +
4083 + if (len > size)
4084 + len = size;
4085 +@@ -780,25 +813,36 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
4086 + }
4087 + }
4088 +
4089 +- spin_lock(&bedata->socket_lock);
4090 +- ret = get_request(bedata, &req_id);
4091 +- if (ret < 0) {
4092 ++ map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
4093 ++ if (map2 == NULL) {
4094 + clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
4095 + (void *)&map->passive.flags);
4096 +- spin_unlock(&bedata->socket_lock);
4097 ++ pvcalls_exit_sock(sock);
4098 ++ return -ENOMEM;
4099 ++ }
4100 ++ ret = alloc_active_ring(map2);
4101 ++ if (ret < 0) {
4102 ++ clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
4103 ++ (void *)&map->passive.flags);
4104 ++ kfree(map2);
4105 + pvcalls_exit_sock(sock);
4106 + return ret;
4107 + }
4108 +- map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
4109 +- if (map2 == NULL) {
4110 ++ spin_lock(&bedata->socket_lock);
4111 ++ ret = get_request(bedata, &req_id);
4112 ++ if (ret < 0) {
4113 + clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
4114 + (void *)&map->passive.flags);
4115 + spin_unlock(&bedata->socket_lock);
4116 ++ free_active_ring(map2);
4117 ++ kfree(map2);
4118 + pvcalls_exit_sock(sock);
4119 +- return -ENOMEM;
4120 ++ return ret;
4121 + }
4122 ++
4123 + ret = create_active(map2, &evtchn);
4124 + if (ret < 0) {
4125 ++ free_active_ring(map2);
4126 + kfree(map2);
4127 + clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
4128 + (void *)&map->passive.flags);
4129 +@@ -839,7 +883,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
4130 +
4131 + received:
4132 + map2->sock = newsock;
4133 +- newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL);
4134 ++ newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL, &pvcalls_proto, false);
4135 + if (!newsock->sk) {
4136 + bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
4137 + map->passive.inflight_req_id = PVCALLS_INVALID_ID;
4138 +@@ -1032,8 +1076,8 @@ int pvcalls_front_release(struct socket *sock)
4139 + spin_lock(&bedata->socket_lock);
4140 + list_del(&map->list);
4141 + spin_unlock(&bedata->socket_lock);
4142 +- if (READ_ONCE(map->passive.inflight_req_id) !=
4143 +- PVCALLS_INVALID_ID) {
4144 ++ if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID &&
4145 ++ READ_ONCE(map->passive.inflight_req_id) != 0) {
4146 + pvcalls_front_free_map(bedata,
4147 + map->passive.accept_map);
4148 + }
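
A recurring shape in the pvcalls-front hunks: the ring pages are now allocated with GFP_KERNEL before bedata->socket_lock is taken, rather than allocating under the spinlock, and every failure branch frees whatever was already set up. In outline, using the names from the hunks above:

    /* Allocate sleepable resources first, then take the spinlock;
     * unwind in reverse order on any failure. */
    ret = alloc_active_ring(map);           /* may sleep: GFP_KERNEL */
    if (ret < 0)
            return ret;

    spin_lock(&bedata->socket_lock);
    ret = get_request(bedata, &req_id);     /* must not sleep */
    if (ret < 0) {
            spin_unlock(&bedata->socket_lock);
            free_active_ring(map);
            return ret;
    }
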
4149 +diff --git a/fs/afs/flock.c b/fs/afs/flock.c
4150 +index dc62d15a964b8..1bb300ef362b0 100644
4151 +--- a/fs/afs/flock.c
4152 ++++ b/fs/afs/flock.c
4153 +@@ -208,7 +208,7 @@ again:
4154 + /* The new front of the queue now owns the state variables. */
4155 + next = list_entry(vnode->pending_locks.next,
4156 + struct file_lock, fl_u.afs.link);
4157 +- vnode->lock_key = afs_file_key(next->fl_file);
4158 ++ vnode->lock_key = key_get(afs_file_key(next->fl_file));
4159 + vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
4160 + vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
4161 + goto again;
4162 +@@ -413,7 +413,7 @@ static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl)
4163 + /* The new front of the queue now owns the state variables. */
4164 + next = list_entry(vnode->pending_locks.next,
4165 + struct file_lock, fl_u.afs.link);
4166 +- vnode->lock_key = afs_file_key(next->fl_file);
4167 ++ vnode->lock_key = key_get(afs_file_key(next->fl_file));
4168 + vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
4169 + vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
4170 + afs_lock_may_be_available(vnode);
4171 +diff --git a/fs/afs/inode.c b/fs/afs/inode.c
4172 +index 071075d775a95..0726e40db0f8b 100644
4173 +--- a/fs/afs/inode.c
4174 ++++ b/fs/afs/inode.c
4175 +@@ -411,7 +411,6 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
4176 + } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
4177 + valid = true;
4178 + } else {
4179 +- vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
4180 + vnode->cb_v_break = vnode->volume->cb_v_break;
4181 + valid = false;
4182 + }
4183 +@@ -543,6 +542,8 @@ void afs_evict_inode(struct inode *inode)
4184 + #endif
4185 +
4186 + afs_put_permits(rcu_access_pointer(vnode->permit_cache));
4187 ++ key_put(vnode->lock_key);
4188 ++ vnode->lock_key = NULL;
4189 + _leave("");
4190 + }
4191 +
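
The two afs hunks pair up: flock.c now takes a reference with key_get() whenever vnode->lock_key is assigned, and afs_evict_inode() drops it with key_put(). Without the get, the key could be freed while the vnode still pointed at it. The pairing, condensed from the hunks above:

    #include <linux/key.h>

    /* cache a counted reference ... */
    vnode->lock_key = key_get(afs_file_key(next->fl_file));

    /* ... and drop it exactly once at teardown */
    key_put(vnode->lock_key);
    vnode->lock_key = NULL;
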
4192 +diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
4193 +index 041c27ea8de15..f74193da0e092 100644
4194 +--- a/fs/ceph/snap.c
4195 ++++ b/fs/ceph/snap.c
4196 +@@ -616,7 +616,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
4197 + capsnap->size);
4198 +
4199 + spin_lock(&mdsc->snap_flush_lock);
4200 +- list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
4201 ++ if (list_empty(&ci->i_snap_flush_item))
4202 ++ list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
4203 + spin_unlock(&mdsc->snap_flush_lock);
4204 + return 1; /* caller may want to ceph_flush_snaps */
4205 + }
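
The ceph change is the standard guard against double insertion: list_add_tail() on an entry that is already linked corrupts both lists, so the entry is queued only when list_empty() reports it free, which in turn requires that dequeueing use list_del_init() so the node points back at itself. A minimal sketch:

    #include <linux/list.h>

    /* Safe only if removal uses list_del_init(), which re-initializes
     * the node so list_empty() becomes true again. */
    static void queue_once(struct list_head *item, struct list_head *queue)
    {
            if (list_empty(item))
                    list_add_tail(item, queue);
    }
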
4206 +diff --git a/fs/proc/base.c b/fs/proc/base.c
4207 +index 7e9f07bf260d2..81d77b15b3479 100644
4208 +--- a/fs/proc/base.c
4209 ++++ b/fs/proc/base.c
4210 +@@ -1084,10 +1084,6 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
4211 +
4212 + task_lock(p);
4213 + if (!p->vfork_done && process_shares_mm(p, mm)) {
4214 +- pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n",
4215 +- task_pid_nr(p), p->comm,
4216 +- p->signal->oom_score_adj, oom_adj,
4217 +- task_pid_nr(task), task->comm);
4218 + p->signal->oom_score_adj = oom_adj;
4219 + if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
4220 + p->signal->oom_score_adj_min = (short)oom_adj;
4221 +diff --git a/include/keys/user-type.h b/include/keys/user-type.h
4222 +index e098cbe27db54..12babe9915944 100644
4223 +--- a/include/keys/user-type.h
4224 ++++ b/include/keys/user-type.h
4225 +@@ -31,7 +31,7 @@
4226 + struct user_key_payload {
4227 + struct rcu_head rcu; /* RCU destructor */
4228 + unsigned short datalen; /* length of this data */
4229 +- char data[0]; /* actual data */
4230 ++ char data[0] __aligned(__alignof__(u64)); /* actual data */
4231 + };
4232 +
4233 + extern struct key_type key_type_user;
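
The user-key payload fix matters because callers overlay structures containing u64 fields on user_key_payload::data; without the attribute the flexible array starts right after the short datalen field and is not guaranteed 8-byte alignment. The same trick in isolation (a sketch):

    #include <linux/types.h>

    struct blob {
            unsigned short len;
            /* force the payload to u64 alignment so callers may cast it
             * to structures with 64-bit members */
            char data[0] __aligned(__alignof__(u64));
    };
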
4234 +diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
4235 +index b1ce500fe8b3d..d756f2318efe0 100644
4236 +--- a/include/linux/compiler-clang.h
4237 ++++ b/include/linux/compiler-clang.h
4238 +@@ -3,9 +3,8 @@
4239 + #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
4240 + #endif
4241 +
4242 +-/* Some compiler specific definitions are overwritten here
4243 +- * for Clang compiler
4244 +- */
4245 ++/* Compiler specific definitions for Clang compiler */
4246 ++
4247 + #define uninitialized_var(x) x = *(&(x))
4248 +
4249 + /* same as gcc, this was present in clang-2.6 so we can assume it works
4250 +diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
4251 +index 0242f6eec4eaf..a8ff0ca0c3213 100644
4252 +--- a/include/linux/compiler-gcc.h
4253 ++++ b/include/linux/compiler-gcc.h
4254 +@@ -58,10 +58,6 @@
4255 + (typeof(ptr)) (__ptr + (off)); \
4256 + })
4257 +
4258 +-/* Make the optimizer believe the variable can be manipulated arbitrarily. */
4259 +-#define OPTIMIZER_HIDE_VAR(var) \
4260 +- __asm__ ("" : "=r" (var) : "0" (var))
4261 +-
4262 + /*
4263 + * A trick to suppress uninitialized variable warning without generating any
4264 + * code
4265 +diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
4266 +index 4c7f9befa9f6c..f1fc60f103176 100644
4267 +--- a/include/linux/compiler-intel.h
4268 ++++ b/include/linux/compiler-intel.h
4269 +@@ -5,9 +5,7 @@
4270 +
4271 + #ifdef __ECC
4272 +
4273 +-/* Some compiler specific definitions are overwritten here
4274 +- * for Intel ECC compiler
4275 +- */
4276 ++/* Compiler specific definitions for Intel ECC compiler */
4277 +
4278 + #include <asm/intrinsics.h>
4279 +
4280 +diff --git a/include/linux/compiler.h b/include/linux/compiler.h
4281 +index 681d866efb1eb..269d376f5a119 100644
4282 +--- a/include/linux/compiler.h
4283 ++++ b/include/linux/compiler.h
4284 +@@ -158,7 +158,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
4285 + #endif
4286 +
4287 + #ifndef OPTIMIZER_HIDE_VAR
4288 +-#define OPTIMIZER_HIDE_VAR(var) barrier()
4289 ++/* Make the optimizer believe the variable can be manipulated arbitrarily. */
4290 ++#define OPTIMIZER_HIDE_VAR(var) \
4291 ++ __asm__ ("" : "=r" (var) : "0" (var))
4292 + #endif
4293 +
4294 + /* Not-quite-unique ID. */
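
The compiler.h change promotes the asm-based definition to the generic fallback because the old fallback, barrier(), only ordered memory accesses and did not actually hide the variable's value. What the asm form buys, in a standalone example (function name is illustrative):

    /* The empty asm routes `var' through a register the optimizer cannot
     * see through, stopping value tracking (CSE, constant folding) here. */
    #define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))

    static unsigned long opaque_copy(unsigned long x)
    {
            unsigned long y = x;

            OPTIMIZER_HIDE_VAR(y);  /* compiler may no longer assume y == x */
            return y;
    }
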
4295 +diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
4296 +index 59ddf9af909e4..2dd0a9ed5b361 100644
4297 +--- a/include/linux/qed/qed_chain.h
4298 ++++ b/include/linux/qed/qed_chain.h
4299 +@@ -663,6 +663,37 @@ out:
4300 + static inline void qed_chain_set_prod(struct qed_chain *p_chain,
4301 + u32 prod_idx, void *p_prod_elem)
4302 + {
4303 ++ if (p_chain->mode == QED_CHAIN_MODE_PBL) {
4304 ++ u32 cur_prod, page_mask, page_cnt, page_diff;
4305 ++
4306 ++ cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx :
4307 ++ p_chain->u.chain32.prod_idx;
4308 ++
4309 ++		/* Assume that the number of elements in a page is a power of 2 */
4310 ++ page_mask = ~p_chain->elem_per_page_mask;
4311 ++
4312 ++ /* Use "cur_prod - 1" and "prod_idx - 1" since producer index
4313 ++		 * reaches the first element of the next page before the page index
4314 ++ * is incremented. See qed_chain_produce().
4315 ++ * Index wrap around is not a problem because the difference
4316 ++ * between current and given producer indices is always
4317 ++ * positive and lower than the chain's capacity.
4318 ++ */
4319 ++ page_diff = (((cur_prod - 1) & page_mask) -
4320 ++ ((prod_idx - 1) & page_mask)) /
4321 ++ p_chain->elem_per_page;
4322 ++
4323 ++ page_cnt = qed_chain_get_page_cnt(p_chain);
4324 ++ if (is_chain_u16(p_chain))
4325 ++ p_chain->pbl.c.u16.prod_page_idx =
4326 ++ (p_chain->pbl.c.u16.prod_page_idx -
4327 ++ page_diff + page_cnt) % page_cnt;
4328 ++ else
4329 ++ p_chain->pbl.c.u32.prod_page_idx =
4330 ++ (p_chain->pbl.c.u32.prod_page_idx -
4331 ++ page_diff + page_cnt) % page_cnt;
4332 ++ }
4333 ++
4334 + if (is_chain_u16(p_chain))
4335 + p_chain->u.chain16.prod_idx = (u16) prod_idx;
4336 + else
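
The qed_chain_set_prod() addition rewinds the PBL page index when the producer moves backwards; the `(idx - diff + cnt) % cnt` form keeps the unsigned subtraction from wrapping below zero, valid as long as `diff` never exceeds the chain's capacity (as the comment above notes). The arithmetic on its own:

    /* Step an unsigned ring index back by `diff' slots: adding one full
     * cycle before the modulus keeps the result in range (assumes
     * diff <= cnt). */
    static unsigned int ring_rewind(unsigned int idx, unsigned int diff,
                                    unsigned int cnt)
    {
            return (idx - diff + cnt) % cnt;
    }

    /* e.g. 2 pages back from page 1 in an 8-page ring lands on page 7 */
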
4337 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
4338 +index 5d69e208e8d91..a404d475acee3 100644
4339 +--- a/include/linux/skbuff.h
4340 ++++ b/include/linux/skbuff.h
4341 +@@ -2392,7 +2392,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
4342 +
4343 + if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
4344 + skb_set_transport_header(skb, keys.control.thoff);
4345 +- else
4346 ++ else if (offset_hint >= 0)
4347 + skb_set_transport_header(skb, offset_hint);
4348 + }
4349 +
4350 +diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
4351 +index cb462f9ab7dd5..e0348cb0a1dd7 100644
4352 +--- a/include/linux/virtio_net.h
4353 ++++ b/include/linux/virtio_net.h
4354 +@@ -57,6 +57,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
4355 +
4356 + if (!skb_partial_csum_set(skb, start, off))
4357 + return -EINVAL;
4358 ++ } else {
4359 ++ /* gso packets without NEEDS_CSUM do not set transport_offset.
4360 ++		 * probe and drop if it does not match one of the above types.
4361 ++ */
4362 ++ if (gso_type && skb->network_header) {
4363 ++ if (!skb->protocol)
4364 ++ virtio_net_hdr_set_proto(skb, hdr);
4365 ++retry:
4366 ++ skb_probe_transport_header(skb, -1);
4367 ++ if (!skb_transport_header_was_set(skb)) {
4368 ++ /* UFO does not specify ipv4 or 6: try both */
4369 ++ if (gso_type & SKB_GSO_UDP &&
4370 ++ skb->protocol == htons(ETH_P_IP)) {
4371 ++ skb->protocol = htons(ETH_P_IPV6);
4372 ++ goto retry;
4373 ++ }
4374 ++ return -EINVAL;
4375 ++ }
4376 ++ }
4377 + }
4378 +
4379 + if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
4380 +diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
4381 +index 0e355f4a3d763..0a3de10c6dece 100644
4382 +--- a/include/net/netfilter/nf_flow_table.h
4383 ++++ b/include/net/netfilter/nf_flow_table.h
4384 +@@ -84,7 +84,6 @@ struct flow_offload {
4385 + struct nf_flow_route {
4386 + struct {
4387 + struct dst_entry *dst;
4388 +- int ifindex;
4389 + } tuple[FLOW_OFFLOAD_DIR_MAX];
4390 + };
4391 +
4392 +diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
4393 +index 14565d703291b..e8baca85bac6a 100644
4394 +--- a/include/uapi/linux/inet_diag.h
4395 ++++ b/include/uapi/linux/inet_diag.h
4396 +@@ -137,15 +137,21 @@ enum {
4397 + INET_DIAG_TCLASS,
4398 + INET_DIAG_SKMEMINFO,
4399 + INET_DIAG_SHUTDOWN,
4400 +- INET_DIAG_DCTCPINFO,
4401 +- INET_DIAG_PROTOCOL, /* response attribute only */
4402 ++
4403 ++ /*
4404 ++	 * Next extensions cannot be requested in struct inet_diag_req_v2:
4405 ++ * its field idiag_ext has only 8 bits.
4406 ++ */
4407 ++
4408 ++ INET_DIAG_DCTCPINFO, /* request as INET_DIAG_VEGASINFO */
4409 ++ INET_DIAG_PROTOCOL, /* response attribute only */
4410 + INET_DIAG_SKV6ONLY,
4411 + INET_DIAG_LOCALS,
4412 + INET_DIAG_PEERS,
4413 + INET_DIAG_PAD,
4414 +- INET_DIAG_MARK,
4415 +- INET_DIAG_BBRINFO,
4416 +- INET_DIAG_CLASS_ID,
4417 ++ INET_DIAG_MARK, /* only with CAP_NET_ADMIN */
4418 ++ INET_DIAG_BBRINFO, /* request as INET_DIAG_VEGASINFO */
4419 ++ INET_DIAG_CLASS_ID, /* request as INET_DIAG_TCLASS */
4420 + INET_DIAG_MD5SIG,
4421 + __INET_DIAG_MAX,
4422 + };
4423 +diff --git a/include/video/udlfb.h b/include/video/udlfb.h
4424 +index 3abd327bada64..7d09e54ae54e0 100644
4425 +--- a/include/video/udlfb.h
4426 ++++ b/include/video/udlfb.h
4427 +@@ -36,12 +36,9 @@ struct dlfb_data {
4428 + struct usb_device *udev;
4429 + struct fb_info *info;
4430 + struct urb_list urbs;
4431 +- struct kref kref;
4432 + char *backing_buffer;
4433 + int fb_count;
4434 + bool virtualized; /* true when physical usb device not present */
4435 +- struct delayed_work init_framebuffer_work;
4436 +- struct delayed_work free_framebuffer_work;
4437 + atomic_t usb_active; /* 0 = update virtual buffer, but no usb traffic */
4438 + atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
4439 + char *edid; /* null until we read edid from hw or get from sysfs */
4440 +diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
4441 +index 8061a439ef18c..6a32933cae4ff 100644
4442 +--- a/kernel/bpf/stackmap.c
4443 ++++ b/kernel/bpf/stackmap.c
4444 +@@ -180,11 +180,14 @@ static inline int stack_map_parse_build_id(void *page_addr,
4445 +
4446 + if (nhdr->n_type == BPF_BUILD_ID &&
4447 + nhdr->n_namesz == sizeof("GNU") &&
4448 +- nhdr->n_descsz == BPF_BUILD_ID_SIZE) {
4449 ++ nhdr->n_descsz > 0 &&
4450 ++ nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
4451 + memcpy(build_id,
4452 + note_start + note_offs +
4453 + ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
4454 +- BPF_BUILD_ID_SIZE);
4455 ++ nhdr->n_descsz);
4456 ++ memset(build_id + nhdr->n_descsz, 0,
4457 ++ BPF_BUILD_ID_SIZE - nhdr->n_descsz);
4458 + return 0;
4459 + }
4460 + new_offs = note_offs + sizeof(Elf32_Nhdr) +
4461 +@@ -260,7 +263,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
4462 + return -EFAULT; /* page not mapped */
4463 +
4464 + ret = -EINVAL;
4465 +- page_addr = page_address(page);
4466 ++ page_addr = kmap_atomic(page);
4467 + ehdr = (Elf32_Ehdr *)page_addr;
4468 +
4469 + /* compare magic x7f "ELF" */
4470 +@@ -276,6 +279,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
4471 + else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
4472 + ret = stack_map_get_build_id_64(page_addr, build_id);
4473 + out:
4474 ++ kunmap_atomic(page_addr);
4475 + put_page(page);
4476 + return ret;
4477 + }
4478 +@@ -310,6 +314,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
4479 + for (i = 0; i < trace_nr; i++) {
4480 + id_offs[i].status = BPF_STACK_BUILD_ID_IP;
4481 + id_offs[i].ip = ips[i];
4482 ++ memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
4483 + }
4484 + return;
4485 + }
4486 +@@ -320,6 +325,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
4487 + /* per entry fall back to ips */
4488 + id_offs[i].status = BPF_STACK_BUILD_ID_IP;
4489 + id_offs[i].ip = ips[i];
4490 ++ memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
4491 + continue;
4492 + }
4493 + id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
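
The stackmap fix swaps page_address() for kmap_atomic(): on 32-bit highmem configurations the user page holding the ELF header may have no permanent kernel mapping, so it must be mapped temporarily around the access. The pattern (a sketch):

    #include <linux/highmem.h>

    /* page_address() is only valid for lowmem pages; kmap_atomic()
     * gives a short-lived mapping that also covers highmem. */
    static u8 first_byte(struct page *page)
    {
            void *addr = kmap_atomic(page);
            u8 b = *(u8 *)addr;

            kunmap_atomic(addr);
            return b;
    }
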
4494 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
4495 +index bf6f1d70484dc..17bd0c0dfa98a 100644
4496 +--- a/kernel/trace/trace.c
4497 ++++ b/kernel/trace/trace.c
4498 +@@ -3383,6 +3383,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
4499 + const char tgid_space[] = " ";
4500 + const char space[] = " ";
4501 +
4502 ++ print_event_info(buf, m);
4503 ++
4504 + seq_printf(m, "# %s _-----=> irqs-off\n",
4505 + tgid ? tgid_space : space);
4506 + seq_printf(m, "# %s / _----=> need-resched\n",
4507 +diff --git a/mm/mempolicy.c b/mm/mempolicy.c
4508 +index 149b6f4cf0233..89d4439516f6c 100644
4509 +--- a/mm/mempolicy.c
4510 ++++ b/mm/mempolicy.c
4511 +@@ -1300,7 +1300,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
4512 + nodemask_t *nodes)
4513 + {
4514 + unsigned long copy = ALIGN(maxnode-1, 64) / 8;
4515 +- const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
4516 ++ unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
4517 +
4518 + if (copy > nbytes) {
4519 + if (copy > PAGE_SIZE)
4520 +@@ -1477,7 +1477,7 @@ static int kernel_get_mempolicy(int __user *policy,
4521 + int uninitialized_var(pval);
4522 + nodemask_t nodes;
4523 +
4524 +- if (nmask != NULL && maxnode < MAX_NUMNODES)
4525 ++ if (nmask != NULL && maxnode < nr_node_ids)
4526 + return -EINVAL;
4527 +
4528 + err = do_get_mempolicy(&pval, &nodes, addr, flags);
4529 +@@ -1513,7 +1513,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
4530 + unsigned long nr_bits, alloc_size;
4531 + DECLARE_BITMAP(bm, MAX_NUMNODES);
4532 +
4533 +- nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
4534 ++ nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
4535 + alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
4536 +
4537 + if (nmask)
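
On the mm/mempolicy.c change above: the syscalls now validate maxnode against nr_node_ids (the node count of the running system) instead of the compile-time MAX_NUMNODES ceiling, so callers with tightly sized nodemasks are no longer rejected. A hedged userspace sketch of such a caller; it assumes the libnuma header numaif.h and linking with -lnuma.

#include <numaif.h>   /* get_mempolicy(2) wrapper; assumes libnuma */
#include <stdio.h>

int main(void)
{
	unsigned long nodemask[4] = { 0 };   /* room for 256 node bits */
	int mode = 0;

	/* After the fix, maxnode only needs to cover nr_node_ids,
	 * not MAX_NUMNODES; 256 bits comfortably covers both here. */
	if (get_mempolicy(&mode, nodemask, 8 * sizeof(nodemask),
			  NULL, 0) == 0)
		printf("mode=%d mask0=%#lx\n", mode, nodemask[0]);
	return 0;
}
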
4538 +diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
4539 +index 3899fa6e201dd..a2976adeeedce 100644
4540 +--- a/net/batman-adv/soft-interface.c
4541 ++++ b/net/batman-adv/soft-interface.c
4542 +@@ -227,6 +227,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
4543 +
4544 + switch (ntohs(ethhdr->h_proto)) {
4545 + case ETH_P_8021Q:
4546 ++ if (!pskb_may_pull(skb, sizeof(*vhdr)))
4547 ++ goto dropped;
4548 + vhdr = vlan_eth_hdr(skb);
4549 +
4550 + /* drop batman-in-batman packets to prevent loops */
4551 +diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
4552 +index 502f663495308..4d4b9b5ea1c17 100644
4553 +--- a/net/bridge/br_fdb.c
4554 ++++ b/net/bridge/br_fdb.c
4555 +@@ -1088,6 +1088,8 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
4556 + err = -ENOMEM;
4557 + goto err_unlock;
4558 + }
4559 ++ if (swdev_notify)
4560 ++ fdb->added_by_user = 1;
4561 + fdb->added_by_external_learn = 1;
4562 + fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
4563 + } else {
4564 +@@ -1107,6 +1109,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
4565 + modified = true;
4566 + }
4567 +
4568 ++ if (swdev_notify)
4569 ++ fdb->added_by_user = 1;
4570 ++
4571 + if (modified)
4572 + fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
4573 + }
4574 +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
4575 +index 6dec8e9b34511..20ed7adcf1cc4 100644
4576 +--- a/net/bridge/br_multicast.c
4577 ++++ b/net/bridge/br_multicast.c
4578 +@@ -1420,14 +1420,7 @@ static void br_multicast_query_received(struct net_bridge *br,
4579 + return;
4580 +
4581 + br_multicast_update_query_timer(br, query, max_delay);
4582 +-
4583 +- /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
4584 +- * the arrival port for IGMP Queries where the source address
4585 +- * is 0.0.0.0 should not be added to router port list.
4586 +- */
4587 +- if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
4588 +- saddr->proto == htons(ETH_P_IPV6))
4589 +- br_multicast_mark_router(br, port);
4590 ++ br_multicast_mark_router(br, port);
4591 + }
4592 +
4593 + static void br_ip4_multicast_query(struct net_bridge *br,
4594 +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
4595 +index a127d14421164..f7d7f32ac673c 100644
4596 +--- a/net/ceph/messenger.c
4597 ++++ b/net/ceph/messenger.c
4598 +@@ -2091,6 +2091,8 @@ static int process_connect(struct ceph_connection *con)
4599 + dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
4600 +
4601 + if (con->auth) {
4602 ++ int len = le32_to_cpu(con->in_reply.authorizer_len);
4603 ++
4604 + /*
4605 + * Any connection that defines ->get_authorizer()
4606 + * should also define ->add_authorizer_challenge() and
4607 +@@ -2100,8 +2102,7 @@ static int process_connect(struct ceph_connection *con)
4608 + */
4609 + if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) {
4610 + ret = con->ops->add_authorizer_challenge(
4611 +- con, con->auth->authorizer_reply_buf,
4612 +- le32_to_cpu(con->in_reply.authorizer_len));
4613 ++ con, con->auth->authorizer_reply_buf, len);
4614 + if (ret < 0)
4615 + return ret;
4616 +
4617 +@@ -2111,10 +2112,12 @@ static int process_connect(struct ceph_connection *con)
4618 + return 0;
4619 + }
4620 +
4621 +- ret = con->ops->verify_authorizer_reply(con);
4622 +- if (ret < 0) {
4623 +- con->error_msg = "bad authorize reply";
4624 +- return ret;
4625 ++ if (len) {
4626 ++ ret = con->ops->verify_authorizer_reply(con);
4627 ++ if (ret < 0) {
4628 ++ con->error_msg = "bad authorize reply";
4629 ++ return ret;
4630 ++ }
4631 + }
4632 + }
4633 +
4634 +diff --git a/net/core/filter.c b/net/core/filter.c
4635 +index 8c2411fb25090..fb0080e84bd43 100644
4636 +--- a/net/core/filter.c
4637 ++++ b/net/core/filter.c
4638 +@@ -3930,7 +3930,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4639 + sk->sk_rcvlowat = val ? : 1;
4640 + break;
4641 + case SO_MARK:
4642 +- sk->sk_mark = val;
4643 ++ if (sk->sk_mark != val) {
4644 ++ sk->sk_mark = val;
4645 ++ sk_dst_reset(sk);
4646 ++ }
4647 + break;
4648 + default:
4649 + ret = -EINVAL;
4650 +@@ -4001,7 +4004,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4651 + /* Only some options are supported */
4652 + switch (optname) {
4653 + case TCP_BPF_IW:
4654 +- if (val <= 0 || tp->data_segs_out > 0)
4655 ++ if (val <= 0 || tp->data_segs_out > tp->syn_data)
4656 + ret = -EINVAL;
4657 + else
4658 + tp->snd_cwnd = val;
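
On the net/core/filter.c changes above: sk_mark participates in policy routing (ip rule ... fwmark), so a BPF program that rewrites the mark on an established socket must also drop the cached route, which is what the added sk_dst_reset() does; the new equality test avoids needless invalidation. A hedged userspace analogue via the ordinary setsockopt() path:

#include <sys/socket.h>

#ifndef SO_MARK
#define SO_MARK 36   /* value on most architectures; assumption */
#endif

/* Requires CAP_NET_ADMIN. After this call the kernel re-routes the
 * socket, mirroring the sk_dst_reset() added in the patch above. */
static int set_fwmark(int fd, unsigned int mark)
{
	return setsockopt(fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark));
}
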
4659 +diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
4660 +index 1a4e9ff02762e..5731670c560b0 100644
4661 +--- a/net/ipv4/inet_diag.c
4662 ++++ b/net/ipv4/inet_diag.c
4663 +@@ -108,6 +108,7 @@ static size_t inet_sk_attr_size(struct sock *sk,
4664 + + nla_total_size(1) /* INET_DIAG_TOS */
4665 + + nla_total_size(1) /* INET_DIAG_TCLASS */
4666 + + nla_total_size(4) /* INET_DIAG_MARK */
4667 ++ + nla_total_size(4) /* INET_DIAG_CLASS_ID */
4668 + + nla_total_size(sizeof(struct inet_diag_meminfo))
4669 + + nla_total_size(sizeof(struct inet_diag_msg))
4670 + + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
4671 +@@ -287,12 +288,19 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
4672 + goto errout;
4673 + }
4674 +
4675 +- if (ext & (1 << (INET_DIAG_CLASS_ID - 1))) {
4676 ++ if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
4677 ++ ext & (1 << (INET_DIAG_TCLASS - 1))) {
4678 + u32 classid = 0;
4679 +
4680 + #ifdef CONFIG_SOCK_CGROUP_DATA
4681 + classid = sock_cgroup_classid(&sk->sk_cgrp_data);
4682 + #endif
4683 ++ /* Fall back to socket priority if the class id isn't set.
4684 ++ * Classful qdiscs use it as a direct reference to the class.
4685 ++ * For cgroup2, classid is always zero.
4686 ++ */
4687 ++ if (!classid)
4688 ++ classid = sk->sk_priority;
4689 +
4690 + if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
4691 + goto errout;
4692 +diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
4693 +index fb1e7f237f531..3cd237b42f446 100644
4694 +--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
4695 ++++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
4696 +@@ -56,7 +56,7 @@ struct clusterip_config {
4697 + #endif
4698 + enum clusterip_hashmode hash_mode; /* which hashing mode */
4699 + u_int32_t hash_initval; /* hash initialization */
4700 +- struct rcu_head rcu;
4701 ++ struct rcu_head rcu; /* for call_rcu_bh */
4702 + struct net *net; /* netns for pernet list */
4703 + char ifname[IFNAMSIZ]; /* device ifname */
4704 + };
4705 +@@ -72,6 +72,8 @@ struct clusterip_net {
4706 +
4707 + #ifdef CONFIG_PROC_FS
4708 + struct proc_dir_entry *procdir;
4709 ++ /* mutex protects the config->pde */
4710 ++ struct mutex mutex;
4711 + #endif
4712 + };
4713 +
4714 +@@ -118,17 +120,18 @@ clusterip_config_entry_put(struct clusterip_config *c)
4715 +
4716 + local_bh_disable();
4717 + if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
4718 ++ list_del_rcu(&c->list);
4719 ++ spin_unlock(&cn->lock);
4720 ++ local_bh_enable();
4721 + /* In case anyone still accesses the file, the open/close
4722 + * functions are also incrementing the refcount on their own,
4723 + * so it's safe to remove the entry even if it's in use. */
4724 + #ifdef CONFIG_PROC_FS
4725 ++ mutex_lock(&cn->mutex);
4726 + if (cn->procdir)
4727 + proc_remove(c->pde);
4728 ++ mutex_unlock(&cn->mutex);
4729 + #endif
4730 +- list_del_rcu(&c->list);
4731 +- spin_unlock(&cn->lock);
4732 +- local_bh_enable();
4733 +-
4734 + return;
4735 + }
4736 + local_bh_enable();
4737 +@@ -278,9 +281,11 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
4738 +
4739 + /* create proc dir entry */
4740 + sprintf(buffer, "%pI4", &ip);
4741 ++ mutex_lock(&cn->mutex);
4742 + c->pde = proc_create_data(buffer, 0600,
4743 + cn->procdir,
4744 + &clusterip_proc_fops, c);
4745 ++ mutex_unlock(&cn->mutex);
4746 + if (!c->pde) {
4747 + err = -ENOMEM;
4748 + goto err;
4749 +@@ -833,6 +838,7 @@ static int clusterip_net_init(struct net *net)
4750 + pr_err("Unable to proc dir entry\n");
4751 + return -ENOMEM;
4752 + }
4753 ++ mutex_init(&cn->mutex);
4754 + #endif /* CONFIG_PROC_FS */
4755 +
4756 + return 0;
4757 +@@ -841,9 +847,12 @@ static int clusterip_net_init(struct net *net)
4758 + static void clusterip_net_exit(struct net *net)
4759 + {
4760 + struct clusterip_net *cn = clusterip_pernet(net);
4761 ++
4762 + #ifdef CONFIG_PROC_FS
4763 ++ mutex_lock(&cn->mutex);
4764 + proc_remove(cn->procdir);
4765 + cn->procdir = NULL;
4766 ++ mutex_unlock(&cn->mutex);
4767 + #endif
4768 + nf_unregister_net_hook(net, &cip_arp_ops);
4769 + }
4770 +diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
4771 +index 8b075f0bc3516..6d0b1f3e927bd 100644
4772 +--- a/net/ipv6/netfilter.c
4773 ++++ b/net/ipv6/netfilter.c
4774 +@@ -23,9 +23,11 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
4775 + struct sock *sk = sk_to_full_sk(skb->sk);
4776 + unsigned int hh_len;
4777 + struct dst_entry *dst;
4778 ++ int strict = (ipv6_addr_type(&iph->daddr) &
4779 ++ (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
4780 + struct flowi6 fl6 = {
4781 + .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
4782 +- rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
4783 ++ strict ? skb_dst(skb)->dev->ifindex : 0,
4784 + .flowi6_mark = skb->mark,
4785 + .flowi6_uid = sock_net_uid(net, sk),
4786 + .daddr = iph->daddr,
4787 +diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
4788 +index 8d0ba757a46ce..9b2f272ca1649 100644
4789 +--- a/net/ipv6/seg6.c
4790 ++++ b/net/ipv6/seg6.c
4791 +@@ -221,9 +221,7 @@ static int seg6_genl_get_tunsrc(struct sk_buff *skb, struct genl_info *info)
4792 + rcu_read_unlock();
4793 +
4794 + genlmsg_end(msg, hdr);
4795 +- genlmsg_reply(msg, info);
4796 +-
4797 +- return 0;
4798 ++ return genlmsg_reply(msg, info);
4799 +
4800 + nla_put_failure:
4801 + rcu_read_unlock();
4802 +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
4803 +index eb162bd0e0419..da6d5a3f53995 100644
4804 +--- a/net/ipv6/sit.c
4805 ++++ b/net/ipv6/sit.c
4806 +@@ -546,7 +546,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
4807 + }
4808 +
4809 + err = 0;
4810 +- if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
4811 ++ if (__in6_dev_get(skb->dev) &&
4812 ++ !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
4813 + goto out;
4814 +
4815 + if (t->parms.iph.daddr == 0)
4816 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
4817 +index 76ba2f34ef6b1..cab8b2b647f96 100644
4818 +--- a/net/ipv6/udp.c
4819 ++++ b/net/ipv6/udp.c
4820 +@@ -1322,10 +1322,7 @@ do_udp_sendmsg:
4821 + ipc6.opt = opt;
4822 +
4823 + fl6.flowi6_proto = sk->sk_protocol;
4824 +- if (!ipv6_addr_any(daddr))
4825 +- fl6.daddr = *daddr;
4826 +- else
4827 +- fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
4828 ++ fl6.daddr = *daddr;
4829 + if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
4830 + fl6.saddr = np->saddr;
4831 + fl6.fl6_sport = inet->inet_sport;
4832 +@@ -1353,6 +1350,9 @@ do_udp_sendmsg:
4833 + }
4834 + }
4835 +
4836 ++ if (ipv6_addr_any(&fl6.daddr))
4837 ++ fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
4838 ++
4839 + final_p = fl6_update_dst(&fl6, opt, &final);
4840 + if (final_p)
4841 + connected = false;
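
On the net/ipv6/udp.c change above: the "[::] means loopback" BSD'ism is now applied after the cgroup BPF sendmsg hook runs, so the hook sees (and may rewrite) the wildcard destination the application actually passed; the matching selftests appear further down. A minimal demo of the BSD'ism itself:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	struct sockaddr_in6 dst;

	if (fd < 0)
		return 1;
	memset(&dst, 0, sizeof(dst));
	dst.sin6_family = AF_INET6;
	dst.sin6_port = htons(9);    /* discard port; illustrative */
	dst.sin6_addr = in6addr_any; /* [::]: kernel rewrites to ::1 */

	/* Delivered to loopback, exactly as if sent to [::1]. */
	sendto(fd, "ping", 4, 0, (struct sockaddr *)&dst, sizeof(dst));
	close(fd);
	return 0;
}
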
4842 +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
4843 +index 5d22eda8a6b1e..c2abe9db1ea24 100644
4844 +--- a/net/mac80211/cfg.c
4845 ++++ b/net/mac80211/cfg.c
4846 +@@ -887,6 +887,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
4847 + BSS_CHANGED_P2P_PS |
4848 + BSS_CHANGED_TXPOWER;
4849 + int err;
4850 ++ int prev_beacon_int;
4851 +
4852 + old = sdata_dereference(sdata->u.ap.beacon, sdata);
4853 + if (old)
4854 +@@ -909,6 +910,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
4855 +
4856 + sdata->needed_rx_chains = sdata->local->rx_chains;
4857 +
4858 ++ prev_beacon_int = sdata->vif.bss_conf.beacon_int;
4859 + sdata->vif.bss_conf.beacon_int = params->beacon_interval;
4860 +
4861 + mutex_lock(&local->mtx);
4862 +@@ -917,8 +919,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
4863 + if (!err)
4864 + ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
4865 + mutex_unlock(&local->mtx);
4866 +- if (err)
4867 ++ if (err) {
4868 ++ sdata->vif.bss_conf.beacon_int = prev_beacon_int;
4869 + return err;
4870 ++ }
4871 +
4872 + /*
4873 + * Apply control port protocol, this allows us to
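
On the net/mac80211/cfg.c change above: ieee80211_start_ap() writes the new beacon interval before the channel context is acquired, so a failure must restore the previous value or later attempts would be validated against a value that never took effect. A generic sketch of the save-and-roll-back pattern; apply_beacon_int() and commit() are hypothetical stand-ins for the bss_conf write and ieee80211_vif_use_channel().

static int apply_beacon_int(int *cur, int new_val, int (*commit)(int))
{
	int prev = *cur;    /* prev_beacon_int in the patch */
	int err;

	*cur = new_val;     /* speculative write, as in the patch */
	err = commit(new_val);
	if (err)
		*cur = prev;    /* roll back so state matches reality */
	return err;
}
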
4874 +diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
4875 +index 21526630bf655..e84103b405341 100644
4876 +--- a/net/mac80211/mesh.h
4877 ++++ b/net/mac80211/mesh.h
4878 +@@ -70,6 +70,7 @@ enum mesh_deferred_task_flags {
4879 + * @dst: mesh path destination mac address
4880 + * @mpp: mesh proxy mac address
4881 + * @rhash: rhashtable list pointer
4882 ++ * @walk_list: linked list containing all mesh_path objects.
4883 + * @gate_list: list pointer for known gates list
4884 + * @sdata: mesh subif
4885 + * @next_hop: mesh neighbor to which frames for this destination will be
4886 +@@ -105,6 +106,7 @@ struct mesh_path {
4887 + u8 dst[ETH_ALEN];
4888 + u8 mpp[ETH_ALEN]; /* used for MPP or MAP */
4889 + struct rhash_head rhash;
4890 ++ struct hlist_node walk_list;
4891 + struct hlist_node gate_list;
4892 + struct ieee80211_sub_if_data *sdata;
4893 + struct sta_info __rcu *next_hop;
4894 +@@ -133,12 +135,16 @@ struct mesh_path {
4895 + * gate's mpath may or may not be resolved and active.
4896 + * @gates_lock: protects updates to known_gates
4897 + * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
4898 ++ * @walk_head: linked list containing all mesh_path objects
4899 ++ * @walk_lock: lock protecting walk_head
4900 + * @entries: number of entries in the table
4901 + */
4902 + struct mesh_table {
4903 + struct hlist_head known_gates;
4904 + spinlock_t gates_lock;
4905 + struct rhashtable rhead;
4906 ++ struct hlist_head walk_head;
4907 ++ spinlock_t walk_lock;
4908 + atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
4909 + };
4910 +
4911 +diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
4912 +index a5125624a76dc..c3a7396fb9556 100644
4913 +--- a/net/mac80211/mesh_pathtbl.c
4914 ++++ b/net/mac80211/mesh_pathtbl.c
4915 +@@ -59,8 +59,10 @@ static struct mesh_table *mesh_table_alloc(void)
4916 + return NULL;
4917 +
4918 + INIT_HLIST_HEAD(&newtbl->known_gates);
4919 ++ INIT_HLIST_HEAD(&newtbl->walk_head);
4920 + atomic_set(&newtbl->entries, 0);
4921 + spin_lock_init(&newtbl->gates_lock);
4922 ++ spin_lock_init(&newtbl->walk_lock);
4923 +
4924 + return newtbl;
4925 + }
4926 +@@ -249,28 +251,15 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
4927 + static struct mesh_path *
4928 + __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
4929 + {
4930 +- int i = 0, ret;
4931 +- struct mesh_path *mpath = NULL;
4932 +- struct rhashtable_iter iter;
4933 +-
4934 +- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
4935 +- if (ret)
4936 +- return NULL;
4937 +-
4938 +- rhashtable_walk_start(&iter);
4939 ++ int i = 0;
4940 ++ struct mesh_path *mpath;
4941 +
4942 +- while ((mpath = rhashtable_walk_next(&iter))) {
4943 +- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
4944 +- continue;
4945 +- if (IS_ERR(mpath))
4946 +- break;
4947 ++ hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
4948 + if (i++ == idx)
4949 + break;
4950 + }
4951 +- rhashtable_walk_stop(&iter);
4952 +- rhashtable_walk_exit(&iter);
4953 +
4954 +- if (IS_ERR(mpath) || !mpath)
4955 ++ if (!mpath)
4956 + return NULL;
4957 +
4958 + if (mpath_expired(mpath)) {
4959 +@@ -432,6 +421,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
4960 + return ERR_PTR(-ENOMEM);
4961 +
4962 + tbl = sdata->u.mesh.mesh_paths;
4963 ++ spin_lock_bh(&tbl->walk_lock);
4964 + do {
4965 + ret = rhashtable_lookup_insert_fast(&tbl->rhead,
4966 + &new_mpath->rhash,
4967 +@@ -441,20 +431,20 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
4968 + mpath = rhashtable_lookup_fast(&tbl->rhead,
4969 + dst,
4970 + mesh_rht_params);
4971 +-
4972 ++ else if (!ret)
4973 ++ hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
4974 + } while (unlikely(ret == -EEXIST && !mpath));
4975 ++ spin_unlock_bh(&tbl->walk_lock);
4976 +
4977 +- if (ret && ret != -EEXIST)
4978 +- return ERR_PTR(ret);
4979 +-
4980 +- /* At this point either new_mpath was added, or we found a
4981 +- * matching entry already in the table; in the latter case
4982 +- * free the unnecessary new entry.
4983 +- */
4984 +- if (ret == -EEXIST) {
4985 ++ if (ret) {
4986 + kfree(new_mpath);
4987 ++
4988 ++ if (ret != -EEXIST)
4989 ++ return ERR_PTR(ret);
4990 ++
4991 + new_mpath = mpath;
4992 + }
4993 ++
4994 + sdata->u.mesh.mesh_paths_generation++;
4995 + return new_mpath;
4996 + }
4997 +@@ -480,9 +470,17 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
4998 +
4999 + memcpy(new_mpath->mpp, mpp, ETH_ALEN);
5000 + tbl = sdata->u.mesh.mpp_paths;
5001 ++
5002 ++ spin_lock_bh(&tbl->walk_lock);
5003 + ret = rhashtable_lookup_insert_fast(&tbl->rhead,
5004 + &new_mpath->rhash,
5005 + mesh_rht_params);
5006 ++ if (!ret)
5007 ++ hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
5008 ++ spin_unlock_bh(&tbl->walk_lock);
5009 ++
5010 ++ if (ret)
5011 ++ kfree(new_mpath);
5012 +
5013 + sdata->u.mesh.mpp_paths_generation++;
5014 + return ret;
5015 +@@ -503,20 +501,9 @@ void mesh_plink_broken(struct sta_info *sta)
5016 + struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
5017 + static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
5018 + struct mesh_path *mpath;
5019 +- struct rhashtable_iter iter;
5020 +- int ret;
5021 +-
5022 +- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
5023 +- if (ret)
5024 +- return;
5025 +
5026 +- rhashtable_walk_start(&iter);
5027 +-
5028 +- while ((mpath = rhashtable_walk_next(&iter))) {
5029 +- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
5030 +- continue;
5031 +- if (IS_ERR(mpath))
5032 +- break;
5033 ++ rcu_read_lock();
5034 ++ hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
5035 + if (rcu_access_pointer(mpath->next_hop) == sta &&
5036 + mpath->flags & MESH_PATH_ACTIVE &&
5037 + !(mpath->flags & MESH_PATH_FIXED)) {
5038 +@@ -530,8 +517,7 @@ void mesh_plink_broken(struct sta_info *sta)
5039 + WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
5040 + }
5041 + }
5042 +- rhashtable_walk_stop(&iter);
5043 +- rhashtable_walk_exit(&iter);
5044 ++ rcu_read_unlock();
5045 + }
5046 +
5047 + static void mesh_path_free_rcu(struct mesh_table *tbl,
5048 +@@ -551,6 +537,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
5049 +
5050 + static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
5051 + {
5052 ++ hlist_del_rcu(&mpath->walk_list);
5053 + rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
5054 + mesh_path_free_rcu(tbl, mpath);
5055 + }
5056 +@@ -571,27 +558,14 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
5057 + struct ieee80211_sub_if_data *sdata = sta->sdata;
5058 + struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
5059 + struct mesh_path *mpath;
5060 +- struct rhashtable_iter iter;
5061 +- int ret;
5062 +-
5063 +- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
5064 +- if (ret)
5065 +- return;
5066 +-
5067 +- rhashtable_walk_start(&iter);
5068 +-
5069 +- while ((mpath = rhashtable_walk_next(&iter))) {
5070 +- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
5071 +- continue;
5072 +- if (IS_ERR(mpath))
5073 +- break;
5074 ++ struct hlist_node *n;
5075 +
5076 ++ spin_lock_bh(&tbl->walk_lock);
5077 ++ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
5078 + if (rcu_access_pointer(mpath->next_hop) == sta)
5079 + __mesh_path_del(tbl, mpath);
5080 + }
5081 +-
5082 +- rhashtable_walk_stop(&iter);
5083 +- rhashtable_walk_exit(&iter);
5084 ++ spin_unlock_bh(&tbl->walk_lock);
5085 + }
5086 +
5087 + static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
5088 +@@ -599,51 +573,26 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
5089 + {
5090 + struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
5091 + struct mesh_path *mpath;
5092 +- struct rhashtable_iter iter;
5093 +- int ret;
5094 +-
5095 +- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
5096 +- if (ret)
5097 +- return;
5098 +-
5099 +- rhashtable_walk_start(&iter);
5100 +-
5101 +- while ((mpath = rhashtable_walk_next(&iter))) {
5102 +- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
5103 +- continue;
5104 +- if (IS_ERR(mpath))
5105 +- break;
5106 ++ struct hlist_node *n;
5107 +
5108 ++ spin_lock_bh(&tbl->walk_lock);
5109 ++ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
5110 + if (ether_addr_equal(mpath->mpp, proxy))
5111 + __mesh_path_del(tbl, mpath);
5112 + }
5113 +-
5114 +- rhashtable_walk_stop(&iter);
5115 +- rhashtable_walk_exit(&iter);
5116 ++ spin_unlock_bh(&tbl->walk_lock);
5117 + }
5118 +
5119 + static void table_flush_by_iface(struct mesh_table *tbl)
5120 + {
5121 + struct mesh_path *mpath;
5122 +- struct rhashtable_iter iter;
5123 +- int ret;
5124 +-
5125 +- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
5126 +- if (ret)
5127 +- return;
5128 +-
5129 +- rhashtable_walk_start(&iter);
5130 ++ struct hlist_node *n;
5131 +
5132 +- while ((mpath = rhashtable_walk_next(&iter))) {
5133 +- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
5134 +- continue;
5135 +- if (IS_ERR(mpath))
5136 +- break;
5137 ++ spin_lock_bh(&tbl->walk_lock);
5138 ++ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
5139 + __mesh_path_del(tbl, mpath);
5140 + }
5141 +-
5142 +- rhashtable_walk_stop(&iter);
5143 +- rhashtable_walk_exit(&iter);
5144 ++ spin_unlock_bh(&tbl->walk_lock);
5145 + }
5146 +
5147 + /**
5148 +@@ -675,7 +624,7 @@ static int table_path_del(struct mesh_table *tbl,
5149 + {
5150 + struct mesh_path *mpath;
5151 +
5152 +- rcu_read_lock();
5153 ++ spin_lock_bh(&tbl->walk_lock);
5154 + mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
5155 + if (!mpath) {
5156 + rcu_read_unlock();
5157 +@@ -683,7 +632,7 @@ static int table_path_del(struct mesh_table *tbl,
5158 + }
5159 +
5160 + __mesh_path_del(tbl, mpath);
5161 +- rcu_read_unlock();
5162 ++ spin_unlock_bh(&tbl->walk_lock);
5163 + return 0;
5164 + }
5165 +
5166 +@@ -854,28 +803,16 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
5167 + struct mesh_table *tbl)
5168 + {
5169 + struct mesh_path *mpath;
5170 +- struct rhashtable_iter iter;
5171 +- int ret;
5172 ++ struct hlist_node *n;
5173 +
5174 +- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL);
5175 +- if (ret)
5176 +- return;
5177 +-
5178 +- rhashtable_walk_start(&iter);
5179 +-
5180 +- while ((mpath = rhashtable_walk_next(&iter))) {
5181 +- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
5182 +- continue;
5183 +- if (IS_ERR(mpath))
5184 +- break;
5185 ++ spin_lock_bh(&tbl->walk_lock);
5186 ++ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
5187 + if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
5188 + (!(mpath->flags & MESH_PATH_FIXED)) &&
5189 + time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
5190 + __mesh_path_del(tbl, mpath);
5191 + }
5192 +-
5193 +- rhashtable_walk_stop(&iter);
5194 +- rhashtable_walk_exit(&iter);
5195 ++ spin_unlock_bh(&tbl->walk_lock);
5196 + }
5197 +
5198 + void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
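
On the net/mac80211/mesh_pathtbl.c rework above: rhashtable walkers can return -EAGAIN mid-iteration and are awkward in atomic context, so the patch threads every mesh_path onto a plain hlist (walk_head/walk_list) that writers mutate under walk_lock while readers traverse under RCU. A hedged userspace sketch of the same shape, with a pthread mutex standing in for the kernel's spinlock/RCU pair and all names hypothetical:

#include <pthread.h>

struct mpath {
	char dst[6];
	struct mpath *hash_next;   /* hash-bucket chain */
	struct mpath *walk_next;   /* global walk list */
};

struct table {
	struct mpath *buckets[64];
	struct mpath *walk_head;
	pthread_mutex_t walk_lock;
};

static unsigned int hash_dst(const char *dst)
{
	unsigned int h = 0;

	for (int i = 0; i < 6; i++)
		h = h * 31 + (unsigned char)dst[i];
	return h % 64;
}

static void path_add(struct table *t, struct mpath *mp)
{
	unsigned int b = hash_dst(mp->dst);

	pthread_mutex_lock(&t->walk_lock);
	mp->hash_next = t->buckets[b];   /* insert into the hash table */
	t->buckets[b] = mp;
	mp->walk_next = t->walk_head;    /* and onto the walk list */
	t->walk_head = mp;
	pthread_mutex_unlock(&t->walk_lock);
}

Iteration then just follows walk_next, with no resize-aware walker and no -EAGAIN retries.
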
5199 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
5200 +index 51ad330bf8e83..828348b2a504d 100644
5201 +--- a/net/mac80211/rx.c
5202 ++++ b/net/mac80211/rx.c
5203 +@@ -2598,6 +2598,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
5204 + struct ieee80211_sub_if_data *sdata = rx->sdata;
5205 + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
5206 + u16 ac, q, hdrlen;
5207 ++ int tailroom = 0;
5208 +
5209 + hdr = (struct ieee80211_hdr *) skb->data;
5210 + hdrlen = ieee80211_hdrlen(hdr->frame_control);
5211 +@@ -2684,8 +2685,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
5212 + if (!ifmsh->mshcfg.dot11MeshForwarding)
5213 + goto out;
5214 +
5215 ++ if (sdata->crypto_tx_tailroom_needed_cnt)
5216 ++ tailroom = IEEE80211_ENCRYPT_TAILROOM;
5217 ++
5218 + fwd_skb = skb_copy_expand(skb, local->tx_headroom +
5219 +- sdata->encrypt_headroom, 0, GFP_ATOMIC);
5220 ++ sdata->encrypt_headroom,
5221 ++ tailroom, GFP_ATOMIC);
5222 + if (!fwd_skb)
5223 + goto out;
5224 +
5225 +diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
5226 +index d8125616edc79..e1537ace2b90c 100644
5227 +--- a/net/netfilter/nf_flow_table_core.c
5228 ++++ b/net/netfilter/nf_flow_table_core.c
5229 +@@ -28,6 +28,7 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
5230 + {
5231 + struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
5232 + struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
5233 ++ struct dst_entry *other_dst = route->tuple[!dir].dst;
5234 + struct dst_entry *dst = route->tuple[dir].dst;
5235 +
5236 + ft->dir = dir;
5237 +@@ -50,8 +51,8 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
5238 + ft->src_port = ctt->src.u.tcp.port;
5239 + ft->dst_port = ctt->dst.u.tcp.port;
5240 +
5241 +- ft->iifidx = route->tuple[dir].ifindex;
5242 +- ft->oifidx = route->tuple[!dir].ifindex;
5243 ++ ft->iifidx = other_dst->dev->ifindex;
5244 ++ ft->oifidx = dst->dev->ifindex;
5245 + ft->dst_cache = dst;
5246 + }
5247 +
5248 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
5249 +index ed9af46720e14..7d424fd270255 100644
5250 +--- a/net/netfilter/nf_tables_api.c
5251 ++++ b/net/netfilter/nf_tables_api.c
5252 +@@ -291,6 +291,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
5253 + int err;
5254 +
5255 + list_for_each_entry(rule, &ctx->chain->rules, list) {
5256 ++ if (!nft_is_active_next(ctx->net, rule))
5257 ++ continue;
5258 ++
5259 + err = nft_delrule(ctx, rule);
5260 + if (err < 0)
5261 + return err;
5262 +@@ -4439,6 +4442,8 @@ err6:
5263 + err5:
5264 + kfree(trans);
5265 + err4:
5266 ++ if (obj)
5267 ++ obj->use--;
5268 + kfree(elem.priv);
5269 + err3:
5270 + if (nla[NFTA_SET_ELEM_DATA] != NULL)
5271 +diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
5272 +index 00db27dfd2ff7..b0bc130947c94 100644
5273 +--- a/net/netfilter/nfnetlink_osf.c
5274 ++++ b/net/netfilter/nfnetlink_osf.c
5275 +@@ -71,6 +71,7 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
5276 + int ttl_check,
5277 + struct nf_osf_hdr_ctx *ctx)
5278 + {
5279 ++ const __u8 *optpinit = ctx->optp;
5280 + unsigned int check_WSS = 0;
5281 + int fmatch = FMATCH_WRONG;
5282 + int foptsize, optnum;
5283 +@@ -160,6 +161,9 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
5284 + }
5285 + }
5286 +
5287 ++ if (fmatch != FMATCH_OK)
5288 ++ ctx->optp = optpinit;
5289 ++
5290 + return fmatch == FMATCH_OK;
5291 + }
5292 +
5293 +diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
5294 +index 29d6fc73caf99..38da1f5436b48 100644
5295 +--- a/net/netfilter/nft_compat.c
5296 ++++ b/net/netfilter/nft_compat.c
5297 +@@ -282,6 +282,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
5298 + {
5299 + struct xt_target *target = expr->ops->data;
5300 + void *info = nft_expr_priv(expr);
5301 ++ struct module *me = target->me;
5302 + struct xt_tgdtor_param par;
5303 +
5304 + par.net = ctx->net;
5305 +@@ -292,7 +293,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
5306 + par.target->destroy(&par);
5307 +
5308 + if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
5309 +- module_put(target->me);
5310 ++ module_put(me);
5311 + }
5312 +
5313 + static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
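
On the net/netfilter/nft_compat.c change above: ->destroy() can drop the last reference to the expression that owns target, so target->me must be loaded into a local before the call. A generic sketch of that use-after-free pattern, with hypothetical types:

struct owner;                            /* stands in for struct module */
struct target {
	struct owner *me;
	void (*destroy)(struct target *t);   /* may free t itself */
};

/* The fix in one line: read t->me *before* destroy() can free t. */
static struct owner *destroy_and_get_owner(struct target *t)
{
	struct owner *me = t->me;

	t->destroy(t);      /* t must not be touched after this call */
	return me;
}
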
5314 +diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
5315 +index 5fd4c57c79cc9..436cc14cfc59b 100644
5316 +--- a/net/netfilter/nft_flow_offload.c
5317 ++++ b/net/netfilter/nft_flow_offload.c
5318 +@@ -12,6 +12,7 @@
5319 + #include <net/netfilter/nf_conntrack_core.h>
5320 + #include <linux/netfilter/nf_conntrack_common.h>
5321 + #include <net/netfilter/nf_flow_table.h>
5322 ++#include <net/netfilter/nf_conntrack_helper.h>
5323 +
5324 + struct nft_flow_offload {
5325 + struct nft_flowtable *flowtable;
5326 +@@ -29,10 +30,12 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
5327 + memset(&fl, 0, sizeof(fl));
5328 + switch (nft_pf(pkt)) {
5329 + case NFPROTO_IPV4:
5330 +- fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip;
5331 ++ fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
5332 ++ fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
5333 + break;
5334 + case NFPROTO_IPV6:
5335 +- fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6;
5336 ++ fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
5337 ++ fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
5338 + break;
5339 + }
5340 +
5341 +@@ -41,9 +44,7 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
5342 + return -ENOENT;
5343 +
5344 + route->tuple[dir].dst = this_dst;
5345 +- route->tuple[dir].ifindex = nft_in(pkt)->ifindex;
5346 + route->tuple[!dir].dst = other_dst;
5347 +- route->tuple[!dir].ifindex = nft_out(pkt)->ifindex;
5348 +
5349 + return 0;
5350 + }
5351 +@@ -66,6 +67,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
5352 + {
5353 + struct nft_flow_offload *priv = nft_expr_priv(expr);
5354 + struct nf_flowtable *flowtable = &priv->flowtable->data;
5355 ++ const struct nf_conn_help *help;
5356 + enum ip_conntrack_info ctinfo;
5357 + struct nf_flow_route route;
5358 + struct flow_offload *flow;
5359 +@@ -88,7 +90,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
5360 + goto out;
5361 + }
5362 +
5363 +- if (test_bit(IPS_HELPER_BIT, &ct->status))
5364 ++ help = nfct_help(ct);
5365 ++ if (help)
5366 + goto out;
5367 +
5368 + if (ctinfo == IP_CT_NEW ||
5369 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
5370 +index c76c21604ffd9..fd16fb836df28 100644
5371 +--- a/net/packet/af_packet.c
5372 ++++ b/net/packet/af_packet.c
5373 +@@ -4275,7 +4275,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
5374 + rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
5375 + if (unlikely(rb->frames_per_block == 0))
5376 + goto out;
5377 +- if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
5378 ++ if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
5379 + goto out;
5380 + if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
5381 + req->tp_frame_nr))
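
On the net/packet/af_packet.c change above: the old guard checked tp_block_size against tp_block_nr, but the multiplication actually performed two lines later is frames_per_block * tp_block_nr, so the product could still wrap. A one-line sketch of the rule: guard the exact factors you multiply, not related quantities. mul_fits_uint() is an illustrative name.

#include <limits.h>
#include <stdbool.h>

static bool mul_fits_uint(unsigned int a, unsigned int b)
{
	return b == 0 || a <= UINT_MAX / b;
}

/* usage sketch:
 *	if (!mul_fits_uint(rb->frames_per_block, req->tp_block_nr))
 *		goto out;
 */
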
5382 +diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
5383 +index 9ccc93f257db0..38bb882bb9587 100644
5384 +--- a/net/sched/cls_tcindex.c
5385 ++++ b/net/sched/cls_tcindex.c
5386 +@@ -48,7 +48,7 @@ struct tcindex_data {
5387 + u32 hash; /* hash table size; 0 if undefined */
5388 + u32 alloc_hash; /* allocated size */
5389 + u32 fall_through; /* 0: only classify if explicit match */
5390 +- struct rcu_head rcu;
5391 ++ struct rcu_work rwork;
5392 + };
5393 +
5394 + static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
5395 +@@ -221,17 +221,11 @@ found:
5396 + return 0;
5397 + }
5398 +
5399 +-static int tcindex_destroy_element(struct tcf_proto *tp,
5400 +- void *arg, struct tcf_walker *walker)
5401 +-{
5402 +- bool last;
5403 +-
5404 +- return tcindex_delete(tp, arg, &last, NULL);
5405 +-}
5406 +-
5407 +-static void __tcindex_destroy(struct rcu_head *head)
5408 ++static void tcindex_destroy_work(struct work_struct *work)
5409 + {
5410 +- struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
5411 ++ struct tcindex_data *p = container_of(to_rcu_work(work),
5412 ++ struct tcindex_data,
5413 ++ rwork);
5414 +
5415 + kfree(p->perfect);
5416 + kfree(p->h);
5417 +@@ -258,9 +252,11 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r)
5418 + return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
5419 + }
5420 +
5421 +-static void __tcindex_partial_destroy(struct rcu_head *head)
5422 ++static void tcindex_partial_destroy_work(struct work_struct *work)
5423 + {
5424 +- struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
5425 ++ struct tcindex_data *p = container_of(to_rcu_work(work),
5426 ++ struct tcindex_data,
5427 ++ rwork);
5428 +
5429 + kfree(p->perfect);
5430 + kfree(p);
5431 +@@ -275,7 +271,7 @@ static void tcindex_free_perfect_hash(struct tcindex_data *cp)
5432 + kfree(cp->perfect);
5433 + }
5434 +
5435 +-static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
5436 ++static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
5437 + {
5438 + int i, err = 0;
5439 +
5440 +@@ -289,6 +285,9 @@ static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
5441 + TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
5442 + if (err < 0)
5443 + goto errout;
5444 ++#ifdef CONFIG_NET_CLS_ACT
5445 ++ cp->perfect[i].exts.net = net;
5446 ++#endif
5447 + }
5448 +
5449 + return 0;
5450 +@@ -305,9 +304,9 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5451 + struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
5452 + {
5453 + struct tcindex_filter_result new_filter_result, *old_r = r;
5454 +- struct tcindex_filter_result cr;
5455 + struct tcindex_data *cp = NULL, *oldp;
5456 + struct tcindex_filter *f = NULL; /* make gcc behave */
5457 ++ struct tcf_result cr = {};
5458 + int err, balloc = 0;
5459 + struct tcf_exts e;
5460 +
5461 +@@ -337,7 +336,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5462 + if (p->perfect) {
5463 + int i;
5464 +
5465 +- if (tcindex_alloc_perfect_hash(cp) < 0)
5466 ++ if (tcindex_alloc_perfect_hash(net, cp) < 0)
5467 + goto errout;
5468 + for (i = 0; i < cp->hash; i++)
5469 + cp->perfect[i].res = p->perfect[i].res;
5470 +@@ -346,13 +345,10 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5471 + cp->h = p->h;
5472 +
5473 + err = tcindex_filter_result_init(&new_filter_result);
5474 +- if (err < 0)
5475 +- goto errout1;
5476 +- err = tcindex_filter_result_init(&cr);
5477 + if (err < 0)
5478 + goto errout1;
5479 + if (old_r)
5480 +- cr.res = r->res;
5481 ++ cr = r->res;
5482 +
5483 + if (tb[TCA_TCINDEX_HASH])
5484 + cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
5485 +@@ -406,7 +402,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5486 + err = -ENOMEM;
5487 + if (!cp->perfect && !cp->h) {
5488 + if (valid_perfect_hash(cp)) {
5489 +- if (tcindex_alloc_perfect_hash(cp) < 0)
5490 ++ if (tcindex_alloc_perfect_hash(net, cp) < 0)
5491 + goto errout_alloc;
5492 + balloc = 1;
5493 + } else {
5494 +@@ -443,8 +439,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5495 + }
5496 +
5497 + if (tb[TCA_TCINDEX_CLASSID]) {
5498 +- cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
5499 +- tcf_bind_filter(tp, &cr.res, base);
5500 ++ cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
5501 ++ tcf_bind_filter(tp, &cr, base);
5502 + }
5503 +
5504 + if (old_r && old_r != r) {
5505 +@@ -456,7 +452,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5506 + }
5507 +
5508 + oldp = p;
5509 +- r->res = cr.res;
5510 ++ r->res = cr;
5511 + tcf_exts_change(&r->exts, &e);
5512 +
5513 + rcu_assign_pointer(tp->root, cp);
5514 +@@ -475,10 +471,12 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5515 + ; /* nothing */
5516 +
5517 + rcu_assign_pointer(*fp, f);
5518 ++ } else {
5519 ++ tcf_exts_destroy(&new_filter_result.exts);
5520 + }
5521 +
5522 + if (oldp)
5523 +- call_rcu(&oldp->rcu, __tcindex_partial_destroy);
5524 ++ tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
5525 + return 0;
5526 +
5527 + errout_alloc:
5528 +@@ -487,7 +485,6 @@ errout_alloc:
5529 + else if (balloc == 2)
5530 + kfree(cp->h);
5531 + errout1:
5532 +- tcf_exts_destroy(&cr.exts);
5533 + tcf_exts_destroy(&new_filter_result.exts);
5534 + errout:
5535 + kfree(cp);
5536 +@@ -562,15 +559,34 @@ static void tcindex_destroy(struct tcf_proto *tp,
5537 + struct netlink_ext_ack *extack)
5538 + {
5539 + struct tcindex_data *p = rtnl_dereference(tp->root);
5540 +- struct tcf_walker walker;
5541 ++ int i;
5542 +
5543 + pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
5544 +- walker.count = 0;
5545 +- walker.skip = 0;
5546 +- walker.fn = tcindex_destroy_element;
5547 +- tcindex_walk(tp, &walker);
5548 +
5549 +- call_rcu(&p->rcu, __tcindex_destroy);
5550 ++ if (p->perfect) {
5551 ++ for (i = 0; i < p->hash; i++) {
5552 ++ struct tcindex_filter_result *r = p->perfect + i;
5553 ++
5554 ++ tcf_unbind_filter(tp, &r->res);
5555 ++ if (tcf_exts_get_net(&r->exts))
5556 ++ tcf_queue_work(&r->rwork,
5557 ++ tcindex_destroy_rexts_work);
5558 ++ else
5559 ++ __tcindex_destroy_rexts(r);
5560 ++ }
5561 ++ }
5562 ++
5563 ++ for (i = 0; p->h && i < p->hash; i++) {
5564 ++ struct tcindex_filter *f, *next;
5565 ++ bool last;
5566 ++
5567 ++ for (f = rtnl_dereference(p->h[i]); f; f = next) {
5568 ++ next = rtnl_dereference(f->next);
5569 ++ tcindex_delete(tp, &f->result, &last, NULL);
5570 ++ }
5571 ++ }
5572 ++
5573 ++ tcf_queue_work(&p->rwork, tcindex_destroy_work);
5574 + }
5575 +
5576 +
5577 +diff --git a/net/sctp/diag.c b/net/sctp/diag.c
5578 +index 078f01a8d582a..435847d98b51c 100644
5579 +--- a/net/sctp/diag.c
5580 ++++ b/net/sctp/diag.c
5581 +@@ -256,6 +256,7 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
5582 + + nla_total_size(1) /* INET_DIAG_TOS */
5583 + + nla_total_size(1) /* INET_DIAG_TCLASS */
5584 + + nla_total_size(4) /* INET_DIAG_MARK */
5585 ++ + nla_total_size(4) /* INET_DIAG_CLASS_ID */
5586 + + nla_total_size(addrlen * asoc->peer.transport_count)
5587 + + nla_total_size(addrlen * addrcnt)
5588 + + nla_total_size(sizeof(struct inet_diag_meminfo))
5589 +diff --git a/net/sctp/offload.c b/net/sctp/offload.c
5590 +index 123e9f2dc2265..edfcf16e704c4 100644
5591 +--- a/net/sctp/offload.c
5592 ++++ b/net/sctp/offload.c
5593 +@@ -36,6 +36,7 @@ static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
5594 + {
5595 + skb->ip_summed = CHECKSUM_NONE;
5596 + skb->csum_not_inet = 0;
5597 ++ gso_reset_checksum(skb, ~0);
5598 + return sctp_compute_cksum(skb, skb_transport_offset(skb));
5599 + }
5600 +
5601 +diff --git a/net/sctp/stream.c b/net/sctp/stream.c
5602 +index f24633114dfdf..2936ed17bf9ef 100644
5603 +--- a/net/sctp/stream.c
5604 ++++ b/net/sctp/stream.c
5605 +@@ -144,8 +144,10 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
5606 + }
5607 + }
5608 +
5609 +- for (i = outcnt; i < stream->outcnt; i++)
5610 ++ for (i = outcnt; i < stream->outcnt; i++) {
5611 + kfree(SCTP_SO(stream, i)->ext);
5612 ++ SCTP_SO(stream, i)->ext = NULL;
5613 ++ }
5614 + }
5615 +
5616 + static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
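
On the net/sctp/stream.c change above: when a stream is shrunk the ext buffers are freed, but the pointers were left behind, so a later grow-and-shrink cycle freed them again. A minimal userspace sketch of the free-and-clear idiom; the names are illustrative.

#include <stdlib.h>

struct stream_out { void *ext; };

static void stream_shrink(struct stream_out *so, int from, int to)
{
	for (int i = from; i < to; i++) {
		free(so[i].ext);
		so[i].ext = NULL;   /* the fix: no dangling pointer left */
	}
}
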
5617 +diff --git a/net/socket.c b/net/socket.c
5618 +index 390a8ecef4bf4..5c820212ba815 100644
5619 +--- a/net/socket.c
5620 ++++ b/net/socket.c
5621 +@@ -941,8 +941,7 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
5622 + EXPORT_SYMBOL(dlci_ioctl_set);
5623 +
5624 + static long sock_do_ioctl(struct net *net, struct socket *sock,
5625 +- unsigned int cmd, unsigned long arg,
5626 +- unsigned int ifreq_size)
5627 ++ unsigned int cmd, unsigned long arg)
5628 + {
5629 + int err;
5630 + void __user *argp = (void __user *)arg;
5631 +@@ -968,11 +967,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
5632 + } else {
5633 + struct ifreq ifr;
5634 + bool need_copyout;
5635 +- if (copy_from_user(&ifr, argp, ifreq_size))
5636 ++ if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
5637 + return -EFAULT;
5638 + err = dev_ioctl(net, cmd, &ifr, &need_copyout);
5639 + if (!err && need_copyout)
5640 +- if (copy_to_user(argp, &ifr, ifreq_size))
5641 ++ if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
5642 + return -EFAULT;
5643 + }
5644 + return err;
5645 +@@ -1071,8 +1070,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
5646 + err = open_related_ns(&net->ns, get_net_ns);
5647 + break;
5648 + default:
5649 +- err = sock_do_ioctl(net, sock, cmd, arg,
5650 +- sizeof(struct ifreq));
5651 ++ err = sock_do_ioctl(net, sock, cmd, arg);
5652 + break;
5653 + }
5654 + return err;
5655 +@@ -2752,8 +2750,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
5656 + int err;
5657 +
5658 + set_fs(KERNEL_DS);
5659 +- err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv,
5660 +- sizeof(struct compat_ifreq));
5661 ++ err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
5662 + set_fs(old_fs);
5663 + if (!err)
5664 + err = compat_put_timeval(&ktv, up);
5665 +@@ -2769,8 +2766,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
5666 + int err;
5667 +
5668 + set_fs(KERNEL_DS);
5669 +- err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts,
5670 +- sizeof(struct compat_ifreq));
5671 ++ err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
5672 + set_fs(old_fs);
5673 + if (!err)
5674 + err = compat_put_timespec(&kts, up);
5675 +@@ -2966,6 +2962,54 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
5676 + return dev_ioctl(net, cmd, &ifreq, NULL);
5677 + }
5678 +
5679 ++static int compat_ifreq_ioctl(struct net *net, struct socket *sock,
5680 ++ unsigned int cmd,
5681 ++ struct compat_ifreq __user *uifr32)
5682 ++{
5683 ++ struct ifreq __user *uifr;
5684 ++ int err;
5685 ++
5686 ++ /* Handle the fact that while struct ifreq has the same *layout* on
5687 ++ * 32/64 for everything but ifreq::ifru_ifmap and ifreq::ifru_data,
5688 ++ * which are handled elsewhere, it still has different *size* due to
5689 ++ * ifreq::ifru_ifmap (which is 16 bytes on 32 bit, 24 bytes on 64-bit,
5690 ++ * resulting in struct ifreq being 32 and 40 bytes respectively).
5691 ++ * As a result, if the struct happens to be at the end of a page and
5692 ++ * the next page isn't readable/writable, we get a fault. To prevent
5693 ++ * that, copy back and forth to the full size.
5694 ++ */
5695 ++
5696 ++ uifr = compat_alloc_user_space(sizeof(*uifr));
5697 ++ if (copy_in_user(uifr, uifr32, sizeof(*uifr32)))
5698 ++ return -EFAULT;
5699 ++
5700 ++ err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr);
5701 ++
5702 ++ if (!err) {
5703 ++ switch (cmd) {
5704 ++ case SIOCGIFFLAGS:
5705 ++ case SIOCGIFMETRIC:
5706 ++ case SIOCGIFMTU:
5707 ++ case SIOCGIFMEM:
5708 ++ case SIOCGIFHWADDR:
5709 ++ case SIOCGIFINDEX:
5710 ++ case SIOCGIFADDR:
5711 ++ case SIOCGIFBRDADDR:
5712 ++ case SIOCGIFDSTADDR:
5713 ++ case SIOCGIFNETMASK:
5714 ++ case SIOCGIFPFLAGS:
5715 ++ case SIOCGIFTXQLEN:
5716 ++ case SIOCGMIIPHY:
5717 ++ case SIOCGMIIREG:
5718 ++ case SIOCGIFNAME:
5719 ++ if (copy_in_user(uifr32, uifr, sizeof(*uifr32)))
5720 ++ err = -EFAULT;
5721 ++ break;
5722 ++ }
5723 ++ }
5724 ++ return err;
5725 ++}
5726 ++
5727 + static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
5728 + struct compat_ifreq __user *uifr32)
5729 + {
5730 +@@ -3081,8 +3125,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
5731 + }
5732 +
5733 + set_fs(KERNEL_DS);
5734 +- ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r,
5735 +- sizeof(struct compat_ifreq));
5736 ++ ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
5737 + set_fs(old_fs);
5738 +
5739 + out:
5740 +@@ -3182,21 +3225,22 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
5741 + case SIOCSIFTXQLEN:
5742 + case SIOCBRADDIF:
5743 + case SIOCBRDELIF:
5744 ++ case SIOCGIFNAME:
5745 + case SIOCSIFNAME:
5746 + case SIOCGMIIPHY:
5747 + case SIOCGMIIREG:
5748 + case SIOCSMIIREG:
5749 +- case SIOCSARP:
5750 +- case SIOCGARP:
5751 +- case SIOCDARP:
5752 +- case SIOCATMARK:
5753 + case SIOCBONDENSLAVE:
5754 + case SIOCBONDRELEASE:
5755 + case SIOCBONDSETHWADDR:
5756 + case SIOCBONDCHANGEACTIVE:
5757 +- case SIOCGIFNAME:
5758 +- return sock_do_ioctl(net, sock, cmd, arg,
5759 +- sizeof(struct compat_ifreq));
5760 ++ return compat_ifreq_ioctl(net, sock, cmd, argp);
5761 ++
5762 ++ case SIOCSARP:
5763 ++ case SIOCGARP:
5764 ++ case SIOCDARP:
5765 ++ case SIOCATMARK:
5766 ++ return sock_do_ioctl(net, sock, cmd, arg);
5767 + }
5768 +
5769 + return -ENOIOCTLCMD;
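
On the net/socket.c rework above: the long comment in compat_ifreq_ioctl() is the heart of it; struct ifreq's union contains ifru_ifmap, which is 16 bytes under a 32-bit ABI but 24 under a 64-bit one, so a full native-size copy from a 32-bit caller can run 8 bytes past the user buffer and fault at a page boundary. A quick way to see the native size (prints 40 on a typical 64-bit Linux build; assumption):

#include <net/if.h>
#include <stdio.h>

int main(void)
{
	printf("sizeof(struct ifreq) = %zu\n", sizeof(struct ifreq));
	return 0;
}
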
5770 +diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
5771 +index 956a5ea47b58e..3d6bf790cf1fb 100644
5772 +--- a/net/sunrpc/xprtrdma/verbs.c
5773 ++++ b/net/sunrpc/xprtrdma/verbs.c
5774 +@@ -872,7 +872,7 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
5775 + for (i = 0; i <= buf->rb_sc_last; i++) {
5776 + sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
5777 + if (!sc)
5778 +- goto out_destroy;
5779 ++ return -ENOMEM;
5780 +
5781 + sc->sc_xprt = r_xprt;
5782 + buf->rb_sc_ctxs[i] = sc;
5783 +@@ -880,10 +880,6 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
5784 + buf->rb_flags = 0;
5785 +
5786 + return 0;
5787 +-
5788 +-out_destroy:
5789 +- rpcrdma_sendctxs_destroy(buf);
5790 +- return -ENOMEM;
5791 + }
5792 +
5793 + /* The sendctx queue is not guaranteed to have a size that is a
5794 +diff --git a/security/keys/key.c b/security/keys/key.c
5795 +index d97c9394b5dd4..249a6da4d2770 100644
5796 +--- a/security/keys/key.c
5797 ++++ b/security/keys/key.c
5798 +@@ -265,8 +265,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
5799 +
5800 + spin_lock(&user->lock);
5801 + if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
5802 +- if (user->qnkeys + 1 >= maxkeys ||
5803 +- user->qnbytes + quotalen >= maxbytes ||
5804 ++ if (user->qnkeys + 1 > maxkeys ||
5805 ++ user->qnbytes + quotalen > maxbytes ||
5806 + user->qnbytes + quotalen < user->qnbytes)
5807 + goto no_quota;
5808 + }
5809 +diff --git a/security/keys/keyring.c b/security/keys/keyring.c
5810 +index 41bcf57e96f21..99a55145ddcd2 100644
5811 +--- a/security/keys/keyring.c
5812 ++++ b/security/keys/keyring.c
5813 +@@ -661,9 +661,6 @@ static bool search_nested_keyrings(struct key *keyring,
5814 + BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
5815 + (ctx->flags & STATE_CHECKS) == STATE_CHECKS);
5816 +
5817 +- if (ctx->index_key.description)
5818 +- ctx->index_key.desc_len = strlen(ctx->index_key.description);
5819 +-
5820 + /* Check to see if this top-level keyring is what we are looking for
5821 + * and whether it is valid or not.
5822 + */
5823 +@@ -914,6 +911,7 @@ key_ref_t keyring_search(key_ref_t keyring,
5824 + struct keyring_search_context ctx = {
5825 + .index_key.type = type,
5826 + .index_key.description = description,
5827 ++ .index_key.desc_len = strlen(description),
5828 + .cred = current_cred(),
5829 + .match_data.cmp = key_default_cmp,
5830 + .match_data.raw_data = description,
5831 +diff --git a/security/keys/proc.c b/security/keys/proc.c
5832 +index 5af2934965d80..d38be9db2cc07 100644
5833 +--- a/security/keys/proc.c
5834 ++++ b/security/keys/proc.c
5835 +@@ -166,8 +166,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
5836 + int rc;
5837 +
5838 + struct keyring_search_context ctx = {
5839 +- .index_key.type = key->type,
5840 +- .index_key.description = key->description,
5841 ++ .index_key = key->index_key,
5842 + .cred = m->file->f_cred,
5843 + .match_data.cmp = lookup_user_key_possessed,
5844 + .match_data.raw_data = key,
5845 +diff --git a/security/keys/request_key.c b/security/keys/request_key.c
5846 +index 114f7408feee6..7385536986497 100644
5847 +--- a/security/keys/request_key.c
5848 ++++ b/security/keys/request_key.c
5849 +@@ -545,6 +545,7 @@ struct key *request_key_and_link(struct key_type *type,
5850 + struct keyring_search_context ctx = {
5851 + .index_key.type = type,
5852 + .index_key.description = description,
5853 ++ .index_key.desc_len = strlen(description),
5854 + .cred = current_cred(),
5855 + .match_data.cmp = key_default_cmp,
5856 + .match_data.raw_data = description,
5857 +diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
5858 +index 424e1d90412ea..6797843154f03 100644
5859 +--- a/security/keys/request_key_auth.c
5860 ++++ b/security/keys/request_key_auth.c
5861 +@@ -246,7 +246,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
5862 + struct key *authkey;
5863 + key_ref_t authkey_ref;
5864 +
5865 +- sprintf(description, "%x", target_id);
5866 ++ ctx.index_key.desc_len = sprintf(description, "%x", target_id);
5867 +
5868 + authkey_ref = search_process_keyrings(&ctx);
5869 +
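
On the security/keys changes above: desc_len is now filled in once when each keyring_search_context is built (for the auth-key case, straight from sprintf()'s return value) instead of being recomputed lazily inside search_nested_keyrings(), which had missed the proc and auth-key paths. A small userspace sketch of fixing the invariant at construction time; index_key and make_index_key() are hypothetical stand-ins.

#include <stdio.h>
#include <string.h>

struct index_key {
	const char *description;
	size_t desc_len;
};

static struct index_key make_index_key(const char *desc)
{
	struct index_key k = {
		.description = desc,
		.desc_len = strlen(desc),   /* fixed at construction */
	};
	return k;
}

int main(void)
{
	struct index_key k = make_index_key("user;foo");
	printf("desc_len=%zu\n", k.desc_len);
	return 0;
}
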
5870 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5871 +index 9199d91d0a594..bf1ffcaab23fe 100644
5872 +--- a/sound/pci/hda/patch_realtek.c
5873 ++++ b/sound/pci/hda/patch_realtek.c
5874 +@@ -1855,6 +1855,8 @@ enum {
5875 + ALC887_FIXUP_BASS_CHMAP,
5876 + ALC1220_FIXUP_GB_DUAL_CODECS,
5877 + ALC1220_FIXUP_CLEVO_P950,
5878 ++ ALC1220_FIXUP_SYSTEM76_ORYP5,
5879 ++ ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
5880 + };
5881 +
5882 + static void alc889_fixup_coef(struct hda_codec *codec,
5883 +@@ -2056,6 +2058,17 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
5884 + snd_hda_override_conn_list(codec, 0x1b, 1, conn1);
5885 + }
5886 +
5887 ++static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
5888 ++ const struct hda_fixup *fix, int action);
5889 ++
5890 ++static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
5891 ++ const struct hda_fixup *fix,
5892 ++ int action)
5893 ++{
5894 ++ alc1220_fixup_clevo_p950(codec, fix, action);
5895 ++ alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
5896 ++}
5897 ++
5898 + static const struct hda_fixup alc882_fixups[] = {
5899 + [ALC882_FIXUP_ABIT_AW9D_MAX] = {
5900 + .type = HDA_FIXUP_PINS,
5901 +@@ -2300,6 +2313,19 @@ static const struct hda_fixup alc882_fixups[] = {
5902 + .type = HDA_FIXUP_FUNC,
5903 + .v.func = alc1220_fixup_clevo_p950,
5904 + },
5905 ++ [ALC1220_FIXUP_SYSTEM76_ORYP5] = {
5906 ++ .type = HDA_FIXUP_FUNC,
5907 ++ .v.func = alc1220_fixup_system76_oryp5,
5908 ++ },
5909 ++ [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
5910 ++ .type = HDA_FIXUP_PINS,
5911 ++ .v.pins = (const struct hda_pintbl[]) {
5912 ++ { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
5913 ++ {}
5914 ++ },
5915 ++ .chained = true,
5916 ++ .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
5917 ++ },
5918 + };
5919 +
5920 + static const struct snd_pci_quirk alc882_fixup_tbl[] = {
5921 +@@ -2376,6 +2402,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
5922 + SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
5923 + SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
5924 + SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
5925 ++ SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
5926 ++ SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
5927 + SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
5928 + SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
5929 + SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
5930 +@@ -5573,6 +5601,7 @@ enum {
5931 + ALC294_FIXUP_ASUS_HEADSET_MIC,
5932 + ALC294_FIXUP_ASUS_SPK,
5933 + ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
5934 ++ ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
5935 + };
5936 +
5937 + static const struct hda_fixup alc269_fixups[] = {
5938 +@@ -6506,6 +6535,17 @@ static const struct hda_fixup alc269_fixups[] = {
5939 + .chained = true,
5940 + .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
5941 + },
5942 ++ [ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE] = {
5943 ++ .type = HDA_FIXUP_VERBS,
5944 ++ .v.verbs = (const struct hda_verb[]) {
5945 ++ /* Disable PCBEEP-IN passthrough */
5946 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
5947 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
5948 ++ { }
5949 ++ },
5950 ++ .chained = true,
5951 ++ .chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE
5952 ++ },
5953 + };
5954 +
5955 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5956 +@@ -7187,7 +7227,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5957 + {0x12, 0x90a60130},
5958 + {0x19, 0x03a11020},
5959 + {0x21, 0x0321101f}),
5960 +- SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
5961 ++ SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
5962 + {0x12, 0x90a60130},
5963 + {0x14, 0x90170110},
5964 + {0x19, 0x04a11040},
5965 +diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
5966 +index d029cad08cbd8..89f8b0dae7ef0 100644
5967 +--- a/tools/testing/selftests/bpf/test_progs.c
5968 ++++ b/tools/testing/selftests/bpf/test_progs.c
5969 +@@ -1136,7 +1136,9 @@ static void test_stacktrace_build_id(void)
5970 + int i, j;
5971 + struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
5972 + int build_id_matches = 0;
5973 ++ int retry = 1;
5974 +
5975 ++retry:
5976 + err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
5977 + if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
5978 + goto out;
5979 +@@ -1249,6 +1251,19 @@ static void test_stacktrace_build_id(void)
5980 + previous_key = key;
5981 + } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
5982 +
5983 ++ /* stack_map_get_build_id_offset() is racy and sometimes can return
5984 ++ * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
5985 ++ * try it one more time.
5986 ++ */
5987 ++ if (build_id_matches < 1 && retry--) {
5988 ++ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
5989 ++ close(pmu_fd);
5990 ++ bpf_object__close(obj);
5991 ++ printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
5992 ++ __func__);
5993 ++ goto retry;
5994 ++ }
5995 ++
5996 + if (CHECK(build_id_matches < 1, "build id match",
5997 + "Didn't find expected build ID from the map\n"))
5998 + goto disable_pmu;
5999 +@@ -1289,7 +1304,9 @@ static void test_stacktrace_build_id_nmi(void)
6000 + int i, j;
6001 + struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
6002 + int build_id_matches = 0;
6003 ++ int retry = 1;
6004 +
6005 ++retry:
6006 + err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
6007 + if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
6008 + return;
6009 +@@ -1384,6 +1401,19 @@ static void test_stacktrace_build_id_nmi(void)
6010 + previous_key = key;
6011 + } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
6012 +
6013 ++ /* stack_map_get_build_id_offset() is racy and sometimes can return
6014 ++ * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
6015 ++ * try it one more time.
6016 ++ */
6017 ++ if (build_id_matches < 1 && retry--) {
6018 ++ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
6019 ++ close(pmu_fd);
6020 ++ bpf_object__close(obj);
6021 ++ printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
6022 ++ __func__);
6023 ++ goto retry;
6024 ++ }
6025 ++
6026 + if (CHECK(build_id_matches < 1, "build id match",
6027 + "Didn't find expected build ID from the map\n"))
6028 + goto disable_pmu;
6029 +diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
6030 +index aeeb76a54d633..e38f1cb7089d3 100644
6031 +--- a/tools/testing/selftests/bpf/test_sock_addr.c
6032 ++++ b/tools/testing/selftests/bpf/test_sock_addr.c
6033 +@@ -44,6 +44,7 @@
6034 + #define SERV6_V4MAPPED_IP "::ffff:192.168.0.4"
6035 + #define SRC6_IP "::1"
6036 + #define SRC6_REWRITE_IP "::6"
6037 ++#define WILDCARD6_IP "::"
6038 + #define SERV6_PORT 6060
6039 + #define SERV6_REWRITE_PORT 6666
6040 +
6041 +@@ -85,12 +86,14 @@ static int bind4_prog_load(const struct sock_addr_test *test);
6042 + static int bind6_prog_load(const struct sock_addr_test *test);
6043 + static int connect4_prog_load(const struct sock_addr_test *test);
6044 + static int connect6_prog_load(const struct sock_addr_test *test);
6045 ++static int sendmsg_allow_prog_load(const struct sock_addr_test *test);
6046 + static int sendmsg_deny_prog_load(const struct sock_addr_test *test);
6047 + static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
6048 + static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test);
6049 + static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
6050 + static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test);
6051 + static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test);
6052 ++static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test);
6053 +
6054 + static struct sock_addr_test tests[] = {
6055 + /* bind */
6056 +@@ -462,6 +465,34 @@ static struct sock_addr_test tests[] = {
6057 + SRC6_REWRITE_IP,
6058 + SYSCALL_ENOTSUPP,
6059 + },
6060 ++ {
6061 ++ "sendmsg6: set dst IP = [::] (BSD'ism)",
6062 ++ sendmsg6_rw_wildcard_prog_load,
6063 ++ BPF_CGROUP_UDP6_SENDMSG,
6064 ++ BPF_CGROUP_UDP6_SENDMSG,
6065 ++ AF_INET6,
6066 ++ SOCK_DGRAM,
6067 ++ SERV6_IP,
6068 ++ SERV6_PORT,
6069 ++ SERV6_REWRITE_IP,
6070 ++ SERV6_REWRITE_PORT,
6071 ++ SRC6_REWRITE_IP,
6072 ++ SUCCESS,
6073 ++ },
6074 ++ {
6075 ++ "sendmsg6: preserve dst IP = [::] (BSD'ism)",
6076 ++ sendmsg_allow_prog_load,
6077 ++ BPF_CGROUP_UDP6_SENDMSG,
6078 ++ BPF_CGROUP_UDP6_SENDMSG,
6079 ++ AF_INET6,
6080 ++ SOCK_DGRAM,
6081 ++ WILDCARD6_IP,
6082 ++ SERV6_PORT,
6083 ++ SERV6_REWRITE_IP,
6084 ++ SERV6_PORT,
6085 ++ SRC6_IP,
6086 ++ SUCCESS,
6087 ++ },
6088 + {
6089 + "sendmsg6: deny call",
6090 + sendmsg_deny_prog_load,
6091 +@@ -714,16 +745,27 @@ static int connect6_prog_load(const struct sock_addr_test *test)
6092 + return load_path(test, CONNECT6_PROG_PATH);
6093 + }
6094 +
6095 +-static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
6096 ++static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
6097 ++ int32_t rc)
6098 + {
6099 + struct bpf_insn insns[] = {
6100 +- /* return 0 */
6101 +- BPF_MOV64_IMM(BPF_REG_0, 0),
6102 ++ /* return rc */
6103 ++ BPF_MOV64_IMM(BPF_REG_0, rc),
6104 + BPF_EXIT_INSN(),
6105 + };
6106 + return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
6107 + }
6108 +
6109 ++static int sendmsg_allow_prog_load(const struct sock_addr_test *test)
6110 ++{
6111 ++ return sendmsg_ret_only_prog_load(test, /*rc*/ 1);
6112 ++}
6113 ++
6114 ++static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
6115 ++{
6116 ++ return sendmsg_ret_only_prog_load(test, /*rc*/ 0);
6117 ++}
6118 ++
6119 + static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
6120 + {
6121 + struct sockaddr_in dst4_rw_addr;
6122 +@@ -844,6 +886,11 @@ static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test)
6123 + return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP);
6124 + }
6125 +
6126 ++static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test)
6127 ++{
6128 ++ return sendmsg6_rw_dst_asm_prog_load(test, WILDCARD6_IP);
6129 ++}
6130 ++
6131 + static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test)
6132 + {
6133 + return load_path(test, SENDMSG6_PROG_PATH);
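
Besides the wildcard-address tests, the test_sock_addr.c hunk above folds the old deny loader into a parameterized sendmsg_ret_only_prog_load(): allow and deny are the same two-instruction program, differing only in the constant loaded into R0 (1 lets the cgroup hook pass the sendmsg through, 0 rejects it). A sketch of that instruction pair, assuming the BPF_MOV64_IMM()/BPF_EXIT_INSN() macro definitions from the kernel's tools/include/linux/filter.h are on the include path:

    #include <linux/bpf.h>
    #include <linux/filter.h>   /* the kernel tools/include copy, which
                                 * defines the insn-building macros */

    /* build the minimal "return rc" program used for allow (rc = 1)
     * and deny (rc = 0) */
    static void build_ret_only_prog(struct bpf_insn insns[2], int rc)
    {
            insns[0] = BPF_MOV64_IMM(BPF_REG_0, rc);   /* R0 = rc */
            insns[1] = BPF_EXIT_INSN();                /* return R0 */
    }

Loaded as a cgroup sock_addr program and attached at BPF_CGROUP_UDP6_SENDMSG, the rc = 1 variant is what the new "preserve dst IP = [::] (BSD'ism)" test uses: it lets the datagram through without rewriting the address, so the kernel's own wildcard handling is what gets exercised.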
6134 +diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
6135 +index d8313d0438b74..b90dff8d3a94b 100755
6136 +--- a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
6137 ++++ b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
6138 +@@ -1,7 +1,7 @@
6139 + #!/bin/bash
6140 + # SPDX-License-Identifier: GPL-2.0
6141 +
6142 +-ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding"
6143 ++ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn"
6144 + NUM_NETIFS=4
6145 + CHECK_TC="yes"
6146 + source lib.sh
6147 +@@ -96,6 +96,51 @@ flooding()
6148 + flood_test $swp2 $h1 $h2
6149 + }
6150 +
6151 ++vlan_deletion()
6152 ++{
6153 ++ # Test that the deletion of a VLAN on a bridge port does not affect
6154 ++ # the PVID VLAN
6155 ++ log_info "Add and delete a VLAN on bridge port $swp1"
6156 ++
6157 ++ bridge vlan add vid 10 dev $swp1
6158 ++ bridge vlan del vid 10 dev $swp1
6159 ++
6160 ++ ping_ipv4
6161 ++ ping_ipv6
6162 ++}
6163 ++
6164 ++extern_learn()
6165 ++{
6166 ++ local mac=de:ad:be:ef:13:37
6167 ++ local ageing_time
6168 ++
6169 ++ # Test that externally learned FDB entries can roam, but not age out
6170 ++ RET=0
6171 ++
6172 ++ bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn vlan 1
6173 ++
6174 ++ bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
6175 ++ check_err $? "Did not find FDB entry when should"
6176 ++
6177 ++ # Wait for 10 seconds after the ageing time to make sure the FDB entry
6178 ++ # was not aged out
6179 ++ ageing_time=$(bridge_ageing_time_get br0)
6180 ++ sleep $((ageing_time + 10))
6181 ++
6182 ++ bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
6183 ++ check_err $? "FDB entry was aged out when should not"
6184 ++
6185 ++ $MZ $h2 -c 1 -p 64 -a $mac -t ip -q
6186 ++
6187 ++ bridge fdb show brport $swp2 | grep -q de:ad:be:ef:13:37
6188 ++ check_err $? "FDB entry did not roam when should"
6189 ++
6190 ++ log_test "Externally learned FDB entry - ageing & roaming"
6191 ++
6192 ++ bridge fdb del de:ad:be:ef:13:37 dev $swp2 master vlan 1 &> /dev/null
6193 ++ bridge fdb del de:ad:be:ef:13:37 dev $swp1 master vlan 1 &> /dev/null
6194 ++}
6195 ++
6196 + trap cleanup EXIT
6197 +
6198 + setup_prepare
6199 +diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
6200 +index 637ea0219617f..0da3545cabdb6 100644
6201 +--- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
6202 ++++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
6203 +@@ -17,7 +17,7 @@
6204 + "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 2",
6205 + "expExitCode": "0",
6206 + "verifyCmd": "$TC actions get action ife index 2",
6207 +- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 2",
6208 ++ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 2",
6209 + "matchCount": "1",
6210 + "teardown": [
6211 + "$TC actions flush action ife"
6212 +@@ -41,7 +41,7 @@
6213 + "cmdUnderTest": "$TC actions add action ife encode use mark 10 pipe index 2",
6214 + "expExitCode": "0",
6215 + "verifyCmd": "$TC actions get action ife index 2",
6216 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark.*index 2",
6217 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark.*index 2",
6218 + "matchCount": "1",
6219 + "teardown": [
6220 + "$TC actions flush action ife"
6221 +@@ -65,7 +65,7 @@
6222 + "cmdUnderTest": "$TC actions add action ife encode allow mark continue index 2",
6223 + "expExitCode": "0",
6224 + "verifyCmd": "$TC actions get action ife index 2",
6225 +- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*allow mark.*index 2",
6226 ++ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*allow mark.*index 2",
6227 + "matchCount": "1",
6228 + "teardown": [
6229 + "$TC actions flush action ife"
6230 +@@ -89,7 +89,7 @@
6231 + "cmdUnderTest": "$TC actions add action ife encode use mark 789 drop index 2",
6232 + "expExitCode": "0",
6233 + "verifyCmd": "$TC actions get action ife index 2",
6234 +- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*use mark 789.*index 2",
6235 ++ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*use mark 789.*index 2",
6236 + "matchCount": "1",
6237 + "teardown": [
6238 + "$TC actions flush action ife"
6239 +@@ -113,7 +113,7 @@
6240 + "cmdUnderTest": "$TC actions add action ife encode use mark 656768 reclassify index 2",
6241 + "expExitCode": "0",
6242 + "verifyCmd": "$TC actions get action ife index 2",
6243 +- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 656768.*index 2",
6244 ++ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 656768.*index 2",
6245 + "matchCount": "1",
6246 + "teardown": [
6247 + "$TC actions flush action ife"
6248 +@@ -137,7 +137,7 @@
6249 + "cmdUnderTest": "$TC actions add action ife encode use mark 65 jump 1 index 2",
6250 + "expExitCode": "0",
6251 + "verifyCmd": "$TC actions get action ife index 2",
6252 +- "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0xED3E.*use mark 65.*index 2",
6253 ++ "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0[xX]ED3E.*use mark 65.*index 2",
6254 + "matchCount": "1",
6255 + "teardown": [
6256 + "$TC actions flush action ife"
6257 +@@ -161,7 +161,7 @@
6258 + "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295 reclassify index 90",
6259 + "expExitCode": "0",
6260 + "verifyCmd": "$TC actions get action ife index 90",
6261 +- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 4294967295.*index 90",
6262 ++ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 4294967295.*index 90",
6263 + "matchCount": "1",
6264 + "teardown": [
6265 + "$TC actions flush action ife"
6266 +@@ -185,7 +185,7 @@
6267 + "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295999 pipe index 90",
6268 + "expExitCode": "255",
6269 + "verifyCmd": "$TC actions get action ife index 90",
6270 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark 4294967295999.*index 90",
6271 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark 4294967295999.*index 90",
6272 + "matchCount": "0",
6273 + "teardown": []
6274 + },
6275 +@@ -207,7 +207,7 @@
6276 + "cmdUnderTest": "$TC actions add action ife encode allow prio pass index 9",
6277 + "expExitCode": "0",
6278 + "verifyCmd": "$TC actions get action ife index 9",
6279 +- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow prio.*index 9",
6280 ++ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow prio.*index 9",
6281 + "matchCount": "1",
6282 + "teardown": [
6283 + "$TC actions flush action ife"
6284 +@@ -231,7 +231,7 @@
6285 + "cmdUnderTest": "$TC actions add action ife encode use prio 7 pipe index 9",
6286 + "expExitCode": "0",
6287 + "verifyCmd": "$TC actions get action ife index 9",
6288 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 7.*index 9",
6289 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 7.*index 9",
6290 + "matchCount": "1",
6291 + "teardown": [
6292 + "$TC actions flush action ife"
6293 +@@ -255,7 +255,7 @@
6294 + "cmdUnderTest": "$TC actions add action ife encode use prio 3 continue index 9",
6295 + "expExitCode": "0",
6296 + "verifyCmd": "$TC actions get action ife index 9",
6297 +- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use prio 3.*index 9",
6298 ++ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use prio 3.*index 9",
6299 + "matchCount": "1",
6300 + "teardown": [
6301 + "$TC actions flush action ife"
6302 +@@ -279,7 +279,7 @@
6303 + "cmdUnderTest": "$TC actions add action ife encode allow prio drop index 9",
6304 + "expExitCode": "0",
6305 + "verifyCmd": "$TC actions get action ife index 9",
6306 +- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow prio.*index 9",
6307 ++ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow prio.*index 9",
6308 + "matchCount": "1",
6309 + "teardown": [
6310 + "$TC actions flush action ife"
6311 +@@ -303,7 +303,7 @@
6312 + "cmdUnderTest": "$TC actions add action ife encode use prio 998877 reclassify index 9",
6313 + "expExitCode": "0",
6314 + "verifyCmd": "$TC actions get action ife index 9",
6315 +- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 998877.*index 9",
6316 ++ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 998877.*index 9",
6317 + "matchCount": "1",
6318 + "teardown": [
6319 + "$TC actions flush action ife"
6320 +@@ -327,7 +327,7 @@
6321 + "cmdUnderTest": "$TC actions add action ife encode use prio 998877 jump 10 index 9",
6322 + "expExitCode": "0",
6323 + "verifyCmd": "$TC actions get action ife index 9",
6324 +- "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0xED3E.*use prio 998877.*index 9",
6325 ++ "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0[xX]ED3E.*use prio 998877.*index 9",
6326 + "matchCount": "1",
6327 + "teardown": [
6328 + "$TC actions flush action ife"
6329 +@@ -351,7 +351,7 @@
6330 + "cmdUnderTest": "$TC actions add action ife encode use prio 4294967295 reclassify index 99",
6331 + "expExitCode": "0",
6332 + "verifyCmd": "$TC actions get action ife index 99",
6333 +- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 4294967295.*index 99",
6334 ++ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 4294967295.*index 99",
6335 + "matchCount": "1",
6336 + "teardown": [
6337 + "$TC actions flush action ife"
6338 +@@ -375,7 +375,7 @@
6339 + "cmdUnderTest": "$TC actions add action ife encode use prio 4294967298 pipe index 99",
6340 + "expExitCode": "255",
6341 + "verifyCmd": "$TC actions get action ife index 99",
6342 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 4294967298.*index 99",
6343 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 4294967298.*index 99",
6344 + "matchCount": "0",
6345 + "teardown": []
6346 + },
6347 +@@ -397,7 +397,7 @@
6348 + "cmdUnderTest": "$TC actions add action ife encode allow tcindex pass index 1",
6349 + "expExitCode": "0",
6350 + "verifyCmd": "$TC actions get action ife index 1",
6351 +- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex.*index 1",
6352 ++ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex.*index 1",
6353 + "matchCount": "1",
6354 + "teardown": [
6355 + "$TC actions flush action ife"
6356 +@@ -421,7 +421,7 @@
6357 + "cmdUnderTest": "$TC actions add action ife encode use tcindex 111 pipe index 1",
6358 + "expExitCode": "0",
6359 + "verifyCmd": "$TC actions get action ife index 1",
6360 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 111.*index 1",
6361 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 111.*index 1",
6362 + "matchCount": "1",
6363 + "teardown": [
6364 + "$TC actions flush action ife"
6365 +@@ -445,7 +445,7 @@
6366 + "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
6367 + "expExitCode": "0",
6368 + "verifyCmd": "$TC actions get action ife index 1",
6369 +- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
6370 ++ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
6371 + "matchCount": "1",
6372 + "teardown": [
6373 + "$TC actions flush action ife"
6374 +@@ -469,7 +469,7 @@
6375 + "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
6376 + "expExitCode": "0",
6377 + "verifyCmd": "$TC actions get action ife index 1",
6378 +- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
6379 ++ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
6380 + "matchCount": "1",
6381 + "teardown": [
6382 + "$TC actions flush action ife"
6383 +@@ -493,7 +493,7 @@
6384 + "cmdUnderTest": "$TC actions add action ife encode allow tcindex drop index 77",
6385 + "expExitCode": "0",
6386 + "verifyCmd": "$TC actions get action ife index 77",
6387 +- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow tcindex.*index 77",
6388 ++ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow tcindex.*index 77",
6389 + "matchCount": "1",
6390 + "teardown": [
6391 + "$TC actions flush action ife"
6392 +@@ -517,7 +517,7 @@
6393 + "cmdUnderTest": "$TC actions add action ife encode allow tcindex reclassify index 77",
6394 + "expExitCode": "0",
6395 + "verifyCmd": "$TC actions get action ife index 77",
6396 +- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*allow tcindex.*index 77",
6397 ++ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*allow tcindex.*index 77",
6398 + "matchCount": "1",
6399 + "teardown": [
6400 + "$TC actions flush action ife"
6401 +@@ -541,7 +541,7 @@
6402 + "cmdUnderTest": "$TC actions add action ife encode allow tcindex jump 999 index 77",
6403 + "expExitCode": "0",
6404 + "verifyCmd": "$TC actions get action ife index 77",
6405 +- "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0xED3E.*allow tcindex.*index 77",
6406 ++ "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0[xX]ED3E.*allow tcindex.*index 77",
6407 + "matchCount": "1",
6408 + "teardown": [
6409 + "$TC actions flush action ife"
6410 +@@ -565,7 +565,7 @@
6411 + "cmdUnderTest": "$TC actions add action ife encode use tcindex 65535 pass index 1",
6412 + "expExitCode": "0",
6413 + "verifyCmd": "$TC actions get action ife index 1",
6414 +- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1",
6415 ++ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*use tcindex 65535.*index 1",
6416 + "matchCount": "1",
6417 + "teardown": [
6418 + "$TC actions flush action ife"
6419 +@@ -589,7 +589,7 @@
6420 + "cmdUnderTest": "$TC actions add action ife encode use tcindex 65539 pipe index 1",
6421 + "expExitCode": "255",
6422 + "verifyCmd": "$TC actions get action ife index 1",
6423 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 65539.*index 1",
6424 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 65539.*index 1",
6425 + "matchCount": "0",
6426 + "teardown": []
6427 + },
6428 +@@ -611,7 +611,7 @@
6429 + "cmdUnderTest": "$TC actions add action ife encode allow mark src 00:11:22:33:44:55 pipe index 1",
6430 + "expExitCode": "0",
6431 + "verifyCmd": "$TC actions get action ife index 1",
6432 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow mark src 00:11:22:33:44:55.*index 1",
6433 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow mark src 00:11:22:33:44:55.*index 1",
6434 + "matchCount": "1",
6435 + "teardown": [
6436 + "$TC actions flush action ife"
6437 +@@ -635,7 +635,7 @@
6438 + "cmdUnderTest": "$TC actions add action ife encode use prio 9876 dst 00:11:22:33:44:55 reclassify index 1",
6439 + "expExitCode": "0",
6440 + "verifyCmd": "$TC actions get action ife index 1",
6441 +- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
6442 ++ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
6443 + "matchCount": "1",
6444 + "teardown": [
6445 + "$TC actions flush action ife"
6446 +@@ -659,7 +659,7 @@
6447 + "cmdUnderTest": "$TC actions add action ife encode allow tcindex src 00:aa:bb:cc:dd:ee dst 00:11:22:33:44:55 pass index 11",
6448 + "expExitCode": "0",
6449 + "verifyCmd": "$TC actions get action ife index 11",
6450 +- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
6451 ++ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
6452 + "matchCount": "1",
6453 + "teardown": [
6454 + "$TC actions flush action ife"
6455 +@@ -683,7 +683,7 @@
6456 + "cmdUnderTest": "$TC actions add action ife encode use mark 7 type 0xfefe pass index 1",
6457 + "expExitCode": "0",
6458 + "verifyCmd": "$TC actions get action ife index 1",
6459 +- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xFEFE.*use mark 7.*index 1",
6460 ++ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]FEFE.*use mark 7.*index 1",
6461 + "matchCount": "1",
6462 + "teardown": [
6463 + "$TC actions flush action ife"
6464 +@@ -707,7 +707,7 @@
6465 + "cmdUnderTest": "$TC actions add action ife encode use prio 444 type 0xabba pipe index 21",
6466 + "expExitCode": "0",
6467 + "verifyCmd": "$TC actions get action ife index 21",
6468 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xABBA.*use prio 444.*index 21",
6469 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ABBA.*use prio 444.*index 21",
6470 + "matchCount": "1",
6471 + "teardown": [
6472 + "$TC actions flush action ife"
6473 +@@ -731,7 +731,7 @@
6474 + "cmdUnderTest": "$TC actions add action ife encode use tcindex 5000 type 0xabcd reclassify index 21",
6475 + "expExitCode": "0",
6476 + "verifyCmd": "$TC actions get action ife index 21",
6477 +- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xABCD.*use tcindex 5000.*index 21",
6478 ++ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ABCD.*use tcindex 5000.*index 21",
6479 + "matchCount": "1",
6480 + "teardown": [
6481 + "$TC actions flush action ife"
6482 +@@ -739,7 +739,7 @@
6483 + },
6484 + {
6485 + "id": "fac3",
6486 +- "name": "Create valid ife encode action with index at 32-bit maximnum",
6487 ++ "name": "Create valid ife encode action with index at 32-bit maximum",
6488 + "category": [
6489 + "actions",
6490 + "ife"
6491 +@@ -755,7 +755,7 @@
6492 + "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295",
6493 + "expExitCode": "0",
6494 + "verifyCmd": "$TC actions get action ife index 4294967295",
6495 +- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295",
6496 ++ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295",
6497 + "matchCount": "1",
6498 + "teardown": [
6499 + "$TC actions flush action ife"
6500 +@@ -779,7 +779,7 @@
6501 + "cmdUnderTest": "$TC actions add action ife decode pass index 1",
6502 + "expExitCode": "0",
6503 + "verifyCmd": "$TC actions get action ife index 1",
6504 +- "matchPattern": "action order [0-9]*: ife decode action pass.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
6505 ++ "matchPattern": "action order [0-9]*: ife decode action pass.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
6506 + "matchCount": "1",
6507 + "teardown": [
6508 + "$TC actions flush action ife"
6509 +@@ -803,7 +803,7 @@
6510 + "cmdUnderTest": "$TC actions add action ife decode pipe index 1",
6511 + "expExitCode": "0",
6512 + "verifyCmd": "$TC actions get action ife index 1",
6513 +- "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
6514 ++ "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
6515 + "matchCount": "1",
6516 + "teardown": [
6517 + "$TC actions flush action ife"
6518 +@@ -827,7 +827,7 @@
6519 + "cmdUnderTest": "$TC actions add action ife decode continue index 1",
6520 + "expExitCode": "0",
6521 + "verifyCmd": "$TC actions get action ife index 1",
6522 +- "matchPattern": "action order [0-9]*: ife decode action continue.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
6523 ++ "matchPattern": "action order [0-9]*: ife decode action continue.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
6524 + "matchCount": "1",
6525 + "teardown": [
6526 + "$TC actions flush action ife"
6527 +@@ -851,7 +851,7 @@
6528 + "cmdUnderTest": "$TC actions add action ife decode drop index 1",
6529 + "expExitCode": "0",
6530 + "verifyCmd": "$TC actions get action ife index 1",
6531 +- "matchPattern": "action order [0-9]*: ife decode action drop.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
6532 ++ "matchPattern": "action order [0-9]*: ife decode action drop.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
6533 + "matchCount": "1",
6534 + "teardown": [
6535 + "$TC actions flush action ife"
6536 +@@ -875,7 +875,7 @@
6537 + "cmdUnderTest": "$TC actions add action ife decode reclassify index 1",
6538 + "expExitCode": "0",
6539 + "verifyCmd": "$TC actions get action ife index 1",
6540 +- "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
6541 ++ "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
6542 + "matchCount": "1",
6543 + "teardown": [
6544 + "$TC actions flush action ife"
6545 +@@ -899,7 +899,7 @@
6546 + "cmdUnderTest": "$TC actions add action ife decode jump 10 index 1",
6547 + "expExitCode": "0",
6548 + "verifyCmd": "$TC actions get action ife index 1",
6549 +- "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
6550 ++ "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
6551 + "matchCount": "1",
6552 + "teardown": [
6553 + "$TC actions flush action ife"
6554 +@@ -923,7 +923,7 @@
6555 + "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295999",
6556 + "expExitCode": "255",
6557 + "verifyCmd": "$TC actions get action ife index 4294967295999",
6558 +- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295999",
6559 ++ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295999",
6560 + "matchCount": "0",
6561 + "teardown": []
6562 + },
6563 +@@ -945,7 +945,7 @@
6564 + "cmdUnderTest": "$TC actions add action ife encode allow mark kuka index 4",
6565 + "expExitCode": "255",
6566 + "verifyCmd": "$TC actions get action ife index 4",
6567 +- "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0xED3E.*allow mark.*index 4",
6568 ++ "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0[xX]ED3E.*allow mark.*index 4",
6569 + "matchCount": "0",
6570 + "teardown": []
6571 + },
6572 +@@ -967,7 +967,7 @@
6573 + "cmdUnderTest": "$TC actions add action ife encode allow prio pipe index 4 cookie aabbccddeeff112233445566778800a1",
6574 + "expExitCode": "0",
6575 + "verifyCmd": "$TC actions get action ife index 4",
6576 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
6577 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
6578 + "matchCount": "1",
6579 + "teardown": [
6580 + "$TC actions flush action ife"
6581 +@@ -991,7 +991,7 @@
6582 + "cmdUnderTest": "$TC actions add action ife encode allow foo pipe index 4",
6583 + "expExitCode": "255",
6584 + "verifyCmd": "$TC actions get action ife index 4",
6585 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow foo.*index 4",
6586 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow foo.*index 4",
6587 + "matchCount": "0",
6588 + "teardown": []
6589 + },
6590 +@@ -1013,7 +1013,7 @@
6591 + "cmdUnderTest": "$TC actions add action ife encode allow prio type 70000 pipe index 4",
6592 + "expExitCode": "255",
6593 + "verifyCmd": "$TC actions get action ife index 4",
6594 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0x11170.*allow prio.*index 4",
6595 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]11170.*allow prio.*index 4",
6596 + "matchCount": "0",
6597 + "teardown": []
6598 + },
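
All of the ife.json edits above make the matchPattern regexes tolerant of how different iproute2 versions print the IFE type: 0xED3E may come out as 0XED3E, and the default type 0 may be printed without the hex prefix at all, hence 0[xX]ED3E and 0(x0)?. A standalone POSIX-regex check (not part of the patch) showing that the loosened fragment accepts both spellings:

    #include <regex.h>
    #include <stdio.h>

    int main(void)
    {
            const char *pat = "type 0[xX]ED3E"; /* loosened fragment */
            const char *out[] = { "type 0xED3E", "type 0XED3E" };
            regex_t re;

            if (regcomp(&re, pat, REG_EXTENDED))
                    return 1;
            for (int i = 0; i < 2; i++)
                    printf("\"%s\": %s\n", out[i],
                           regexec(&re, out[i], 0, NULL, 0) == 0 ?
                           "match" : "no match");
            regfree(&re);
            return 0;
    }

Both strings match, so the selftests pass regardless of which case the installed tc binary emits.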
6599 +diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
6600 +index 10b2d894e4362..e7e15a7336b6d 100644
6601 +--- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
6602 ++++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
6603 +@@ -81,35 +81,6 @@
6604 + ]
6605 + ]
6606 + },
6607 +- {
6608 +- "id": "ba4e",
6609 +- "name": "Add tunnel_key set action with missing mandatory id parameter",
6610 +- "category": [
6611 +- "actions",
6612 +- "tunnel_key"
6613 +- ],
6614 +- "setup": [
6615 +- [
6616 +- "$TC actions flush action tunnel_key",
6617 +- 0,
6618 +- 1,
6619 +- 255
6620 +- ]
6621 +- ],
6622 +- "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2",
6623 +- "expExitCode": "255",
6624 +- "verifyCmd": "$TC actions list action tunnel_key",
6625 +- "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2",
6626 +- "matchCount": "0",
6627 +- "teardown": [
6628 +- [
6629 +- "$TC actions flush action tunnel_key",
6630 +- 0,
6631 +- 1,
6632 +- 255
6633 +- ]
6634 +- ]
6635 +- },
6636 + {
6637 + "id": "a5e0",
6638 + "name": "Add tunnel_key set action with invalid src_ip parameter",
6639 +@@ -634,7 +605,7 @@
6640 + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
6641 + "expExitCode": "0",
6642 + "verifyCmd": "$TC actions get action tunnel_key index 4",
6643 +- "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
6644 ++ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
6645 + "matchCount": "1",
6646 + "teardown": [
6647 + "$TC actions flush action tunnel_key"