Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.20 commit in: /
Date: Wed, 27 Feb 2019 11:24:57
Message-Id: 1551266663.d6703e74d2be53d9effce9da6731c14dc249ddcb.mpagano@gentoo
1 commit: d6703e74d2be53d9effce9da6731c14dc249ddcb
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Feb 27 11:24:23 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Feb 27 11:24:23 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d6703e74
7
8 proj/linux-patches: Linux patch 4.20.13
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1012_linux-4.20.13.patch | 8585 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 8589 insertions(+)
15
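The 0000_README hunk below is the part that actually wires the new patch into the series: genpatches-style trees list every patch in 0000_README, and the numeric filename prefix (1012_ here) fixes where it falls in the apply sequence. As a rough, hypothetical sketch of that flow — this is not the genpatches or gentoo-sources ebuild code, and the helper names and paths are illustrative assumptions — reading the Patch: entries and feeding each file to patch -p1 in sorted order looks like:

	#!/usr/bin/env python3
	# Hypothetical helper, NOT part of genpatches: apply every patch named
	# in a linux-patches 0000_README to a kernel tree, lowest number first.
	import re
	import subprocess
	import sys
	from pathlib import Path

	def listed_patches(readme: Path):
	    # Entries look like: "Patch:  1012_linux-4.20.13.patch"
	    for line in readme.read_text().splitlines():
	        m = re.match(r"Patch:\s+(\S+\.patch)", line)
	        if m:
	            yield m.group(1)

	def apply_series(patch_dir: Path, kernel_tree: Path) -> None:
	    # The numeric prefix (1011_, 1012_, ...) encodes the apply order.
	    for name in sorted(listed_patches(patch_dir / "0000_README")):
	        print(f"applying {name}")
	        subprocess.run(["patch", "-p1", "-i", str(patch_dir / name)],
	                       cwd=kernel_tree, check=True)

	if __name__ == "__main__":
	    apply_series(Path(sys.argv[1]), Path(sys.argv[2]))

Invoked, again purely as an illustration, as: python3 apply_series.py <linux-patches checkout> <kernel source tree>.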
16 diff --git a/0000_README b/0000_README
17 index 1c8cc61..8e75833 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -91,6 +91,10 @@ Patch: 1011_linux-4.20.12.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.20.12
23
24 +Patch: 1012_linux-4.20.13.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.20.13
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1012_linux-4.20.13.patch b/1012_linux-4.20.13.patch
33 new file mode 100644
34 index 0000000..706b92d
35 --- /dev/null
36 +++ b/1012_linux-4.20.13.patch
37 @@ -0,0 +1,8585 @@
38 +diff --git a/Makefile b/Makefile
39 +index 0a92b4e116210..c83abc1e689b4 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 20
46 +-SUBLEVEL = 12
47 ++SUBLEVEL = 13
48 + EXTRAVERSION =
49 + NAME = Shy Crocodile
50 +
51 +diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
52 +index f393b663413e4..2ad77fb43639c 100644
53 +--- a/arch/arc/include/asm/cache.h
54 ++++ b/arch/arc/include/asm/cache.h
55 +@@ -52,6 +52,17 @@
56 + #define cache_line_size() SMP_CACHE_BYTES
57 + #define ARCH_DMA_MINALIGN SMP_CACHE_BYTES
58 +
59 ++/*
60 ++ * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses
61 ++ * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantees runtime 64-bit
62 ++ * alignment for any atomic64_t embedded in a buffer.
63 ++ * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed
64 ++ * value of 4 (and not 8) in ARC ABI.
65 ++ */
66 ++#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC)
67 ++#define ARCH_SLAB_MINALIGN 8
68 ++#endif
69 ++
70 + extern void arc_cache_init(void);
71 + extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
72 + extern void read_decode_cache_bcr(void);
73 +diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
74 +index 8b90d25a15cca..1f945d0f40daa 100644
75 +--- a/arch/arc/kernel/head.S
76 ++++ b/arch/arc/kernel/head.S
77 +@@ -17,6 +17,7 @@
78 + #include <asm/entry.h>
79 + #include <asm/arcregs.h>
80 + #include <asm/cache.h>
81 ++#include <asm/irqflags.h>
82 +
83 + .macro CPU_EARLY_SETUP
84 +
85 +@@ -47,6 +48,15 @@
86 + sr r5, [ARC_REG_DC_CTRL]
87 +
88 + 1:
89 ++
90 ++#ifdef CONFIG_ISA_ARCV2
91 ++ ; Unaligned access is disabled at reset, so re-enable early as
92 ++ ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
93 ++ ; by default
94 ++ lr r5, [status32]
95 ++ bset r5, r5, STATUS_AD_BIT
96 ++ kflag r5
97 ++#endif
98 + .endm
99 +
100 + .section .init.text, "ax",@progbits
101 +@@ -93,9 +103,9 @@ ENTRY(stext)
102 + #ifdef CONFIG_ARC_UBOOT_SUPPORT
103 + ; Uboot - kernel ABI
104 + ; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
105 +- ; r1 = magic number (board identity, unused as of now
106 ++ ; r1 = magic number (always zero as of now)
107 + ; r2 = pointer to uboot provided cmdline or external DTB in mem
108 +- ; These are handled later in setup_arch()
109 ++ ; These are handled later in handle_uboot_args()
110 + st r0, [@uboot_tag]
111 + st r2, [@uboot_arg]
112 + #endif
113 +diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
114 +index eea8c5ce63350..80dd1a716ca72 100644
115 +--- a/arch/arc/kernel/setup.c
116 ++++ b/arch/arc/kernel/setup.c
117 +@@ -452,43 +452,80 @@ void setup_processor(void)
118 + arc_chk_core_config();
119 + }
120 +
121 +-static inline int is_kernel(unsigned long addr)
122 ++static inline bool uboot_arg_invalid(unsigned long addr)
123 + {
124 +- if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
125 +- return 1;
126 +- return 0;
127 ++ /*
128 ++ * Check that it is an untranslated address (although MMU is not enabled
129 ++ * yet, it being a high address ensures this is not by fluke)
130 ++ */
131 ++ if (addr < PAGE_OFFSET)
132 ++ return true;
133 ++
134 ++ /* Check that address doesn't clobber resident kernel image */
135 ++ return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
136 + }
137 +
138 +-void __init setup_arch(char **cmdline_p)
139 ++#define IGNORE_ARGS "Ignore U-boot args: "
140 ++
141 ++/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
142 ++#define UBOOT_TAG_NONE 0
143 ++#define UBOOT_TAG_CMDLINE 1
144 ++#define UBOOT_TAG_DTB 2
145 ++
146 ++void __init handle_uboot_args(void)
147 + {
148 ++ bool use_embedded_dtb = true;
149 ++ bool append_cmdline = false;
150 ++
151 + #ifdef CONFIG_ARC_UBOOT_SUPPORT
152 +- /* make sure that uboot passed pointer to cmdline/dtb is valid */
153 +- if (uboot_tag && is_kernel((unsigned long)uboot_arg))
154 +- panic("Invalid uboot arg\n");
155 ++ /* check that we know this tag */
156 ++ if (uboot_tag != UBOOT_TAG_NONE &&
157 ++ uboot_tag != UBOOT_TAG_CMDLINE &&
158 ++ uboot_tag != UBOOT_TAG_DTB) {
159 ++ pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
160 ++ goto ignore_uboot_args;
161 ++ }
162 ++
163 ++ if (uboot_tag != UBOOT_TAG_NONE &&
164 ++ uboot_arg_invalid((unsigned long)uboot_arg)) {
165 ++ pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
166 ++ goto ignore_uboot_args;
167 ++ }
168 ++
169 ++ /* see if U-boot passed an external Device Tree blob */
170 ++ if (uboot_tag == UBOOT_TAG_DTB) {
171 ++ machine_desc = setup_machine_fdt((void *)uboot_arg);
172 +
173 +- /* See if u-boot passed an external Device Tree blob */
174 +- machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */
175 +- if (!machine_desc)
176 ++ /* external Device Tree blob is invalid - use embedded one */
177 ++ use_embedded_dtb = !machine_desc;
178 ++ }
179 ++
180 ++ if (uboot_tag == UBOOT_TAG_CMDLINE)
181 ++ append_cmdline = true;
182 ++
183 ++ignore_uboot_args:
184 + #endif
185 +- {
186 +- /* No, so try the embedded one */
187 ++
188 ++ if (use_embedded_dtb) {
189 + machine_desc = setup_machine_fdt(__dtb_start);
190 + if (!machine_desc)
191 + panic("Embedded DT invalid\n");
192 ++ }
193 +
194 +- /*
195 +- * If we are here, it is established that @uboot_arg didn't
196 +- * point to DT blob. Instead if u-boot says it is cmdline,
197 +- * append to embedded DT cmdline.
198 +- * setup_machine_fdt() would have populated @boot_command_line
199 +- */
200 +- if (uboot_tag == 1) {
201 +- /* Ensure a whitespace between the 2 cmdlines */
202 +- strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
203 +- strlcat(boot_command_line, uboot_arg,
204 +- COMMAND_LINE_SIZE);
205 +- }
206 ++ /*
207 ++ * NOTE: @boot_command_line is populated by setup_machine_fdt() so this
208 ++ * append processing can only happen after.
209 ++ */
210 ++ if (append_cmdline) {
211 ++ /* Ensure a whitespace between the 2 cmdlines */
212 ++ strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
213 ++ strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
214 + }
215 ++}
216 ++
217 ++void __init setup_arch(char **cmdline_p)
218 ++{
219 ++ handle_uboot_args();
220 +
221 + /* Save unparsed command line copy for /proc/cmdline */
222 + *cmdline_p = boot_command_line;
223 +diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
224 +index 2c118a6ab3587..0dc23fc227ed2 100644
225 +--- a/arch/arm/probes/kprobes/opt-arm.c
226 ++++ b/arch/arm/probes/kprobes/opt-arm.c
227 +@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
228 + }
229 +
230 + /* Copy arch-dep-instance from template. */
231 +- memcpy(code, (unsigned char *)optprobe_template_entry,
232 ++ memcpy(code, (unsigned long *)&optprobe_template_entry,
233 + TMPL_END_IDX * sizeof(kprobe_opcode_t));
234 +
235 + /* Adjust buffer according to instruction. */
236 +diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
237 +index 951c4231bdb85..4c47b3fd958b6 100644
238 +--- a/arch/mips/configs/ath79_defconfig
239 ++++ b/arch/mips/configs/ath79_defconfig
240 +@@ -71,6 +71,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
241 + # CONFIG_SERIAL_8250_PCI is not set
242 + CONFIG_SERIAL_8250_NR_UARTS=1
243 + CONFIG_SERIAL_8250_RUNTIME_UARTS=1
244 ++CONFIG_SERIAL_OF_PLATFORM=y
245 + CONFIG_SERIAL_AR933X=y
246 + CONFIG_SERIAL_AR933X_CONSOLE=y
247 + # CONFIG_HW_RANDOM is not set
248 +diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
249 +index 4c41ed0a637e5..415a08376c362 100644
250 +--- a/arch/mips/jazz/jazzdma.c
251 ++++ b/arch/mips/jazz/jazzdma.c
252 +@@ -74,14 +74,15 @@ static int __init vdma_init(void)
253 + get_order(VDMA_PGTBL_SIZE));
254 + BUG_ON(!pgtbl);
255 + dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
256 +- pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl);
257 ++ pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
258 +
259 + /*
260 + * Clear the R4030 translation table
261 + */
262 + vdma_pgtbl_init();
263 +
264 +- r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl));
265 ++ r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
266 ++ CPHYSADDR((unsigned long)pgtbl));
267 + r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
268 + r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
269 +
270 +diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
271 +index aeb7b1b0f2024..252c00985c973 100644
272 +--- a/arch/mips/net/ebpf_jit.c
273 ++++ b/arch/mips/net/ebpf_jit.c
274 +@@ -343,12 +343,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
275 + const struct bpf_prog *prog = ctx->skf;
276 + int stack_adjust = ctx->stack_size;
277 + int store_offset = stack_adjust - 8;
278 ++ enum reg_val_type td;
279 + int r0 = MIPS_R_V0;
280 +
281 +- if (dest_reg == MIPS_R_RA &&
282 +- get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
283 ++ if (dest_reg == MIPS_R_RA) {
284 + /* Don't let zero extended value escape. */
285 +- emit_instr(ctx, sll, r0, r0, 0);
286 ++ td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
287 ++ if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
288 ++ emit_instr(ctx, sll, r0, r0, 0);
289 ++ }
290 +
291 + if (ctx->flags & EBPF_SAVE_RA) {
292 + emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
293 +diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
294 +index 2582df1c529bb..0964c236e3e5a 100644
295 +--- a/arch/parisc/kernel/ptrace.c
296 ++++ b/arch/parisc/kernel/ptrace.c
297 +@@ -308,15 +308,29 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
298 +
299 + long do_syscall_trace_enter(struct pt_regs *regs)
300 + {
301 +- if (test_thread_flag(TIF_SYSCALL_TRACE) &&
302 +- tracehook_report_syscall_entry(regs)) {
303 ++ if (test_thread_flag(TIF_SYSCALL_TRACE)) {
304 ++ int rc = tracehook_report_syscall_entry(regs);
305 ++
306 + /*
307 +- * Tracing decided this syscall should not happen or the
308 +- * debugger stored an invalid system call number. Skip
309 +- * the system call and the system call restart handling.
310 ++ * As tracesys_next does not set %r28 to -ENOSYS
311 ++ * when %r20 is set to -1, initialize it here.
312 + */
313 +- regs->gr[20] = -1UL;
314 +- goto out;
315 ++ regs->gr[28] = -ENOSYS;
316 ++
317 ++ if (rc) {
318 ++ /*
319 ++ * A nonzero return code from
320 ++ * tracehook_report_syscall_entry() tells us
321 ++ * to prevent the syscall execution. Skip
322 ++ * the syscall and the syscall restart handling.
323 ++ *
324 ++ * Note that the tracer may also just change
325 ++ * regs->gr[20] to an invalid syscall number,
326 ++ * that is handled by tracesys_next.
327 ++ */
328 ++ regs->gr[20] = -1UL;
329 ++ return -1;
330 ++ }
331 + }
332 +
333 + /* Do the secure computing check after ptrace. */
334 +@@ -340,7 +354,6 @@ long do_syscall_trace_enter(struct pt_regs *regs)
335 + regs->gr[24] & 0xffffffff,
336 + regs->gr[23] & 0xffffffff);
337 +
338 +-out:
339 + /*
340 + * Sign extend the syscall number to 64bit since it may have been
341 + * modified by a compat ptrace call
342 +diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
343 +index 3b67b9533c82f..438512759e827 100644
344 +--- a/arch/powerpc/kernel/head_8xx.S
345 ++++ b/arch/powerpc/kernel/head_8xx.S
346 +@@ -927,11 +927,12 @@ start_here:
347 +
348 + /* set up the PTE pointers for the Abatron bdiGDB.
349 + */
350 +- tovirt(r6,r6)
351 + lis r5, abatron_pteptrs@h
352 + ori r5, r5, abatron_pteptrs@l
353 + stw r5, 0xf0(0) /* Must match your Abatron config file */
354 + tophys(r5,r5)
355 ++ lis r6, swapper_pg_dir@h
356 ++ ori r6, r6, swapper_pg_dir@l
357 + stw r6, 0(r5)
358 +
359 + /* Now turn on the MMU for real! */
360 +diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
361 +index a153257bf7d98..d62fa148558b9 100644
362 +--- a/arch/s390/kvm/vsie.c
363 ++++ b/arch/s390/kvm/vsie.c
364 +@@ -297,7 +297,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
365 + scb_s->crycbd = 0;
366 +
367 + apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
368 +- if (!apie_h && !key_msk)
369 ++ if (!apie_h && (!key_msk || fmt_o == CRYCB_FORMAT0))
370 + return 0;
371 +
372 + if (!crycb_addr)
373 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
374 +index e5c0174e330e5..5a0cbc717997b 100644
375 +--- a/arch/x86/include/asm/kvm_host.h
376 ++++ b/arch/x86/include/asm/kvm_host.h
377 +@@ -299,6 +299,7 @@ union kvm_mmu_extended_role {
378 + unsigned int cr4_smap:1;
379 + unsigned int cr4_smep:1;
380 + unsigned int cr4_la57:1;
381 ++ unsigned int maxphyaddr:6;
382 + };
383 + };
384 +
385 +@@ -397,6 +398,7 @@ struct kvm_mmu {
386 + void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
387 + u64 *spte, const void *pte);
388 + hpa_t root_hpa;
389 ++ gpa_t root_cr3;
390 + union kvm_mmu_role mmu_role;
391 + u8 root_level;
392 + u8 shadow_root_level;
393 +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
394 +index 7bcfa61375c09..98d13c6a64be0 100644
395 +--- a/arch/x86/kvm/cpuid.c
396 ++++ b/arch/x86/kvm/cpuid.c
397 +@@ -337,6 +337,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
398 + unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
399 + unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
400 + unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
401 ++ unsigned f_la57 = 0;
402 +
403 + /* cpuid 1.edx */
404 + const u32 kvm_cpuid_1_edx_x86_features =
405 +@@ -491,7 +492,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
406 + // TSC_ADJUST is emulated
407 + entry->ebx |= F(TSC_ADJUST);
408 + entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
409 ++ f_la57 = entry->ecx & F(LA57);
410 + cpuid_mask(&entry->ecx, CPUID_7_ECX);
411 ++ /* Set LA57 based on hardware capability. */
412 ++ entry->ecx |= f_la57;
413 + entry->ecx |= f_umip;
414 + /* PKU is not yet implemented for shadow paging. */
415 + if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
416 +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
417 +index 7c03c0f35444f..e763e5445e3ca 100644
418 +--- a/arch/x86/kvm/mmu.c
419 ++++ b/arch/x86/kvm/mmu.c
420 +@@ -3517,6 +3517,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
421 + &invalid_list);
422 + mmu->root_hpa = INVALID_PAGE;
423 + }
424 ++ mmu->root_cr3 = 0;
425 + }
426 +
427 + kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
428 +@@ -3572,6 +3573,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
429 + vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
430 + } else
431 + BUG();
432 ++ vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
433 +
434 + return 0;
435 + }
436 +@@ -3580,10 +3582,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
437 + {
438 + struct kvm_mmu_page *sp;
439 + u64 pdptr, pm_mask;
440 +- gfn_t root_gfn;
441 ++ gfn_t root_gfn, root_cr3;
442 + int i;
443 +
444 +- root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT;
445 ++ root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
446 ++ root_gfn = root_cr3 >> PAGE_SHIFT;
447 +
448 + if (mmu_check_root(vcpu, root_gfn))
449 + return 1;
450 +@@ -3608,7 +3611,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
451 + ++sp->root_count;
452 + spin_unlock(&vcpu->kvm->mmu_lock);
453 + vcpu->arch.mmu->root_hpa = root;
454 +- return 0;
455 ++ goto set_root_cr3;
456 + }
457 +
458 + /*
459 +@@ -3674,6 +3677,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
460 + vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
461 + }
462 +
463 ++set_root_cr3:
464 ++ vcpu->arch.mmu->root_cr3 = root_cr3;
465 ++
466 + return 0;
467 + }
468 +
469 +@@ -4125,7 +4131,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
470 + struct kvm_mmu_root_info root;
471 + struct kvm_mmu *mmu = vcpu->arch.mmu;
472 +
473 +- root.cr3 = mmu->get_cr3(vcpu);
474 ++ root.cr3 = mmu->root_cr3;
475 + root.hpa = mmu->root_hpa;
476 +
477 + for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
478 +@@ -4138,6 +4144,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
479 + }
480 +
481 + mmu->root_hpa = root.hpa;
482 ++ mmu->root_cr3 = root.cr3;
483 +
484 + return i < KVM_MMU_NUM_PREV_ROOTS;
485 + }
486 +@@ -4731,6 +4738,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
487 + ext.cr4_pse = !!is_pse(vcpu);
488 + ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
489 + ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
490 ++ ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
491 +
492 + ext.valid = 1;
493 +
494 +@@ -5477,11 +5485,13 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
495 + vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
496 +
497 + vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
498 ++ vcpu->arch.root_mmu.root_cr3 = 0;
499 + vcpu->arch.root_mmu.translate_gpa = translate_gpa;
500 + for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
501 + vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
502 +
503 + vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
504 ++ vcpu->arch.guest_mmu.root_cr3 = 0;
505 + vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
506 + for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
507 + vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
508 +diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
509 +index 2f6787fc71066..c54a493e139a7 100644
510 +--- a/arch/x86/xen/enlighten_pv.c
511 ++++ b/arch/x86/xen/enlighten_pv.c
512 +@@ -898,10 +898,7 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
513 + val = native_read_msr_safe(msr, err);
514 + switch (msr) {
515 + case MSR_IA32_APICBASE:
516 +-#ifdef CONFIG_X86_X2APIC
517 +- if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
518 +-#endif
519 +- val &= ~X2APIC_ENABLE;
520 ++ val &= ~X2APIC_ENABLE;
521 + break;
522 + }
523 + return val;
524 +diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
525 +index bb3d96dea6dba..26d4164d394fb 100644
526 +--- a/drivers/acpi/bus.c
527 ++++ b/drivers/acpi/bus.c
528 +@@ -1054,18 +1054,6 @@ void __init acpi_early_init(void)
529 + goto error0;
530 + }
531 +
532 +- /*
533 +- * ACPI 2.0 requires the EC driver to be loaded and work before
534 +- * the EC device is found in the namespace (i.e. before
535 +- * acpi_load_tables() is called).
536 +- *
537 +- * This is accomplished by looking for the ECDT table, and getting
538 +- * the EC parameters out of that.
539 +- *
540 +- * Ignore the result. Not having an ECDT is not fatal.
541 +- */
542 +- status = acpi_ec_ecdt_probe();
543 +-
544 + #ifdef CONFIG_X86
545 + if (!acpi_ioapic) {
546 + /* compatible (0) means level (3) */
547 +@@ -1142,6 +1130,18 @@ static int __init acpi_bus_init(void)
548 + goto error1;
549 + }
550 +
551 ++ /*
552 ++ * ACPI 2.0 requires the EC driver to be loaded and work before the EC
553 ++ * device is found in the namespace.
554 ++ *
555 ++ * This is accomplished by looking for the ECDT table and getting the EC
556 ++ * parameters out of that.
557 ++ *
558 ++ * Do that before calling acpi_initialize_objects() which may trigger EC
559 ++ * address space accesses.
560 ++ */
561 ++ acpi_ec_ecdt_probe();
562 ++
563 + status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
564 + if (ACPI_FAILURE(status)) {
565 + printk(KERN_ERR PREFIX
566 +diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
567 +index 8535e7999769b..2a2d7ec772526 100644
568 +--- a/drivers/acpi/nfit/core.c
569 ++++ b/drivers/acpi/nfit/core.c
570 +@@ -724,6 +724,7 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
571 + struct acpi_nfit_memory_map *memdev;
572 + struct acpi_nfit_desc *acpi_desc;
573 + struct nfit_mem *nfit_mem;
574 ++ u16 physical_id;
575 +
576 + mutex_lock(&acpi_desc_lock);
577 + list_for_each_entry(acpi_desc, &acpi_descs, list) {
578 +@@ -731,10 +732,11 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
579 + list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
580 + memdev = __to_nfit_memdev(nfit_mem);
581 + if (memdev->device_handle == device_handle) {
582 ++ *flags = memdev->flags;
583 ++ physical_id = memdev->physical_id;
584 + mutex_unlock(&acpi_desc->init_mutex);
585 + mutex_unlock(&acpi_desc_lock);
586 +- *flags = memdev->flags;
587 +- return memdev->physical_id;
588 ++ return physical_id;
589 + }
590 + }
591 + mutex_unlock(&acpi_desc->init_mutex);
592 +diff --git a/drivers/atm/he.c b/drivers/atm/he.c
593 +index 29f102dcfec49..329ce9072ee9f 100644
594 +--- a/drivers/atm/he.c
595 ++++ b/drivers/atm/he.c
596 +@@ -717,7 +717,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev)
597 + instead of '/ 512', use '>> 9' to prevent a call
598 + to divdu3 on x86 platforms
599 + */
600 +- rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
601 ++ rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
602 +
603 + if (rate_cps < 10)
604 + rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
605 +diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
606 +index 2fe225a697df8..3487e03d4bc61 100644
607 +--- a/drivers/clk/at91/at91sam9x5.c
608 ++++ b/drivers/clk/at91/at91sam9x5.c
609 +@@ -144,8 +144,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
610 + return;
611 +
612 + at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1,
613 +- nck(at91sam9x5_systemck),
614 +- nck(at91sam9x35_periphck), 0);
615 ++ nck(at91sam9x5_systemck), 31, 0);
616 + if (!at91sam9x5_pmc)
617 + return;
618 +
619 +@@ -210,7 +209,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
620 + parent_names[1] = "mainck";
621 + parent_names[2] = "plladivck";
622 + parent_names[3] = "utmick";
623 +- parent_names[4] = "mck";
624 ++ parent_names[4] = "masterck";
625 + for (i = 0; i < 2; i++) {
626 + char name[6];
627 +
628 +diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
629 +index d69ad96fe988b..cd0ef7274fdbf 100644
630 +--- a/drivers/clk/at91/sama5d2.c
631 ++++ b/drivers/clk/at91/sama5d2.c
632 +@@ -240,7 +240,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
633 + parent_names[1] = "mainck";
634 + parent_names[2] = "plladivck";
635 + parent_names[3] = "utmick";
636 +- parent_names[4] = "mck";
637 ++ parent_names[4] = "masterck";
638 + for (i = 0; i < 3; i++) {
639 + char name[6];
640 +
641 +@@ -291,7 +291,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
642 + parent_names[1] = "mainck";
643 + parent_names[2] = "plladivck";
644 + parent_names[3] = "utmick";
645 +- parent_names[4] = "mck";
646 ++ parent_names[4] = "masterck";
647 + parent_names[5] = "audiopll_pmcck";
648 + for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) {
649 + hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
650 +diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
651 +index e358be7f6c8d5..b645a9d59cdbd 100644
652 +--- a/drivers/clk/at91/sama5d4.c
653 ++++ b/drivers/clk/at91/sama5d4.c
654 +@@ -207,7 +207,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
655 + parent_names[1] = "mainck";
656 + parent_names[2] = "plladivck";
657 + parent_names[3] = "utmick";
658 +- parent_names[4] = "mck";
659 ++ parent_names[4] = "masterck";
660 + for (i = 0; i < 3; i++) {
661 + char name[6];
662 +
663 +diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
664 +index 242c3370544e6..9ed46d188cb5b 100644
665 +--- a/drivers/cpufreq/scmi-cpufreq.c
666 ++++ b/drivers/cpufreq/scmi-cpufreq.c
667 +@@ -187,8 +187,8 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
668 +
669 + cpufreq_cooling_unregister(priv->cdev);
670 + dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
671 +- kfree(priv);
672 + dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
673 ++ kfree(priv);
674 +
675 + return 0;
676 + }
677 +diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
678 +index 00e954f22bc92..74401e0adb29c 100644
679 +--- a/drivers/gpio/gpio-mt7621.c
680 ++++ b/drivers/gpio/gpio-mt7621.c
681 +@@ -30,6 +30,7 @@
682 + #define GPIO_REG_EDGE 0xA0
683 +
684 + struct mtk_gc {
685 ++ struct irq_chip irq_chip;
686 + struct gpio_chip chip;
687 + spinlock_t lock;
688 + int bank;
689 +@@ -189,13 +190,6 @@ mediatek_gpio_irq_type(struct irq_data *d, unsigned int type)
690 + return 0;
691 + }
692 +
693 +-static struct irq_chip mediatek_gpio_irq_chip = {
694 +- .irq_unmask = mediatek_gpio_irq_unmask,
695 +- .irq_mask = mediatek_gpio_irq_mask,
696 +- .irq_mask_ack = mediatek_gpio_irq_mask,
697 +- .irq_set_type = mediatek_gpio_irq_type,
698 +-};
699 +-
700 + static int
701 + mediatek_gpio_xlate(struct gpio_chip *chip,
702 + const struct of_phandle_args *spec, u32 *flags)
703 +@@ -254,6 +248,13 @@ mediatek_gpio_bank_probe(struct device *dev,
704 + return ret;
705 + }
706 +
707 ++ rg->irq_chip.name = dev_name(dev);
708 ++ rg->irq_chip.parent_device = dev;
709 ++ rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
710 ++ rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
711 ++ rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
712 ++ rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
713 ++
714 + if (mtk->gpio_irq) {
715 + /*
716 + * Manually request the irq here instead of passing
717 +@@ -270,14 +271,14 @@ mediatek_gpio_bank_probe(struct device *dev,
718 + return ret;
719 + }
720 +
721 +- ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip,
722 ++ ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip,
723 + 0, handle_simple_irq, IRQ_TYPE_NONE);
724 + if (ret) {
725 + dev_err(dev, "failed to add gpiochip_irqchip\n");
726 + return ret;
727 + }
728 +
729 +- gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip,
730 ++ gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip,
731 + mtk->gpio_irq, NULL);
732 + }
733 +
734 +@@ -310,7 +311,6 @@ mediatek_gpio_probe(struct platform_device *pdev)
735 + mtk->gpio_irq = irq_of_parse_and_map(np, 0);
736 + mtk->dev = dev;
737 + platform_set_drvdata(pdev, mtk);
738 +- mediatek_gpio_irq_chip.name = dev_name(dev);
739 +
740 + for (i = 0; i < MTK_BANK_CNT; i++) {
741 + ret = mediatek_gpio_bank_probe(dev, np, i);
742 +diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
743 +index e9600b556f397..bcc6be4a5cb2e 100644
744 +--- a/drivers/gpio/gpio-pxa.c
745 ++++ b/drivers/gpio/gpio-pxa.c
746 +@@ -245,6 +245,7 @@ static bool pxa_gpio_has_pinctrl(void)
747 + {
748 + switch (gpio_type) {
749 + case PXA3XX_GPIO:
750 ++ case MMP2_GPIO:
751 + return false;
752 +
753 + default:
754 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
755 +index 8f3d44e5e7878..722b1421d8f39 100644
756 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
757 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
758 +@@ -212,6 +212,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
759 + }
760 +
761 + if (amdgpu_device_is_px(dev)) {
762 ++ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
763 + pm_runtime_use_autosuspend(dev->dev);
764 + pm_runtime_set_autosuspend_delay(dev->dev, 5000);
765 + pm_runtime_set_active(dev->dev);
766 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
767 +index 8c9abaa7601a7..62df4bd0a0fc2 100644
768 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
769 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
770 +@@ -637,12 +637,14 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
771 + struct ttm_bo_global *glob = adev->mman.bdev.glob;
772 + struct amdgpu_vm_bo_base *bo_base;
773 +
774 ++#if 0
775 + if (vm->bulk_moveable) {
776 + spin_lock(&glob->lru_lock);
777 + ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
778 + spin_unlock(&glob->lru_lock);
779 + return;
780 + }
781 ++#endif
782 +
783 + memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
784 +
785 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
786 +index e3843c5929edf..fffece5e42c56 100644
787 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
788 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
789 +@@ -1074,8 +1074,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
790 + * the GPU device is not already present in the topology device
791 + * list then return NULL. This means a new topology device has to
792 + * be created for this GPU.
793 +- * TODO: Rather than assiging @gpu to first topology device withtout
794 +- * gpu attached, it will better to have more stringent check.
795 + */
796 + static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
797 + {
798 +@@ -1083,12 +1081,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
799 + struct kfd_topology_device *out_dev = NULL;
800 +
801 + down_write(&topology_lock);
802 +- list_for_each_entry(dev, &topology_device_list, list)
803 ++ list_for_each_entry(dev, &topology_device_list, list) {
804 ++ /* Discrete GPUs need their own topology device list
805 ++ * entries. Don't assign them to CPU/APU nodes.
806 ++ */
807 ++ if (!gpu->device_info->needs_iommu_device &&
808 ++ dev->node_props.cpu_cores_count)
809 ++ continue;
810 ++
811 + if (!dev->gpu && (dev->node_props.simd_count > 0)) {
812 + dev->gpu = gpu;
813 + out_dev = dev;
814 + break;
815 + }
816 ++ }
817 + up_write(&topology_lock);
818 + return out_dev;
819 + }
820 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
821 +index 315a245aedc29..d92120b62e89f 100644
822 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
823 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
824 +@@ -704,12 +704,13 @@ static int dm_suspend(void *handle)
825 + struct amdgpu_display_manager *dm = &adev->dm;
826 + int ret = 0;
827 +
828 ++ WARN_ON(adev->dm.cached_state);
829 ++ adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
830 ++
831 + s3_handle_mst(adev->ddev, true);
832 +
833 + amdgpu_dm_irq_suspend(adev);
834 +
835 +- WARN_ON(adev->dm.cached_state);
836 +- adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
837 +
838 + dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
839 +
840 +diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
841 +index 4443a916a0fb6..e84275f15e7ad 100644
842 +--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
843 ++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
844 +@@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
845 +
846 + pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
847 +
848 +- if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
849 ++ if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
850 + /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
851 + pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
852 + /* un-mute audio */
853 +@@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
854 + pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
855 + pipe_ctx->stream_res.stream_enc, true);
856 + if (pipe_ctx->stream_res.audio) {
857 ++ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
858 ++
859 + if (option != KEEP_ACQUIRED_RESOURCE ||
860 + !dc->debug.az_endpoint_mute_only) {
861 + /*only disalbe az_endpoint if power down or free*/
862 +@@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
863 + update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
864 + pipe_ctx->stream_res.audio = NULL;
865 + }
866 ++ if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
867 ++ /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
868 ++ pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
869 +
870 + /* TODO: notify audio driver for if audio modes list changed
871 + * add audio mode list change flag */
872 +diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
873 +index 9e2e998b198f0..e0c02a9889b2c 100644
874 +--- a/drivers/gpu/drm/i915/intel_fbdev.c
875 ++++ b/drivers/gpu/drm/i915/intel_fbdev.c
876 +@@ -336,8 +336,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
877 + bool *enabled, int width, int height)
878 + {
879 + struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
880 +- unsigned long conn_configured, conn_seq, mask;
881 + unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
882 ++ unsigned long conn_configured, conn_seq;
883 + int i, j;
884 + bool *save_enabled;
885 + bool fallback = true, ret = true;
886 +@@ -355,10 +355,9 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
887 + drm_modeset_backoff(&ctx);
888 +
889 + memcpy(save_enabled, enabled, count);
890 +- mask = GENMASK(count - 1, 0);
891 ++ conn_seq = GENMASK(count - 1, 0);
892 + conn_configured = 0;
893 + retry:
894 +- conn_seq = conn_configured;
895 + for (i = 0; i < count; i++) {
896 + struct drm_fb_helper_connector *fb_conn;
897 + struct drm_connector *connector;
898 +@@ -371,7 +370,8 @@ retry:
899 + if (conn_configured & BIT(i))
900 + continue;
901 +
902 +- if (conn_seq == 0 && !connector->has_tile)
903 ++ /* First pass, only consider tiled connectors */
904 ++ if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
905 + continue;
906 +
907 + if (connector->status == connector_status_connected)
908 +@@ -475,8 +475,10 @@ retry:
909 + conn_configured |= BIT(i);
910 + }
911 +
912 +- if ((conn_configured & mask) != mask && conn_configured != conn_seq)
913 ++ if (conn_configured != conn_seq) { /* repeat until no more are found */
914 ++ conn_seq = conn_configured;
915 + goto retry;
916 ++ }
917 +
918 + /*
919 + * If the BIOS didn't enable everything it could, fall back to have the
920 +diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
921 +index bf5f294f172fa..611ac340fb289 100644
922 +--- a/drivers/gpu/drm/meson/meson_drv.c
923 ++++ b/drivers/gpu/drm/meson/meson_drv.c
924 +@@ -368,8 +368,10 @@ static int meson_probe_remote(struct platform_device *pdev,
925 + remote_node = of_graph_get_remote_port_parent(ep);
926 + if (!remote_node ||
927 + remote_node == parent || /* Ignore parent endpoint */
928 +- !of_device_is_available(remote_node))
929 ++ !of_device_is_available(remote_node)) {
930 ++ of_node_put(remote_node);
931 + continue;
932 ++ }
933 +
934 + count += meson_probe_remote(pdev, match, remote, remote_node);
935 +
936 +@@ -388,10 +390,13 @@ static int meson_drv_probe(struct platform_device *pdev)
937 +
938 + for_each_endpoint_of_node(np, ep) {
939 + remote = of_graph_get_remote_port_parent(ep);
940 +- if (!remote || !of_device_is_available(remote))
941 ++ if (!remote || !of_device_is_available(remote)) {
942 ++ of_node_put(remote);
943 + continue;
944 ++ }
945 +
946 + count += meson_probe_remote(pdev, &match, np, remote);
947 ++ of_node_put(remote);
948 + }
949 +
950 + if (count && !match)
951 +diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
952 +index dec1e081f5295..6a8fb6fd183c3 100644
953 +--- a/drivers/gpu/drm/radeon/radeon_kms.c
954 ++++ b/drivers/gpu/drm/radeon/radeon_kms.c
955 +@@ -172,6 +172,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
956 + }
957 +
958 + if (radeon_is_px(dev)) {
959 ++ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
960 + pm_runtime_use_autosuspend(dev->dev);
961 + pm_runtime_set_autosuspend_delay(dev->dev, 5000);
962 + pm_runtime_set_active(dev->dev);
963 +diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
964 +index bf49c55b0f2c7..9f27d5464804b 100644
965 +--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
966 ++++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
967 +@@ -704,17 +704,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
968 + remote = of_graph_get_remote_port_parent(ep);
969 + if (!remote)
970 + continue;
971 ++ of_node_put(remote);
972 +
973 + /* does this node match any registered engines? */
974 + list_for_each_entry(frontend, &drv->frontend_list, list) {
975 + if (remote == frontend->node) {
976 +- of_node_put(remote);
977 + of_node_put(port);
978 ++ of_node_put(ep);
979 + return frontend;
980 + }
981 + }
982 + }
983 +-
984 ++ of_node_put(port);
985 + return ERR_PTR(-EINVAL);
986 + }
987 +
988 +diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
989 +index c3040079b1cb6..4adec4ab7d066 100644
990 +--- a/drivers/hwmon/nct6775.c
991 ++++ b/drivers/hwmon/nct6775.c
992 +@@ -44,8 +44,8 @@
993 + * nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3
994 + * nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3
995 + * (0xd451)
996 +- * nct6798d 14 7 7 2+6 0xd458 0xc1 0x5ca3
997 +- * (0xd459)
998 ++ * nct6798d 14 7 7 2+6 0xd428 0xc1 0x5ca3
999 ++ * (0xd429)
1000 + *
1001 + * #temp lists the number of monitored temperature sources (first value) plus
1002 + * the number of directly connectable temperature sensors (second value).
1003 +@@ -138,7 +138,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
1004 + #define SIO_NCT6795_ID 0xd350
1005 + #define SIO_NCT6796_ID 0xd420
1006 + #define SIO_NCT6797_ID 0xd450
1007 +-#define SIO_NCT6798_ID 0xd458
1008 ++#define SIO_NCT6798_ID 0xd428
1009 + #define SIO_ID_MASK 0xFFF8
1010 +
1011 + enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
1012 +@@ -4508,7 +4508,8 @@ static int __maybe_unused nct6775_resume(struct device *dev)
1013 +
1014 + if (data->kind == nct6791 || data->kind == nct6792 ||
1015 + data->kind == nct6793 || data->kind == nct6795 ||
1016 +- data->kind == nct6796)
1017 ++ data->kind == nct6796 || data->kind == nct6797 ||
1018 ++ data->kind == nct6798)
1019 + nct6791_enable_io_mapping(sioreg);
1020 +
1021 + superio_exit(sioreg);
1022 +@@ -4644,7 +4645,8 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
1023 +
1024 + if (sio_data->kind == nct6791 || sio_data->kind == nct6792 ||
1025 + sio_data->kind == nct6793 || sio_data->kind == nct6795 ||
1026 +- sio_data->kind == nct6796)
1027 ++ sio_data->kind == nct6796 || sio_data->kind == nct6797 ||
1028 ++ sio_data->kind == nct6798)
1029 + nct6791_enable_io_mapping(sioaddr);
1030 +
1031 + superio_exit(sioaddr);
1032 +diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
1033 +index 8844c9565d2a4..7053be59ad2e4 100644
1034 +--- a/drivers/hwmon/tmp421.c
1035 ++++ b/drivers/hwmon/tmp421.c
1036 +@@ -88,7 +88,7 @@ static const struct of_device_id tmp421_of_match[] = {
1037 + .data = (void *)2
1038 + },
1039 + {
1040 +- .compatible = "ti,tmp422",
1041 ++ .compatible = "ti,tmp442",
1042 + .data = (void *)3
1043 + },
1044 + { },
1045 +diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
1046 +index 691c6f0489386..2428c7d89c6be 100644
1047 +--- a/drivers/infiniband/hw/mthca/mthca_provider.c
1048 ++++ b/drivers/infiniband/hw/mthca/mthca_provider.c
1049 +@@ -533,7 +533,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
1050 + {
1051 + struct mthca_ucontext *context;
1052 +
1053 +- qp = kmalloc(sizeof *qp, GFP_KERNEL);
1054 ++ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1055 + if (!qp)
1056 + return ERR_PTR(-ENOMEM);
1057 +
1058 +@@ -599,7 +599,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
1059 + if (pd->uobject)
1060 + return ERR_PTR(-EINVAL);
1061 +
1062 +- qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
1063 ++ qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
1064 + if (!qp)
1065 + return ERR_PTR(-ENOMEM);
1066 +
1067 +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
1068 +index eed0eb3bb04c6..0466f2ac9ad08 100644
1069 +--- a/drivers/infiniband/ulp/srp/ib_srp.c
1070 ++++ b/drivers/infiniband/ulp/srp/ib_srp.c
1071 +@@ -2942,7 +2942,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
1072 + {
1073 + struct srp_target_port *target = host_to_target(scmnd->device->host);
1074 + struct srp_rdma_ch *ch;
1075 +- int i, j;
1076 + u8 status;
1077 +
1078 + shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
1079 +@@ -2954,15 +2953,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
1080 + if (status)
1081 + return FAILED;
1082 +
1083 +- for (i = 0; i < target->ch_count; i++) {
1084 +- ch = &target->ch[i];
1085 +- for (j = 0; j < target->req_ring_size; ++j) {
1086 +- struct srp_request *req = &ch->req_ring[j];
1087 +-
1088 +- srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
1089 +- }
1090 +- }
1091 +-
1092 + return SUCCESS;
1093 + }
1094 +
1095 +diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
1096 +index 4ac378e489023..40ca1e8fa09fc 100644
1097 +--- a/drivers/isdn/hardware/avm/b1.c
1098 ++++ b/drivers/isdn/hardware/avm/b1.c
1099 +@@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo)
1100 + int i, j;
1101 +
1102 + for (j = 0; j < AVM_MAXVERSION; j++)
1103 +- cinfo->version[j] = "\0\0" + 1;
1104 ++ cinfo->version[j] = "";
1105 + for (i = 0, j = 0;
1106 + j < AVM_MAXVERSION && i < cinfo->versionlen;
1107 + j++, i += cinfo->versionbuf[i] + 1)
1108 +diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
1109 +index 1b2239c1d5694..dc1cded716c1a 100644
1110 +--- a/drivers/isdn/i4l/isdn_tty.c
1111 ++++ b/drivers/isdn/i4l/isdn_tty.c
1112 +@@ -1437,15 +1437,19 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
1113 + {
1114 + modem_info *info = (modem_info *) tty->driver_data;
1115 +
1116 ++ mutex_lock(&modem_info_mutex);
1117 + if (!old_termios)
1118 + isdn_tty_change_speed(info);
1119 + else {
1120 + if (tty->termios.c_cflag == old_termios->c_cflag &&
1121 + tty->termios.c_ispeed == old_termios->c_ispeed &&
1122 +- tty->termios.c_ospeed == old_termios->c_ospeed)
1123 ++ tty->termios.c_ospeed == old_termios->c_ospeed) {
1124 ++ mutex_unlock(&modem_info_mutex);
1125 + return;
1126 ++ }
1127 + isdn_tty_change_speed(info);
1128 + }
1129 ++ mutex_unlock(&modem_info_mutex);
1130 + }
1131 +
1132 + /*
1133 +diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
1134 +index a2e74feee2b2f..fd64df5a57a5e 100644
1135 +--- a/drivers/leds/leds-lp5523.c
1136 ++++ b/drivers/leds/leds-lp5523.c
1137 +@@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
1138 +
1139 + /* Let the programs run for couple of ms and check the engine status */
1140 + usleep_range(3000, 6000);
1141 +- lp55xx_read(chip, LP5523_REG_STATUS, &status);
1142 ++ ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
1143 ++ if (ret)
1144 ++ return ret;
1145 + status &= LP5523_ENG_STATUS_MASK;
1146 +
1147 + if (status != LP5523_ENG_STATUS_MASK) {
1148 +diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
1149 +index 8c5dfdce4326c..f461460a2aeb9 100644
1150 +--- a/drivers/mfd/Kconfig
1151 ++++ b/drivers/mfd/Kconfig
1152 +@@ -102,6 +102,7 @@ config MFD_AAT2870_CORE
1153 + config MFD_AT91_USART
1154 + tristate "AT91 USART Driver"
1155 + select MFD_CORE
1156 ++ depends on ARCH_AT91 || COMPILE_TEST
1157 + help
1158 + Select this to get support for AT91 USART IP. This is a wrapper
1159 + over at91-usart-serial driver and usart-spi-driver. Only one function
1160 +diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
1161 +index 30d09d1771717..11ab17f64c649 100644
1162 +--- a/drivers/mfd/ab8500-core.c
1163 ++++ b/drivers/mfd/ab8500-core.c
1164 +@@ -261,7 +261,7 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
1165 + mutex_unlock(&ab8500->lock);
1166 + dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret);
1167 +
1168 +- return ret;
1169 ++ return (ret < 0) ? ret : 0;
1170 + }
1171 +
1172 + static int ab8500_get_register(struct device *dev, u8 bank,
1173 +diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
1174 +index 0be511dd93d01..f8e0fa97bb31e 100644
1175 +--- a/drivers/mfd/axp20x.c
1176 ++++ b/drivers/mfd/axp20x.c
1177 +@@ -640,9 +640,9 @@ static const struct mfd_cell axp221_cells[] = {
1178 +
1179 + static const struct mfd_cell axp223_cells[] = {
1180 + {
1181 +- .name = "axp221-pek",
1182 +- .num_resources = ARRAY_SIZE(axp22x_pek_resources),
1183 +- .resources = axp22x_pek_resources,
1184 ++ .name = "axp221-pek",
1185 ++ .num_resources = ARRAY_SIZE(axp22x_pek_resources),
1186 ++ .resources = axp22x_pek_resources,
1187 + }, {
1188 + .name = "axp22x-adc",
1189 + .of_compatible = "x-powers,axp221-adc",
1190 +@@ -650,7 +650,7 @@ static const struct mfd_cell axp223_cells[] = {
1191 + .name = "axp20x-battery-power-supply",
1192 + .of_compatible = "x-powers,axp221-battery-power-supply",
1193 + }, {
1194 +- .name = "axp20x-regulator",
1195 ++ .name = "axp20x-regulator",
1196 + }, {
1197 + .name = "axp20x-ac-power-supply",
1198 + .of_compatible = "x-powers,axp221-ac-power-supply",
1199 +@@ -666,9 +666,9 @@ static const struct mfd_cell axp223_cells[] = {
1200 +
1201 + static const struct mfd_cell axp152_cells[] = {
1202 + {
1203 +- .name = "axp20x-pek",
1204 +- .num_resources = ARRAY_SIZE(axp152_pek_resources),
1205 +- .resources = axp152_pek_resources,
1206 ++ .name = "axp20x-pek",
1207 ++ .num_resources = ARRAY_SIZE(axp152_pek_resources),
1208 ++ .resources = axp152_pek_resources,
1209 + },
1210 + };
1211 +
1212 +@@ -697,87 +697,101 @@ static const struct resource axp288_charger_resources[] = {
1213 +
1214 + static const struct mfd_cell axp288_cells[] = {
1215 + {
1216 +- .name = "axp288_adc",
1217 +- .num_resources = ARRAY_SIZE(axp288_adc_resources),
1218 +- .resources = axp288_adc_resources,
1219 +- },
1220 +- {
1221 +- .name = "axp288_extcon",
1222 +- .num_resources = ARRAY_SIZE(axp288_extcon_resources),
1223 +- .resources = axp288_extcon_resources,
1224 +- },
1225 +- {
1226 +- .name = "axp288_charger",
1227 +- .num_resources = ARRAY_SIZE(axp288_charger_resources),
1228 +- .resources = axp288_charger_resources,
1229 +- },
1230 +- {
1231 +- .name = "axp288_fuel_gauge",
1232 +- .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
1233 +- .resources = axp288_fuel_gauge_resources,
1234 +- },
1235 +- {
1236 +- .name = "axp221-pek",
1237 +- .num_resources = ARRAY_SIZE(axp288_power_button_resources),
1238 +- .resources = axp288_power_button_resources,
1239 +- },
1240 +- {
1241 +- .name = "axp288_pmic_acpi",
1242 ++ .name = "axp288_adc",
1243 ++ .num_resources = ARRAY_SIZE(axp288_adc_resources),
1244 ++ .resources = axp288_adc_resources,
1245 ++ }, {
1246 ++ .name = "axp288_extcon",
1247 ++ .num_resources = ARRAY_SIZE(axp288_extcon_resources),
1248 ++ .resources = axp288_extcon_resources,
1249 ++ }, {
1250 ++ .name = "axp288_charger",
1251 ++ .num_resources = ARRAY_SIZE(axp288_charger_resources),
1252 ++ .resources = axp288_charger_resources,
1253 ++ }, {
1254 ++ .name = "axp288_fuel_gauge",
1255 ++ .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
1256 ++ .resources = axp288_fuel_gauge_resources,
1257 ++ }, {
1258 ++ .name = "axp221-pek",
1259 ++ .num_resources = ARRAY_SIZE(axp288_power_button_resources),
1260 ++ .resources = axp288_power_button_resources,
1261 ++ }, {
1262 ++ .name = "axp288_pmic_acpi",
1263 + },
1264 + };
1265 +
1266 + static const struct mfd_cell axp803_cells[] = {
1267 + {
1268 +- .name = "axp221-pek",
1269 +- .num_resources = ARRAY_SIZE(axp803_pek_resources),
1270 +- .resources = axp803_pek_resources,
1271 ++ .name = "axp221-pek",
1272 ++ .num_resources = ARRAY_SIZE(axp803_pek_resources),
1273 ++ .resources = axp803_pek_resources,
1274 ++ }, {
1275 ++ .name = "axp20x-gpio",
1276 ++ .of_compatible = "x-powers,axp813-gpio",
1277 ++ }, {
1278 ++ .name = "axp813-adc",
1279 ++ .of_compatible = "x-powers,axp813-adc",
1280 ++ }, {
1281 ++ .name = "axp20x-battery-power-supply",
1282 ++ .of_compatible = "x-powers,axp813-battery-power-supply",
1283 ++ }, {
1284 ++ .name = "axp20x-ac-power-supply",
1285 ++ .of_compatible = "x-powers,axp813-ac-power-supply",
1286 ++ .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
1287 ++ .resources = axp20x_ac_power_supply_resources,
1288 + },
1289 +- { .name = "axp20x-regulator" },
1290 ++ { .name = "axp20x-regulator" },
1291 + };
1292 +
1293 + static const struct mfd_cell axp806_self_working_cells[] = {
1294 + {
1295 +- .name = "axp221-pek",
1296 +- .num_resources = ARRAY_SIZE(axp806_pek_resources),
1297 +- .resources = axp806_pek_resources,
1298 ++ .name = "axp221-pek",
1299 ++ .num_resources = ARRAY_SIZE(axp806_pek_resources),
1300 ++ .resources = axp806_pek_resources,
1301 + },
1302 +- { .name = "axp20x-regulator" },
1303 ++ { .name = "axp20x-regulator" },
1304 + };
1305 +
1306 + static const struct mfd_cell axp806_cells[] = {
1307 + {
1308 +- .id = 2,
1309 +- .name = "axp20x-regulator",
1310 ++ .id = 2,
1311 ++ .name = "axp20x-regulator",
1312 + },
1313 + };
1314 +
1315 + static const struct mfd_cell axp809_cells[] = {
1316 + {
1317 +- .name = "axp221-pek",
1318 +- .num_resources = ARRAY_SIZE(axp809_pek_resources),
1319 +- .resources = axp809_pek_resources,
1320 ++ .name = "axp221-pek",
1321 ++ .num_resources = ARRAY_SIZE(axp809_pek_resources),
1322 ++ .resources = axp809_pek_resources,
1323 + }, {
1324 +- .id = 1,
1325 +- .name = "axp20x-regulator",
1326 ++ .id = 1,
1327 ++ .name = "axp20x-regulator",
1328 + },
1329 + };
1330 +
1331 + static const struct mfd_cell axp813_cells[] = {
1332 + {
1333 +- .name = "axp221-pek",
1334 +- .num_resources = ARRAY_SIZE(axp803_pek_resources),
1335 +- .resources = axp803_pek_resources,
1336 ++ .name = "axp221-pek",
1337 ++ .num_resources = ARRAY_SIZE(axp803_pek_resources),
1338 ++ .resources = axp803_pek_resources,
1339 + }, {
1340 +- .name = "axp20x-regulator",
1341 ++ .name = "axp20x-regulator",
1342 + }, {
1343 +- .name = "axp20x-gpio",
1344 +- .of_compatible = "x-powers,axp813-gpio",
1345 ++ .name = "axp20x-gpio",
1346 ++ .of_compatible = "x-powers,axp813-gpio",
1347 + }, {
1348 +- .name = "axp813-adc",
1349 +- .of_compatible = "x-powers,axp813-adc",
1350 ++ .name = "axp813-adc",
1351 ++ .of_compatible = "x-powers,axp813-adc",
1352 + }, {
1353 + .name = "axp20x-battery-power-supply",
1354 + .of_compatible = "x-powers,axp813-battery-power-supply",
1355 ++ }, {
1356 ++ .name = "axp20x-ac-power-supply",
1357 ++ .of_compatible = "x-powers,axp813-ac-power-supply",
1358 ++ .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
1359 ++ .resources = axp20x_ac_power_supply_resources,
1360 + },
1361 + };
1362 +
1363 +diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
1364 +index 503979c81dae1..fab3cdc27ed64 100644
1365 +--- a/drivers/mfd/bd9571mwv.c
1366 ++++ b/drivers/mfd/bd9571mwv.c
1367 +@@ -59,6 +59,7 @@ static const struct regmap_access_table bd9571mwv_writable_table = {
1368 + };
1369 +
1370 + static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = {
1371 ++ regmap_reg_range(BD9571MWV_DVFS_MONIVDAC, BD9571MWV_DVFS_MONIVDAC),
1372 + regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN),
1373 + regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT),
1374 + regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ),
1375 +diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
1376 +index b99a194ce5a4a..2d0fee488c5aa 100644
1377 +--- a/drivers/mfd/cros_ec_dev.c
1378 ++++ b/drivers/mfd/cros_ec_dev.c
1379 +@@ -499,6 +499,7 @@ static int ec_device_remove(struct platform_device *pdev)
1380 +
1381 + cros_ec_debugfs_remove(ec);
1382 +
1383 ++ mfd_remove_devices(ec->dev);
1384 + cdev_del(&ec->cdev);
1385 + device_unregister(&ec->class_dev);
1386 + return 0;
1387 +diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
1388 +index 5970b8def5487..aec20e1c7d3d5 100644
1389 +--- a/drivers/mfd/db8500-prcmu.c
1390 ++++ b/drivers/mfd/db8500-prcmu.c
1391 +@@ -2584,7 +2584,7 @@ static struct irq_chip prcmu_irq_chip = {
1392 + .irq_unmask = prcmu_irq_unmask,
1393 + };
1394 +
1395 +-static __init char *fw_project_name(u32 project)
1396 ++static char *fw_project_name(u32 project)
1397 + {
1398 + switch (project) {
1399 + case PRCMU_FW_PROJECT_U8500:
1400 +@@ -2732,7 +2732,7 @@ void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
1401 + INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
1402 + }
1403 +
1404 +-static void __init init_prcm_registers(void)
1405 ++static void init_prcm_registers(void)
1406 + {
1407 + u32 val;
1408 +
1409 +diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
1410 +index f475e848252fa..d0bf50e3568d7 100644
1411 +--- a/drivers/mfd/mc13xxx-core.c
1412 ++++ b/drivers/mfd/mc13xxx-core.c
1413 +@@ -274,7 +274,9 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
1414 +
1415 + mc13xxx->adcflags |= MC13XXX_ADC_WORKING;
1416 +
1417 +- mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
1418 ++ ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
1419 ++ if (ret)
1420 ++ goto out;
1421 +
1422 + adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 |
1423 + MC13XXX_ADC0_CHRGRAWDIV;
1424 +diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
1425 +index 77b64bd64df36..ab24e176ef448 100644
1426 +--- a/drivers/mfd/mt6397-core.c
1427 ++++ b/drivers/mfd/mt6397-core.c
1428 +@@ -329,8 +329,7 @@ static int mt6397_probe(struct platform_device *pdev)
1429 +
1430 + default:
1431 + dev_err(&pdev->dev, "unsupported chip: %d\n", id);
1432 +- ret = -ENODEV;
1433 +- break;
1434 ++ return -ENODEV;
1435 + }
1436 +
1437 + if (ret) {
1438 +diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
1439 +index 52fafea06067e..8d420c37b2a61 100644
1440 +--- a/drivers/mfd/qcom_rpm.c
1441 ++++ b/drivers/mfd/qcom_rpm.c
1442 +@@ -638,6 +638,10 @@ static int qcom_rpm_probe(struct platform_device *pdev)
1443 + return -EFAULT;
1444 + }
1445 +
1446 ++ writel(fw_version[0], RPM_CTRL_REG(rpm, 0));
1447 ++ writel(fw_version[1], RPM_CTRL_REG(rpm, 1));
1448 ++ writel(fw_version[2], RPM_CTRL_REG(rpm, 2));
1449 ++
1450 + dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0],
1451 + fw_version[1],
1452 + fw_version[2]);
1453 +diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
1454 +index c2d47d78705b8..fd111296b9592 100644
1455 +--- a/drivers/mfd/ti_am335x_tscadc.c
1456 ++++ b/drivers/mfd/ti_am335x_tscadc.c
1457 +@@ -264,8 +264,9 @@ static int ti_tscadc_probe(struct platform_device *pdev)
1458 + cell->pdata_size = sizeof(tscadc);
1459 + }
1460 +
1461 +- err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells,
1462 +- tscadc->used_cells, NULL, 0, NULL);
1463 ++ err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
1464 ++ tscadc->cells, tscadc->used_cells, NULL,
1465 ++ 0, NULL);
1466 + if (err < 0)
1467 + goto err_disable_clk;
1468 +
1469 +diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
1470 +index 910f569ff77c1..8bcdecf494d05 100644
1471 +--- a/drivers/mfd/tps65218.c
1472 ++++ b/drivers/mfd/tps65218.c
1473 +@@ -235,9 +235,9 @@ static int tps65218_probe(struct i2c_client *client,
1474 +
1475 + mutex_init(&tps->tps_lock);
1476 +
1477 +- ret = regmap_add_irq_chip(tps->regmap, tps->irq,
1478 +- IRQF_ONESHOT, 0, &tps65218_irq_chip,
1479 +- &tps->irq_data);
1480 ++ ret = devm_regmap_add_irq_chip(&client->dev, tps->regmap, tps->irq,
1481 ++ IRQF_ONESHOT, 0, &tps65218_irq_chip,
1482 ++ &tps->irq_data);
1483 + if (ret < 0)
1484 + return ret;
1485 +
1486 +@@ -253,26 +253,9 @@ static int tps65218_probe(struct i2c_client *client,
1487 + ARRAY_SIZE(tps65218_cells), NULL, 0,
1488 + regmap_irq_get_domain(tps->irq_data));
1489 +
1490 +- if (ret < 0)
1491 +- goto err_irq;
1492 +-
1493 +- return 0;
1494 +-
1495 +-err_irq:
1496 +- regmap_del_irq_chip(tps->irq, tps->irq_data);
1497 +-
1498 + return ret;
1499 + }
1500 +
1501 +-static int tps65218_remove(struct i2c_client *client)
1502 +-{
1503 +- struct tps65218 *tps = i2c_get_clientdata(client);
1504 +-
1505 +- regmap_del_irq_chip(tps->irq, tps->irq_data);
1506 +-
1507 +- return 0;
1508 +-}
1509 +-
1510 + static const struct i2c_device_id tps65218_id_table[] = {
1511 + { "tps65218", TPS65218 },
1512 + { },
1513 +@@ -285,7 +268,6 @@ static struct i2c_driver tps65218_driver = {
1514 + .of_match_table = of_tps65218_match_table,
1515 + },
1516 + .probe = tps65218_probe,
1517 +- .remove = tps65218_remove,
1518 + .id_table = tps65218_id_table,
1519 + };
1520 +
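[Editor's note: the tps65218 conversion above swaps regmap_add_irq_chip() for its device-managed variant, which lets the err_irq unwind label and the whole remove() callback disappear. The toy model below shows the underlying devres idea — cleanup callbacks registered against the device and run automatically, in reverse order, when it goes away. All names here are invented for illustration; the real devm API has different signatures.]

#include <stdio.h>

#define MAX_CLEANUPS 8

struct toy_device {
	void (*cleanup[MAX_CLEANUPS])(void);
	int nr;
};

static void toy_devm_register(struct toy_device *dev, void (*fn)(void))
{
	if (dev->nr < MAX_CLEANUPS)
		dev->cleanup[dev->nr++] = fn;
}

static void toy_device_unbind(struct toy_device *dev)
{
	while (dev->nr)			/* LIFO, like the kernel's devres */
		dev->cleanup[--dev->nr]();
}

static void irq_chip_teardown(void)
{
	printf("irq chip removed\n");
}

static int probe(struct toy_device *dev)
{
	/* "managed" registration: teardown is recorded at setup time... */
	toy_devm_register(dev, irq_chip_teardown);
	/* ...so later failures can simply return without an error label */
	return 0;
}

int main(void)
{
	struct toy_device dev = { .nr = 0 };

	probe(&dev);
	toy_device_unbind(&dev);	/* cleanup runs with no remove() */
	return 0;
}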
1521 +diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
1522 +index 4be3d239da9ec..299016bc46d90 100644
1523 +--- a/drivers/mfd/twl-core.c
1524 ++++ b/drivers/mfd/twl-core.c
1525 +@@ -979,7 +979,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
1526 + * letting it generate the right frequencies for USB, MADC, and
1527 + * other purposes.
1528 + */
1529 +-static inline int __init protect_pm_master(void)
1530 ++static inline int protect_pm_master(void)
1531 + {
1532 + int e = 0;
1533 +
1534 +@@ -988,7 +988,7 @@ static inline int __init protect_pm_master(void)
1535 + return e;
1536 + }
1537 +
1538 +-static inline int __init unprotect_pm_master(void)
1539 ++static inline int unprotect_pm_master(void)
1540 + {
1541 + int e = 0;
1542 +
1543 +diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
1544 +index 1ee68bd440fbc..16c6e2accfaa5 100644
1545 +--- a/drivers/mfd/wm5110-tables.c
1546 ++++ b/drivers/mfd/wm5110-tables.c
1547 +@@ -1618,6 +1618,7 @@ static const struct reg_default wm5110_reg_default[] = {
1548 + { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
1549 + { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
1550 + { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
1551 ++ { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
1552 + { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
1553 + { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
1554 + { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
1555 +@@ -2869,6 +2870,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
1556 + case ARIZONA_ASRC_ENABLE:
1557 + case ARIZONA_ASRC_STATUS:
1558 + case ARIZONA_ASRC_RATE1:
1559 ++ case ARIZONA_ASRC_RATE2:
1560 + case ARIZONA_ISRC_1_CTRL_1:
1561 + case ARIZONA_ISRC_1_CTRL_2:
1562 + case ARIZONA_ISRC_1_CTRL_3:
1563 +diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1564 +index a70bb1bb90e7d..a6eacf2099c30 100644
1565 +--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
1566 ++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1567 +@@ -2663,11 +2663,6 @@ static int ena_restore_device(struct ena_adapter *adapter)
1568 + goto err_device_destroy;
1569 + }
1570 +
1571 +- clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
1572 +- /* Make sure we don't have a race with AENQ Links state handler */
1573 +- if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
1574 +- netif_carrier_on(adapter->netdev);
1575 +-
1576 + rc = ena_enable_msix_and_set_admin_interrupts(adapter,
1577 + adapter->num_queues);
1578 + if (rc) {
1579 +@@ -2684,6 +2679,11 @@ static int ena_restore_device(struct ena_adapter *adapter)
1580 + }
1581 +
1582 + set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
1583 ++
1584 ++ clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
1585 ++ if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
1586 ++ netif_carrier_on(adapter->netdev);
1587 ++
1588 + mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
1589 + dev_err(&pdev->dev,
1590 + "Device reset completed successfully, Driver info: %s\n",
1591 +diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1592 +index 6e0f47f2c8a37..3e53be0fcd7ec 100644
1593 +--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1594 ++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1595 +@@ -2051,6 +2051,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1596 + bool nonlinear = skb_is_nonlinear(skb);
1597 + struct rtnl_link_stats64 *percpu_stats;
1598 + struct dpaa_percpu_priv *percpu_priv;
1599 ++ struct netdev_queue *txq;
1600 + struct dpaa_priv *priv;
1601 + struct qm_fd fd;
1602 + int offset = 0;
1603 +@@ -2100,6 +2101,11 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1604 + if (unlikely(err < 0))
1605 + goto skb_to_fd_failed;
1606 +
1607 ++ txq = netdev_get_tx_queue(net_dev, queue_mapping);
1608 ++
1609 ++ /* LLTX requires to do our own update of trans_start */
1610 ++ txq->trans_start = jiffies;
1611 ++
1612 + if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1613 + fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
1614 + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1615 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
1616 +index ad1779fc410e6..a78bfafd212c8 100644
1617 +--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
1618 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
1619 +@@ -147,12 +147,10 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
1620 + struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
1621 + int i;
1622 +
1623 +- vf_cb->mac_cb = NULL;
1624 +-
1625 +- kfree(vf_cb);
1626 +-
1627 + for (i = 0; i < handle->q_num; i++)
1628 + hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
1629 ++
1630 ++ kfree(vf_cb);
1631 + }
1632 +
1633 + static int hns_ae_wait_flow_down(struct hnae_handle *handle)
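[Editor's note: the hns hunk above is a use-after-free fix — the handle being walked in the loop lives inside the vf_cb allocation, so kfree(vf_cb) had to move after the last use. A compressed userspace sketch of the hazard and the corrected ordering; types are simplified and not the driver's real layout.]

#include <stdio.h>
#include <stdlib.h>

struct ring { int used_by_vf; };

struct vf_cb {			/* the handle lives inside this allocation */
	int q_num;
	struct ring rings[4];
};

static void put_handle(struct vf_cb *cb)
{
	/* Freeing cb first, as the old code did, would make this loop
	 * read and write freed memory. Finish every use, then free. */
	for (int i = 0; i < cb->q_num; i++)
		cb->rings[i].used_by_vf = 0;

	free(cb);
}

int main(void)
{
	struct vf_cb *cb = calloc(1, sizeof(*cb));

	if (!cb)
		return 1;
	cb->q_num = 4;
	put_handle(cb);
	puts("released in the right order");
	return 0;
}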
1634 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1635 +index db00bf1c23f5a..d47d4f86ac11d 100644
1636 +--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1637 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1638 +@@ -620,6 +620,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
1639 + }
1640 + #endif
1641 +
1642 ++#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
1643 ++
1644 + /* We reach this function only after checking that any of
1645 + * the (IPv4 | IPv6) bits are set in cqe->status.
1646 + */
1647 +@@ -627,9 +629,20 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
1648 + netdev_features_t dev_features)
1649 + {
1650 + __wsum hw_checksum = 0;
1651 ++ void *hdr;
1652 ++
1653 ++ /* CQE csum doesn't cover padding octets in short ethernet
1654 ++ * frames. And the pad field is appended prior to calculating
1655 ++ * and appending the FCS field.
1656 ++ *
1657 ++ * Detecting these padded frames requires to verify and parse
1658 ++ * IP headers, so we simply force all those small frames to skip
1659 ++ * checksum complete.
1660 ++ */
1661 ++ if (short_frame(skb->len))
1662 ++ return -EINVAL;
1663 +
1664 +- void *hdr = (u8 *)va + sizeof(struct ethhdr);
1665 +-
1666 ++ hdr = (u8 *)va + sizeof(struct ethhdr);
1667 + hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
1668 +
1669 + if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
1670 +@@ -822,6 +835,11 @@ xdp_drop_no_cnt:
1671 + skb_record_rx_queue(skb, cq_ring);
1672 +
1673 + if (likely(dev->features & NETIF_F_RXCSUM)) {
1674 ++ /* TODO: For IP non TCP/UDP packets when csum complete is
1675 ++ * not an option (not supported or any other reason) we can
1676 ++ * actually check cqe IPOK status bit and report
1677 ++ * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
1678 ++ */
1679 + if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
1680 + MLX4_CQE_STATUS_UDP)) &&
1681 + (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
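[Editor's note: the short_frame() guard above keys off two standard Ethernet constants — ETH_ZLEN (60, the minimum frame length excluding FCS) and ETH_FCS_LEN (4). Any frame at or below 64 bytes may carry pad octets the CQE checksum does not cover, so those frames fall back from checksum-complete. A self-contained illustration of the cutoff:]

#include <stdio.h>

#define ETH_ZLEN	60	/* minimum frame length, without FCS */
#define ETH_FCS_LEN	4
#define short_frame(size)	((size) <= ETH_ZLEN + ETH_FCS_LEN)

int main(void)
{
	/* 1: may be padded, skip checksum-complete */
	printf("64-byte frame short? %d\n", short_frame(64));
	/* 0: cannot be padded, checksum-complete is safe */
	printf("65-byte frame short? %d\n", short_frame(65));
	return 0;
}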
1682 +diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
1683 +index 4b4351141b94c..76b84d08a058b 100644
1684 +--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
1685 ++++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
1686 +@@ -57,12 +57,12 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
1687 + int i;
1688 +
1689 + if (chunk->nsg > 0)
1690 +- pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
1691 ++ pci_unmap_sg(dev->persist->pdev, chunk->sg, chunk->npages,
1692 + PCI_DMA_BIDIRECTIONAL);
1693 +
1694 + for (i = 0; i < chunk->npages; ++i)
1695 +- __free_pages(sg_page(&chunk->mem[i]),
1696 +- get_order(chunk->mem[i].length));
1697 ++ __free_pages(sg_page(&chunk->sg[i]),
1698 ++ get_order(chunk->sg[i].length));
1699 + }
1700 +
1701 + static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
1702 +@@ -71,9 +71,9 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
1703 +
1704 + for (i = 0; i < chunk->npages; ++i)
1705 + dma_free_coherent(&dev->persist->pdev->dev,
1706 +- chunk->mem[i].length,
1707 +- lowmem_page_address(sg_page(&chunk->mem[i])),
1708 +- sg_dma_address(&chunk->mem[i]));
1709 ++ chunk->buf[i].size,
1710 ++ chunk->buf[i].addr,
1711 ++ chunk->buf[i].dma_addr);
1712 + }
1713 +
1714 + void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
1715 +@@ -111,22 +111,21 @@ static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
1716 + return 0;
1717 + }
1718 +
1719 +-static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
1720 +- int order, gfp_t gfp_mask)
1721 ++static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf,
1722 ++ int order, gfp_t gfp_mask)
1723 + {
1724 +- void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
1725 +- &sg_dma_address(mem), gfp_mask);
1726 +- if (!buf)
1727 ++ buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order,
1728 ++ &buf->dma_addr, gfp_mask);
1729 ++ if (!buf->addr)
1730 + return -ENOMEM;
1731 +
1732 +- if (offset_in_page(buf)) {
1733 +- dma_free_coherent(dev, PAGE_SIZE << order,
1734 +- buf, sg_dma_address(mem));
1735 ++ if (offset_in_page(buf->addr)) {
1736 ++ dma_free_coherent(dev, PAGE_SIZE << order, buf->addr,
1737 ++ buf->dma_addr);
1738 + return -ENOMEM;
1739 + }
1740 +
1741 +- sg_set_buf(mem, buf, PAGE_SIZE << order);
1742 +- sg_dma_len(mem) = PAGE_SIZE << order;
1743 ++ buf->size = PAGE_SIZE << order;
1744 + return 0;
1745 + }
1746 +
1747 +@@ -159,21 +158,21 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
1748 +
1749 + while (npages > 0) {
1750 + if (!chunk) {
1751 +- chunk = kmalloc_node(sizeof(*chunk),
1752 ++ chunk = kzalloc_node(sizeof(*chunk),
1753 + gfp_mask & ~(__GFP_HIGHMEM |
1754 + __GFP_NOWARN),
1755 + dev->numa_node);
1756 + if (!chunk) {
1757 +- chunk = kmalloc(sizeof(*chunk),
1758 ++ chunk = kzalloc(sizeof(*chunk),
1759 + gfp_mask & ~(__GFP_HIGHMEM |
1760 + __GFP_NOWARN));
1761 + if (!chunk)
1762 + goto fail;
1763 + }
1764 ++ chunk->coherent = coherent;
1765 +
1766 +- sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
1767 +- chunk->npages = 0;
1768 +- chunk->nsg = 0;
1769 ++ if (!coherent)
1770 ++ sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN);
1771 + list_add_tail(&chunk->list, &icm->chunk_list);
1772 + }
1773 +
1774 +@@ -186,10 +185,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
1775 +
1776 + if (coherent)
1777 + ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
1778 +- &chunk->mem[chunk->npages],
1779 +- cur_order, mask);
1780 ++ &chunk->buf[chunk->npages],
1781 ++ cur_order, mask);
1782 + else
1783 +- ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
1784 ++ ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],
1785 + cur_order, mask,
1786 + dev->numa_node);
1787 +
1788 +@@ -205,7 +204,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
1789 + if (coherent)
1790 + ++chunk->nsg;
1791 + else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
1792 +- chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
1793 ++ chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
1794 + chunk->npages,
1795 + PCI_DMA_BIDIRECTIONAL);
1796 +
1797 +@@ -220,7 +219,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
1798 + }
1799 +
1800 + if (!coherent && chunk) {
1801 +- chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
1802 ++ chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
1803 + chunk->npages,
1804 + PCI_DMA_BIDIRECTIONAL);
1805 +
1806 +@@ -320,7 +319,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
1807 + u64 idx;
1808 + struct mlx4_icm_chunk *chunk;
1809 + struct mlx4_icm *icm;
1810 +- struct page *page = NULL;
1811 ++ void *addr = NULL;
1812 +
1813 + if (!table->lowmem)
1814 + return NULL;
1815 +@@ -336,28 +335,49 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
1816 +
1817 + list_for_each_entry(chunk, &icm->chunk_list, list) {
1818 + for (i = 0; i < chunk->npages; ++i) {
1819 ++ dma_addr_t dma_addr;
1820 ++ size_t len;
1821 ++
1822 ++ if (table->coherent) {
1823 ++ len = chunk->buf[i].size;
1824 ++ dma_addr = chunk->buf[i].dma_addr;
1825 ++ addr = chunk->buf[i].addr;
1826 ++ } else {
1827 ++ struct page *page;
1828 ++
1829 ++ len = sg_dma_len(&chunk->sg[i]);
1830 ++ dma_addr = sg_dma_address(&chunk->sg[i]);
1831 ++
1832 ++ /* XXX: we should never do this for highmem
1833 ++ * allocation. This function either needs
1834 ++ * to be split, or the kernel virtual address
1835 ++ * return needs to be made optional.
1836 ++ */
1837 ++ page = sg_page(&chunk->sg[i]);
1838 ++ addr = lowmem_page_address(page);
1839 ++ }
1840 ++
1841 + if (dma_handle && dma_offset >= 0) {
1842 +- if (sg_dma_len(&chunk->mem[i]) > dma_offset)
1843 +- *dma_handle = sg_dma_address(&chunk->mem[i]) +
1844 +- dma_offset;
1845 +- dma_offset -= sg_dma_len(&chunk->mem[i]);
1846 ++ if (len > dma_offset)
1847 ++ *dma_handle = dma_addr + dma_offset;
1848 ++ dma_offset -= len;
1849 + }
1850 ++
1851 + /*
1852 + * DMA mapping can merge pages but not split them,
1853 + * so if we found the page, dma_handle has already
1854 + * been assigned to.
1855 + */
1856 +- if (chunk->mem[i].length > offset) {
1857 +- page = sg_page(&chunk->mem[i]);
1858 ++ if (len > offset)
1859 + goto out;
1860 +- }
1861 +- offset -= chunk->mem[i].length;
1862 ++ offset -= len;
1863 + }
1864 + }
1865 +
1866 ++ addr = NULL;
1867 + out:
1868 + mutex_unlock(&table->mutex);
1869 +- return page ? lowmem_page_address(page) + offset : NULL;
1870 ++ return addr ? addr + offset : NULL;
1871 + }
1872 +
1873 + int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
1874 +diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
1875 +index c9169a490557c..d199874b1c074 100644
1876 +--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
1877 ++++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
1878 +@@ -47,11 +47,21 @@ enum {
1879 + MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT,
1880 + };
1881 +
1882 ++struct mlx4_icm_buf {
1883 ++ void *addr;
1884 ++ size_t size;
1885 ++ dma_addr_t dma_addr;
1886 ++};
1887 ++
1888 + struct mlx4_icm_chunk {
1889 + struct list_head list;
1890 + int npages;
1891 + int nsg;
1892 +- struct scatterlist mem[MLX4_ICM_CHUNK_LEN];
1893 ++ bool coherent;
1894 ++ union {
1895 ++ struct scatterlist sg[MLX4_ICM_CHUNK_LEN];
1896 ++ struct mlx4_icm_buf buf[MLX4_ICM_CHUNK_LEN];
1897 ++ };
1898 + };
1899 +
1900 + struct mlx4_icm {
1901 +@@ -114,12 +124,18 @@ static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
1902 +
1903 + static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
1904 + {
1905 +- return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
1906 ++ if (iter->chunk->coherent)
1907 ++ return iter->chunk->buf[iter->page_idx].dma_addr;
1908 ++ else
1909 ++ return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
1910 + }
1911 +
1912 + static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
1913 + {
1914 +- return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
1915 ++ if (iter->chunk->coherent)
1916 ++ return iter->chunk->buf[iter->page_idx].size;
1917 ++ else
1918 ++ return sg_dma_len(&iter->chunk->sg[iter->page_idx]);
1919 + }
1920 +
1921 + int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
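[Editor's note: the icm.h change above is a classic tagged union — a chunk is either a scatterlist chunk or a coherent-buffer chunk, never both, so the two views share storage and the new coherent flag selects the valid member, which is why mlx4_icm_addr() and mlx4_icm_size() now branch on it. A minimal sketch with simplified types:]

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct coherent_buf { void *addr; size_t size; unsigned long dma_addr; };
struct sg_entry     { void *page; size_t length; };

struct chunk {
	bool coherent;			/* discriminator, set once at alloc */
	union {				/* the two views share storage */
		struct sg_entry sg[4];
		struct coherent_buf buf[4];
	};
};

static size_t chunk_len(const struct chunk *c, int i)
{
	/* every accessor must branch on the tag, as mlx4_icm_size() does */
	return c->coherent ? c->buf[i].size : c->sg[i].length;
}

int main(void)
{
	struct chunk c = { .coherent = true };

	c.buf[0].size = 4096;
	printf("len = %zu\n", chunk_len(&c, 0));
	return 0;
}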
1922 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
1923 +index 1183248029264..7c72b3b5eedfa 100644
1924 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
1925 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
1926 +@@ -636,6 +636,7 @@ enum {
1927 + MLX5E_STATE_ASYNC_EVENTS_ENABLED,
1928 + MLX5E_STATE_OPENED,
1929 + MLX5E_STATE_DESTROYING,
1930 ++ MLX5E_STATE_XDP_TX_ENABLED,
1931 + };
1932 +
1933 + struct mlx5e_rqt {
1934 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
1935 +index ad6d471d00dd4..4a33c9a7cac7e 100644
1936 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
1937 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
1938 +@@ -262,7 +262,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1939 + int sq_num;
1940 + int i;
1941 +
1942 +- if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state)))
1943 ++ /* this flag is sufficient, no need to test internal sq state */
1944 ++ if (unlikely(!mlx5e_xdp_tx_is_enabled(priv)))
1945 + return -ENETDOWN;
1946 +
1947 + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1948 +@@ -275,9 +276,6 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1949 +
1950 + sq = &priv->channels.c[sq_num]->xdpsq;
1951 +
1952 +- if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
1953 +- return -ENETDOWN;
1954 +-
1955 + for (i = 0; i < n; i++) {
1956 + struct xdp_frame *xdpf = frames[i];
1957 + struct mlx5e_xdp_info xdpi;
1958 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
1959 +index 6dfab045925f0..4d096623178b9 100644
1960 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
1961 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
1962 +@@ -49,6 +49,23 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi);
1963 + int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1964 + u32 flags);
1965 +
1966 ++static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
1967 ++{
1968 ++ set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
1969 ++}
1970 ++
1971 ++static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
1972 ++{
1973 ++ clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
1974 ++ /* let other device's napi(s) see our new state */
1975 ++ synchronize_rcu();
1976 ++}
1977 ++
1978 ++static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
1979 ++{
1980 ++ return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
1981 ++}
1982 ++
1983 + static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
1984 + {
1985 + struct mlx5_wq_cyc *wq = &sq->wq;
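[Editor's note: the new MLX5E_STATE_XDP_TX_ENABLED bit gives the redirect path one cheap gate instead of a per-queue state test, and mlx5e_xdp_tx_disable() pairs the clear with synchronize_rcu() so every poller that might still have observed the old value drains before teardown proceeds. The userspace sketch below models the same shape with a C11 atomic and a thread join standing in for synchronize_rcu(); it is an analogy, not the driver's mechanism. Build with: cc -std=c11 -pthread sketch.c]

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool xdp_tx_enabled;

static int xdp_xmit(int frame)
{
	if (!atomic_load_explicit(&xdp_tx_enabled, memory_order_acquire))
		return -ENETDOWN;	/* device is (being) torn down */
	printf("sent frame %d\n", frame);
	return 0;
}

static void *datapath(void *arg)
{
	(void)arg;
	for (int i = 0; i < 3; i++)
		xdp_xmit(i);
	return NULL;
}

int main(void)
{
	pthread_t t;

	/* activate: set the bit before traffic can arrive */
	atomic_store_explicit(&xdp_tx_enabled, true, memory_order_release);
	pthread_create(&t, NULL, datapath, NULL);

	/* deactivate: clear the bit, then wait out every in-flight user;
	 * the join plays the role synchronize_rcu() plays in the driver */
	atomic_store_explicit(&xdp_tx_enabled, false, memory_order_release);
	pthread_join(t, NULL);
	return 0;
}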
1986 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1987 +index 9577d06578398..1d66a4e22d64f 100644
1988 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1989 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1990 +@@ -2903,6 +2903,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
1991 +
1992 + mlx5e_build_tx2sq_maps(priv);
1993 + mlx5e_activate_channels(&priv->channels);
1994 ++ mlx5e_xdp_tx_enable(priv);
1995 + netif_tx_start_all_queues(priv->netdev);
1996 +
1997 + if (MLX5_ESWITCH_MANAGER(priv->mdev))
1998 +@@ -2924,6 +2925,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
1999 + */
2000 + netif_tx_stop_all_queues(priv->netdev);
2001 + netif_tx_disable(priv->netdev);
2002 ++ mlx5e_xdp_tx_disable(priv);
2003 + mlx5e_deactivate_channels(&priv->channels);
2004 + }
2005 +
2006 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
2007 +index 820fe85100b08..4dccc84fdcf2c 100644
2008 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
2009 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
2010 +@@ -143,6 +143,7 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
2011 +
2012 + s->tx_packets += sq_stats->packets;
2013 + s->tx_bytes += sq_stats->bytes;
2014 ++ s->tx_queue_dropped += sq_stats->dropped;
2015 + }
2016 + }
2017 + }
2018 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2019 +index 4e0151918db13..3fba80a8b436f 100644
2020 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2021 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2022 +@@ -98,6 +98,7 @@ struct mlx5e_tc_flow_parse_attr {
2023 + struct ip_tunnel_info tun_info;
2024 + struct mlx5_flow_spec spec;
2025 + int num_mod_hdr_actions;
2026 ++ int max_mod_hdr_actions;
2027 + void *mod_hdr_actions;
2028 + int mirred_ifindex;
2029 + };
2030 +@@ -1888,9 +1889,9 @@ static struct mlx5_fields fields[] = {
2031 + OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
2032 + };
2033 +
2034 +-/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
2035 +- * max from the SW pedit action. On success, it says how many HW actions were
2036 +- * actually parsed.
2037 ++/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
2038 ++ * max from the SW pedit action. On success, attr->num_mod_hdr_actions
2039 ++ * says how many HW actions were actually parsed.
2040 + */
2041 + static int offload_pedit_fields(struct pedit_headers *masks,
2042 + struct pedit_headers *vals,
2043 +@@ -1914,9 +1915,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
2044 + add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
2045 +
2046 + action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
2047 +- action = parse_attr->mod_hdr_actions;
2048 +- max_actions = parse_attr->num_mod_hdr_actions;
2049 +- nactions = 0;
2050 ++ action = parse_attr->mod_hdr_actions +
2051 ++ parse_attr->num_mod_hdr_actions * action_size;
2052 ++
2053 ++ max_actions = parse_attr->max_mod_hdr_actions;
2054 ++ nactions = parse_attr->num_mod_hdr_actions;
2055 +
2056 + for (i = 0; i < ARRAY_SIZE(fields); i++) {
2057 + f = &fields[i];
2058 +@@ -2027,7 +2030,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
2059 + if (!parse_attr->mod_hdr_actions)
2060 + return -ENOMEM;
2061 +
2062 +- parse_attr->num_mod_hdr_actions = max_actions;
2063 ++ parse_attr->max_mod_hdr_actions = max_actions;
2064 + return 0;
2065 + }
2066 +
2067 +@@ -2073,9 +2076,11 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
2068 + goto out_err;
2069 + }
2070 +
2071 +- err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
2072 +- if (err)
2073 +- goto out_err;
2074 ++ if (!parse_attr->mod_hdr_actions) {
2075 ++ err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
2076 ++ if (err)
2077 ++ goto out_err;
2078 ++ }
2079 +
2080 + err = offload_pedit_fields(masks, vals, parse_attr, extack);
2081 + if (err < 0)
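[Editor's note: the en_tc change above splits one counter into capacity (max_mod_hdr_actions) and fill level (num_mod_hdr_actions), so a second pedit action appends at the right offset instead of overwriting from zero, and the buffer is allocated only on the first pass. A small sketch of that append-mode bookkeeping, with invented sizes and names:]

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ACTION_SIZE	8

struct parse_attr {
	char *actions;
	int num_actions;	/* how many entries are filled in */
	int max_actions;	/* how many entries fit */
};

static int append_action(struct parse_attr *p, const char *data)
{
	if (!p->actions) {			/* allocate only once */
		p->max_actions = 4;
		p->actions = calloc(p->max_actions, ACTION_SIZE);
		if (!p->actions)
			return -1;
	}
	if (p->num_actions >= p->max_actions)
		return -1;
	/* append at the fill level instead of overwriting from zero */
	memcpy(p->actions + p->num_actions * ACTION_SIZE, data, ACTION_SIZE);
	p->num_actions++;
	return 0;
}

int main(void)
{
	struct parse_attr p = { 0 };

	append_action(&p, "set-ttl ");
	append_action(&p, "set-dscp");
	printf("parsed %d/%d actions\n", p.num_actions, p.max_actions);
	free(p.actions);
	return 0;
}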
2082 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
2083 +index 6dacaeba2fbff..0b03d65474e93 100644
2084 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
2085 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
2086 +@@ -387,8 +387,14 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
2087 + num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
2088 + contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
2089 + if (unlikely(contig_wqebbs_room < num_wqebbs)) {
2090 ++#ifdef CONFIG_MLX5_EN_IPSEC
2091 ++ struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
2092 ++#endif
2093 + mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
2094 + mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
2095 ++#ifdef CONFIG_MLX5_EN_IPSEC
2096 ++ wqe->eth = cur_eth;
2097 ++#endif
2098 + }
2099 +
2100 + /* fill wqe */
2101 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
2102 +index 8a291eb36c64c..7338c9bac4e6a 100644
2103 +--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
2104 ++++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
2105 +@@ -78,6 +78,7 @@ config MLXSW_SPECTRUM
2106 + depends on IPV6 || IPV6=n
2107 + depends on NET_IPGRE || NET_IPGRE=n
2108 + depends on IPV6_GRE || IPV6_GRE=n
2109 ++ depends on VXLAN || VXLAN=n
2110 + select GENERIC_ALLOCATOR
2111 + select PARMAN
2112 + select MLXFW
2113 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
2114 +index c7901a3f2a794..a903e97793f9a 100644
2115 +--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
2116 ++++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
2117 +@@ -1367,10 +1367,10 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
2118 + u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
2119 +
2120 + if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
2121 +- break;
2122 ++ return 0;
2123 + cond_resched();
2124 + } while (time_before(jiffies, end));
2125 +- return 0;
2126 ++ return -EBUSY;
2127 + }
2128 +
2129 + static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
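[Editor's note: the mlxsw_pci_sw_reset() fix above corrects a poll loop that could fall out of its deadline and still return success — success is now reported from inside the loop, and running out of time becomes -EBUSY. Sketch of the corrected shape; hw_ready() is a stand-in.]

#include <errno.h>
#include <stdio.h>

static int polls;

static int hw_ready(void)
{
	return ++polls >= 3;	/* pretend the HW turns ready on poll 3 */
}

static int wait_for_ready(int max_attempts)
{
	polls = 0;
	for (int i = 0; i < max_attempts; i++) {
		if (hw_ready())
			return 0;	/* report success where it happens */
		/* cond_resched() / a short sleep would go here */
	}
	return -EBUSY;			/* deadline passed: that is a failure */
}

int main(void)
{
	printf("ready in time: %d\n", wait_for_ready(5));
	printf("never ready:   %d\n", wait_for_ready(1));
	return 0;
}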
2130 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
2131 +index e3c6fe8b1d406..1dcf152b28138 100644
2132 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
2133 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
2134 +@@ -75,7 +75,15 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
2135 + act_set = mlxsw_afa_block_first_set(rulei->act_block);
2136 + mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
2137 +
2138 +- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
2139 ++ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
2140 ++ if (err)
2141 ++ goto err_ptce2_write;
2142 ++
2143 ++ return 0;
2144 ++
2145 ++err_ptce2_write:
2146 ++ cregion->ops->entry_remove(cregion, centry);
2147 ++ return err;
2148 + }
2149 +
2150 + static void
2151 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2152 +index c514af438fc28..b606db9833e9e 100644
2153 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2154 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2155 +@@ -1219,7 +1219,7 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
2156 + static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
2157 + {
2158 + return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
2159 +- MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
2160 ++ MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
2161 + }
2162 +
2163 + static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
2164 +@@ -1276,7 +1276,7 @@ out:
2165 + static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2166 + const char *mac, u16 fid, bool adding,
2167 + enum mlxsw_reg_sfd_rec_action action,
2168 +- bool dynamic)
2169 ++ enum mlxsw_reg_sfd_rec_policy policy)
2170 + {
2171 + char *sfd_pl;
2172 + u8 num_rec;
2173 +@@ -1287,8 +1287,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2174 + return -ENOMEM;
2175 +
2176 + mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
2177 +- mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
2178 +- mac, fid, action, local_port);
2179 ++ mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
2180 + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
2181 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
2182 + if (err)
2183 +@@ -1307,7 +1306,8 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2184 + bool dynamic)
2185 + {
2186 + return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
2187 +- MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
2188 ++ MLXSW_REG_SFD_REC_ACTION_NOP,
2189 ++ mlxsw_sp_sfd_rec_policy(dynamic));
2190 + }
2191 +
2192 + int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
2193 +@@ -1315,7 +1315,7 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
2194 + {
2195 + return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
2196 + MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
2197 +- false);
2198 ++ MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
2199 + }
2200 +
2201 + static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
2202 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
2203 +index c6f4bab67a5fc..9e728ec82c218 100644
2204 +--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
2205 ++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
2206 +@@ -1603,6 +1603,10 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
2207 + cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
2208 + rx_prod.bd_prod = cpu_to_le16(bd_prod);
2209 + rx_prod.cqe_prod = cpu_to_le16(cq_prod);
2210 ++
2211 ++ /* Make sure chain element is updated before ringing the doorbell */
2212 ++ dma_wmb();
2213 ++
2214 + DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
2215 + }
2216 +
2217 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
2218 +index 6c5092e7771cd..c5e25580a43fa 100644
2219 +--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
2220 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
2221 +@@ -263,6 +263,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
2222 + struct stmmac_extra_stats *x, u32 chan)
2223 + {
2224 + u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
2225 ++ u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
2226 + int ret = 0;
2227 +
2228 + /* ABNORMAL interrupts */
2229 +@@ -282,8 +283,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
2230 + x->normal_irq_n++;
2231 +
2232 + if (likely(intr_status & XGMAC_RI)) {
2233 +- u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
2234 +- if (likely(value & XGMAC_RIE)) {
2235 ++ if (likely(intr_en & XGMAC_RIE)) {
2236 + x->rx_normal_irq_n++;
2237 + ret |= handle_rx;
2238 + }
2239 +@@ -295,7 +295,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
2240 + }
2241 +
2242 + /* Clear interrupts */
2243 +- writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan));
2244 ++ writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));
2245 +
2246 + return ret;
2247 + }
2248 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2249 +index c4a35e932f052..5d83d6a7694b0 100644
2250 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2251 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2252 +@@ -3525,27 +3525,28 @@ static int stmmac_napi_poll(struct napi_struct *napi, int budget)
2253 + struct stmmac_channel *ch =
2254 + container_of(napi, struct stmmac_channel, napi);
2255 + struct stmmac_priv *priv = ch->priv_data;
2256 +- int work_done = 0, work_rem = budget;
2257 ++ int work_done, rx_done = 0, tx_done = 0;
2258 + u32 chan = ch->index;
2259 +
2260 + priv->xstats.napi_poll++;
2261 +
2262 +- if (ch->has_tx) {
2263 +- int done = stmmac_tx_clean(priv, work_rem, chan);
2264 ++ if (ch->has_tx)
2265 ++ tx_done = stmmac_tx_clean(priv, budget, chan);
2266 ++ if (ch->has_rx)
2267 ++ rx_done = stmmac_rx(priv, budget, chan);
2268 +
2269 +- work_done += done;
2270 +- work_rem -= done;
2271 +- }
2272 +-
2273 +- if (ch->has_rx) {
2274 +- int done = stmmac_rx(priv, work_rem, chan);
2275 ++ work_done = max(rx_done, tx_done);
2276 ++ work_done = min(work_done, budget);
2277 +
2278 +- work_done += done;
2279 +- work_rem -= done;
2280 +- }
2281 ++ if (work_done < budget && napi_complete_done(napi, work_done)) {
2282 ++ int stat;
2283 +
2284 +- if (work_done < budget && napi_complete_done(napi, work_done))
2285 + stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
2286 ++ stat = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2287 ++ &priv->xstats, chan);
2288 ++ if (stat && napi_reschedule(napi))
2289 ++ stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2290 ++ }
2291 +
2292 + return work_done;
2293 + }
2294 +@@ -4194,6 +4195,18 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
2295 + return ret;
2296 + }
2297 +
2298 ++ /* Rx Watchdog is available in the COREs newer than the 3.40.
2299 ++ * In some case, for example on bugged HW this feature
2300 ++ * has to be disable and this can be done by passing the
2301 ++ * riwt_off field from the platform.
2302 ++ */
2303 ++ if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
2304 ++ (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
2305 ++ priv->use_riwt = 1;
2306 ++ dev_info(priv->device,
2307 ++ "Enable RX Mitigation via HW Watchdog Timer\n");
2308 ++ }
2309 ++
2310 + return 0;
2311 + }
2312 +
2313 +@@ -4326,18 +4339,6 @@ int stmmac_dvr_probe(struct device *device,
2314 + if (flow_ctrl)
2315 + priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
2316 +
2317 +- /* Rx Watchdog is available in the COREs newer than the 3.40.
2318 +- * In some case, for example on bugged HW this feature
2319 +- * has to be disable and this can be done by passing the
2320 +- * riwt_off field from the platform.
2321 +- */
2322 +- if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
2323 +- (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
2324 +- priv->use_riwt = 1;
2325 +- dev_info(priv->device,
2326 +- "Enable RX Mitigation via HW Watchdog Timer\n");
2327 +- }
2328 +-
2329 + /* Setup channels NAPI */
2330 + maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
2331 +
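[Editor's note: the reworked stmmac poll above cleans RX and TX against the full budget independently, reports max(rx_done, tx_done) clamped to the budget, and after completing re-reads the DMA status so an interrupt that fired while the line was masked gets rescheduled instead of lost. A schematic sketch of that accounting, with all helpers faked:]

#include <stdio.h>

static int clean_tx(int budget) { return budget < 4 ? budget : 4; }
static int clean_rx(int budget) { return budget < 7 ? budget : 7; }
static int irq_status_pending(void) { return 0; }

static int poll(int budget)
{
	int tx_done = clean_tx(budget);	/* both run on the full budget */
	int rx_done = clean_rx(budget);
	int work = rx_done > tx_done ? rx_done : tx_done;

	if (work > budget)
		work = budget;		/* never report more than the budget */

	if (work < budget) {
		/* napi_complete_done() + unmask the DMA interrupt,
		 * then close the race window: */
		if (irq_status_pending())
			puts("reschedule");   /* napi_reschedule() + mask */
	}
	return work;
}

int main(void)
{
	printf("work done = %d of budget 16\n", poll(16));
	return 0;
}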
2332 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
2333 +index c54a50dbd5ac2..d819e8eaba122 100644
2334 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
2335 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
2336 +@@ -299,7 +299,17 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
2337 + */
2338 + static void stmmac_pci_remove(struct pci_dev *pdev)
2339 + {
2340 ++ int i;
2341 ++
2342 + stmmac_dvr_remove(&pdev->dev);
2343 ++
2344 ++ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
2345 ++ if (pci_resource_len(pdev, i) == 0)
2346 ++ continue;
2347 ++ pcim_iounmap_regions(pdev, BIT(i));
2348 ++ break;
2349 ++ }
2350 ++
2351 + pci_disable_device(pdev);
2352 + }
2353 +
2354 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
2355 +index 531294f4978bc..58ea18af9813a 100644
2356 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
2357 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
2358 +@@ -301,6 +301,8 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
2359 + /* Queue 0 is not AVB capable */
2360 + if (queue <= 0 || queue >= tx_queues_count)
2361 + return -EINVAL;
2362 ++ if (!priv->dma_cap.av)
2363 ++ return -EOPNOTSUPP;
2364 + if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
2365 + return -EOPNOTSUPP;
2366 +
2367 +diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
2368 +index a0cd1c41cf5f0..2e6e11d8cf5cb 100644
2369 +--- a/drivers/net/geneve.c
2370 ++++ b/drivers/net/geneve.c
2371 +@@ -1426,9 +1426,13 @@ static void geneve_link_config(struct net_device *dev,
2372 + }
2373 + #if IS_ENABLED(CONFIG_IPV6)
2374 + case AF_INET6: {
2375 +- struct rt6_info *rt = rt6_lookup(geneve->net,
2376 +- &info->key.u.ipv6.dst, NULL, 0,
2377 +- NULL, 0);
2378 ++ struct rt6_info *rt;
2379 ++
2380 ++ if (!__in6_dev_get(dev))
2381 ++ break;
2382 ++
2383 ++ rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0,
2384 ++ NULL, 0);
2385 +
2386 + if (rt && rt->dst.dev)
2387 + ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;
2388 +diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
2389 +index 51611c7a23d1c..22dfbd4c6aaf4 100644
2390 +--- a/drivers/net/phy/micrel.c
2391 ++++ b/drivers/net/phy/micrel.c
2392 +@@ -1076,6 +1076,7 @@ static struct phy_driver ksphy_driver[] = {
2393 + .driver_data = &ksz9021_type,
2394 + .probe = kszphy_probe,
2395 + .config_init = ksz9031_config_init,
2396 ++ .soft_reset = genphy_soft_reset,
2397 + .read_status = ksz9031_read_status,
2398 + .ack_interrupt = kszphy_ack_interrupt,
2399 + .config_intr = kszphy_config_intr,
2400 +diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
2401 +index 9b8dd0d0ee42c..b60c82065fd11 100644
2402 +--- a/drivers/net/phy/phylink.c
2403 ++++ b/drivers/net/phy/phylink.c
2404 +@@ -475,6 +475,17 @@ static void phylink_run_resolve(struct phylink *pl)
2405 + queue_work(system_power_efficient_wq, &pl->resolve);
2406 + }
2407 +
2408 ++static void phylink_run_resolve_and_disable(struct phylink *pl, int bit)
2409 ++{
2410 ++ unsigned long state = pl->phylink_disable_state;
2411 ++
2412 ++ set_bit(bit, &pl->phylink_disable_state);
2413 ++ if (state == 0) {
2414 ++ queue_work(system_power_efficient_wq, &pl->resolve);
2415 ++ flush_work(&pl->resolve);
2416 ++ }
2417 ++}
2418 ++
2419 + static void phylink_fixed_poll(struct timer_list *t)
2420 + {
2421 + struct phylink *pl = container_of(t, struct phylink, link_poll);
2422 +@@ -928,9 +939,7 @@ void phylink_stop(struct phylink *pl)
2423 + if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
2424 + del_timer_sync(&pl->link_poll);
2425 +
2426 +- set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
2427 +- queue_work(system_power_efficient_wq, &pl->resolve);
2428 +- flush_work(&pl->resolve);
2429 ++ phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED);
2430 + }
2431 + EXPORT_SYMBOL_GPL(phylink_stop);
2432 +
2433 +@@ -1637,9 +1646,7 @@ static void phylink_sfp_link_down(void *upstream)
2434 +
2435 + ASSERT_RTNL();
2436 +
2437 +- set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
2438 +- queue_work(system_power_efficient_wq, &pl->resolve);
2439 +- flush_work(&pl->resolve);
2440 ++ phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK);
2441 + }
2442 +
2443 + static void phylink_sfp_link_up(void *upstream)
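[Editor's note: phylink_run_resolve_and_disable() above folds a set_bit/queue_work/flush_work sequence that appeared twice into one helper, with one refinement — the resolver is only kicked and flushed when no disable bit was set beforehand. A tiny single-threaded sketch of that first-disabler-flushes logic; no locking, purely illustrative.]

#include <stdio.h>

static unsigned long disable_state;

static void flush_resolver(void)
{
	printf("resolver queued and flushed\n");
}

static void run_resolve_and_disable(int bit)
{
	unsigned long state = disable_state;	/* snapshot before setting */

	disable_state |= 1UL << bit;
	if (state == 0)		/* only the first disabler does the flush */
		flush_resolver();
}

int main(void)
{
	run_resolve_and_disable(0);	/* flushes */
	run_resolve_and_disable(1);	/* already disabled: skipped */
	return 0;
}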
2444 +diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
2445 +index ad9db652874dc..fef701bfad62e 100644
2446 +--- a/drivers/net/phy/sfp-bus.c
2447 ++++ b/drivers/net/phy/sfp-bus.c
2448 +@@ -347,6 +347,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
2449 + return ret;
2450 + }
2451 + }
2452 ++ bus->socket_ops->attach(bus->sfp);
2453 + if (bus->started)
2454 + bus->socket_ops->start(bus->sfp);
2455 + bus->netdev->sfp_bus = bus;
2456 +@@ -362,6 +363,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
2457 + if (bus->registered) {
2458 + if (bus->started)
2459 + bus->socket_ops->stop(bus->sfp);
2460 ++ bus->socket_ops->detach(bus->sfp);
2461 + if (bus->phydev && ops && ops->disconnect_phy)
2462 + ops->disconnect_phy(bus->upstream);
2463 + }
2464 +diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
2465 +index fd8bb998ae52d..68c8fbf099f87 100644
2466 +--- a/drivers/net/phy/sfp.c
2467 ++++ b/drivers/net/phy/sfp.c
2468 +@@ -184,6 +184,7 @@ struct sfp {
2469 +
2470 + struct gpio_desc *gpio[GPIO_MAX];
2471 +
2472 ++ bool attached;
2473 + unsigned int state;
2474 + struct delayed_work poll;
2475 + struct delayed_work timeout;
2476 +@@ -1475,7 +1476,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
2477 + */
2478 + switch (sfp->sm_mod_state) {
2479 + default:
2480 +- if (event == SFP_E_INSERT) {
2481 ++ if (event == SFP_E_INSERT && sfp->attached) {
2482 + sfp_module_tx_disable(sfp);
2483 + sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT);
2484 + }
2485 +@@ -1607,6 +1608,19 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
2486 + mutex_unlock(&sfp->sm_mutex);
2487 + }
2488 +
2489 ++static void sfp_attach(struct sfp *sfp)
2490 ++{
2491 ++ sfp->attached = true;
2492 ++ if (sfp->state & SFP_F_PRESENT)
2493 ++ sfp_sm_event(sfp, SFP_E_INSERT);
2494 ++}
2495 ++
2496 ++static void sfp_detach(struct sfp *sfp)
2497 ++{
2498 ++ sfp->attached = false;
2499 ++ sfp_sm_event(sfp, SFP_E_REMOVE);
2500 ++}
2501 ++
2502 + static void sfp_start(struct sfp *sfp)
2503 + {
2504 + sfp_sm_event(sfp, SFP_E_DEV_UP);
2505 +@@ -1667,6 +1681,8 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
2506 + }
2507 +
2508 + static const struct sfp_socket_ops sfp_module_ops = {
2509 ++ .attach = sfp_attach,
2510 ++ .detach = sfp_detach,
2511 + .start = sfp_start,
2512 + .stop = sfp_stop,
2513 + .module_info = sfp_module_info,
2514 +@@ -1834,10 +1850,6 @@ static int sfp_probe(struct platform_device *pdev)
2515 + dev_info(sfp->dev, "Host maximum power %u.%uW\n",
2516 + sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10);
2517 +
2518 +- sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
2519 +- if (!sfp->sfp_bus)
2520 +- return -ENOMEM;
2521 +-
2522 + /* Get the initial state, and always signal TX disable,
2523 + * since the network interface will not be up.
2524 + */
2525 +@@ -1848,10 +1860,6 @@ static int sfp_probe(struct platform_device *pdev)
2526 + sfp->state |= SFP_F_RATE_SELECT;
2527 + sfp_set_state(sfp, sfp->state);
2528 + sfp_module_tx_disable(sfp);
2529 +- rtnl_lock();
2530 +- if (sfp->state & SFP_F_PRESENT)
2531 +- sfp_sm_event(sfp, SFP_E_INSERT);
2532 +- rtnl_unlock();
2533 +
2534 + for (i = 0; i < GPIO_MAX; i++) {
2535 + if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i])
2536 +@@ -1884,6 +1892,10 @@ static int sfp_probe(struct platform_device *pdev)
2537 + dev_warn(sfp->dev,
2538 + "No tx_disable pin: SFP modules will always be emitting.\n");
2539 +
2540 ++ sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
2541 ++ if (!sfp->sfp_bus)
2542 ++ return -ENOMEM;
2543 ++
2544 + return 0;
2545 + }
2546 +
2547 +diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
2548 +index 31b0acf337e27..64f54b0bbd8c4 100644
2549 +--- a/drivers/net/phy/sfp.h
2550 ++++ b/drivers/net/phy/sfp.h
2551 +@@ -7,6 +7,8 @@
2552 + struct sfp;
2553 +
2554 + struct sfp_socket_ops {
2555 ++ void (*attach)(struct sfp *sfp);
2556 ++ void (*detach)(struct sfp *sfp);
2557 + void (*start)(struct sfp *sfp);
2558 + void (*stop)(struct sfp *sfp);
2559 + int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo);
2560 +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
2561 +index 364f514d56d87..86db1205a3968 100644
2562 +--- a/drivers/net/team/team.c
2563 ++++ b/drivers/net/team/team.c
2564 +@@ -256,17 +256,6 @@ static void __team_option_inst_mark_removed_port(struct team *team,
2565 + }
2566 + }
2567 +
2568 +-static bool __team_option_inst_tmp_find(const struct list_head *opts,
2569 +- const struct team_option_inst *needle)
2570 +-{
2571 +- struct team_option_inst *opt_inst;
2572 +-
2573 +- list_for_each_entry(opt_inst, opts, tmp_list)
2574 +- if (opt_inst == needle)
2575 +- return true;
2576 +- return false;
2577 +-}
2578 +-
2579 + static int __team_options_register(struct team *team,
2580 + const struct team_option *option,
2581 + size_t option_count)
2582 +@@ -2460,7 +2449,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2583 + int err = 0;
2584 + int i;
2585 + struct nlattr *nl_option;
2586 +- LIST_HEAD(opt_inst_list);
2587 +
2588 + rtnl_lock();
2589 +
2590 +@@ -2480,6 +2468,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2591 + struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
2592 + struct nlattr *attr;
2593 + struct nlattr *attr_data;
2594 ++ LIST_HEAD(opt_inst_list);
2595 + enum team_option_type opt_type;
2596 + int opt_port_ifindex = 0; /* != 0 for per-port options */
2597 + u32 opt_array_index = 0;
2598 +@@ -2584,23 +2573,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2599 + if (err)
2600 + goto team_put;
2601 + opt_inst->changed = true;
2602 +-
2603 +- /* dumb/evil user-space can send us duplicate opt,
2604 +- * keep only the last one
2605 +- */
2606 +- if (__team_option_inst_tmp_find(&opt_inst_list,
2607 +- opt_inst))
2608 +- continue;
2609 +-
2610 + list_add(&opt_inst->tmp_list, &opt_inst_list);
2611 + }
2612 + if (!opt_found) {
2613 + err = -ENOENT;
2614 + goto team_put;
2615 + }
2616 +- }
2617 +
2618 +- err = team_nl_send_event_options_get(team, &opt_inst_list);
2619 ++ err = team_nl_send_event_options_get(team, &opt_inst_list);
2620 ++ if (err)
2621 ++ break;
2622 ++ }
2623 +
2624 + team_put:
2625 + team_nl_team_put(team);
2626 +diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
2627 +index 4d6409605207c..af13d8cf94ad4 100644
2628 +--- a/drivers/net/wan/fsl_ucc_hdlc.c
2629 ++++ b/drivers/net/wan/fsl_ucc_hdlc.c
2630 +@@ -1049,6 +1049,54 @@ static const struct net_device_ops uhdlc_ops = {
2631 + .ndo_tx_timeout = uhdlc_tx_timeout,
2632 + };
2633 +
2634 ++static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
2635 ++{
2636 ++ struct device_node *np;
2637 ++ struct platform_device *pdev;
2638 ++ struct resource *res;
2639 ++ static int siram_init_flag;
2640 ++ int ret = 0;
2641 ++
2642 ++ np = of_find_compatible_node(NULL, NULL, name);
2643 ++ if (!np)
2644 ++ return -EINVAL;
2645 ++
2646 ++ pdev = of_find_device_by_node(np);
2647 ++ if (!pdev) {
2648 ++ pr_err("%pOFn: failed to lookup pdev\n", np);
2649 ++ of_node_put(np);
2650 ++ return -EINVAL;
2651 ++ }
2652 ++
2653 ++ of_node_put(np);
2654 ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2655 ++ if (!res) {
2656 ++ ret = -EINVAL;
2657 ++ goto error_put_device;
2658 ++ }
2659 ++ *ptr = ioremap(res->start, resource_size(res));
2660 ++ if (!*ptr) {
2661 ++ ret = -ENOMEM;
2662 ++ goto error_put_device;
2663 ++ }
2664 ++
2665 ++ /* We've remapped the addresses, and we don't need the device any
2666 ++ * more, so we should release it.
2667 ++ */
2668 ++ put_device(&pdev->dev);
2669 ++
2670 ++ if (init_flag && siram_init_flag == 0) {
2671 ++ memset_io(*ptr, 0, resource_size(res));
2672 ++ siram_init_flag = 1;
2673 ++ }
2674 ++ return 0;
2675 ++
2676 ++error_put_device:
2677 ++ put_device(&pdev->dev);
2678 ++
2679 ++ return ret;
2680 ++}
2681 ++
2682 + static int ucc_hdlc_probe(struct platform_device *pdev)
2683 + {
2684 + struct device_node *np = pdev->dev.of_node;
2685 +@@ -1143,6 +1191,15 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
2686 + ret = ucc_of_parse_tdm(np, utdm, ut_info);
2687 + if (ret)
2688 + goto free_utdm;
2689 ++
2690 ++ ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
2691 ++ (void __iomem **)&utdm->si_regs);
2692 ++ if (ret)
2693 ++ goto free_utdm;
2694 ++ ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
2695 ++ (void __iomem **)&utdm->siram);
2696 ++ if (ret)
2697 ++ goto unmap_si_regs;
2698 + }
2699 +
2700 + if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
2701 +@@ -1151,7 +1208,7 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
2702 + ret = uhdlc_init(uhdlc_priv);
2703 + if (ret) {
2704 + dev_err(&pdev->dev, "Failed to init uhdlc\n");
2705 +- goto free_utdm;
2706 ++ goto undo_uhdlc_init;
2707 + }
2708 +
2709 + dev = alloc_hdlcdev(uhdlc_priv);
2710 +@@ -1181,6 +1238,9 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
2711 + free_dev:
2712 + free_netdev(dev);
2713 + undo_uhdlc_init:
2714 ++ iounmap(utdm->siram);
2715 ++unmap_si_regs:
2716 ++ iounmap(utdm->si_regs);
2717 + free_utdm:
2718 + if (uhdlc_priv->tsa)
2719 + kfree(utdm);
2720 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
2721 +index ea517864186b4..76f25008491a5 100644
2722 +--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
2723 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
2724 +@@ -159,39 +159,49 @@ static const struct ieee80211_ops mt76x0u_ops = {
2725 + .wake_tx_queue = mt76_wake_tx_queue,
2726 + };
2727 +
2728 +-static int mt76x0u_register_device(struct mt76x02_dev *dev)
2729 ++static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
2730 + {
2731 +- struct ieee80211_hw *hw = dev->mt76.hw;
2732 + int err;
2733 +
2734 +- err = mt76u_alloc_queues(&dev->mt76);
2735 +- if (err < 0)
2736 +- goto out_err;
2737 +-
2738 +- err = mt76u_mcu_init_rx(&dev->mt76);
2739 +- if (err < 0)
2740 +- goto out_err;
2741 +-
2742 + mt76x0_chip_onoff(dev, true, true);
2743 +- if (!mt76x02_wait_for_mac(&dev->mt76)) {
2744 +- err = -ETIMEDOUT;
2745 +- goto out_err;
2746 +- }
2747 ++
2748 ++ if (!mt76x02_wait_for_mac(&dev->mt76))
2749 ++ return -ETIMEDOUT;
2750 +
2751 + err = mt76x0u_mcu_init(dev);
2752 + if (err < 0)
2753 +- goto out_err;
2754 ++ return err;
2755 +
2756 + mt76x0_init_usb_dma(dev);
2757 + err = mt76x0_init_hardware(dev);
2758 + if (err < 0)
2759 +- goto out_err;
2760 ++ return err;
2761 +
2762 + mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
2763 + mt76_wr(dev, MT_TXOP_CTRL_CFG,
2764 + FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
2765 + FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
2766 +
2767 ++ return 0;
2768 ++}
2769 ++
2770 ++static int mt76x0u_register_device(struct mt76x02_dev *dev)
2771 ++{
2772 ++ struct ieee80211_hw *hw = dev->mt76.hw;
2773 ++ int err;
2774 ++
2775 ++ err = mt76u_alloc_queues(&dev->mt76);
2776 ++ if (err < 0)
2777 ++ goto out_err;
2778 ++
2779 ++ err = mt76u_mcu_init_rx(&dev->mt76);
2780 ++ if (err < 0)
2781 ++ goto out_err;
2782 ++
2783 ++ err = mt76x0u_init_hardware(dev);
2784 ++ if (err < 0)
2785 ++ goto out_err;
2786 ++
2787 + err = mt76x0_register_device(dev);
2788 + if (err < 0)
2789 + goto out_err;
2790 +@@ -300,6 +310,8 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
2791 +
2792 + mt76u_stop_queues(&dev->mt76);
2793 + mt76x0u_mac_stop(dev);
2794 ++ clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
2795 ++ mt76x0_chip_onoff(dev, false, false);
2796 + usb_kill_urb(usb->mcu.res.urb);
2797 +
2798 + return 0;
2799 +@@ -327,7 +339,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
2800 + tasklet_enable(&usb->rx_tasklet);
2801 + tasklet_enable(&usb->tx_tasklet);
2802 +
2803 +- ret = mt76x0_init_hardware(dev);
2804 ++ ret = mt76x0u_init_hardware(dev);
2805 + if (ret)
2806 + goto err;
2807 +
2808 +diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c
2809 +index a7f37063518ec..3d05bc1937d40 100644
2810 +--- a/drivers/pinctrl/pinctrl-max77620.c
2811 ++++ b/drivers/pinctrl/pinctrl-max77620.c
2812 +@@ -34,14 +34,12 @@ enum max77620_pin_ppdrv {
2813 + MAX77620_PIN_PP_DRV,
2814 + };
2815 +
2816 +-enum max77620_pinconf_param {
2817 +- MAX77620_ACTIVE_FPS_SOURCE = PIN_CONFIG_END + 1,
2818 +- MAX77620_ACTIVE_FPS_POWER_ON_SLOTS,
2819 +- MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS,
2820 +- MAX77620_SUSPEND_FPS_SOURCE,
2821 +- MAX77620_SUSPEND_FPS_POWER_ON_SLOTS,
2822 +- MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS,
2823 +-};
2824 ++#define MAX77620_ACTIVE_FPS_SOURCE (PIN_CONFIG_END + 1)
2825 ++#define MAX77620_ACTIVE_FPS_POWER_ON_SLOTS (PIN_CONFIG_END + 2)
2826 ++#define MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS (PIN_CONFIG_END + 3)
2827 ++#define MAX77620_SUSPEND_FPS_SOURCE (PIN_CONFIG_END + 4)
2828 ++#define MAX77620_SUSPEND_FPS_POWER_ON_SLOTS (PIN_CONFIG_END + 5)
2829 ++#define MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS (PIN_CONFIG_END + 6)
2830 +
2831 + struct max77620_pin_function {
2832 + const char *name;
2833 +diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c
2834 +index 7aae52a09ff03..4ffd56ff809eb 100644
2835 +--- a/drivers/pinctrl/qcom/pinctrl-qcs404.c
2836 ++++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c
2837 +@@ -79,7 +79,7 @@ enum {
2838 + .intr_cfg_reg = 0, \
2839 + .intr_status_reg = 0, \
2840 + .intr_target_reg = 0, \
2841 +- .tile = NORTH, \
2842 ++ .tile = SOUTH, \
2843 + .mux_bit = -1, \
2844 + .pull_bit = pull, \
2845 + .drv_bit = drv, \
2846 +diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
2847 +index bf07735275a49..0fc382cb977bf 100644
2848 +--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
2849 ++++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
2850 +@@ -1144,7 +1144,7 @@ static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm,
2851 + }
2852 +
2853 + static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
2854 +- unsigned int tid, int pg_idx, bool reply)
2855 ++ unsigned int tid, int pg_idx)
2856 + {
2857 + struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
2858 + GFP_KERNEL);
2859 +@@ -1160,7 +1160,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
2860 + req = (struct cpl_set_tcb_field *)skb->head;
2861 + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
2862 + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
2863 +- req->reply = V_NO_REPLY(reply ? 0 : 1);
2864 ++ req->reply = V_NO_REPLY(1);
2865 + req->cpu_idx = 0;
2866 + req->word = htons(31);
2867 + req->mask = cpu_to_be64(0xF0000000);
2868 +@@ -1177,11 +1177,10 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
2869 + * @tid: connection id
2870 + * @hcrc: header digest enabled
2871 + * @dcrc: data digest enabled
2872 +- * @reply: request reply from h/w
2873 + * set up the iscsi digest settings for a connection identified by tid
2874 + */
2875 + static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2876 +- int hcrc, int dcrc, int reply)
2877 ++ int hcrc, int dcrc)
2878 + {
2879 + struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
2880 + GFP_KERNEL);
2881 +@@ -1197,7 +1196,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2882 + req = (struct cpl_set_tcb_field *)skb->head;
2883 + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
2884 + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
2885 +- req->reply = V_NO_REPLY(reply ? 0 : 1);
2886 ++ req->reply = V_NO_REPLY(1);
2887 + req->cpu_idx = 0;
2888 + req->word = htons(31);
2889 + req->mask = cpu_to_be64(0x0F000000);
2890 +diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
2891 +index 064ef57351828..bd6cc014cab04 100644
2892 +--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
2893 ++++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
2894 +@@ -1548,16 +1548,22 @@ static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
2895 + struct cxgbi_sock *csk;
2896 +
2897 + csk = lookup_tid(t, tid);
2898 +- if (!csk)
2899 ++ if (!csk) {
2900 + pr_err("can't find conn. for tid %u.\n", tid);
2901 ++ return;
2902 ++ }
2903 +
2904 + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
2905 + "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
2906 + csk, csk->state, csk->flags, csk->tid, rpl->status);
2907 +
2908 +- if (rpl->status != CPL_ERR_NONE)
2909 ++ if (rpl->status != CPL_ERR_NONE) {
2910 + pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
2911 + csk, tid, rpl->status);
2912 ++ csk->err = -EINVAL;
2913 ++ }
2914 ++
2915 ++ complete(&csk->cmpl);
2916 +
2917 + __kfree_skb(skb);
2918 + }
2919 +@@ -1984,7 +1990,7 @@ static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
2920 + }
2921 +
2922 + static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
2923 +- int pg_idx, bool reply)
2924 ++ int pg_idx)
2925 + {
2926 + struct sk_buff *skb;
2927 + struct cpl_set_tcb_field *req;
2928 +@@ -2000,7 +2006,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
2929 + req = (struct cpl_set_tcb_field *)skb->head;
2930 + INIT_TP_WR(req, csk->tid);
2931 + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
2932 +- req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
2933 ++ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
2934 + req->word_cookie = htons(0);
2935 + req->mask = cpu_to_be64(0x3 << 8);
2936 + req->val = cpu_to_be64(pg_idx << 8);
2937 +@@ -2009,12 +2015,15 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
2938 + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
2939 + "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
2940 +
2941 ++ reinit_completion(&csk->cmpl);
2942 + cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2943 +- return 0;
2944 ++ wait_for_completion(&csk->cmpl);
2945 ++
2946 ++ return csk->err;
2947 + }
2948 +
2949 + static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2950 +- int hcrc, int dcrc, int reply)
2951 ++ int hcrc, int dcrc)
2952 + {
2953 + struct sk_buff *skb;
2954 + struct cpl_set_tcb_field *req;
2955 +@@ -2032,7 +2041,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2956 + req = (struct cpl_set_tcb_field *)skb->head;
2957 + INIT_TP_WR(req, tid);
2958 + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
2959 +- req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
2960 ++ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
2961 + req->word_cookie = htons(0);
2962 + req->mask = cpu_to_be64(0x3 << 4);
2963 + req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
2964 +@@ -2042,8 +2051,11 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2965 + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
2966 + "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
2967 +
2968 ++ reinit_completion(&csk->cmpl);
2969 + cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2970 +- return 0;
2971 ++ wait_for_completion(&csk->cmpl);
2972 ++
2973 ++ return csk->err;
2974 + }
2975 +
2976 + static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
2977 +diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
2978 +index 75f876409fb9d..245742557c036 100644
2979 +--- a/drivers/scsi/cxgbi/libcxgbi.c
2980 ++++ b/drivers/scsi/cxgbi/libcxgbi.c
2981 +@@ -573,6 +573,7 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
2982 + skb_queue_head_init(&csk->receive_queue);
2983 + skb_queue_head_init(&csk->write_queue);
2984 + timer_setup(&csk->retry_timer, NULL, 0);
2985 ++ init_completion(&csk->cmpl);
2986 + rwlock_init(&csk->callback_lock);
2987 + csk->cdev = cdev;
2988 + csk->flags = 0;
2989 +@@ -2251,14 +2252,14 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
2990 + if (!err && conn->hdrdgst_en)
2991 + err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2992 + conn->hdrdgst_en,
2993 +- conn->datadgst_en, 0);
2994 ++ conn->datadgst_en);
2995 + break;
2996 + case ISCSI_PARAM_DATADGST_EN:
2997 + err = iscsi_set_param(cls_conn, param, buf, buflen);
2998 + if (!err && conn->datadgst_en)
2999 + err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
3000 + conn->hdrdgst_en,
3001 +- conn->datadgst_en, 0);
3002 ++ conn->datadgst_en);
3003 + break;
3004 + case ISCSI_PARAM_MAX_R2T:
3005 + return iscsi_tcp_set_max_r2t(conn, buf);
3006 +@@ -2384,7 +2385,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
3007 +
3008 + ppm = csk->cdev->cdev2ppm(csk->cdev);
3009 + err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
3010 +- ppm->tformat.pgsz_idx_dflt, 0);
3011 ++ ppm->tformat.pgsz_idx_dflt);
3012 + if (err < 0)
3013 + return err;
3014 +
3015 +diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
3016 +index 5d5d8b50d8426..1917ff57651d7 100644
3017 +--- a/drivers/scsi/cxgbi/libcxgbi.h
3018 ++++ b/drivers/scsi/cxgbi/libcxgbi.h
3019 +@@ -149,6 +149,7 @@ struct cxgbi_sock {
3020 + struct sk_buff_head receive_queue;
3021 + struct sk_buff_head write_queue;
3022 + struct timer_list retry_timer;
3023 ++ struct completion cmpl;
3024 + int err;
3025 + rwlock_t callback_lock;
3026 + void *user_data;
3027 +@@ -490,9 +491,9 @@ struct cxgbi_device {
3028 + struct cxgbi_ppm *,
3029 + struct cxgbi_task_tag_info *);
3030 + int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
3031 +- unsigned int, int, int, int);
3032 ++ unsigned int, int, int);
3033 + int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
3034 +- unsigned int, int, bool);
3035 ++ unsigned int, int);
3036 +
3037 + void (*csk_release_offload_resources)(struct cxgbi_sock *);
3038 + int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
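
The cxgbi hunks above turn csk_ddp_setup_pgidx() and csk_ddp_setup_digest() from fire-and-forget sends into synchronous operations: the submitter arms csk->cmpl with reinit_completion(), do_set_tcb_rpl() records csk->err and calls complete(), and the submitter blocks in wait_for_completion() before returning csk->err. A minimal sketch of that pattern follows; demo_send_request() is an illustrative stand-in for the adapter-specific send, not a function from the patch.

#include <linux/completion.h>
#include <linux/errno.h>

struct demo_sock {
	struct completion cmpl;	/* re-armed for every request */
	int err;		/* filled in by the reply handler */
};

void demo_send_request(struct demo_sock *csk);	/* hypothetical send */

/* Reply path: record the status, then wake the waiting submitter. */
static void demo_on_reply(struct demo_sock *csk, int status)
{
	csk->err = status ? -EINVAL : 0;
	complete(&csk->cmpl);
}

/* Submit path: arm the completion before sending so the reply cannot
 * be lost, then block until demo_on_reply() has run. */
static int demo_send_and_wait(struct demo_sock *csk)
{
	reinit_completion(&csk->cmpl);
	demo_send_request(csk);
	wait_for_completion(&csk->cmpl);
	return csk->err;
}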
3039 +diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
3040 +index 08c7b1e25fe48..dde84f7443136 100644
3041 +--- a/drivers/scsi/isci/init.c
3042 ++++ b/drivers/scsi/isci/init.c
3043 +@@ -588,6 +588,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
3044 + shost->max_lun = ~0;
3045 + shost->max_cmd_len = MAX_COMMAND_SIZE;
3046 +
3047 ++ /* turn on DIF support */
3048 ++ scsi_host_set_prot(shost,
3049 ++ SHOST_DIF_TYPE1_PROTECTION |
3050 ++ SHOST_DIF_TYPE2_PROTECTION |
3051 ++ SHOST_DIF_TYPE3_PROTECTION);
3052 ++ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
3053 ++
3054 + err = scsi_add_host(shost, &pdev->dev);
3055 + if (err)
3056 + goto err_shost;
3057 +@@ -675,13 +682,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3058 + goto err_host_alloc;
3059 + }
3060 + pci_info->hosts[i] = h;
3061 +-
3062 +- /* turn on DIF support */
3063 +- scsi_host_set_prot(to_shost(h),
3064 +- SHOST_DIF_TYPE1_PROTECTION |
3065 +- SHOST_DIF_TYPE2_PROTECTION |
3066 +- SHOST_DIF_TYPE3_PROTECTION);
3067 +- scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
3068 + }
3069 +
3070 + err = isci_setup_interrupts(pdev);
3071 +diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
3072 +index 2f0a4f2c5ff80..d4821b9dea45d 100644
3073 +--- a/drivers/scsi/qedi/qedi_iscsi.c
3074 ++++ b/drivers/scsi/qedi/qedi_iscsi.c
3075 +@@ -954,6 +954,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
3076 +
3077 + qedi_ep = ep->dd_data;
3078 + if (qedi_ep->state == EP_STATE_IDLE ||
3079 ++ qedi_ep->state == EP_STATE_OFLDCONN_NONE ||
3080 + qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
3081 + return -1;
3082 +
3083 +@@ -1036,6 +1037,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
3084 +
3085 + switch (qedi_ep->state) {
3086 + case EP_STATE_OFLDCONN_START:
3087 ++ case EP_STATE_OFLDCONN_NONE:
3088 + goto ep_release_conn;
3089 + case EP_STATE_OFLDCONN_FAILED:
3090 + break;
3091 +@@ -1226,6 +1228,7 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
3092 +
3093 + if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
3094 + QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
3095 ++ qedi_ep->state = EP_STATE_OFLDCONN_NONE;
3096 + ret = -EIO;
3097 + goto set_path_exit;
3098 + }
3099 +diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
3100 +index 11260776212fa..892d70d545537 100644
3101 +--- a/drivers/scsi/qedi/qedi_iscsi.h
3102 ++++ b/drivers/scsi/qedi/qedi_iscsi.h
3103 +@@ -59,6 +59,7 @@ enum {
3104 + EP_STATE_OFLDCONN_FAILED = 0x2000,
3105 + EP_STATE_CONNECT_FAILED = 0x4000,
3106 + EP_STATE_DISCONN_TIMEDOUT = 0x8000,
3107 ++ EP_STATE_OFLDCONN_NONE = 0x10000,
3108 + };
3109 +
3110 + struct qedi_conn;
3111 +diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
3112 +index 15a50cc7e4b36..c8589926014d4 100644
3113 +--- a/drivers/scsi/qla1280.c
3114 ++++ b/drivers/scsi/qla1280.c
3115 +@@ -4259,7 +4259,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3116 + ha->devnum = devnum; /* specifies microcode load address */
3117 +
3118 + #ifdef QLA_64BIT_PTR
3119 +- if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
3120 ++ if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
3121 + if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
3122 + printk(KERN_WARNING "scsi(%li): Unable to set a "
3123 + "suitable DMA mask - aborting\n", ha->host_no);
3124 +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
3125 +index 051164f755a4c..a13396c56a6a1 100644
3126 +--- a/drivers/scsi/qla4xxx/ql4_os.c
3127 ++++ b/drivers/scsi/qla4xxx/ql4_os.c
3128 +@@ -7237,6 +7237,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
3129 +
3130 + rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
3131 + fw_ddb_entry);
3132 ++ if (rc)
3133 ++ goto free_sess;
3134 +
3135 + ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
3136 + __func__, fnode_sess->dev.kobj.name);
3137 +diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
3138 +index 3f81bab48ac24..7d8442c377dfa 100644
3139 +--- a/drivers/scsi/sd_zbc.c
3140 ++++ b/drivers/scsi/sd_zbc.c
3141 +@@ -142,10 +142,12 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
3142 + return -EOPNOTSUPP;
3143 +
3144 + /*
3145 +- * Get a reply buffer for the number of requested zones plus a header.
3146 +- * For ATA, buffers must be aligned to 512B.
3147 ++ * Get a reply buffer for the number of requested zones plus a header,
3148 ++ * without exceeding the device maximum command size. For ATA disks,
3149 ++ * buffers must be aligned to 512B.
3150 + */
3151 +- buflen = roundup((nrz + 1) * 64, 512);
3152 ++ buflen = min(queue_max_hw_sectors(disk->queue) << 9,
3153 ++ roundup((nrz + 1) * 64, 512));
3154 + buf = kmalloc(buflen, gfp_mask);
3155 + if (!buf)
3156 + return -ENOMEM;
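
The sd_zbc fix above caps the report-zones reply buffer at the device's maximum command size instead of letting a large zone count request more than the HBA can transfer in one command. A sketch of the sizing rule, under the same 64-bytes-per-descriptor and 512B ATA alignment assumptions stated in the patch comment:

#include <linux/blkdev.h>
#include <linux/slab.h>

/* One 64B header plus 64B per zone, rounded up to 512B for ATA, but
 * never more than the queue's max_hw_sectors worth of bytes. */
static void *demo_alloc_report_buf(struct request_queue *q,
				   unsigned int nrz, size_t *buflen)
{
	*buflen = min_t(size_t, queue_max_hw_sectors(q) << 9,
			roundup((nrz + 1) * 64, 512));
	return kmalloc(*buflen, GFP_KERNEL);
}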
3157 +diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
3158 +index 58087d3916d05..5417ce09b1054 100644
3159 +--- a/drivers/scsi/ufs/ufs.h
3160 ++++ b/drivers/scsi/ufs/ufs.h
3161 +@@ -195,7 +195,7 @@ enum ufs_desc_def_size {
3162 + QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
3163 + QUERY_DESC_UNIT_DEF_SIZE = 0x23,
3164 + QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
3165 +- QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44,
3166 ++ QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48,
3167 + QUERY_DESC_POWER_DEF_SIZE = 0x62,
3168 + QUERY_DESC_HEALTH_DEF_SIZE = 0x25,
3169 + };
3170 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
3171 +index 1cb35ab8a4ec2..2772ff4357fc4 100644
3172 +--- a/drivers/scsi/ufs/ufshcd.c
3173 ++++ b/drivers/scsi/ufs/ufshcd.c
3174 +@@ -7924,6 +7924,8 @@ out:
3175 + trace_ufshcd_system_resume(dev_name(hba->dev), ret,
3176 + ktime_to_us(ktime_sub(ktime_get(), start)),
3177 + hba->curr_dev_pwr_mode, hba->uic_link_state);
3178 ++ if (!ret)
3179 ++ hba->is_sys_suspended = false;
3180 + return ret;
3181 + }
3182 + EXPORT_SYMBOL(ufshcd_system_resume);
3183 +diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
3184 +index f78c34647ca2d..76480df195a87 100644
3185 +--- a/drivers/soc/fsl/qe/qe_tdm.c
3186 ++++ b/drivers/soc/fsl/qe/qe_tdm.c
3187 +@@ -44,10 +44,6 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
3188 + const char *sprop;
3189 + int ret = 0;
3190 + u32 val;
3191 +- struct resource *res;
3192 +- struct device_node *np2;
3193 +- static int siram_init_flag;
3194 +- struct platform_device *pdev;
3195 +
3196 + sprop = of_get_property(np, "fsl,rx-sync-clock", NULL);
3197 + if (sprop) {
3198 +@@ -124,57 +120,6 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
3199 + utdm->siram_entry_id = val;
3200 +
3201 + set_si_param(utdm, ut_info);
3202 +-
3203 +- np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-si");
3204 +- if (!np2)
3205 +- return -EINVAL;
3206 +-
3207 +- pdev = of_find_device_by_node(np2);
3208 +- if (!pdev) {
3209 +- pr_err("%pOFn: failed to lookup pdev\n", np2);
3210 +- of_node_put(np2);
3211 +- return -EINVAL;
3212 +- }
3213 +-
3214 +- of_node_put(np2);
3215 +- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3216 +- utdm->si_regs = devm_ioremap_resource(&pdev->dev, res);
3217 +- if (IS_ERR(utdm->si_regs)) {
3218 +- ret = PTR_ERR(utdm->si_regs);
3219 +- goto err_miss_siram_property;
3220 +- }
3221 +-
3222 +- np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-siram");
3223 +- if (!np2) {
3224 +- ret = -EINVAL;
3225 +- goto err_miss_siram_property;
3226 +- }
3227 +-
3228 +- pdev = of_find_device_by_node(np2);
3229 +- if (!pdev) {
3230 +- ret = -EINVAL;
3231 +- pr_err("%pOFn: failed to lookup pdev\n", np2);
3232 +- of_node_put(np2);
3233 +- goto err_miss_siram_property;
3234 +- }
3235 +-
3236 +- of_node_put(np2);
3237 +- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3238 +- utdm->siram = devm_ioremap_resource(&pdev->dev, res);
3239 +- if (IS_ERR(utdm->siram)) {
3240 +- ret = PTR_ERR(utdm->siram);
3241 +- goto err_miss_siram_property;
3242 +- }
3243 +-
3244 +- if (siram_init_flag == 0) {
3245 +- memset_io(utdm->siram, 0, resource_size(res));
3246 +- siram_init_flag = 1;
3247 +- }
3248 +-
3249 +- return ret;
3250 +-
3251 +-err_miss_siram_property:
3252 +- devm_iounmap(&pdev->dev, utdm->si_regs);
3253 + return ret;
3254 + }
3255 + EXPORT_SYMBOL(ucc_of_parse_tdm);
3256 +diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
3257 +index d1cb0d78ab844..e44ca93dcdc68 100644
3258 +--- a/drivers/staging/erofs/dir.c
3259 ++++ b/drivers/staging/erofs/dir.c
3260 +@@ -53,8 +53,11 @@ static int erofs_fill_dentries(struct dir_context *ctx,
3261 + strnlen(de_name, maxsize - nameoff) :
3262 + le16_to_cpu(de[1].nameoff) - nameoff;
3263 +
3264 +- /* the corrupted directory found */
3265 +- BUG_ON(de_namelen < 0);
3266 ++ /* a corrupted entry is found */
3267 ++ if (unlikely(de_namelen < 0)) {
3268 ++ DBG_BUGON(1);
3269 ++ return -EIO;
3270 ++ }
3271 +
3272 + #ifdef CONFIG_EROFS_FS_DEBUG
3273 + dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
3274 +diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
3275 +index 04c61a9d7b766..d7fbf5f4600f3 100644
3276 +--- a/drivers/staging/erofs/inode.c
3277 ++++ b/drivers/staging/erofs/inode.c
3278 +@@ -133,7 +133,13 @@ static int fill_inline_data(struct inode *inode, void *data,
3279 + return -ENOMEM;
3280 +
3281 + m_pofs += vi->inode_isize + vi->xattr_isize;
3282 +- BUG_ON(m_pofs + inode->i_size > PAGE_SIZE);
3283 ++
3284 ++	/* inline symlink data shouldn't cross a page boundary either */
3285 ++ if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) {
3286 ++ DBG_BUGON(1);
3287 ++ kfree(lnk);
3288 ++ return -EIO;
3289 ++ }
3290 +
3291 + /* get in-page inline data */
3292 + memcpy(lnk, data + m_pofs, inode->i_size);
3293 +@@ -171,7 +177,7 @@ static int fill_inode(struct inode *inode, int isdir)
3294 + return PTR_ERR(page);
3295 + }
3296 +
3297 +- BUG_ON(!PageUptodate(page));
3298 ++ DBG_BUGON(!PageUptodate(page));
3299 + data = page_address(page);
3300 +
3301 + err = read_inode(inode, data + ofs);
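
The erofs changes in this series repeatedly swap BUG_ON() on untrusted on-disk data for a debug-only assertion plus a recoverable error, so a corrupted image can no longer panic a production kernel. The shape of that conversion, assuming erofs' DBG_BUGON() (a BUG_ON compiled out of non-debug builds) is in scope:

#include <linux/errno.h>

/* Corruption is reported, not fatal: assert loudly in debug builds,
 * return -EIO to the caller in production. */
static int demo_validate_namelen(int de_namelen)
{
	if (unlikely(de_namelen < 0)) {
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}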
3302 +diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
3303 +index 3ac4599bbe011..8929443558676 100644
3304 +--- a/drivers/staging/erofs/internal.h
3305 ++++ b/drivers/staging/erofs/internal.h
3306 +@@ -194,50 +194,70 @@ struct erofs_workgroup {
3307 +
3308 + #define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
3309 +
3310 +-static inline bool erofs_workgroup_try_to_freeze(
3311 +- struct erofs_workgroup *grp, int v)
3312 ++#if defined(CONFIG_SMP)
3313 ++static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
3314 ++ int val)
3315 + {
3316 +-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
3317 +- if (v != atomic_cmpxchg(&grp->refcount,
3318 +- v, EROFS_LOCKED_MAGIC))
3319 +- return false;
3320 + preempt_disable();
3321 ++ if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
3322 ++ preempt_enable();
3323 ++ return false;
3324 ++ }
3325 ++ return true;
3326 ++}
3327 ++
3328 ++static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
3329 ++ int orig_val)
3330 ++{
3331 ++ /*
3332 ++ * other observers should notice all modifications
3333 ++ * in the freezing period.
3334 ++ */
3335 ++ smp_mb();
3336 ++ atomic_set(&grp->refcount, orig_val);
3337 ++ preempt_enable();
3338 ++}
3339 ++
3340 ++static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
3341 ++{
3342 ++ return atomic_cond_read_relaxed(&grp->refcount,
3343 ++ VAL != EROFS_LOCKED_MAGIC);
3344 ++}
3345 + #else
3346 ++static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
3347 ++ int val)
3348 ++{
3349 + preempt_disable();
3350 +- if (atomic_read(&grp->refcount) != v) {
3351 ++ /* no need to spin on UP platforms, let's just disable preemption. */
3352 ++ if (val != atomic_read(&grp->refcount)) {
3353 + preempt_enable();
3354 + return false;
3355 + }
3356 +-#endif
3357 + return true;
3358 + }
3359 +
3360 +-static inline void erofs_workgroup_unfreeze(
3361 +- struct erofs_workgroup *grp, int v)
3362 ++static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
3363 ++ int orig_val)
3364 + {
3365 +-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
3366 +- atomic_set(&grp->refcount, v);
3367 +-#endif
3368 + preempt_enable();
3369 + }
3370 +
3371 ++static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
3372 ++{
3373 ++ int v = atomic_read(&grp->refcount);
3374 ++
3375 ++	/* workgroup is never frozen on uniprocessor systems */
3376 ++ DBG_BUGON(v == EROFS_LOCKED_MAGIC);
3377 ++ return v;
3378 ++}
3379 ++#endif
3380 ++
3381 + static inline bool erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt)
3382 + {
3383 +- const int locked = (int)EROFS_LOCKED_MAGIC;
3384 + int o;
3385 +
3386 + repeat:
3387 +- o = atomic_read(&grp->refcount);
3388 +-
3389 +- /* spin if it is temporarily locked at the reclaim path */
3390 +- if (unlikely(o == locked)) {
3391 +-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
3392 +- do
3393 +- cpu_relax();
3394 +- while (atomic_read(&grp->refcount) == locked);
3395 +-#endif
3396 +- goto repeat;
3397 +- }
3398 ++ o = erofs_wait_on_workgroup_freezed(grp);
3399 +
3400 + if (unlikely(o <= 0))
3401 + return -1;
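
The reworked internal.h above splits the freeze helpers into SMP and UP variants. On SMP the refcount doubles as a lock word: a cmpxchg parks it at EROFS_LOCKED_MAGIC while the owner works, and waiters spin in atomic_cond_read_relaxed() until it is restored. A condensed sketch of the SMP half, using illustrative names:

#include <linux/atomic.h>
#include <linux/preempt.h>

static inline bool demo_try_to_freeze(atomic_t *ref, int val, int magic)
{
	preempt_disable();	/* keep the freeze window short */
	if (atomic_cmpxchg(ref, val, magic) != val) {
		preempt_enable();
		return false;
	}
	return true;
}

static inline void demo_unfreeze(atomic_t *ref, int orig_val)
{
	smp_mb();	/* publish every store made while frozen */
	atomic_set(ref, orig_val);
	preempt_enable();
}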
3402 +diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
3403 +index f69e619807a17..1c2eb69682efc 100644
3404 +--- a/drivers/staging/erofs/super.c
3405 ++++ b/drivers/staging/erofs/super.c
3406 +@@ -40,7 +40,6 @@ static int __init erofs_init_inode_cache(void)
3407 +
3408 + static void erofs_exit_inode_cache(void)
3409 + {
3410 +- BUG_ON(erofs_inode_cachep == NULL);
3411 + kmem_cache_destroy(erofs_inode_cachep);
3412 + }
3413 +
3414 +@@ -303,8 +302,8 @@ static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
3415 + int ret = 1; /* 0 - busy */
3416 + struct address_space *const mapping = page->mapping;
3417 +
3418 +- BUG_ON(!PageLocked(page));
3419 +- BUG_ON(mapping->a_ops != &managed_cache_aops);
3420 ++ DBG_BUGON(!PageLocked(page));
3421 ++ DBG_BUGON(mapping->a_ops != &managed_cache_aops);
3422 +
3423 + if (PagePrivate(page))
3424 + ret = erofs_try_to_free_cached_page(mapping, page);
3425 +@@ -317,10 +316,10 @@ static void managed_cache_invalidatepage(struct page *page,
3426 + {
3427 + const unsigned int stop = length + offset;
3428 +
3429 +- BUG_ON(!PageLocked(page));
3430 ++ DBG_BUGON(!PageLocked(page));
3431 +
3432 +- /* Check for overflow */
3433 +- BUG_ON(stop > PAGE_SIZE || stop < length);
3434 ++ /* Check for potential overflow in debug mode */
3435 ++ DBG_BUGON(stop > PAGE_SIZE || stop < length);
3436 +
3437 + if (offset == 0 && stop == PAGE_SIZE)
3438 + while (!managed_cache_releasepage(page, GFP_NOFS))
3439 +@@ -442,12 +441,6 @@ static int erofs_read_super(struct super_block *sb,
3440 +
3441 + erofs_register_super(sb);
3442 +
3443 +- /*
3444 +- * We already have a positive dentry, which was instantiated
3445 +- * by d_make_root. Just need to d_rehash it.
3446 +- */
3447 +- d_rehash(sb->s_root);
3448 +-
3449 + if (!silent)
3450 + infoln("mounted on %s with opts: %s.", dev_name,
3451 + (char *)data);
3452 +@@ -655,7 +648,7 @@ static int erofs_remount(struct super_block *sb, int *flags, char *data)
3453 + unsigned int org_inject_rate = erofs_get_fault_rate(sbi);
3454 + int err;
3455 +
3456 +- BUG_ON(!sb_rdonly(sb));
3457 ++ DBG_BUGON(!sb_rdonly(sb));
3458 + err = parse_options(sb, data);
3459 + if (err)
3460 + goto out;
3461 +diff --git a/drivers/staging/erofs/unzip_pagevec.h b/drivers/staging/erofs/unzip_pagevec.h
3462 +index 0956615b86f72..23856ba2742d8 100644
3463 +--- a/drivers/staging/erofs/unzip_pagevec.h
3464 ++++ b/drivers/staging/erofs/unzip_pagevec.h
3465 +@@ -150,7 +150,7 @@ z_erofs_pagevec_ctor_dequeue(struct z_erofs_pagevec_ctor *ctor,
3466 + erofs_vtptr_t t;
3467 +
3468 + if (unlikely(ctor->index >= ctor->nr)) {
3469 +- BUG_ON(ctor->next == NULL);
3470 ++ DBG_BUGON(!ctor->next);
3471 + z_erofs_pagevec_ctor_pagedown(ctor, true);
3472 + }
3473 +
3474 +diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
3475 +index 45e88bada907f..1c4b3e0343f58 100644
3476 +--- a/drivers/staging/erofs/unzip_vle.c
3477 ++++ b/drivers/staging/erofs/unzip_vle.c
3478 +@@ -20,9 +20,6 @@ static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
3479 +
3480 + void z_erofs_exit_zip_subsystem(void)
3481 + {
3482 +- BUG_ON(z_erofs_workqueue == NULL);
3483 +- BUG_ON(z_erofs_workgroup_cachep == NULL);
3484 +-
3485 + destroy_workqueue(z_erofs_workqueue);
3486 + kmem_cache_destroy(z_erofs_workgroup_cachep);
3487 + }
3488 +@@ -366,7 +363,10 @@ z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
3489 + struct z_erofs_vle_work *work;
3490 +
3491 + /* if multiref is disabled, grp should never be nullptr */
3492 +- BUG_ON(grp != NULL);
3493 ++ if (unlikely(grp)) {
3494 ++ DBG_BUGON(1);
3495 ++ return ERR_PTR(-EINVAL);
3496 ++ }
3497 +
3498 + /* no available workgroup, let's allocate one */
3499 + grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
3500 +@@ -745,7 +745,7 @@ static inline void z_erofs_vle_read_endio(struct bio *bio)
3501 + bool cachemngd = false;
3502 +
3503 + DBG_BUGON(PageUptodate(page));
3504 +- BUG_ON(page->mapping == NULL);
3505 ++ DBG_BUGON(!page->mapping);
3506 +
3507 + #ifdef EROFS_FS_HAS_MANAGED_CACHE
3508 + if (unlikely(mngda == NULL && !z_erofs_is_stagingpage(page))) {
3509 +@@ -803,7 +803,7 @@ static int z_erofs_vle_unzip(struct super_block *sb,
3510 +
3511 + might_sleep();
3512 + work = z_erofs_vle_grab_primary_work(grp);
3513 +- BUG_ON(!READ_ONCE(work->nr_pages));
3514 ++ DBG_BUGON(!READ_ONCE(work->nr_pages));
3515 +
3516 + mutex_lock(&work->lock);
3517 + nr_pages = work->nr_pages;
3518 +@@ -852,8 +852,8 @@ repeat:
3519 + else
3520 + pagenr = z_erofs_onlinepage_index(page);
3521 +
3522 +- BUG_ON(pagenr >= nr_pages);
3523 +- BUG_ON(pages[pagenr] != NULL);
3524 ++ DBG_BUGON(pagenr >= nr_pages);
3525 ++ DBG_BUGON(pages[pagenr]);
3526 +
3527 + pages[pagenr] = page;
3528 + }
3529 +@@ -876,9 +876,8 @@ repeat:
3530 + if (z_erofs_is_stagingpage(page))
3531 + continue;
3532 + #ifdef EROFS_FS_HAS_MANAGED_CACHE
3533 +- else if (page->mapping == mngda) {
3534 +- BUG_ON(PageLocked(page));
3535 +- BUG_ON(!PageUptodate(page));
3536 ++ if (page->mapping == mngda) {
3537 ++ DBG_BUGON(!PageUptodate(page));
3538 + continue;
3539 + }
3540 + #endif
3541 +@@ -886,8 +885,8 @@ repeat:
3542 + /* only non-head page could be reused as a compressed page */
3543 + pagenr = z_erofs_onlinepage_index(page);
3544 +
3545 +- BUG_ON(pagenr >= nr_pages);
3546 +- BUG_ON(pages[pagenr] != NULL);
3547 ++ DBG_BUGON(pagenr >= nr_pages);
3548 ++ DBG_BUGON(pages[pagenr]);
3549 + ++sparsemem_pages;
3550 + pages[pagenr] = page;
3551 +
3552 +@@ -897,9 +896,6 @@ repeat:
3553 + llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
3554 +
3555 + if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
3556 +- /* FIXME! this should be fixed in the future */
3557 +- BUG_ON(grp->llen != llen);
3558 +-
3559 + err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
3560 + pages, nr_pages, work->pageofs);
3561 + goto out;
3562 +@@ -914,10 +910,8 @@ repeat:
3563 + if (err != -ENOTSUPP)
3564 + goto out_percpu;
3565 +
3566 +- if (sparsemem_pages >= nr_pages) {
3567 +- BUG_ON(sparsemem_pages > nr_pages);
3568 ++ if (sparsemem_pages >= nr_pages)
3569 + goto skip_allocpage;
3570 +- }
3571 +
3572 + for (i = 0; i < nr_pages; ++i) {
3573 + if (pages[i] != NULL)
3574 +@@ -1010,7 +1004,7 @@ static void z_erofs_vle_unzip_wq(struct work_struct *work)
3575 + struct z_erofs_vle_unzip_io_sb, io.u.work);
3576 + LIST_HEAD(page_pool);
3577 +
3578 +- BUG_ON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
3579 ++ DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
3580 + z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
3581 +
3582 + put_pages_list(&page_pool);
3583 +@@ -1344,7 +1338,6 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
3584 + continue;
3585 + }
3586 +
3587 +- BUG_ON(PagePrivate(page));
3588 + set_page_private(page, (unsigned long)head);
3589 + head = page;
3590 + }
3591 +diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
3592 +index 1a428658cbea2..16ac335ee59f4 100644
3593 +--- a/drivers/staging/erofs/unzip_vle_lz4.c
3594 ++++ b/drivers/staging/erofs/unzip_vle_lz4.c
3595 +@@ -57,7 +57,7 @@ int z_erofs_vle_plain_copy(struct page **compressed_pages,
3596 + if (compressed_pages[j] != page)
3597 + continue;
3598 +
3599 +- BUG_ON(mirrored[j]);
3600 ++ DBG_BUGON(mirrored[j]);
3601 + memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
3602 + mirrored[j] = true;
3603 + break;
3604 +diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
3605 +index d2e3ace910469..b535898ca753f 100644
3606 +--- a/drivers/staging/erofs/utils.c
3607 ++++ b/drivers/staging/erofs/utils.c
3608 +@@ -23,9 +23,6 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
3609 + list_del(&page->lru);
3610 + } else {
3611 + page = alloc_pages(gfp | __GFP_NOFAIL, 0);
3612 +-
3613 +- BUG_ON(page == NULL);
3614 +- BUG_ON(page->mapping != NULL);
3615 + }
3616 + return page;
3617 + }
3618 +@@ -58,7 +55,7 @@ repeat:
3619 + /* decrease refcount added by erofs_workgroup_put */
3620 + if (unlikely(oldcount == 1))
3621 + atomic_long_dec(&erofs_global_shrink_cnt);
3622 +- BUG_ON(index != grp->index);
3623 ++ DBG_BUGON(index != grp->index);
3624 + }
3625 + rcu_read_unlock();
3626 + return grp;
3627 +@@ -71,8 +68,11 @@ int erofs_register_workgroup(struct super_block *sb,
3628 + struct erofs_sb_info *sbi;
3629 + int err;
3630 +
3631 +- /* grp->refcount should not < 1 */
3632 +- BUG_ON(!atomic_read(&grp->refcount));
3633 ++	/* grp shouldn't already be in use or have a broken refcount */
3634 ++ if (unlikely(atomic_read(&grp->refcount) != 1)) {
3635 ++ DBG_BUGON(1);
3636 ++ return -EINVAL;
3637 ++ }
3638 +
3639 + err = radix_tree_preload(GFP_NOFS);
3640 + if (err)
3641 +diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
3642 +index 9cd404acdb82b..ac7620120491b 100644
3643 +--- a/drivers/target/target_core_user.c
3644 ++++ b/drivers/target/target_core_user.c
3645 +@@ -148,7 +148,7 @@ struct tcmu_dev {
3646 + size_t ring_size;
3647 +
3648 + struct mutex cmdr_lock;
3649 +- struct list_head cmdr_queue;
3650 ++ struct list_head qfull_queue;
3651 +
3652 + uint32_t dbi_max;
3653 + uint32_t dbi_thresh;
3654 +@@ -159,6 +159,7 @@ struct tcmu_dev {
3655 +
3656 + struct timer_list cmd_timer;
3657 + unsigned int cmd_time_out;
3658 ++ struct list_head inflight_queue;
3659 +
3660 + struct timer_list qfull_timer;
3661 + int qfull_time_out;
3662 +@@ -179,7 +180,7 @@ struct tcmu_dev {
3663 + struct tcmu_cmd {
3664 + struct se_cmd *se_cmd;
3665 + struct tcmu_dev *tcmu_dev;
3666 +- struct list_head cmdr_queue_entry;
3667 ++ struct list_head queue_entry;
3668 +
3669 + uint16_t cmd_id;
3670 +
3671 +@@ -192,6 +193,7 @@ struct tcmu_cmd {
3672 + unsigned long deadline;
3673 +
3674 + #define TCMU_CMD_BIT_EXPIRED 0
3675 ++#define TCMU_CMD_BIT_INFLIGHT 1
3676 + unsigned long flags;
3677 + };
3678 + /*
3679 +@@ -586,7 +588,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
3680 + if (!tcmu_cmd)
3681 + return NULL;
3682 +
3683 +- INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry);
3684 ++ INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
3685 + tcmu_cmd->se_cmd = se_cmd;
3686 + tcmu_cmd->tcmu_dev = udev;
3687 +
3688 +@@ -915,11 +917,13 @@ setup_timer:
3689 + return 0;
3690 +
3691 + tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
3692 +- mod_timer(timer, tcmu_cmd->deadline);
3693 ++ if (!timer_pending(timer))
3694 ++ mod_timer(timer, tcmu_cmd->deadline);
3695 ++
3696 + return 0;
3697 + }
3698 +
3699 +-static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
3700 ++static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
3701 + {
3702 + struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
3703 + unsigned int tmo;
3704 +@@ -942,7 +946,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
3705 + if (ret)
3706 + return ret;
3707 +
3708 +- list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue);
3709 ++ list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
3710 + pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
3711 + tcmu_cmd->cmd_id, udev->name);
3712 + return 0;
3713 +@@ -999,7 +1003,7 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
3714 + base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
3715 + command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
3716 +
3717 +- if (!list_empty(&udev->cmdr_queue))
3718 ++ if (!list_empty(&udev->qfull_queue))
3719 + goto queue;
3720 +
3721 + mb = udev->mb_addr;
3722 +@@ -1096,13 +1100,16 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
3723 + UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
3724 + tcmu_flush_dcache_range(mb, sizeof(*mb));
3725 +
3726 ++ list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
3727 ++ set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags);
3728 ++
3729 + /* TODO: only if FLUSH and FUA? */
3730 + uio_event_notify(&udev->uio_info);
3731 +
3732 + return 0;
3733 +
3734 + queue:
3735 +- if (add_to_cmdr_queue(tcmu_cmd)) {
3736 ++ if (add_to_qfull_queue(tcmu_cmd)) {
3737 + *scsi_err = TCM_OUT_OF_RESOURCES;
3738 + return -1;
3739 + }
3740 +@@ -1145,6 +1152,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
3741 + if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
3742 + goto out;
3743 +
3744 ++ list_del_init(&cmd->queue_entry);
3745 ++
3746 + tcmu_cmd_reset_dbi_cur(cmd);
3747 +
3748 + if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
3749 +@@ -1194,9 +1203,29 @@ out:
3750 + tcmu_free_cmd(cmd);
3751 + }
3752 +
3753 ++static void tcmu_set_next_deadline(struct list_head *queue,
3754 ++ struct timer_list *timer)
3755 ++{
3756 ++ struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
3757 ++ unsigned long deadline = 0;
3758 ++
3759 ++ list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
3760 ++ if (!time_after(jiffies, tcmu_cmd->deadline)) {
3761 ++ deadline = tcmu_cmd->deadline;
3762 ++ break;
3763 ++ }
3764 ++ }
3765 ++
3766 ++ if (deadline)
3767 ++ mod_timer(timer, deadline);
3768 ++ else
3769 ++ del_timer(timer);
3770 ++}
3771 ++
3772 + static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
3773 + {
3774 + struct tcmu_mailbox *mb;
3775 ++ struct tcmu_cmd *cmd;
3776 + int handled = 0;
3777 +
3778 + if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
3779 +@@ -1210,7 +1239,6 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
3780 + while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
3781 +
3782 + struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
3783 +- struct tcmu_cmd *cmd;
3784 +
3785 + tcmu_flush_dcache_range(entry, sizeof(*entry));
3786 +
3787 +@@ -1243,7 +1271,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
3788 + /* no more pending commands */
3789 + del_timer(&udev->cmd_timer);
3790 +
3791 +- if (list_empty(&udev->cmdr_queue)) {
3792 ++ if (list_empty(&udev->qfull_queue)) {
3793 + /*
3794 + * no more pending or waiting commands so try to
3795 + * reclaim blocks if needed.
3796 +@@ -1252,6 +1280,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
3797 + tcmu_global_max_blocks)
3798 + schedule_delayed_work(&tcmu_unmap_work, 0);
3799 + }
3800 ++ } else if (udev->cmd_time_out) {
3801 ++ tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
3802 + }
3803 +
3804 + return handled;
3805 +@@ -1271,7 +1301,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
3806 + if (!time_after(jiffies, cmd->deadline))
3807 + return 0;
3808 +
3809 +- is_running = list_empty(&cmd->cmdr_queue_entry);
3810 ++ is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
3811 + se_cmd = cmd->se_cmd;
3812 +
3813 + if (is_running) {
3814 +@@ -1288,12 +1318,11 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
3815 + */
3816 + scsi_status = SAM_STAT_CHECK_CONDITION;
3817 + } else {
3818 +- list_del_init(&cmd->cmdr_queue_entry);
3819 +-
3820 + idr_remove(&udev->commands, id);
3821 + tcmu_free_cmd(cmd);
3822 + scsi_status = SAM_STAT_TASK_SET_FULL;
3823 + }
3824 ++ list_del_init(&cmd->queue_entry);
3825 +
3826 + pr_debug("Timing out cmd %u on dev %s that is %s.\n",
3827 + id, udev->name, is_running ? "inflight" : "queued");
3828 +@@ -1372,7 +1401,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
3829 +
3830 + INIT_LIST_HEAD(&udev->node);
3831 + INIT_LIST_HEAD(&udev->timedout_entry);
3832 +- INIT_LIST_HEAD(&udev->cmdr_queue);
3833 ++ INIT_LIST_HEAD(&udev->qfull_queue);
3834 ++ INIT_LIST_HEAD(&udev->inflight_queue);
3835 + idr_init(&udev->commands);
3836 +
3837 + timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
3838 +@@ -1383,7 +1413,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
3839 + return &udev->se_dev;
3840 + }
3841 +
3842 +-static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
3843 ++static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
3844 + {
3845 + struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
3846 + LIST_HEAD(cmds);
3847 +@@ -1391,15 +1421,15 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
3848 + sense_reason_t scsi_ret;
3849 + int ret;
3850 +
3851 +- if (list_empty(&udev->cmdr_queue))
3852 ++ if (list_empty(&udev->qfull_queue))
3853 + return true;
3854 +
3855 + pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
3856 +
3857 +- list_splice_init(&udev->cmdr_queue, &cmds);
3858 ++ list_splice_init(&udev->qfull_queue, &cmds);
3859 +
3860 +- list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) {
3861 +- list_del_init(&tcmu_cmd->cmdr_queue_entry);
3862 ++ list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
3863 ++ list_del_init(&tcmu_cmd->queue_entry);
3864 +
3865 + pr_debug("removing cmd %u on dev %s from queue\n",
3866 + tcmu_cmd->cmd_id, udev->name);
3867 +@@ -1437,14 +1467,13 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
3868 + * cmd was requeued, so just put all cmds back in
3869 + * the queue
3870 + */
3871 +- list_splice_tail(&cmds, &udev->cmdr_queue);
3872 ++ list_splice_tail(&cmds, &udev->qfull_queue);
3873 + drained = false;
3874 +- goto done;
3875 ++ break;
3876 + }
3877 + }
3878 +- if (list_empty(&udev->cmdr_queue))
3879 +- del_timer(&udev->qfull_timer);
3880 +-done:
3881 ++
3882 ++ tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
3883 + return drained;
3884 + }
3885 +
3886 +@@ -1454,7 +1483,7 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
3887 +
3888 + mutex_lock(&udev->cmdr_lock);
3889 + tcmu_handle_completions(udev);
3890 +- run_cmdr_queue(udev, false);
3891 ++ run_qfull_queue(udev, false);
3892 + mutex_unlock(&udev->cmdr_lock);
3893 +
3894 + return 0;
3895 +@@ -1982,7 +2011,7 @@ static void tcmu_block_dev(struct tcmu_dev *udev)
3896 + /* complete IO that has executed successfully */
3897 + tcmu_handle_completions(udev);
3898 + /* fail IO waiting to be queued */
3899 +- run_cmdr_queue(udev, true);
3900 ++ run_qfull_queue(udev, true);
3901 +
3902 + unlock:
3903 + mutex_unlock(&udev->cmdr_lock);
3904 +@@ -1997,7 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
3905 + mutex_lock(&udev->cmdr_lock);
3906 +
3907 + idr_for_each_entry(&udev->commands, cmd, i) {
3908 +- if (!list_empty(&cmd->cmdr_queue_entry))
3909 ++ if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
3910 + continue;
3911 +
3912 + pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
3913 +@@ -2006,6 +2035,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
3914 +
3915 + idr_remove(&udev->commands, i);
3916 + if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
3917 ++ list_del_init(&cmd->queue_entry);
3918 + if (err_level == 1) {
3919 + /*
3920 + * Userspace was not able to start the
3921 +@@ -2666,6 +2696,10 @@ static void check_timedout_devices(void)
3922 +
3923 + mutex_lock(&udev->cmdr_lock);
3924 + idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
3925 ++
3926 ++ tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
3927 ++ tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
3928 ++
3929 + mutex_unlock(&udev->cmdr_lock);
3930 +
3931 + spin_lock_bh(&timed_out_udevs_lock);
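
tcmu now keeps separate qfull and inflight queues and, instead of re-arming a timer on every submission, re-arms one shared timer for the earliest unexpired deadline (or stops it when the queue drains), which is exactly what tcmu_set_next_deadline() above does. A generic sketch of that helper:

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/timer.h>

struct demo_cmd {
	struct list_head queue_entry;
	unsigned long deadline;	/* absolute, in jiffies */
};

/* The queue is ordered by submission time, so the first unexpired
 * entry holds the next deadline; an empty or fully expired queue
 * stops the timer instead. */
static void demo_set_next_deadline(struct list_head *queue,
				   struct timer_list *timer)
{
	struct demo_cmd *cmd;

	list_for_each_entry(cmd, queue, queue_entry) {
		if (!time_after(jiffies, cmd->deadline)) {
			mod_timer(timer, cmd->deadline);
			return;
		}
	}
	del_timer(timer);
}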
3932 +diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
3933 +index 73a4adeab096b..11bd8b6422ebf 100644
3934 +--- a/drivers/vhost/scsi.c
3935 ++++ b/drivers/vhost/scsi.c
3936 +@@ -1132,16 +1132,18 @@ vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
3937 + struct vhost_virtqueue *vq,
3938 + struct vhost_scsi_ctx *vc)
3939 + {
3940 +- struct virtio_scsi_ctrl_tmf_resp __user *resp;
3941 + struct virtio_scsi_ctrl_tmf_resp rsp;
3942 ++ struct iov_iter iov_iter;
3943 + int ret;
3944 +
3945 + pr_debug("%s\n", __func__);
3946 + memset(&rsp, 0, sizeof(rsp));
3947 + rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
3948 +- resp = vq->iov[vc->out].iov_base;
3949 +- ret = __copy_to_user(resp, &rsp, sizeof(rsp));
3950 +- if (!ret)
3951 ++
3952 ++ iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
3953 ++
3954 ++ ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
3955 ++ if (likely(ret == sizeof(rsp)))
3956 + vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
3957 + else
3958 + pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
3959 +@@ -1152,16 +1154,18 @@ vhost_scsi_send_an_resp(struct vhost_scsi *vs,
3960 + struct vhost_virtqueue *vq,
3961 + struct vhost_scsi_ctx *vc)
3962 + {
3963 +- struct virtio_scsi_ctrl_an_resp __user *resp;
3964 + struct virtio_scsi_ctrl_an_resp rsp;
3965 ++ struct iov_iter iov_iter;
3966 + int ret;
3967 +
3968 + pr_debug("%s\n", __func__);
3969 + memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */
3970 + rsp.response = VIRTIO_SCSI_S_OK;
3971 +- resp = vq->iov[vc->out].iov_base;
3972 +- ret = __copy_to_user(resp, &rsp, sizeof(rsp));
3973 +- if (!ret)
3974 ++
3975 ++ iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
3976 ++
3977 ++ ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
3978 ++ if (likely(ret == sizeof(rsp)))
3979 + vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
3980 + else
3981 + pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
3982 +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
3983 +index 5eaeca805c95c..b214a72d5caad 100644
3984 +--- a/drivers/vhost/vhost.c
3985 ++++ b/drivers/vhost/vhost.c
3986 +@@ -1035,8 +1035,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
3987 + int type, ret;
3988 +
3989 + ret = copy_from_iter(&type, sizeof(type), from);
3990 +- if (ret != sizeof(type))
3991 ++ if (ret != sizeof(type)) {
3992 ++ ret = -EINVAL;
3993 + goto done;
3994 ++ }
3995 +
3996 + switch (type) {
3997 + case VHOST_IOTLB_MSG:
3998 +@@ -1055,8 +1057,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
3999 +
4000 + iov_iter_advance(from, offset);
4001 + ret = copy_from_iter(&msg, sizeof(msg), from);
4002 +- if (ret != sizeof(msg))
4003 ++ if (ret != sizeof(msg)) {
4004 ++ ret = -EINVAL;
4005 + goto done;
4006 ++ }
4007 + if (vhost_process_iotlb_msg(dev, &msg)) {
4008 + ret = -EFAULT;
4009 + goto done;
4010 +diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
4011 +index f9ef0673a083c..aded3213bfb51 100644
4012 +--- a/drivers/video/backlight/pwm_bl.c
4013 ++++ b/drivers/video/backlight/pwm_bl.c
4014 +@@ -268,6 +268,16 @@ static int pwm_backlight_parse_dt(struct device *dev,
4015 +
4016 + memset(data, 0, sizeof(*data));
4017 +
4018 ++ /*
4019 ++	 * These values are optional and set to 0 by default; the out values
4020 ++ * are modified only if a valid u32 value can be decoded.
4021 ++ */
4022 ++ of_property_read_u32(node, "post-pwm-on-delay-ms",
4023 ++ &data->post_pwm_on_delay);
4024 ++ of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
4025 ++
4026 ++ data->enable_gpio = -EINVAL;
4027 ++
4028 + /*
4029 + * Determine the number of brightness levels, if this property is not
4030 + * set a default table of brightness levels will be used.
4031 +@@ -380,15 +390,6 @@ static int pwm_backlight_parse_dt(struct device *dev,
4032 + data->max_brightness--;
4033 + }
4034 +
4035 +- /*
4036 +- * These values are optional and set as 0 by default, the out values
4037 +- * are modified only if a valid u32 value can be decoded.
4038 +- */
4039 +- of_property_read_u32(node, "post-pwm-on-delay-ms",
4040 +- &data->post_pwm_on_delay);
4041 +- of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
4042 +-
4043 +- data->enable_gpio = -EINVAL;
4044 + return 0;
4045 + }
4046 +
4047 +diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c
4048 +index 5c4a764717c4d..81208cd3f4ecb 100644
4049 +--- a/drivers/watchdog/mt7621_wdt.c
4050 ++++ b/drivers/watchdog/mt7621_wdt.c
4051 +@@ -17,6 +17,7 @@
4052 + #include <linux/watchdog.h>
4053 + #include <linux/moduleparam.h>
4054 + #include <linux/platform_device.h>
4055 ++#include <linux/mod_devicetable.h>
4056 +
4057 + #include <asm/mach-ralink/ralink_regs.h>
4058 +
4059 +diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
4060 +index 98967f0a7d10e..db7c57d82cfdc 100644
4061 +--- a/drivers/watchdog/rt2880_wdt.c
4062 ++++ b/drivers/watchdog/rt2880_wdt.c
4063 +@@ -18,6 +18,7 @@
4064 + #include <linux/watchdog.h>
4065 + #include <linux/moduleparam.h>
4066 + #include <linux/platform_device.h>
4067 ++#include <linux/mod_devicetable.h>
4068 +
4069 + #include <asm/mach-ralink/ralink_regs.h>
4070 +
4071 +diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
4072 +index 2e5d845b50914..7aa64d1b119c2 100644
4073 +--- a/drivers/xen/pvcalls-back.c
4074 ++++ b/drivers/xen/pvcalls-back.c
4075 +@@ -160,9 +160,10 @@ static void pvcalls_conn_back_read(void *opaque)
4076 +
4077 + /* write the data, then modify the indexes */
4078 + virt_wmb();
4079 +- if (ret < 0)
4080 ++ if (ret < 0) {
4081 ++ atomic_set(&map->read, 0);
4082 + intf->in_error = ret;
4083 +- else
4084 ++ } else
4085 + intf->in_prod = prod + ret;
4086 + /* update the indexes, then notify the other end */
4087 + virt_wmb();
4088 +@@ -282,13 +283,11 @@ static int pvcalls_back_socket(struct xenbus_device *dev,
4089 + static void pvcalls_sk_state_change(struct sock *sock)
4090 + {
4091 + struct sock_mapping *map = sock->sk_user_data;
4092 +- struct pvcalls_data_intf *intf;
4093 +
4094 + if (map == NULL)
4095 + return;
4096 +
4097 +- intf = map->ring;
4098 +- intf->in_error = -ENOTCONN;
4099 ++ atomic_inc(&map->read);
4100 + notify_remote_via_irq(map->irq);
4101 + }
4102 +
4103 +diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
4104 +index 77224d8f3e6fe..91da7e44d5d4f 100644
4105 +--- a/drivers/xen/pvcalls-front.c
4106 ++++ b/drivers/xen/pvcalls-front.c
4107 +@@ -31,6 +31,12 @@
4108 + #define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
4109 + #define PVCALLS_FRONT_MAX_SPIN 5000
4110 +
4111 ++static struct proto pvcalls_proto = {
4112 ++ .name = "PVCalls",
4113 ++ .owner = THIS_MODULE,
4114 ++ .obj_size = sizeof(struct sock),
4115 ++};
4116 ++
4117 + struct pvcalls_bedata {
4118 + struct xen_pvcalls_front_ring ring;
4119 + grant_ref_t ref;
4120 +@@ -335,6 +341,42 @@ int pvcalls_front_socket(struct socket *sock)
4121 + return ret;
4122 + }
4123 +
4124 ++static void free_active_ring(struct sock_mapping *map)
4125 ++{
4126 ++ if (!map->active.ring)
4127 ++ return;
4128 ++
4129 ++ free_pages((unsigned long)map->active.data.in,
4130 ++ map->active.ring->ring_order);
4131 ++ free_page((unsigned long)map->active.ring);
4132 ++}
4133 ++
4134 ++static int alloc_active_ring(struct sock_mapping *map)
4135 ++{
4136 ++ void *bytes;
4137 ++
4138 ++ map->active.ring = (struct pvcalls_data_intf *)
4139 ++ get_zeroed_page(GFP_KERNEL);
4140 ++ if (!map->active.ring)
4141 ++ goto out;
4142 ++
4143 ++ map->active.ring->ring_order = PVCALLS_RING_ORDER;
4144 ++ bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
4145 ++ PVCALLS_RING_ORDER);
4146 ++ if (!bytes)
4147 ++ goto out;
4148 ++
4149 ++ map->active.data.in = bytes;
4150 ++ map->active.data.out = bytes +
4151 ++ XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
4152 ++
4153 ++ return 0;
4154 ++
4155 ++out:
4156 ++ free_active_ring(map);
4157 ++ return -ENOMEM;
4158 ++}
4159 ++
4160 + static int create_active(struct sock_mapping *map, int *evtchn)
4161 + {
4162 + void *bytes;
4163 +@@ -343,15 +385,7 @@ static int create_active(struct sock_mapping *map, int *evtchn)
4164 + *evtchn = -1;
4165 + init_waitqueue_head(&map->active.inflight_conn_req);
4166 +
4167 +- map->active.ring = (struct pvcalls_data_intf *)
4168 +- __get_free_page(GFP_KERNEL | __GFP_ZERO);
4169 +- if (map->active.ring == NULL)
4170 +- goto out_error;
4171 +- map->active.ring->ring_order = PVCALLS_RING_ORDER;
4172 +- bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
4173 +- PVCALLS_RING_ORDER);
4174 +- if (bytes == NULL)
4175 +- goto out_error;
4176 ++ bytes = map->active.data.in;
4177 + for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
4178 + map->active.ring->ref[i] = gnttab_grant_foreign_access(
4179 + pvcalls_front_dev->otherend_id,
4180 +@@ -361,10 +395,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
4181 + pvcalls_front_dev->otherend_id,
4182 + pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
4183 +
4184 +- map->active.data.in = bytes;
4185 +- map->active.data.out = bytes +
4186 +- XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
4187 +-
4188 + ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
4189 + if (ret)
4190 + goto out_error;
4191 +@@ -385,8 +415,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
4192 + out_error:
4193 + if (*evtchn >= 0)
4194 + xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
4195 +- free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
4196 +- free_page((unsigned long)map->active.ring);
4197 + return ret;
4198 + }
4199 +
4200 +@@ -406,17 +434,24 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
4201 + return PTR_ERR(map);
4202 +
4203 + bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
4204 ++ ret = alloc_active_ring(map);
4205 ++ if (ret < 0) {
4206 ++ pvcalls_exit_sock(sock);
4207 ++ return ret;
4208 ++ }
4209 +
4210 + spin_lock(&bedata->socket_lock);
4211 + ret = get_request(bedata, &req_id);
4212 + if (ret < 0) {
4213 + spin_unlock(&bedata->socket_lock);
4214 ++ free_active_ring(map);
4215 + pvcalls_exit_sock(sock);
4216 + return ret;
4217 + }
4218 + ret = create_active(map, &evtchn);
4219 + if (ret < 0) {
4220 + spin_unlock(&bedata->socket_lock);
4221 ++ free_active_ring(map);
4222 + pvcalls_exit_sock(sock);
4223 + return ret;
4224 + }
4225 +@@ -560,15 +595,13 @@ static int __read_ring(struct pvcalls_data_intf *intf,
4226 + error = intf->in_error;
4227 + /* get pointers before reading from the ring */
4228 + virt_rmb();
4229 +- if (error < 0)
4230 +- return error;
4231 +
4232 + size = pvcalls_queued(prod, cons, array_size);
4233 + masked_prod = pvcalls_mask(prod, array_size);
4234 + masked_cons = pvcalls_mask(cons, array_size);
4235 +
4236 + if (size == 0)
4237 +- return 0;
4238 ++ return error ?: size;
4239 +
4240 + if (len > size)
4241 + len = size;
4242 +@@ -780,25 +813,36 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
4243 + }
4244 + }
4245 +
4246 +- spin_lock(&bedata->socket_lock);
4247 +- ret = get_request(bedata, &req_id);
4248 +- if (ret < 0) {
4249 ++ map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
4250 ++ if (map2 == NULL) {
4251 + clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
4252 + (void *)&map->passive.flags);
4253 +- spin_unlock(&bedata->socket_lock);
4254 ++ pvcalls_exit_sock(sock);
4255 ++ return -ENOMEM;
4256 ++ }
4257 ++ ret = alloc_active_ring(map2);
4258 ++ if (ret < 0) {
4259 ++ clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
4260 ++ (void *)&map->passive.flags);
4261 ++ kfree(map2);
4262 + pvcalls_exit_sock(sock);
4263 + return ret;
4264 + }
4265 +- map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
4266 +- if (map2 == NULL) {
4267 ++ spin_lock(&bedata->socket_lock);
4268 ++ ret = get_request(bedata, &req_id);
4269 ++ if (ret < 0) {
4270 + clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
4271 + (void *)&map->passive.flags);
4272 + spin_unlock(&bedata->socket_lock);
4273 ++ free_active_ring(map2);
4274 ++ kfree(map2);
4275 + pvcalls_exit_sock(sock);
4276 +- return -ENOMEM;
4277 ++ return ret;
4278 + }
4279 ++
4280 + ret = create_active(map2, &evtchn);
4281 + if (ret < 0) {
4282 ++ free_active_ring(map2);
4283 + kfree(map2);
4284 + clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
4285 + (void *)&map->passive.flags);
4286 +@@ -839,7 +883,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
4287 +
4288 + received:
4289 + map2->sock = newsock;
4290 +- newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL);
4291 ++ newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL, &pvcalls_proto, false);
4292 + if (!newsock->sk) {
4293 + bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
4294 + map->passive.inflight_req_id = PVCALLS_INVALID_ID;
4295 +@@ -1032,8 +1076,8 @@ int pvcalls_front_release(struct socket *sock)
4296 + spin_lock(&bedata->socket_lock);
4297 + list_del(&map->list);
4298 + spin_unlock(&bedata->socket_lock);
4299 +- if (READ_ONCE(map->passive.inflight_req_id) !=
4300 +- PVCALLS_INVALID_ID) {
4301 ++ if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID &&
4302 ++ READ_ONCE(map->passive.inflight_req_id) != 0) {
4303 + pvcalls_front_free_map(bedata,
4304 + map->passive.accept_map);
4305 + }
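
The pvcalls-front reordering above exists because the ring allocations in create_active() used to happen under bedata->socket_lock, where sleeping GFP_KERNEL allocations are not allowed; the fix hoists them out of the lock and unwinds them on every later failure path. The resulting control flow, with demo_* as hypothetical stand-ins for the patch's helpers:

#include <linux/spinlock.h>

int demo_alloc_ring(void *map);		/* GFP_KERNEL, may sleep */
void demo_free_ring(void *map);
int demo_get_request(void *map);	/* must run under the lock */

static int demo_connect(void *map, spinlock_t *lock)
{
	int ret = demo_alloc_ring(map);	/* sleepable work done first */

	if (ret < 0)
		return ret;

	spin_lock(lock);
	ret = demo_get_request(map);
	spin_unlock(lock);
	if (ret < 0)
		demo_free_ring(map);	/* unwind the early allocation */
	return ret;
}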
4306 +diff --git a/fs/afs/flock.c b/fs/afs/flock.c
4307 +index 0568fd9868210..e432bd27a2e7b 100644
4308 +--- a/fs/afs/flock.c
4309 ++++ b/fs/afs/flock.c
4310 +@@ -208,7 +208,7 @@ again:
4311 + /* The new front of the queue now owns the state variables. */
4312 + next = list_entry(vnode->pending_locks.next,
4313 + struct file_lock, fl_u.afs.link);
4314 +- vnode->lock_key = afs_file_key(next->fl_file);
4315 ++ vnode->lock_key = key_get(afs_file_key(next->fl_file));
4316 + vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
4317 + vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
4318 + goto again;
4319 +@@ -413,7 +413,7 @@ static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl)
4320 + /* The new front of the queue now owns the state variables. */
4321 + next = list_entry(vnode->pending_locks.next,
4322 + struct file_lock, fl_u.afs.link);
4323 +- vnode->lock_key = afs_file_key(next->fl_file);
4324 ++ vnode->lock_key = key_get(afs_file_key(next->fl_file));
4325 + vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
4326 + vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
4327 + afs_lock_may_be_available(vnode);
4328 +diff --git a/fs/afs/inode.c b/fs/afs/inode.c
4329 +index 6b17d36204142..1a4ce07fb406d 100644
4330 +--- a/fs/afs/inode.c
4331 ++++ b/fs/afs/inode.c
4332 +@@ -414,7 +414,6 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
4333 + } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
4334 + valid = true;
4335 + } else {
4336 +- vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
4337 + vnode->cb_v_break = vnode->volume->cb_v_break;
4338 + valid = false;
4339 + }
4340 +@@ -546,6 +545,8 @@ void afs_evict_inode(struct inode *inode)
4341 + #endif
4342 +
4343 + afs_put_permits(rcu_access_pointer(vnode->permit_cache));
4344 ++ key_put(vnode->lock_key);
4345 ++ vnode->lock_key = NULL;
4346 + _leave("");
4347 + }
4348 +
4349 +diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h
4350 +index 07bc10f076aac..d443e2bfa0946 100644
4351 +--- a/fs/afs/protocol_yfs.h
4352 ++++ b/fs/afs/protocol_yfs.h
4353 +@@ -161,3 +161,14 @@ struct yfs_xdr_YFSStoreVolumeStatus {
4354 + struct yfs_xdr_u64 max_quota;
4355 + struct yfs_xdr_u64 file_quota;
4356 + } __packed;
4357 ++
4358 ++enum yfs_lock_type {
4359 ++ yfs_LockNone = -1,
4360 ++ yfs_LockRead = 0,
4361 ++ yfs_LockWrite = 1,
4362 ++ yfs_LockExtend = 2,
4363 ++ yfs_LockRelease = 3,
4364 ++ yfs_LockMandatoryRead = 0x100,
4365 ++ yfs_LockMandatoryWrite = 0x101,
4366 ++ yfs_LockMandatoryExtend = 0x102,
4367 ++};
4368 +diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
4369 +index a7b44863d502e..2c588f9bbbda2 100644
4370 +--- a/fs/afs/rxrpc.c
4371 ++++ b/fs/afs/rxrpc.c
4372 +@@ -23,6 +23,7 @@ struct workqueue_struct *afs_async_calls;
4373 + static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
4374 + static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *);
4375 + static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
4376 ++static void afs_delete_async_call(struct work_struct *);
4377 + static void afs_process_async_call(struct work_struct *);
4378 + static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
4379 + static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
4380 +@@ -203,20 +204,26 @@ void afs_put_call(struct afs_call *call)
4381 + }
4382 + }
4383 +
4384 ++static struct afs_call *afs_get_call(struct afs_call *call,
4385 ++ enum afs_call_trace why)
4386 ++{
4387 ++ int u = atomic_inc_return(&call->usage);
4388 ++
4389 ++ trace_afs_call(call, why, u,
4390 ++ atomic_read(&call->net->nr_outstanding_calls),
4391 ++ __builtin_return_address(0));
4392 ++ return call;
4393 ++}
4394 ++
4395 + /*
4396 + * Queue the call for actual work.
4397 + */
4398 + static void afs_queue_call_work(struct afs_call *call)
4399 + {
4400 + if (call->type->work) {
4401 +- int u = atomic_inc_return(&call->usage);
4402 +-
4403 +- trace_afs_call(call, afs_call_trace_work, u,
4404 +- atomic_read(&call->net->nr_outstanding_calls),
4405 +- __builtin_return_address(0));
4406 +-
4407 + INIT_WORK(&call->work, call->type->work);
4408 +
4409 ++ afs_get_call(call, afs_call_trace_work);
4410 + if (!queue_work(afs_wq, &call->work))
4411 + afs_put_call(call);
4412 + }
4413 +@@ -398,6 +405,12 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
4414 + }
4415 + }
4416 +
4417 ++ /* If the call is going to be asynchronous, we need an extra ref for
4418 ++ * the call to hold itself so the caller need not hang on to its ref.
4419 ++ */
4420 ++ if (call->async)
4421 ++ afs_get_call(call, afs_call_trace_get);
4422 ++
4423 + /* create a call */
4424 + rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key,
4425 + (unsigned long)call,
4426 +@@ -438,15 +451,17 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
4427 + goto error_do_abort;
4428 + }
4429 +
4430 +- /* at this point, an async call may no longer exist as it may have
4431 +- * already completed */
4432 +- if (call->async)
4433 ++ /* Note that at this point, we may have received the reply or an abort
4434 ++ * - and an asynchronous call may already have completed.
4435 ++ */
4436 ++ if (call->async) {
4437 ++ afs_put_call(call);
4438 + return -EINPROGRESS;
4439 ++ }
4440 +
4441 + return afs_wait_for_call_to_complete(call, ac);
4442 +
4443 + error_do_abort:
4444 +- call->state = AFS_CALL_COMPLETE;
4445 + if (ret != -ECONNABORTED) {
4446 + rxrpc_kernel_abort_call(call->net->socket, rxcall,
4447 + RX_USER_ABORT, ret, "KSD");
4448 +@@ -463,8 +478,24 @@ error_do_abort:
4449 + error_kill_call:
4450 + if (call->type->done)
4451 + call->type->done(call);
4452 +- afs_put_call(call);
4453 ++
4454 ++ /* We need to dispose of the extra ref we grabbed for an async call.
4455 ++ * The call, however, might be queued on afs_async_calls and we need to
4456 ++ * make sure we don't get any more notifications that might requeue it.
4457 ++ */
4458 ++ if (call->rxcall) {
4459 ++ rxrpc_kernel_end_call(call->net->socket, call->rxcall);
4460 ++ call->rxcall = NULL;
4461 ++ }
4462 ++ if (call->async) {
4463 ++ if (cancel_work_sync(&call->async_work))
4464 ++ afs_put_call(call);
4465 ++ afs_put_call(call);
4466 ++ }
4467 ++
4468 + ac->error = ret;
4469 ++ call->state = AFS_CALL_COMPLETE;
4470 ++ afs_put_call(call);
4471 + _leave(" = %d", ret);
4472 + return ret;
4473 + }
4474 +diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
4475 +index 12658c1363ae4..5aa57929e8c23 100644
4476 +--- a/fs/afs/yfsclient.c
4477 ++++ b/fs/afs/yfsclient.c
4478 +@@ -803,7 +803,7 @@ int yfs_fs_create_file(struct afs_fs_cursor *fc,
4479 + bp = xdr_encode_YFSFid(bp, &vnode->fid);
4480 + bp = xdr_encode_string(bp, name, namesz);
4481 + bp = xdr_encode_YFSStoreStatus_mode(bp, mode);
4482 +- bp = xdr_encode_u32(bp, 0); /* ViceLockType */
4483 ++ bp = xdr_encode_u32(bp, yfs_LockNone); /* ViceLockType */
4484 + yfs_check_req(call, bp);
4485 +
4486 + afs_use_fs_server(call, fc->cbi);
4487 +diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
4488 +index 041c27ea8de15..f74193da0e092 100644
4489 +--- a/fs/ceph/snap.c
4490 ++++ b/fs/ceph/snap.c
4491 +@@ -616,7 +616,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
4492 + capsnap->size);
4493 +
4494 + spin_lock(&mdsc->snap_flush_lock);
4495 +- list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
4496 ++ if (list_empty(&ci->i_snap_flush_item))
4497 ++ list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
4498 + spin_unlock(&mdsc->snap_flush_lock);
4499 + return 1; /* caller may want to ceph_flush_snaps */
4500 + }
4501 +diff --git a/fs/proc/base.c b/fs/proc/base.c
4502 +index ce34654794472..bde45ca75ba3e 100644
4503 +--- a/fs/proc/base.c
4504 ++++ b/fs/proc/base.c
4505 +@@ -1084,10 +1084,6 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
4506 +
4507 + task_lock(p);
4508 + if (!p->vfork_done && process_shares_mm(p, mm)) {
4509 +- pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n",
4510 +- task_pid_nr(p), p->comm,
4511 +- p->signal->oom_score_adj, oom_adj,
4512 +- task_pid_nr(task), task->comm);
4513 + p->signal->oom_score_adj = oom_adj;
4514 + if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
4515 + p->signal->oom_score_adj_min = (short)oom_adj;
4516 +diff --git a/include/keys/user-type.h b/include/keys/user-type.h
4517 +index e098cbe27db54..12babe9915944 100644
4518 +--- a/include/keys/user-type.h
4519 ++++ b/include/keys/user-type.h
4520 +@@ -31,7 +31,7 @@
4521 + struct user_key_payload {
4522 + struct rcu_head rcu; /* RCU destructor */
4523 + unsigned short datalen; /* length of this data */
4524 +- char data[0]; /* actual data */
4525 ++ char data[0] __aligned(__alignof__(u64)); /* actual data */
4526 + };
4527 +
4528 + extern struct key_type key_type_user;
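
The one-line change above forces the flexible payload array to 64-bit alignment so callers can overlay u64-sized values on it without unaligned access. A small sketch of the effect on the member offset, using the same GNU C attribute as the patch (the struct name here is hypothetical):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct payload {
            unsigned short datalen;  /* alone, this would leave data at offset 2 */
            char data[] __attribute__((aligned(__alignof__(uint64_t))));
    };

    int main(void)
    {
            /* The aligned attribute pushes data from offset 2 up to offset 8. */
            printf("offsetof(data) = %zu\n", offsetof(struct payload, data));
            return 0;
    }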
4529 +diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
4530 +index 3e7dafb3ea809..7ddaeb5182e33 100644
4531 +--- a/include/linux/compiler-clang.h
4532 ++++ b/include/linux/compiler-clang.h
4533 +@@ -3,9 +3,8 @@
4534 + #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
4535 + #endif
4536 +
4537 +-/* Some compiler specific definitions are overwritten here
4538 +- * for Clang compiler
4539 +- */
4540 ++/* Compiler specific definitions for Clang compiler */
4541 ++
4542 + #define uninitialized_var(x) x = *(&(x))
4543 +
4544 + /* same as gcc, this was present in clang-2.6 so we can assume it works
4545 +diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
4546 +index 977ddf2774f97..c61c4bb2bd15f 100644
4547 +--- a/include/linux/compiler-gcc.h
4548 ++++ b/include/linux/compiler-gcc.h
4549 +@@ -58,10 +58,6 @@
4550 + (typeof(ptr)) (__ptr + (off)); \
4551 + })
4552 +
4553 +-/* Make the optimizer believe the variable can be manipulated arbitrarily. */
4554 +-#define OPTIMIZER_HIDE_VAR(var) \
4555 +- __asm__ ("" : "=r" (var) : "0" (var))
4556 +-
4557 + /*
4558 + * A trick to suppress uninitialized variable warning without generating any
4559 + * code
4560 +diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
4561 +index 517bd14e12224..b17f3cd18334d 100644
4562 +--- a/include/linux/compiler-intel.h
4563 ++++ b/include/linux/compiler-intel.h
4564 +@@ -5,9 +5,7 @@
4565 +
4566 + #ifdef __ECC
4567 +
4568 +-/* Some compiler specific definitions are overwritten here
4569 +- * for Intel ECC compiler
4570 +- */
4571 ++/* Compiler specific definitions for Intel ECC compiler */
4572 +
4573 + #include <asm/intrinsics.h>
4574 +
4575 +diff --git a/include/linux/compiler.h b/include/linux/compiler.h
4576 +index fc5004a4b07d7..445348facea97 100644
4577 +--- a/include/linux/compiler.h
4578 ++++ b/include/linux/compiler.h
4579 +@@ -161,7 +161,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
4580 + #endif
4581 +
4582 + #ifndef OPTIMIZER_HIDE_VAR
4583 +-#define OPTIMIZER_HIDE_VAR(var) barrier()
4584 ++/* Make the optimizer believe the variable can be manipulated arbitrarily. */
4585 ++#define OPTIMIZER_HIDE_VAR(var) \
4586 ++ __asm__ ("" : "=r" (var) : "0" (var))
4587 + #endif
4588 +
4589 + /* Not-quite-unique ID. */
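
The change above promotes the asm-based OPTIMIZER_HIDE_VAR() from a gcc-only definition to the generic fallback, replacing the weaker barrier(). The trick is an empty asm statement naming the variable as both input and output, so the compiler must assume the value may have changed and cannot constant-fold across it. A userspace sketch:

    #include <stdio.h>

    /* Empty asm with the variable as input and output: the optimizer can no
     * longer prove anything about its value past this point. */
    #define HIDE_VAR(var) __asm__("" : "=r"(var) : "0"(var))

    int main(void)
    {
            int x = 42;

            HIDE_VAR(x);        /* compiler can no longer assume x == 42 here */
            printf("%d\n", x);  /* still prints 42 at run time */
            return 0;
    }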
4590 +diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
4591 +index 59ddf9af909e4..2dd0a9ed5b361 100644
4592 +--- a/include/linux/qed/qed_chain.h
4593 ++++ b/include/linux/qed/qed_chain.h
4594 +@@ -663,6 +663,37 @@ out:
4595 + static inline void qed_chain_set_prod(struct qed_chain *p_chain,
4596 + u32 prod_idx, void *p_prod_elem)
4597 + {
4598 ++ if (p_chain->mode == QED_CHAIN_MODE_PBL) {
4599 ++ u32 cur_prod, page_mask, page_cnt, page_diff;
4600 ++
4601 ++ cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx :
4602 ++ p_chain->u.chain32.prod_idx;
4603 ++
4604 ++	/* Assume that the number of elements in a page is a power of 2 */
4605 ++ page_mask = ~p_chain->elem_per_page_mask;
4606 ++
4607 ++ /* Use "cur_prod - 1" and "prod_idx - 1" since producer index
4608 ++ * reaches the first element of next page before the page index
4609 ++ * is incremented. See qed_chain_produce().
4610 ++ * Index wrap around is not a problem because the difference
4611 ++ * between current and given producer indices is always
4612 ++ * positive and lower than the chain's capacity.
4613 ++ */
4614 ++ page_diff = (((cur_prod - 1) & page_mask) -
4615 ++ ((prod_idx - 1) & page_mask)) /
4616 ++ p_chain->elem_per_page;
4617 ++
4618 ++ page_cnt = qed_chain_get_page_cnt(p_chain);
4619 ++ if (is_chain_u16(p_chain))
4620 ++ p_chain->pbl.c.u16.prod_page_idx =
4621 ++ (p_chain->pbl.c.u16.prod_page_idx -
4622 ++ page_diff + page_cnt) % page_cnt;
4623 ++ else
4624 ++ p_chain->pbl.c.u32.prod_page_idx =
4625 ++ (p_chain->pbl.c.u32.prod_page_idx -
4626 ++ page_diff + page_cnt) % page_cnt;
4627 ++ }
4628 ++
4629 + if (is_chain_u16(p_chain))
4630 + p_chain->u.chain16.prod_idx = (u16) prod_idx;
4631 + else
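
The rewind logic added above recomputes the PBL producer page index from the element indices. A standalone sketch of that arithmetic with hypothetical values (elements per page must be a power of two, as the patch comment assumes):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t elem_per_page = 8;
            uint32_t page_mask = ~(elem_per_page - 1);
            uint32_t page_cnt = 4;

            uint32_t cur_prod = 17;      /* current producer index */
            uint32_t prod_idx = 3;       /* index the caller rewinds to */
            uint32_t prod_page_idx = 2;  /* current producer page index */

            /* "- 1" because the producer crosses into the next page before the
             * page index is bumped; the subtraction is wraparound-safe because
             * the difference is always below the chain capacity. */
            uint32_t page_diff = (((cur_prod - 1) & page_mask) -
                                  ((prod_idx - 1) & page_mask)) / elem_per_page;

            prod_page_idx = (prod_page_idx - page_diff + page_cnt) % page_cnt;
            printf("page_diff=%u new prod_page_idx=%u\n", page_diff, prod_page_idx);
            return 0;
    }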
4632 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
4633 +index a6d820ad17f07..8e63c166765ef 100644
4634 +--- a/include/linux/skbuff.h
4635 ++++ b/include/linux/skbuff.h
4636 +@@ -2418,7 +2418,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
4637 +
4638 + if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
4639 + skb_set_transport_header(skb, keys.control.thoff);
4640 +- else
4641 ++ else if (offset_hint >= 0)
4642 + skb_set_transport_header(skb, offset_hint);
4643 + }
4644 +
4645 +diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
4646 +index cb462f9ab7dd5..e0348cb0a1dd7 100644
4647 +--- a/include/linux/virtio_net.h
4648 ++++ b/include/linux/virtio_net.h
4649 +@@ -57,6 +57,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
4650 +
4651 + if (!skb_partial_csum_set(skb, start, off))
4652 + return -EINVAL;
4653 ++ } else {
4654 ++ /* gso packets without NEEDS_CSUM do not set transport_offset.
4655 ++ * probe and drop if does not match one of the above types.
4656 ++	 * probe, and drop if it does not match one of the above types.
4657 ++ if (gso_type && skb->network_header) {
4658 ++ if (!skb->protocol)
4659 ++ virtio_net_hdr_set_proto(skb, hdr);
4660 ++retry:
4661 ++ skb_probe_transport_header(skb, -1);
4662 ++ if (!skb_transport_header_was_set(skb)) {
4663 ++ /* UFO does not specify ipv4 or 6: try both */
4664 ++ if (gso_type & SKB_GSO_UDP &&
4665 ++ skb->protocol == htons(ETH_P_IP)) {
4666 ++ skb->protocol = htons(ETH_P_IPV6);
4667 ++ goto retry;
4668 ++ }
4669 ++ return -EINVAL;
4670 ++ }
4671 ++ }
4672 + }
4673 +
4674 + if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
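
The new else-branch probes for a transport header and, for UFO packets that carry no address family, retries once as IPv6 before dropping. A toy sketch of that probe-and-retry control flow (all names here are hypothetical, not the kernel's API):

    #include <stdio.h>

    enum proto { PROTO_IP4, PROTO_IP6 };

    static int probe_transport(enum proto p)
    {
            return p == PROTO_IP6;  /* pretend only the IPv6 parse succeeds */
    }

    int main(void)
    {
            enum proto p = PROTO_IP4;

    retry:
            if (!probe_transport(p)) {
                    if (p == PROTO_IP4) {
                            p = PROTO_IP6;  /* UFO: family unknown, try the other */
                            goto retry;
                    }
                    puts("drop: no transport header");
                    return 1;
            }
            puts("transport header found");
            return 0;
    }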
4675 +diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
4676 +index 77e2761d4f2f9..ff4eb9869e5ba 100644
4677 +--- a/include/net/netfilter/nf_flow_table.h
4678 ++++ b/include/net/netfilter/nf_flow_table.h
4679 +@@ -84,7 +84,6 @@ struct flow_offload {
4680 + struct nf_flow_route {
4681 + struct {
4682 + struct dst_entry *dst;
4683 +- int ifindex;
4684 + } tuple[FLOW_OFFLOAD_DIR_MAX];
4685 + };
4686 +
4687 +diff --git a/include/sound/soc.h b/include/sound/soc.h
4688 +index 3e0ac310a3df9..e721082c84a36 100644
4689 +--- a/include/sound/soc.h
4690 ++++ b/include/sound/soc.h
4691 +@@ -985,6 +985,12 @@ struct snd_soc_dai_link {
4692 + /* Do not create a PCM for this DAI link (Backend link) */
4693 + unsigned int ignore:1;
4694 +
4695 ++ /*
4696 ++ * This driver uses legacy platform naming. Set by the core, machine
4697 ++ * drivers should not modify this value.
4698 ++ */
4699 ++ unsigned int legacy_platform:1;
4700 ++
4701 + struct list_head list; /* DAI link list of the soc card */
4702 + struct snd_soc_dobj dobj; /* For topology */
4703 + };
4704 +diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
4705 +index 33d291888ba9c..e3f005eae1f76 100644
4706 +--- a/include/trace/events/afs.h
4707 ++++ b/include/trace/events/afs.h
4708 +@@ -25,6 +25,7 @@
4709 + enum afs_call_trace {
4710 + afs_call_trace_alloc,
4711 + afs_call_trace_free,
4712 ++ afs_call_trace_get,
4713 + afs_call_trace_put,
4714 + afs_call_trace_wake,
4715 + afs_call_trace_work,
4716 +@@ -159,6 +160,7 @@ enum afs_file_error {
4717 + #define afs_call_traces \
4718 + EM(afs_call_trace_alloc, "ALLOC") \
4719 + EM(afs_call_trace_free, "FREE ") \
4720 ++ EM(afs_call_trace_get, "GET ") \
4721 + EM(afs_call_trace_put, "PUT ") \
4722 + EM(afs_call_trace_wake, "WAKE ") \
4723 + E_(afs_call_trace_work, "WORK ")
4724 +diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
4725 +index 14565d703291b..e8baca85bac6a 100644
4726 +--- a/include/uapi/linux/inet_diag.h
4727 ++++ b/include/uapi/linux/inet_diag.h
4728 +@@ -137,15 +137,21 @@ enum {
4729 + INET_DIAG_TCLASS,
4730 + INET_DIAG_SKMEMINFO,
4731 + INET_DIAG_SHUTDOWN,
4732 +- INET_DIAG_DCTCPINFO,
4733 +- INET_DIAG_PROTOCOL, /* response attribute only */
4734 ++
4735 ++ /*
4736 ++	 * Next extensions cannot be requested in struct inet_diag_req_v2:
4737 ++ * its field idiag_ext has only 8 bits.
4738 ++ */
4739 ++
4740 ++ INET_DIAG_DCTCPINFO, /* request as INET_DIAG_VEGASINFO */
4741 ++ INET_DIAG_PROTOCOL, /* response attribute only */
4742 + INET_DIAG_SKV6ONLY,
4743 + INET_DIAG_LOCALS,
4744 + INET_DIAG_PEERS,
4745 + INET_DIAG_PAD,
4746 +- INET_DIAG_MARK,
4747 +- INET_DIAG_BBRINFO,
4748 +- INET_DIAG_CLASS_ID,
4749 ++ INET_DIAG_MARK, /* only with CAP_NET_ADMIN */
4750 ++ INET_DIAG_BBRINFO, /* request as INET_DIAG_VEGASINFO */
4751 ++ INET_DIAG_CLASS_ID, /* request as INET_DIAG_TCLASS */
4752 + INET_DIAG_MD5SIG,
4753 + __INET_DIAG_MAX,
4754 + };
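
The annotations added above document the consequence of idiag_ext being 8 bits wide: only extensions whose (value - 1) fits in a byte can be requested directly; later ones must be requested via an earlier alias. A small illustration, assuming the enum values 8 and 9 for SHUTDOWN and DCTCPINFO as implied by the ordering shown:

    #include <stdio.h>

    int main(void)
    {
            unsigned char idiag_ext = 0;
            int shutdown = 8;   /* INET_DIAG_SHUTDOWN: bit 7, still fits */
            int dctcpinfo = 9;  /* INET_DIAG_DCTCPINFO: bit 8, out of range */

            idiag_ext |= 1 << (shutdown - 1);
            printf("ext=0x%02x\n", idiag_ext);
            printf("extension %d would need bit %d; the field has only bits 0-7\n",
                   dctcpinfo, dctcpinfo - 1);
            return 0;
    }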
4755 +diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
4756 +index 90daf285de032..d43b145358275 100644
4757 +--- a/kernel/bpf/stackmap.c
4758 ++++ b/kernel/bpf/stackmap.c
4759 +@@ -180,11 +180,14 @@ static inline int stack_map_parse_build_id(void *page_addr,
4760 +
4761 + if (nhdr->n_type == BPF_BUILD_ID &&
4762 + nhdr->n_namesz == sizeof("GNU") &&
4763 +- nhdr->n_descsz == BPF_BUILD_ID_SIZE) {
4764 ++ nhdr->n_descsz > 0 &&
4765 ++ nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
4766 + memcpy(build_id,
4767 + note_start + note_offs +
4768 + ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
4769 +- BPF_BUILD_ID_SIZE);
4770 ++ nhdr->n_descsz);
4771 ++ memset(build_id + nhdr->n_descsz, 0,
4772 ++ BPF_BUILD_ID_SIZE - nhdr->n_descsz);
4773 + return 0;
4774 + }
4775 + new_offs = note_offs + sizeof(Elf32_Nhdr) +
4776 +@@ -260,7 +263,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
4777 + return -EFAULT; /* page not mapped */
4778 +
4779 + ret = -EINVAL;
4780 +- page_addr = page_address(page);
4781 ++ page_addr = kmap_atomic(page);
4782 + ehdr = (Elf32_Ehdr *)page_addr;
4783 +
4784 + /* compare magic x7f "ELF" */
4785 +@@ -276,6 +279,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
4786 + else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
4787 + ret = stack_map_get_build_id_64(page_addr, build_id);
4788 + out:
4789 ++ kunmap_atomic(page_addr);
4790 + put_page(page);
4791 + return ret;
4792 + }
4793 +@@ -310,6 +314,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
4794 + for (i = 0; i < trace_nr; i++) {
4795 + id_offs[i].status = BPF_STACK_BUILD_ID_IP;
4796 + id_offs[i].ip = ips[i];
4797 ++ memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
4798 + }
4799 + return;
4800 + }
4801 +@@ -320,6 +325,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
4802 + /* per entry fall back to ips */
4803 + id_offs[i].status = BPF_STACK_BUILD_ID_IP;
4804 + id_offs[i].ip = ips[i];
4805 ++ memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
4806 + continue;
4807 + }
4808 + id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
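
The stackmap fix above accepts build IDs shorter than the buffer, copies only n_descsz bytes, and zeroes the remainder (and pre-zeroes the fallback paths) so callers never see stale bytes. A userspace sketch of that bounded copy-and-pad, with hypothetical names and sizes:

    #include <stdio.h>
    #include <string.h>

    #define BUILD_ID_SIZE 20

    static int copy_build_id(char *dst, const char *desc, size_t n_descsz)
    {
            if (n_descsz == 0 || n_descsz > BUILD_ID_SIZE)
                    return -1;                 /* reject out-of-range descriptors */
            memcpy(dst, desc, n_descsz);       /* copy only what was provided */
            memset(dst + n_descsz, 0,          /* zero the unused tail */
                   BUILD_ID_SIZE - n_descsz);
            return 0;
    }

    int main(void)
    {
            char id[BUILD_ID_SIZE];
            const char desc[8] = "abcdefg";

            if (copy_build_id(id, desc, sizeof(desc)) == 0)
                    printf("copied %zu bytes, %d zero-padded\n",
                           sizeof(desc), BUILD_ID_SIZE - (int)sizeof(desc));
            return 0;
    }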
4809 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
4810 +index ff1c4b20cd0a6..b331562989bd2 100644
4811 +--- a/kernel/trace/trace.c
4812 ++++ b/kernel/trace/trace.c
4813 +@@ -3384,6 +3384,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
4814 + const char tgid_space[] = " ";
4815 + const char space[] = " ";
4816 +
4817 ++ print_event_info(buf, m);
4818 ++
4819 + seq_printf(m, "# %s _-----=> irqs-off\n",
4820 + tgid ? tgid_space : space);
4821 + seq_printf(m, "# %s / _----=> need-resched\n",
4822 +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
4823 +index fec67188c4d28..3387408a15c27 100644
4824 +--- a/kernel/trace/trace_kprobe.c
4825 ++++ b/kernel/trace/trace_kprobe.c
4826 +@@ -878,22 +878,14 @@ static const struct file_operations kprobe_profile_ops = {
4827 + static nokprobe_inline int
4828 + fetch_store_strlen(unsigned long addr)
4829 + {
4830 +- mm_segment_t old_fs;
4831 + int ret, len = 0;
4832 + u8 c;
4833 +
4834 +- old_fs = get_fs();
4835 +- set_fs(KERNEL_DS);
4836 +- pagefault_disable();
4837 +-
4838 + do {
4839 +- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
4840 ++ ret = probe_mem_read(&c, (u8 *)addr + len, 1);
4841 + len++;
4842 + } while (c && ret == 0 && len < MAX_STRING_SIZE);
4843 +
4844 +- pagefault_enable();
4845 +- set_fs(old_fs);
4846 +-
4847 + return (ret < 0) ? ret : len;
4848 + }
4849 +
4850 +diff --git a/mm/mempolicy.c b/mm/mempolicy.c
4851 +index d4496d9d34f53..ee2bce59d2bff 100644
4852 +--- a/mm/mempolicy.c
4853 ++++ b/mm/mempolicy.c
4854 +@@ -1314,7 +1314,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
4855 + nodemask_t *nodes)
4856 + {
4857 + unsigned long copy = ALIGN(maxnode-1, 64) / 8;
4858 +- const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
4859 ++ unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
4860 +
4861 + if (copy > nbytes) {
4862 + if (copy > PAGE_SIZE)
4863 +@@ -1491,7 +1491,7 @@ static int kernel_get_mempolicy(int __user *policy,
4864 + int uninitialized_var(pval);
4865 + nodemask_t nodes;
4866 +
4867 +- if (nmask != NULL && maxnode < MAX_NUMNODES)
4868 ++ if (nmask != NULL && maxnode < nr_node_ids)
4869 + return -EINVAL;
4870 +
4871 + err = do_get_mempolicy(&pval, &nodes, addr, flags);
4872 +@@ -1527,7 +1527,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
4873 + unsigned long nr_bits, alloc_size;
4874 + DECLARE_BITMAP(bm, MAX_NUMNODES);
4875 +
4876 +- nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
4877 ++ nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
4878 + alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
4879 +
4880 + if (nmask)
4881 +diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
4882 +index b85ca809e5092..ffc83bebfe403 100644
4883 +--- a/net/batman-adv/soft-interface.c
4884 ++++ b/net/batman-adv/soft-interface.c
4885 +@@ -227,6 +227,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
4886 +
4887 + switch (ntohs(ethhdr->h_proto)) {
4888 + case ETH_P_8021Q:
4889 ++ if (!pskb_may_pull(skb, sizeof(*vhdr)))
4890 ++ goto dropped;
4891 + vhdr = vlan_eth_hdr(skb);
4892 +
4893 + /* drop batman-in-batman packets to prevent loops */
4894 +diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
4895 +index e56ba3912a905..8b8abf88befbd 100644
4896 +--- a/net/bridge/br_fdb.c
4897 ++++ b/net/bridge/br_fdb.c
4898 +@@ -1102,6 +1102,8 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
4899 + err = -ENOMEM;
4900 + goto err_unlock;
4901 + }
4902 ++ if (swdev_notify)
4903 ++ fdb->added_by_user = 1;
4904 + fdb->added_by_external_learn = 1;
4905 + fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
4906 + } else {
4907 +@@ -1121,6 +1123,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
4908 + modified = true;
4909 + }
4910 +
4911 ++ if (swdev_notify)
4912 ++ fdb->added_by_user = 1;
4913 ++
4914 + if (modified)
4915 + fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
4916 + }
4917 +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
4918 +index 6bac0d6b7b941..024139b51d3a5 100644
4919 +--- a/net/bridge/br_multicast.c
4920 ++++ b/net/bridge/br_multicast.c
4921 +@@ -1422,14 +1422,7 @@ static void br_multicast_query_received(struct net_bridge *br,
4922 + return;
4923 +
4924 + br_multicast_update_query_timer(br, query, max_delay);
4925 +-
4926 +- /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
4927 +- * the arrival port for IGMP Queries where the source address
4928 +- * is 0.0.0.0 should not be added to router port list.
4929 +- */
4930 +- if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
4931 +- saddr->proto == htons(ETH_P_IPV6))
4932 +- br_multicast_mark_router(br, port);
4933 ++ br_multicast_mark_router(br, port);
4934 + }
4935 +
4936 + static void br_ip4_multicast_query(struct net_bridge *br,
4937 +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
4938 +index 664f886f464da..b59b81fc1ab66 100644
4939 +--- a/net/ceph/messenger.c
4940 ++++ b/net/ceph/messenger.c
4941 +@@ -2071,6 +2071,8 @@ static int process_connect(struct ceph_connection *con)
4942 + dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
4943 +
4944 + if (con->auth) {
4945 ++ int len = le32_to_cpu(con->in_reply.authorizer_len);
4946 ++
4947 + /*
4948 + * Any connection that defines ->get_authorizer()
4949 + * should also define ->add_authorizer_challenge() and
4950 +@@ -2080,8 +2082,7 @@ static int process_connect(struct ceph_connection *con)
4951 + */
4952 + if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) {
4953 + ret = con->ops->add_authorizer_challenge(
4954 +- con, con->auth->authorizer_reply_buf,
4955 +- le32_to_cpu(con->in_reply.authorizer_len));
4956 ++ con, con->auth->authorizer_reply_buf, len);
4957 + if (ret < 0)
4958 + return ret;
4959 +
4960 +@@ -2091,10 +2092,12 @@ static int process_connect(struct ceph_connection *con)
4961 + return 0;
4962 + }
4963 +
4964 +- ret = con->ops->verify_authorizer_reply(con);
4965 +- if (ret < 0) {
4966 +- con->error_msg = "bad authorize reply";
4967 +- return ret;
4968 ++ if (len) {
4969 ++ ret = con->ops->verify_authorizer_reply(con);
4970 ++ if (ret < 0) {
4971 ++ con->error_msg = "bad authorize reply";
4972 ++ return ret;
4973 ++ }
4974 + }
4975 + }
4976 +
4977 +diff --git a/net/core/filter.c b/net/core/filter.c
4978 +index eb0007f30142b..16350f8c8815a 100644
4979 +--- a/net/core/filter.c
4980 ++++ b/net/core/filter.c
4981 +@@ -3935,6 +3935,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4982 + sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
4983 + break;
4984 + case SO_MAX_PACING_RATE: /* 32bit version */
4985 ++ if (val != ~0U)
4986 ++ cmpxchg(&sk->sk_pacing_status,
4987 ++ SK_PACING_NONE,
4988 ++ SK_PACING_NEEDED);
4989 + sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
4990 + sk->sk_pacing_rate = min(sk->sk_pacing_rate,
4991 + sk->sk_max_pacing_rate);
4992 +@@ -3948,7 +3952,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4993 + sk->sk_rcvlowat = val ? : 1;
4994 + break;
4995 + case SO_MARK:
4996 +- sk->sk_mark = val;
4997 ++ if (sk->sk_mark != val) {
4998 ++ sk->sk_mark = val;
4999 ++ sk_dst_reset(sk);
5000 ++ }
5001 + break;
5002 + default:
5003 + ret = -EINVAL;
5004 +@@ -4019,7 +4026,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
5005 + /* Only some options are supported */
5006 + switch (optname) {
5007 + case TCP_BPF_IW:
5008 +- if (val <= 0 || tp->data_segs_out > 0)
5009 ++ if (val <= 0 || tp->data_segs_out > tp->syn_data)
5010 + ret = -EINVAL;
5011 + else
5012 + tp->snd_cwnd = val;
5013 +diff --git a/net/dsa/port.c b/net/dsa/port.c
5014 +index ed0595459df13..792a13068c50b 100644
5015 +--- a/net/dsa/port.c
5016 ++++ b/net/dsa/port.c
5017 +@@ -255,7 +255,7 @@ int dsa_port_vlan_add(struct dsa_port *dp,
5018 + if (netif_is_bridge_master(vlan->obj.orig_dev))
5019 + return -EOPNOTSUPP;
5020 +
5021 +- if (br_vlan_enabled(dp->bridge_dev))
5022 ++ if (dp->bridge_dev && br_vlan_enabled(dp->bridge_dev))
5023 + return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
5024 +
5025 + return 0;
5026 +@@ -273,7 +273,7 @@ int dsa_port_vlan_del(struct dsa_port *dp,
5027 + if (netif_is_bridge_master(vlan->obj.orig_dev))
5028 + return -EOPNOTSUPP;
5029 +
5030 +- if (br_vlan_enabled(dp->bridge_dev))
5031 ++ if (dp->bridge_dev && br_vlan_enabled(dp->bridge_dev))
5032 + return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
5033 +
5034 + return 0;
5035 +diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
5036 +index 1a4e9ff02762e..5731670c560b0 100644
5037 +--- a/net/ipv4/inet_diag.c
5038 ++++ b/net/ipv4/inet_diag.c
5039 +@@ -108,6 +108,7 @@ static size_t inet_sk_attr_size(struct sock *sk,
5040 + + nla_total_size(1) /* INET_DIAG_TOS */
5041 + + nla_total_size(1) /* INET_DIAG_TCLASS */
5042 + + nla_total_size(4) /* INET_DIAG_MARK */
5043 ++ + nla_total_size(4) /* INET_DIAG_CLASS_ID */
5044 + + nla_total_size(sizeof(struct inet_diag_meminfo))
5045 + + nla_total_size(sizeof(struct inet_diag_msg))
5046 + + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
5047 +@@ -287,12 +288,19 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
5048 + goto errout;
5049 + }
5050 +
5051 +- if (ext & (1 << (INET_DIAG_CLASS_ID - 1))) {
5052 ++ if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
5053 ++ ext & (1 << (INET_DIAG_TCLASS - 1))) {
5054 + u32 classid = 0;
5055 +
5056 + #ifdef CONFIG_SOCK_CGROUP_DATA
5057 + classid = sock_cgroup_classid(&sk->sk_cgrp_data);
5058 + #endif
5059 ++	/* Fall back to socket priority if class id isn't set.
5060 ++ * Classful qdiscs use it as direct reference to class.
5061 ++ * For cgroup2 classid is always zero.
5062 ++ */
5063 ++ if (!classid)
5064 ++ classid = sk->sk_priority;
5065 +
5066 + if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
5067 + goto errout;
5068 +diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
5069 +index fb1e7f237f531..3cd237b42f446 100644
5070 +--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
5071 ++++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
5072 +@@ -56,7 +56,7 @@ struct clusterip_config {
5073 + #endif
5074 + enum clusterip_hashmode hash_mode; /* which hashing mode */
5075 + u_int32_t hash_initval; /* hash initialization */
5076 +- struct rcu_head rcu;
5077 ++ struct rcu_head rcu; /* for call_rcu_bh */
5078 + struct net *net; /* netns for pernet list */
5079 + char ifname[IFNAMSIZ]; /* device ifname */
5080 + };
5081 +@@ -72,6 +72,8 @@ struct clusterip_net {
5082 +
5083 + #ifdef CONFIG_PROC_FS
5084 + struct proc_dir_entry *procdir;
5085 ++	/* mutex protects the config->pde */
5086 ++ struct mutex mutex;
5087 + #endif
5088 + };
5089 +
5090 +@@ -118,17 +120,18 @@ clusterip_config_entry_put(struct clusterip_config *c)
5091 +
5092 + local_bh_disable();
5093 + if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
5094 ++ list_del_rcu(&c->list);
5095 ++ spin_unlock(&cn->lock);
5096 ++ local_bh_enable();
5097 + /* In case anyone still accesses the file, the open/close
5098 + * functions are also incrementing the refcount on their own,
5099 + * so it's safe to remove the entry even if it's in use. */
5100 + #ifdef CONFIG_PROC_FS
5101 ++ mutex_lock(&cn->mutex);
5102 + if (cn->procdir)
5103 + proc_remove(c->pde);
5104 ++ mutex_unlock(&cn->mutex);
5105 + #endif
5106 +- list_del_rcu(&c->list);
5107 +- spin_unlock(&cn->lock);
5108 +- local_bh_enable();
5109 +-
5110 + return;
5111 + }
5112 + local_bh_enable();
5113 +@@ -278,9 +281,11 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
5114 +
5115 + /* create proc dir entry */
5116 + sprintf(buffer, "%pI4", &ip);
5117 ++ mutex_lock(&cn->mutex);
5118 + c->pde = proc_create_data(buffer, 0600,
5119 + cn->procdir,
5120 + &clusterip_proc_fops, c);
5121 ++ mutex_unlock(&cn->mutex);
5122 + if (!c->pde) {
5123 + err = -ENOMEM;
5124 + goto err;
5125 +@@ -833,6 +838,7 @@ static int clusterip_net_init(struct net *net)
5126 + pr_err("Unable to proc dir entry\n");
5127 + return -ENOMEM;
5128 + }
5129 ++ mutex_init(&cn->mutex);
5130 + #endif /* CONFIG_PROC_FS */
5131 +
5132 + return 0;
5133 +@@ -841,9 +847,12 @@ static int clusterip_net_init(struct net *net)
5134 + static void clusterip_net_exit(struct net *net)
5135 + {
5136 + struct clusterip_net *cn = clusterip_pernet(net);
5137 ++
5138 + #ifdef CONFIG_PROC_FS
5139 ++ mutex_lock(&cn->mutex);
5140 + proc_remove(cn->procdir);
5141 + cn->procdir = NULL;
5142 ++ mutex_unlock(&cn->mutex);
5143 + #endif
5144 + nf_unregister_net_hook(net, &cip_arp_ops);
5145 + }
5146 +diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
5147 +index 8b075f0bc3516..6d0b1f3e927bd 100644
5148 +--- a/net/ipv6/netfilter.c
5149 ++++ b/net/ipv6/netfilter.c
5150 +@@ -23,9 +23,11 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
5151 + struct sock *sk = sk_to_full_sk(skb->sk);
5152 + unsigned int hh_len;
5153 + struct dst_entry *dst;
5154 ++ int strict = (ipv6_addr_type(&iph->daddr) &
5155 ++ (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
5156 + struct flowi6 fl6 = {
5157 + .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
5158 +- rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
5159 ++ strict ? skb_dst(skb)->dev->ifindex : 0,
5160 + .flowi6_mark = skb->mark,
5161 + .flowi6_uid = sock_net_uid(net, sk),
5162 + .daddr = iph->daddr,
5163 +diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
5164 +index 8d0ba757a46ce..9b2f272ca1649 100644
5165 +--- a/net/ipv6/seg6.c
5166 ++++ b/net/ipv6/seg6.c
5167 +@@ -221,9 +221,7 @@ static int seg6_genl_get_tunsrc(struct sk_buff *skb, struct genl_info *info)
5168 + rcu_read_unlock();
5169 +
5170 + genlmsg_end(msg, hdr);
5171 +- genlmsg_reply(msg, info);
5172 +-
5173 +- return 0;
5174 ++ return genlmsg_reply(msg, info);
5175 +
5176 + nla_put_failure:
5177 + rcu_read_unlock();
5178 +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
5179 +index 1e03305c05492..e8a1dabef803e 100644
5180 +--- a/net/ipv6/sit.c
5181 ++++ b/net/ipv6/sit.c
5182 +@@ -546,7 +546,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
5183 + }
5184 +
5185 + err = 0;
5186 +- if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
5187 ++ if (__in6_dev_get(skb->dev) &&
5188 ++ !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
5189 + goto out;
5190 +
5191 + if (t->parms.iph.daddr == 0)
5192 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
5193 +index d01ec252cb81d..848dd38a907a1 100644
5194 +--- a/net/ipv6/udp.c
5195 ++++ b/net/ipv6/udp.c
5196 +@@ -1322,10 +1322,7 @@ do_udp_sendmsg:
5197 + ipc6.opt = opt;
5198 +
5199 + fl6.flowi6_proto = sk->sk_protocol;
5200 +- if (!ipv6_addr_any(daddr))
5201 +- fl6.daddr = *daddr;
5202 +- else
5203 +- fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
5204 ++ fl6.daddr = *daddr;
5205 + if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
5206 + fl6.saddr = np->saddr;
5207 + fl6.fl6_sport = inet->inet_sport;
5208 +@@ -1353,6 +1350,9 @@ do_udp_sendmsg:
5209 + }
5210 + }
5211 +
5212 ++ if (ipv6_addr_any(&fl6.daddr))
5213 ++ fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
5214 ++
5215 + final_p = fl6_update_dst(&fl6, opt, &final);
5216 + if (final_p)
5217 + connected = false;
5218 +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
5219 +index 818aa00603495..517dad83c2fa7 100644
5220 +--- a/net/mac80211/cfg.c
5221 ++++ b/net/mac80211/cfg.c
5222 +@@ -941,6 +941,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
5223 + BSS_CHANGED_P2P_PS |
5224 + BSS_CHANGED_TXPOWER;
5225 + int err;
5226 ++ int prev_beacon_int;
5227 +
5228 + old = sdata_dereference(sdata->u.ap.beacon, sdata);
5229 + if (old)
5230 +@@ -963,6 +964,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
5231 +
5232 + sdata->needed_rx_chains = sdata->local->rx_chains;
5233 +
5234 ++ prev_beacon_int = sdata->vif.bss_conf.beacon_int;
5235 + sdata->vif.bss_conf.beacon_int = params->beacon_interval;
5236 +
5237 + if (params->he_cap)
5238 +@@ -974,8 +976,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
5239 + if (!err)
5240 + ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
5241 + mutex_unlock(&local->mtx);
5242 +- if (err)
5243 ++ if (err) {
5244 ++ sdata->vif.bss_conf.beacon_int = prev_beacon_int;
5245 + return err;
5246 ++ }
5247 +
5248 + /*
5249 + * Apply control port protocol, this allows us to
5250 +diff --git a/net/mac80211/main.c b/net/mac80211/main.c
5251 +index 7b8320d4a8e4b..3131356e290a0 100644
5252 +--- a/net/mac80211/main.c
5253 ++++ b/net/mac80211/main.c
5254 +@@ -615,13 +615,13 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
5255 + * We need a bit of data queued to build aggregates properly, so
5256 + * instruct the TCP stack to allow more than a single ms of data
5257 + * to be queued in the stack. The value is a bit-shift of 1
5258 +- * second, so 8 is ~4ms of queued data. Only affects local TCP
5259 ++ * second, so 7 is ~8ms of queued data. Only affects local TCP
5260 + * sockets.
5261 + * This is the default, anyhow - drivers may need to override it
5262 + * for local reasons (longer buffers, longer completion time, or
5263 + * similar).
5264 + */
5265 +- local->hw.tx_sk_pacing_shift = 8;
5266 ++ local->hw.tx_sk_pacing_shift = 7;
5267 +
5268 + /* set up some defaults */
5269 + local->hw.queues = 1;
5270 +diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
5271 +index 21526630bf655..e84103b405341 100644
5272 +--- a/net/mac80211/mesh.h
5273 ++++ b/net/mac80211/mesh.h
5274 +@@ -70,6 +70,7 @@ enum mesh_deferred_task_flags {
5275 + * @dst: mesh path destination mac address
5276 + * @mpp: mesh proxy mac address
5277 + * @rhash: rhashtable list pointer
5278 ++ * @walk_list: linked list containing all mesh_path objects.
5279 + * @gate_list: list pointer for known gates list
5280 + * @sdata: mesh subif
5281 + * @next_hop: mesh neighbor to which frames for this destination will be
5282 +@@ -105,6 +106,7 @@ struct mesh_path {
5283 + u8 dst[ETH_ALEN];
5284 + u8 mpp[ETH_ALEN]; /* used for MPP or MAP */
5285 + struct rhash_head rhash;
5286 ++ struct hlist_node walk_list;
5287 + struct hlist_node gate_list;
5288 + struct ieee80211_sub_if_data *sdata;
5289 + struct sta_info __rcu *next_hop;
5290 +@@ -133,12 +135,16 @@ struct mesh_path {
5291 + * gate's mpath may or may not be resolved and active.
5292 + * @gates_lock: protects updates to known_gates
5293 + * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
5294 ++ * @walk_head: linked list containing all mesh_path objects
5295 ++ * @walk_lock: lock protecting walk_head
5296 + * @entries: number of entries in the table
5297 + */
5298 + struct mesh_table {
5299 + struct hlist_head known_gates;
5300 + spinlock_t gates_lock;
5301 + struct rhashtable rhead;
5302 ++ struct hlist_head walk_head;
5303 ++ spinlock_t walk_lock;
5304 + atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
5305 + };
5306 +
5307 +diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
5308 +index a5125624a76dc..c3a7396fb9556 100644
5309 +--- a/net/mac80211/mesh_pathtbl.c
5310 ++++ b/net/mac80211/mesh_pathtbl.c
5311 +@@ -59,8 +59,10 @@ static struct mesh_table *mesh_table_alloc(void)
5312 + return NULL;
5313 +
5314 + INIT_HLIST_HEAD(&newtbl->known_gates);
5315 ++ INIT_HLIST_HEAD(&newtbl->walk_head);
5316 + atomic_set(&newtbl->entries, 0);
5317 + spin_lock_init(&newtbl->gates_lock);
5318 ++ spin_lock_init(&newtbl->walk_lock);
5319 +
5320 + return newtbl;
5321 + }
5322 +@@ -249,28 +251,15 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
5323 + static struct mesh_path *
5324 + __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
5325 + {
5326 +- int i = 0, ret;
5327 +- struct mesh_path *mpath = NULL;
5328 +- struct rhashtable_iter iter;
5329 +-
5330 +- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
5331 +- if (ret)
5332 +- return NULL;
5333 +-
5334 +- rhashtable_walk_start(&iter);
5335 ++ int i = 0;
5336 ++ struct mesh_path *mpath;
5337 +
5338 +- while ((mpath = rhashtable_walk_next(&iter))) {
5339 +- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
5340 +- continue;
5341 +- if (IS_ERR(mpath))
5342 +- break;
5343 ++ hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
5344 + if (i++ == idx)
5345 + break;
5346 + }
5347 +- rhashtable_walk_stop(&iter);
5348 +- rhashtable_walk_exit(&iter);
5349 +
5350 +- if (IS_ERR(mpath) || !mpath)
5351 ++ if (!mpath)
5352 + return NULL;
5353 +
5354 + if (mpath_expired(mpath)) {
5355 +@@ -432,6 +421,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
5356 + return ERR_PTR(-ENOMEM);
5357 +
5358 + tbl = sdata->u.mesh.mesh_paths;
5359 ++ spin_lock_bh(&tbl->walk_lock);
5360 + do {
5361 + ret = rhashtable_lookup_insert_fast(&tbl->rhead,
5362 + &new_mpath->rhash,
5363 +@@ -441,20 +431,20 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
5364 + mpath = rhashtable_lookup_fast(&tbl->rhead,
5365 + dst,
5366 + mesh_rht_params);
5367 +-
5368 ++ else if (!ret)
5369 ++ hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
5370 + } while (unlikely(ret == -EEXIST && !mpath));
5371 ++ spin_unlock_bh(&tbl->walk_lock);
5372 +
5373 +- if (ret && ret != -EEXIST)
5374 +- return ERR_PTR(ret);
5375 +-
5376 +- /* At this point either new_mpath was added, or we found a
5377 +- * matching entry already in the table; in the latter case
5378 +- * free the unnecessary new entry.
5379 +- */
5380 +- if (ret == -EEXIST) {
5381 ++ if (ret) {
5382 + kfree(new_mpath);
5383 ++
5384 ++ if (ret != -EEXIST)
5385 ++ return ERR_PTR(ret);
5386 ++
5387 + new_mpath = mpath;
5388 + }
5389 ++
5390 + sdata->u.mesh.mesh_paths_generation++;
5391 + return new_mpath;
5392 + }
5393 +@@ -480,9 +470,17 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
5394 +
5395 + memcpy(new_mpath->mpp, mpp, ETH_ALEN);
5396 + tbl = sdata->u.mesh.mpp_paths;
5397 ++
5398 ++ spin_lock_bh(&tbl->walk_lock);
5399 + ret = rhashtable_lookup_insert_fast(&tbl->rhead,
5400 + &new_mpath->rhash,
5401 + mesh_rht_params);
5402 ++ if (!ret)
5403 ++ hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
5404 ++ spin_unlock_bh(&tbl->walk_lock);
5405 ++
5406 ++ if (ret)
5407 ++ kfree(new_mpath);
5408 +
5409 + sdata->u.mesh.mpp_paths_generation++;
5410 + return ret;
5411 +@@ -503,20 +501,9 @@ void mesh_plink_broken(struct sta_info *sta)
5412 + struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
5413 + static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
5414 + struct mesh_path *mpath;
5415 +- struct rhashtable_iter iter;
5416 +- int ret;
5417 +-
5418 +- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
5419 +- if (ret)
5420 +- return;
5421 +
5422 +- rhashtable_walk_start(&iter);
5423 +-
5424 +- while ((mpath = rhashtable_walk_next(&iter))) {
5425 +- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
5426 +- continue;
5427 +- if (IS_ERR(mpath))
5428 +- break;
5429 ++ rcu_read_lock();
5430 ++ hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
5431 + if (rcu_access_pointer(mpath->next_hop) == sta &&
5432 + mpath->flags & MESH_PATH_ACTIVE &&
5433 + !(mpath->flags & MESH_PATH_FIXED)) {
5434 +@@ -530,8 +517,7 @@ void mesh_plink_broken(struct sta_info *sta)
5435 + WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
5436 + }
5437 + }
5438 +- rhashtable_walk_stop(&iter);
5439 +- rhashtable_walk_exit(&iter);
5440 ++ rcu_read_unlock();
5441 + }
5442 +
5443 + static void mesh_path_free_rcu(struct mesh_table *tbl,
5444 +@@ -551,6 +537,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
5445 +
5446 + static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
5447 + {
5448 ++ hlist_del_rcu(&mpath->walk_list);
5449 + rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
5450 + mesh_path_free_rcu(tbl, mpath);
5451 + }
5452 +@@ -571,27 +558,14 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
5453 + struct ieee80211_sub_if_data *sdata = sta->sdata;
5454 + struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
5455 + struct mesh_path *mpath;
5456 +- struct rhashtable_iter iter;
5457 +- int ret;
5458 +-
5459 +- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
5460 +- if (ret)
5461 +- return;
5462 +-
5463 +- rhashtable_walk_start(&iter);
5464 +-
5465 +- while ((mpath = rhashtable_walk_next(&iter))) {
5466 +- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
5467 +- continue;
5468 +- if (IS_ERR(mpath))
5469 +- break;
5470 ++ struct hlist_node *n;
5471 +
5472 ++ spin_lock_bh(&tbl->walk_lock);
5473 ++ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
5474 + if (rcu_access_pointer(mpath->next_hop) == sta)
5475 + __mesh_path_del(tbl, mpath);
5476 + }
5477 +-
5478 +- rhashtable_walk_stop(&iter);
5479 +- rhashtable_walk_exit(&iter);
5480 ++ spin_unlock_bh(&tbl->walk_lock);
5481 + }
5482 +
5483 + static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
5484 +@@ -599,51 +573,26 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
5485 + {
5486 + struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
5487 + struct mesh_path *mpath;
5488 +- struct rhashtable_iter iter;
5489 +- int ret;
5490 +-
5491 +- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
5492 +- if (ret)
5493 +- return;
5494 +-
5495 +- rhashtable_walk_start(&iter);
5496 +-
5497 +- while ((mpath = rhashtable_walk_next(&iter))) {
5498 +- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
5499 +- continue;
5500 +- if (IS_ERR(mpath))
5501 +- break;
5502 ++ struct hlist_node *n;
5503 +
5504 ++ spin_lock_bh(&tbl->walk_lock);
5505 ++ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
5506 + if (ether_addr_equal(mpath->mpp, proxy))
5507 + __mesh_path_del(tbl, mpath);
5508 + }
5509 +-
5510 +- rhashtable_walk_stop(&iter);
5511 +- rhashtable_walk_exit(&iter);
5512 ++ spin_unlock_bh(&tbl->walk_lock);
5513 + }
5514 +
5515 + static void table_flush_by_iface(struct mesh_table *tbl)
5516 + {
5517 + struct mesh_path *mpath;
5518 +- struct rhashtable_iter iter;
5519 +- int ret;
5520 +-
5521 +- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
5522 +- if (ret)
5523 +- return;
5524 +-
5525 +- rhashtable_walk_start(&iter);
5526 ++ struct hlist_node *n;
5527 +
5528 +- while ((mpath = rhashtable_walk_next(&iter))) {
5529 +- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
5530 +- continue;
5531 +- if (IS_ERR(mpath))
5532 +- break;
5533 ++ spin_lock_bh(&tbl->walk_lock);
5534 ++ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
5535 + __mesh_path_del(tbl, mpath);
5536 + }
5537 +-
5538 +- rhashtable_walk_stop(&iter);
5539 +- rhashtable_walk_exit(&iter);
5540 ++ spin_unlock_bh(&tbl->walk_lock);
5541 + }
5542 +
5543 + /**
5544 +@@ -675,7 +624,7 @@ static int table_path_del(struct mesh_table *tbl,
5545 + {
5546 + struct mesh_path *mpath;
5547 +
5548 +- rcu_read_lock();
5549 ++ spin_lock_bh(&tbl->walk_lock);
5550 + mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
5551 + if (!mpath) {
5552 + rcu_read_unlock();
5553 +@@ -683,7 +632,7 @@ static int table_path_del(struct mesh_table *tbl,
5554 + }
5555 +
5556 + __mesh_path_del(tbl, mpath);
5557 +- rcu_read_unlock();
5558 ++ spin_unlock_bh(&tbl->walk_lock);
5559 + return 0;
5560 + }
5561 +
5562 +@@ -854,28 +803,16 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
5563 + struct mesh_table *tbl)
5564 + {
5565 + struct mesh_path *mpath;
5566 +- struct rhashtable_iter iter;
5567 +- int ret;
5568 ++ struct hlist_node *n;
5569 +
5570 +- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL);
5571 +- if (ret)
5572 +- return;
5573 +-
5574 +- rhashtable_walk_start(&iter);
5575 +-
5576 +- while ((mpath = rhashtable_walk_next(&iter))) {
5577 +- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
5578 +- continue;
5579 +- if (IS_ERR(mpath))
5580 +- break;
5581 ++ spin_lock_bh(&tbl->walk_lock);
5582 ++ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
5583 + if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
5584 + (!(mpath->flags & MESH_PATH_FIXED)) &&
5585 + time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
5586 + __mesh_path_del(tbl, mpath);
5587 + }
5588 +-
5589 +- rhashtable_walk_stop(&iter);
5590 +- rhashtable_walk_exit(&iter);
5591 ++ spin_unlock_bh(&tbl->walk_lock);
5592 + }
5593 +
5594 + void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
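
The mesh path table rework above threads every entry onto a plain linked list guarded by walk_lock, so the flush and expiry paths can use a simple deletion-safe walk instead of the restartable rhashtable iterators they replace. A toy single-threaded sketch of the deletion-safe walk (locking elided; names hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    struct mpath {
            int id;
            struct mpath *next;
    };

    int main(void)
    {
            struct mpath *head = NULL, *m, *next;

            for (int i = 0; i < 4; i++) {
                    m = malloc(sizeof(*m));
                    m->id = i;
                    m->next = head;
                    head = m;
            }
            /* "safe" walk: fetch next before freeing the current entry, as
             * hlist_for_each_entry_safe() does in the patch */
            for (m = head; m; m = next) {
                    next = m->next;
                    printf("deleting mpath %d\n", m->id);
                    free(m);
            }
            return 0;
    }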
5595 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
5596 +index 85c365fc7a0c1..46ecc417c4210 100644
5597 +--- a/net/mac80211/rx.c
5598 ++++ b/net/mac80211/rx.c
5599 +@@ -2640,6 +2640,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
5600 + struct ieee80211_sub_if_data *sdata = rx->sdata;
5601 + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
5602 + u16 ac, q, hdrlen;
5603 ++ int tailroom = 0;
5604 +
5605 + hdr = (struct ieee80211_hdr *) skb->data;
5606 + hdrlen = ieee80211_hdrlen(hdr->frame_control);
5607 +@@ -2726,8 +2727,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
5608 + if (!ifmsh->mshcfg.dot11MeshForwarding)
5609 + goto out;
5610 +
5611 ++ if (sdata->crypto_tx_tailroom_needed_cnt)
5612 ++ tailroom = IEEE80211_ENCRYPT_TAILROOM;
5613 ++
5614 + fwd_skb = skb_copy_expand(skb, local->tx_headroom +
5615 +- sdata->encrypt_headroom, 0, GFP_ATOMIC);
5616 ++ sdata->encrypt_headroom,
5617 ++ tailroom, GFP_ATOMIC);
5618 + if (!fwd_skb)
5619 + goto out;
5620 +
5621 +diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
5622 +index b7a4816add765..cc91b4d6aa22f 100644
5623 +--- a/net/netfilter/nf_flow_table_core.c
5624 ++++ b/net/netfilter/nf_flow_table_core.c
5625 +@@ -28,6 +28,7 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
5626 + {
5627 + struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
5628 + struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
5629 ++ struct dst_entry *other_dst = route->tuple[!dir].dst;
5630 + struct dst_entry *dst = route->tuple[dir].dst;
5631 +
5632 + ft->dir = dir;
5633 +@@ -50,8 +51,8 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
5634 + ft->src_port = ctt->src.u.tcp.port;
5635 + ft->dst_port = ctt->dst.u.tcp.port;
5636 +
5637 +- ft->iifidx = route->tuple[dir].ifindex;
5638 +- ft->oifidx = route->tuple[!dir].ifindex;
5639 ++ ft->iifidx = other_dst->dev->ifindex;
5640 ++ ft->oifidx = dst->dev->ifindex;
5641 + ft->dst_cache = dst;
5642 + }
5643 +
5644 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
5645 +index 6e548d7c9f67b..5114a0d2a41eb 100644
5646 +--- a/net/netfilter/nf_tables_api.c
5647 ++++ b/net/netfilter/nf_tables_api.c
5648 +@@ -307,6 +307,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
5649 + int err;
5650 +
5651 + list_for_each_entry(rule, &ctx->chain->rules, list) {
5652 ++ if (!nft_is_active_next(ctx->net, rule))
5653 ++ continue;
5654 ++
5655 + err = nft_delrule(ctx, rule);
5656 + if (err < 0)
5657 + return err;
5658 +@@ -4474,6 +4477,8 @@ err6:
5659 + err5:
5660 + kfree(trans);
5661 + err4:
5662 ++ if (obj)
5663 ++ obj->use--;
5664 + kfree(elem.priv);
5665 + err3:
5666 + if (nla[NFTA_SET_ELEM_DATA] != NULL)
5667 +diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
5668 +index 6f41dd74729d9..1f1d90c1716b5 100644
5669 +--- a/net/netfilter/nfnetlink_osf.c
5670 ++++ b/net/netfilter/nfnetlink_osf.c
5671 +@@ -66,6 +66,7 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
5672 + int ttl_check,
5673 + struct nf_osf_hdr_ctx *ctx)
5674 + {
5675 ++ const __u8 *optpinit = ctx->optp;
5676 + unsigned int check_WSS = 0;
5677 + int fmatch = FMATCH_WRONG;
5678 + int foptsize, optnum;
5679 +@@ -155,6 +156,9 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
5680 + }
5681 + }
5682 +
5683 ++ if (fmatch != FMATCH_OK)
5684 ++ ctx->optp = optpinit;
5685 ++
5686 + return fmatch == FMATCH_OK;
5687 + }
5688 +
5689 +diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
5690 +index 7334e0b80a5ef..c90a4640723f5 100644
5691 +--- a/net/netfilter/nft_compat.c
5692 ++++ b/net/netfilter/nft_compat.c
5693 +@@ -282,6 +282,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
5694 + {
5695 + struct xt_target *target = expr->ops->data;
5696 + void *info = nft_expr_priv(expr);
5697 ++ struct module *me = target->me;
5698 + struct xt_tgdtor_param par;
5699 +
5700 + par.net = ctx->net;
5701 +@@ -292,7 +293,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
5702 + par.target->destroy(&par);
5703 +
5704 + if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
5705 +- module_put(target->me);
5706 ++ module_put(me);
5707 + }
5708 +
5709 + static int nft_extension_dump_info(struct sk_buff *skb, int attr,
5710 +diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
5711 +index 974525eb92df7..6e6b9adf7d387 100644
5712 +--- a/net/netfilter/nft_flow_offload.c
5713 ++++ b/net/netfilter/nft_flow_offload.c
5714 +@@ -12,6 +12,7 @@
5715 + #include <net/netfilter/nf_conntrack_core.h>
5716 + #include <linux/netfilter/nf_conntrack_common.h>
5717 + #include <net/netfilter/nf_flow_table.h>
5718 ++#include <net/netfilter/nf_conntrack_helper.h>
5719 +
5720 + struct nft_flow_offload {
5721 + struct nft_flowtable *flowtable;
5722 +@@ -29,10 +30,12 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
5723 + memset(&fl, 0, sizeof(fl));
5724 + switch (nft_pf(pkt)) {
5725 + case NFPROTO_IPV4:
5726 +- fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip;
5727 ++ fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
5728 ++ fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
5729 + break;
5730 + case NFPROTO_IPV6:
5731 +- fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6;
5732 ++ fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
5733 ++ fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
5734 + break;
5735 + }
5736 +
5737 +@@ -41,9 +44,7 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
5738 + return -ENOENT;
5739 +
5740 + route->tuple[dir].dst = this_dst;
5741 +- route->tuple[dir].ifindex = nft_in(pkt)->ifindex;
5742 + route->tuple[!dir].dst = other_dst;
5743 +- route->tuple[!dir].ifindex = nft_out(pkt)->ifindex;
5744 +
5745 + return 0;
5746 + }
5747 +@@ -66,6 +67,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
5748 + {
5749 + struct nft_flow_offload *priv = nft_expr_priv(expr);
5750 + struct nf_flowtable *flowtable = &priv->flowtable->data;
5751 ++ const struct nf_conn_help *help;
5752 + enum ip_conntrack_info ctinfo;
5753 + struct nf_flow_route route;
5754 + struct flow_offload *flow;
5755 +@@ -88,7 +90,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
5756 + goto out;
5757 + }
5758 +
5759 +- if (test_bit(IPS_HELPER_BIT, &ct->status))
5760 ++ help = nfct_help(ct);
5761 ++ if (help)
5762 + goto out;
5763 +
5764 + if (ctinfo == IP_CT_NEW ||
5765 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
5766 +index 3b1a78906bc0c..1cd1d83a4be08 100644
5767 +--- a/net/packet/af_packet.c
5768 ++++ b/net/packet/af_packet.c
5769 +@@ -4292,7 +4292,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
5770 + rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
5771 + if (unlikely(rb->frames_per_block == 0))
5772 + goto out;
5773 +- if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
5774 ++ if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
5775 + goto out;
5776 + if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
5777 + req->tp_frame_nr))
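
The af_packet fix above validates the factors that are actually multiplied afterwards (frames_per_block * tp_block_nr) rather than tp_block_size: dividing UINT_MAX by one factor bounds the other without the check itself overflowing. A sketch with hypothetical values:

    #include <limits.h>
    #include <stdio.h>

    static int mul_would_overflow(unsigned int a, unsigned int b)
    {
            /* a * b > UINT_MAX  <=>  a > UINT_MAX / b, computed without overflow */
            return b != 0 && a > UINT_MAX / b;
    }

    int main(void)
    {
            unsigned int frames_per_block = 0x20000;
            unsigned int block_nr = 0x10000;

            if (mul_would_overflow(frames_per_block, block_nr))
                    puts("rejected: frames_per_block * block_nr overflows");
            else
                    printf("total frames: %u\n", frames_per_block * block_nr);
            return 0;
    }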
5778 +diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
5779 +index 9ccc93f257db0..38bb882bb9587 100644
5780 +--- a/net/sched/cls_tcindex.c
5781 ++++ b/net/sched/cls_tcindex.c
5782 +@@ -48,7 +48,7 @@ struct tcindex_data {
5783 + u32 hash; /* hash table size; 0 if undefined */
5784 + u32 alloc_hash; /* allocated size */
5785 + u32 fall_through; /* 0: only classify if explicit match */
5786 +- struct rcu_head rcu;
5787 ++ struct rcu_work rwork;
5788 + };
5789 +
5790 + static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
5791 +@@ -221,17 +221,11 @@ found:
5792 + return 0;
5793 + }
5794 +
5795 +-static int tcindex_destroy_element(struct tcf_proto *tp,
5796 +- void *arg, struct tcf_walker *walker)
5797 +-{
5798 +- bool last;
5799 +-
5800 +- return tcindex_delete(tp, arg, &last, NULL);
5801 +-}
5802 +-
5803 +-static void __tcindex_destroy(struct rcu_head *head)
5804 ++static void tcindex_destroy_work(struct work_struct *work)
5805 + {
5806 +- struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
5807 ++ struct tcindex_data *p = container_of(to_rcu_work(work),
5808 ++ struct tcindex_data,
5809 ++ rwork);
5810 +
5811 + kfree(p->perfect);
5812 + kfree(p->h);
5813 +@@ -258,9 +252,11 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r)
5814 + return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
5815 + }
5816 +
5817 +-static void __tcindex_partial_destroy(struct rcu_head *head)
5818 ++static void tcindex_partial_destroy_work(struct work_struct *work)
5819 + {
5820 +- struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
5821 ++ struct tcindex_data *p = container_of(to_rcu_work(work),
5822 ++ struct tcindex_data,
5823 ++ rwork);
5824 +
5825 + kfree(p->perfect);
5826 + kfree(p);
5827 +@@ -275,7 +271,7 @@ static void tcindex_free_perfect_hash(struct tcindex_data *cp)
5828 + kfree(cp->perfect);
5829 + }
5830 +
5831 +-static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
5832 ++static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
5833 + {
5834 + int i, err = 0;
5835 +
5836 +@@ -289,6 +285,9 @@ static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
5837 + TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
5838 + if (err < 0)
5839 + goto errout;
5840 ++#ifdef CONFIG_NET_CLS_ACT
5841 ++ cp->perfect[i].exts.net = net;
5842 ++#endif
5843 + }
5844 +
5845 + return 0;
5846 +@@ -305,9 +304,9 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5847 + struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
5848 + {
5849 + struct tcindex_filter_result new_filter_result, *old_r = r;
5850 +- struct tcindex_filter_result cr;
5851 + struct tcindex_data *cp = NULL, *oldp;
5852 + struct tcindex_filter *f = NULL; /* make gcc behave */
5853 ++ struct tcf_result cr = {};
5854 + int err, balloc = 0;
5855 + struct tcf_exts e;
5856 +
5857 +@@ -337,7 +336,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5858 + if (p->perfect) {
5859 + int i;
5860 +
5861 +- if (tcindex_alloc_perfect_hash(cp) < 0)
5862 ++ if (tcindex_alloc_perfect_hash(net, cp) < 0)
5863 + goto errout;
5864 + for (i = 0; i < cp->hash; i++)
5865 + cp->perfect[i].res = p->perfect[i].res;
5866 +@@ -346,13 +345,10 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5867 + cp->h = p->h;
5868 +
5869 + err = tcindex_filter_result_init(&new_filter_result);
5870 +- if (err < 0)
5871 +- goto errout1;
5872 +- err = tcindex_filter_result_init(&cr);
5873 + if (err < 0)
5874 + goto errout1;
5875 + if (old_r)
5876 +- cr.res = r->res;
5877 ++ cr = r->res;
5878 +
5879 + if (tb[TCA_TCINDEX_HASH])
5880 + cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
5881 +@@ -406,7 +402,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5882 + err = -ENOMEM;
5883 + if (!cp->perfect && !cp->h) {
5884 + if (valid_perfect_hash(cp)) {
5885 +- if (tcindex_alloc_perfect_hash(cp) < 0)
5886 ++ if (tcindex_alloc_perfect_hash(net, cp) < 0)
5887 + goto errout_alloc;
5888 + balloc = 1;
5889 + } else {
5890 +@@ -443,8 +439,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5891 + }
5892 +
5893 + if (tb[TCA_TCINDEX_CLASSID]) {
5894 +- cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
5895 +- tcf_bind_filter(tp, &cr.res, base);
5896 ++ cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
5897 ++ tcf_bind_filter(tp, &cr, base);
5898 + }
5899 +
5900 + if (old_r && old_r != r) {
5901 +@@ -456,7 +452,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5902 + }
5903 +
5904 + oldp = p;
5905 +- r->res = cr.res;
5906 ++ r->res = cr;
5907 + tcf_exts_change(&r->exts, &e);
5908 +
5909 + rcu_assign_pointer(tp->root, cp);
5910 +@@ -475,10 +471,12 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5911 + ; /* nothing */
5912 +
5913 + rcu_assign_pointer(*fp, f);
5914 ++ } else {
5915 ++ tcf_exts_destroy(&new_filter_result.exts);
5916 + }
5917 +
5918 + if (oldp)
5919 +- call_rcu(&oldp->rcu, __tcindex_partial_destroy);
5920 ++ tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
5921 + return 0;
5922 +
5923 + errout_alloc:
5924 +@@ -487,7 +485,6 @@ errout_alloc:
5925 + else if (balloc == 2)
5926 + kfree(cp->h);
5927 + errout1:
5928 +- tcf_exts_destroy(&cr.exts);
5929 + tcf_exts_destroy(&new_filter_result.exts);
5930 + errout:
5931 + kfree(cp);
5932 +@@ -562,15 +559,34 @@ static void tcindex_destroy(struct tcf_proto *tp,
5933 + struct netlink_ext_ack *extack)
5934 + {
5935 + struct tcindex_data *p = rtnl_dereference(tp->root);
5936 +- struct tcf_walker walker;
5937 ++ int i;
5938 +
5939 + pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
5940 +- walker.count = 0;
5941 +- walker.skip = 0;
5942 +- walker.fn = tcindex_destroy_element;
5943 +- tcindex_walk(tp, &walker);
5944 +
5945 +- call_rcu(&p->rcu, __tcindex_destroy);
5946 ++ if (p->perfect) {
5947 ++ for (i = 0; i < p->hash; i++) {
5948 ++ struct tcindex_filter_result *r = p->perfect + i;
5949 ++
5950 ++ tcf_unbind_filter(tp, &r->res);
5951 ++ if (tcf_exts_get_net(&r->exts))
5952 ++ tcf_queue_work(&r->rwork,
5953 ++ tcindex_destroy_rexts_work);
5954 ++ else
5955 ++ __tcindex_destroy_rexts(r);
5956 ++ }
5957 ++ }
5958 ++
5959 ++ for (i = 0; p->h && i < p->hash; i++) {
5960 ++ struct tcindex_filter *f, *next;
5961 ++ bool last;
5962 ++
5963 ++ for (f = rtnl_dereference(p->h[i]); f; f = next) {
5964 ++ next = rtnl_dereference(f->next);
5965 ++ tcindex_delete(tp, &f->result, &last, NULL);
5966 ++ }
5967 ++ }
5968 ++
5969 ++ tcf_queue_work(&p->rwork, tcindex_destroy_work);
5970 + }
5971 +
5972 +
5973 +diff --git a/net/sctp/diag.c b/net/sctp/diag.c
5974 +index 078f01a8d582a..435847d98b51c 100644
5975 +--- a/net/sctp/diag.c
5976 ++++ b/net/sctp/diag.c
5977 +@@ -256,6 +256,7 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
5978 + + nla_total_size(1) /* INET_DIAG_TOS */
5979 + + nla_total_size(1) /* INET_DIAG_TCLASS */
5980 + + nla_total_size(4) /* INET_DIAG_MARK */
5981 ++ + nla_total_size(4) /* INET_DIAG_CLASS_ID */
5982 + + nla_total_size(addrlen * asoc->peer.transport_count)
5983 + + nla_total_size(addrlen * addrcnt)
5984 + + nla_total_size(sizeof(struct inet_diag_meminfo))
5985 +diff --git a/net/sctp/offload.c b/net/sctp/offload.c
5986 +index 123e9f2dc2265..edfcf16e704c4 100644
5987 +--- a/net/sctp/offload.c
5988 ++++ b/net/sctp/offload.c
5989 +@@ -36,6 +36,7 @@ static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
5990 + {
5991 + skb->ip_summed = CHECKSUM_NONE;
5992 + skb->csum_not_inet = 0;
5993 ++ gso_reset_checksum(skb, ~0);
5994 + return sctp_compute_cksum(skb, skb_transport_offset(skb));
5995 + }
5996 +
5997 +diff --git a/net/sctp/stream.c b/net/sctp/stream.c
5998 +index f24633114dfdf..2936ed17bf9ef 100644
5999 +--- a/net/sctp/stream.c
6000 ++++ b/net/sctp/stream.c
6001 +@@ -144,8 +144,10 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
6002 + }
6003 + }
6004 +
6005 +- for (i = outcnt; i < stream->outcnt; i++)
6006 ++ for (i = outcnt; i < stream->outcnt; i++) {
6007 + kfree(SCTP_SO(stream, i)->ext);
6008 ++ SCTP_SO(stream, i)->ext = NULL;
6009 ++ }
6010 + }
6011 +
6012 + static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
6013 +diff --git a/net/socket.c b/net/socket.c
6014 +index 334fcc617ef27..93a45f15ee40d 100644
6015 +--- a/net/socket.c
6016 ++++ b/net/socket.c
6017 +@@ -941,8 +941,7 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
6018 + EXPORT_SYMBOL(dlci_ioctl_set);
6019 +
6020 + static long sock_do_ioctl(struct net *net, struct socket *sock,
6021 +- unsigned int cmd, unsigned long arg,
6022 +- unsigned int ifreq_size)
6023 ++ unsigned int cmd, unsigned long arg)
6024 + {
6025 + int err;
6026 + void __user *argp = (void __user *)arg;
6027 +@@ -968,11 +967,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
6028 + } else {
6029 + struct ifreq ifr;
6030 + bool need_copyout;
6031 +- if (copy_from_user(&ifr, argp, ifreq_size))
6032 ++ if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
6033 + return -EFAULT;
6034 + err = dev_ioctl(net, cmd, &ifr, &need_copyout);
6035 + if (!err && need_copyout)
6036 +- if (copy_to_user(argp, &ifr, ifreq_size))
6037 ++ if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
6038 + return -EFAULT;
6039 + }
6040 + return err;
6041 +@@ -1071,8 +1070,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
6042 + err = open_related_ns(&net->ns, get_net_ns);
6043 + break;
6044 + default:
6045 +- err = sock_do_ioctl(net, sock, cmd, arg,
6046 +- sizeof(struct ifreq));
6047 ++ err = sock_do_ioctl(net, sock, cmd, arg);
6048 + break;
6049 + }
6050 + return err;
6051 +@@ -2750,8 +2748,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
6052 + int err;
6053 +
6054 + set_fs(KERNEL_DS);
6055 +- err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv,
6056 +- sizeof(struct compat_ifreq));
6057 ++ err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
6058 + set_fs(old_fs);
6059 + if (!err)
6060 + err = compat_put_timeval(&ktv, up);
6061 +@@ -2767,8 +2764,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
6062 + int err;
6063 +
6064 + set_fs(KERNEL_DS);
6065 +- err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts,
6066 +- sizeof(struct compat_ifreq));
6067 ++ err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
6068 + set_fs(old_fs);
6069 + if (!err)
6070 + err = compat_put_timespec(&kts, up);
6071 +@@ -2964,6 +2960,54 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
6072 + return dev_ioctl(net, cmd, &ifreq, NULL);
6073 + }
6074 +
6075 ++static int compat_ifreq_ioctl(struct net *net, struct socket *sock,
6076 ++ unsigned int cmd,
6077 ++ struct compat_ifreq __user *uifr32)
6078 ++{
6079 ++ struct ifreq __user *uifr;
6080 ++ int err;
6081 ++
6082 ++ /* Handle the fact that while struct ifreq has the same *layout* on
6083 ++ * 32/64 for everything but ifreq::ifru_ifmap and ifreq::ifru_data,
6084 ++ * which are handled elsewhere, it still has different *size* due to
6085 ++ * ifreq::ifru_ifmap (which is 16 bytes on 32 bit, 24 bytes on 64-bit,
6086 ++ * resulting in struct ifreq being 32 and 40 bytes respectively).
6087 ++ * As a result, if the struct happens to be at the end of a page and
6088 ++ * the next page isn't readable/writable, we get a fault. To prevent
6089 ++ * that, copy back and forth to the full size.
6090 ++ */
6091 ++
6092 ++ uifr = compat_alloc_user_space(sizeof(*uifr));
6093 ++ if (copy_in_user(uifr, uifr32, sizeof(*uifr32)))
6094 ++ return -EFAULT;
6095 ++
6096 ++ err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr);
6097 ++
6098 ++ if (!err) {
6099 ++ switch (cmd) {
6100 ++ case SIOCGIFFLAGS:
6101 ++ case SIOCGIFMETRIC:
6102 ++ case SIOCGIFMTU:
6103 ++ case SIOCGIFMEM:
6104 ++ case SIOCGIFHWADDR:
6105 ++ case SIOCGIFINDEX:
6106 ++ case SIOCGIFADDR:
6107 ++ case SIOCGIFBRDADDR:
6108 ++ case SIOCGIFDSTADDR:
6109 ++ case SIOCGIFNETMASK:
6110 ++ case SIOCGIFPFLAGS:
6111 ++ case SIOCGIFTXQLEN:
6112 ++ case SIOCGMIIPHY:
6113 ++ case SIOCGMIIREG:
6114 ++ case SIOCGIFNAME:
6115 ++ if (copy_in_user(uifr32, uifr, sizeof(*uifr32)))
6116 ++ err = -EFAULT;
6117 ++ break;
6118 ++ }
6119 ++ }
6120 ++ return err;
6121 ++}
6122 ++
6123 + static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
6124 + struct compat_ifreq __user *uifr32)
6125 + {
6126 +@@ -3079,8 +3123,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
6127 + }
6128 +
6129 + set_fs(KERNEL_DS);
6130 +- ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r,
6131 +- sizeof(struct compat_ifreq));
6132 ++ ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
6133 + set_fs(old_fs);
6134 +
6135 + out:
6136 +@@ -3180,21 +3223,22 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
6137 + case SIOCSIFTXQLEN:
6138 + case SIOCBRADDIF:
6139 + case SIOCBRDELIF:
6140 ++ case SIOCGIFNAME:
6141 + case SIOCSIFNAME:
6142 + case SIOCGMIIPHY:
6143 + case SIOCGMIIREG:
6144 + case SIOCSMIIREG:
6145 +- case SIOCSARP:
6146 +- case SIOCGARP:
6147 +- case SIOCDARP:
6148 +- case SIOCATMARK:
6149 + case SIOCBONDENSLAVE:
6150 + case SIOCBONDRELEASE:
6151 + case SIOCBONDSETHWADDR:
6152 + case SIOCBONDCHANGEACTIVE:
6153 +- case SIOCGIFNAME:
6154 +- return sock_do_ioctl(net, sock, cmd, arg,
6155 +- sizeof(struct compat_ifreq));
6156 ++ return compat_ifreq_ioctl(net, sock, cmd, argp);
6157 ++
6158 ++ case SIOCSARP:
6159 ++ case SIOCGARP:
6160 ++ case SIOCDARP:
6161 ++ case SIOCATMARK:
6162 ++ return sock_do_ioctl(net, sock, cmd, arg);
6163 + }
6164 +
6165 + return -ENOIOCTLCMD;
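The comment in compat_ifreq_ioctl() is the heart of this net/socket.c change: struct ifreq is 32 bytes for 32-bit tasks but 40 bytes on a 64-bit kernel because ifru_ifmap grows, so copying sizeof(struct ifreq) from a compat pointer can run off the end of a page. The size gap is easy to reproduce; the structs below are simplified stand-ins (the real ifreq union has more members), and the 40-byte result assumes an LP64 host:

#include <stdio.h>

/* hypothetical miniatures: 16-byte vs 24-byte ifmap flavours */
struct ifmap32 { unsigned int mem_start, mem_end; unsigned short base_addr, irq, dma, port; };
struct ifmap64 { unsigned long mem_start, mem_end; unsigned short base_addr, irq, dma, port; };

struct ifreq32 { char name[16]; union { struct ifmap32 map; char data[16]; } u; };
struct ifreq64 { char name[16]; union { struct ifmap64 map; char data[16]; } u; };

int main(void)
{
	printf("32-bit flavour: %zu bytes\n", sizeof(struct ifreq32)); /* 32 */
	printf("64-bit flavour: %zu bytes\n", sizeof(struct ifreq64)); /* 40 on LP64 */
	return 0;
}

That is why the fix bounces the compat pointer through compat_alloc_user_space(): the kernel-side copy always has the full native size behind it.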
6166 +diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
6167 +index ba765473d1f06..efeee5586b2ac 100644
6168 +--- a/net/sunrpc/auth_gss/auth_gss.c
6169 ++++ b/net/sunrpc/auth_gss/auth_gss.c
6170 +@@ -1563,8 +1563,10 @@ gss_marshal(struct rpc_task *task, __be32 *p)
6171 + cred_len = p++;
6172 +
6173 + spin_lock(&ctx->gc_seq_lock);
6174 +- req->rq_seqno = ctx->gc_seq++;
6175 ++ req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
6176 + spin_unlock(&ctx->gc_seq_lock);
6177 ++ if (req->rq_seqno == MAXSEQ)
6178 ++ goto out_expired;
6179 +
6180 + *p++ = htonl((u32) RPC_GSS_VERSION);
6181 + *p++ = htonl((u32) ctx->gc_proc);
6182 +@@ -1586,14 +1588,18 @@ gss_marshal(struct rpc_task *task, __be32 *p)
6183 + mic.data = (u8 *)(p + 1);
6184 + maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
6185 + if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
6186 +- clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
6187 ++ goto out_expired;
6188 + } else if (maj_stat != 0) {
6189 +- printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
6190 ++ pr_warn("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
6191 ++ task->tk_status = -EIO;
6192 + goto out_put_ctx;
6193 + }
6194 + p = xdr_encode_opaque(p, NULL, mic.len);
6195 + gss_put_ctx(ctx);
6196 + return p;
6197 ++out_expired:
6198 ++ clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
6199 ++ task->tk_status = -EKEYEXPIRED;
6200 + out_put_ctx:
6201 + gss_put_ctx(ctx);
6202 + return NULL;
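gss_marshal() now clamps the sequence number instead of letting gc_seq wrap: once the counter would pass MAXSEQ, every caller gets MAXSEQ back, jumps to out_expired, marks the credential stale, and fails with -EKEYEXPIRED so the upper layer re-runs the refresh step. The saturating-counter shape, with an illustrative MAXSEQ value (not the real RPCSEC_GSS window) and without the gc_seq_lock the kernel takes around it:

#include <stdint.h>
#include <stdio.h>

#define MAXSEQ 0x7fffffffu	/* hypothetical cap */

static uint32_t gc_seq = MAXSEQ - 1;

/* next sequence number, saturating at MAXSEQ instead of wrapping */
static uint32_t next_seqno(void)
{
	return (gc_seq < MAXSEQ) ? gc_seq++ : MAXSEQ;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		uint32_t s = next_seqno();

		if (s == MAXSEQ)
			printf("seq %u: context expired, refresh needed\n", s);
		else
			printf("seq %u: ok\n", s);
	}
	return 0;
}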
6203 +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
6204 +index 24cbddc44c884..1ee04e0ec4bca 100644
6205 +--- a/net/sunrpc/clnt.c
6206 ++++ b/net/sunrpc/clnt.c
6207 +@@ -1738,14 +1738,10 @@ rpc_xdr_encode(struct rpc_task *task)
6208 + xdr_buf_init(&req->rq_rcv_buf,
6209 + req->rq_rbuffer,
6210 + req->rq_rcvsize);
6211 +- req->rq_bytes_sent = 0;
6212 +
6213 + p = rpc_encode_header(task);
6214 +- if (p == NULL) {
6215 +- printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
6216 +- rpc_exit(task, -EIO);
6217 ++ if (p == NULL)
6218 + return;
6219 +- }
6220 +
6221 + encode = task->tk_msg.rpc_proc->p_encode;
6222 + if (encode == NULL)
6223 +@@ -1770,10 +1766,17 @@ call_encode(struct rpc_task *task)
6224 + /* Did the encode result in an error condition? */
6225 + if (task->tk_status != 0) {
6226 + /* Was the error nonfatal? */
6227 +- if (task->tk_status == -EAGAIN || task->tk_status == -ENOMEM)
6228 ++ switch (task->tk_status) {
6229 ++ case -EAGAIN:
6230 ++ case -ENOMEM:
6231 + rpc_delay(task, HZ >> 4);
6232 +- else
6233 ++ break;
6234 ++ case -EKEYEXPIRED:
6235 ++ task->tk_action = call_refresh;
6236 ++ break;
6237 ++ default:
6238 + rpc_exit(task, task->tk_status);
6239 ++ }
6240 + return;
6241 + }
6242 +
6243 +@@ -2335,7 +2338,8 @@ rpc_encode_header(struct rpc_task *task)
6244 + *p++ = htonl(clnt->cl_vers); /* program version */
6245 + *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */
6246 + p = rpcauth_marshcred(task, p);
6247 +- req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
6248 ++ if (p)
6249 ++ req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
6250 + return p;
6251 + }
6252 +
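The call_encode() change replaces a two-way if/else with a three-way triage: transient failures (-EAGAIN, -ENOMEM) back off and retry, an expired credential (-EKEYEXPIRED, newly raised by gss_marshal() above) loops back to call_refresh, and anything else stays fatal. Boiled down to a hypothetical helper:

#include <errno.h>
#include <stdio.h>

enum next_step { STEP_RETRY, STEP_REFRESH, STEP_ABORT };

static enum next_step triage(int status)
{
	switch (status) {
	case -EAGAIN:
	case -ENOMEM:
		return STEP_RETRY;	/* kernel: rpc_delay(task, HZ >> 4) */
	case -EKEYEXPIRED:
		return STEP_REFRESH;	/* kernel: task->tk_action = call_refresh */
	default:
		return STEP_ABORT;	/* kernel: rpc_exit(task, status) */
	}
}

int main(void)
{
	static const char *names[] = { "retry", "refresh", "abort" };

	printf("EAGAIN -> %s, EKEYEXPIRED -> %s, EIO -> %s\n",
	       names[triage(-EAGAIN)], names[triage(-EKEYEXPIRED)],
	       names[triage(-EIO)]);
	return 0;
}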
6253 +diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
6254 +index 943f08be7c387..f1ec2110efebe 100644
6255 +--- a/net/sunrpc/xprt.c
6256 ++++ b/net/sunrpc/xprt.c
6257 +@@ -1151,6 +1151,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
6258 + struct rpc_xprt *xprt = req->rq_xprt;
6259 +
6260 + if (xprt_request_need_enqueue_transmit(task, req)) {
6261 ++ req->rq_bytes_sent = 0;
6262 + spin_lock(&xprt->queue_lock);
6263 + /*
6264 + * Requests that carry congestion control credits are added
6265 +diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
6266 +index 919fddec01973..dffedf1df02ce 100644
6267 +--- a/net/sunrpc/xprtrdma/verbs.c
6268 ++++ b/net/sunrpc/xprtrdma/verbs.c
6269 +@@ -912,17 +912,13 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
6270 + for (i = 0; i <= buf->rb_sc_last; i++) {
6271 + sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
6272 + if (!sc)
6273 +- goto out_destroy;
6274 ++ return -ENOMEM;
6275 +
6276 + sc->sc_xprt = r_xprt;
6277 + buf->rb_sc_ctxs[i] = sc;
6278 + }
6279 +
6280 + return 0;
6281 +-
6282 +-out_destroy:
6283 +- rpcrdma_sendctxs_destroy(buf);
6284 +- return -ENOMEM;
6285 + }
6286 +
6287 + /* The sendctx queue is not guaranteed to have a size that is a
6288 +diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
6289 +index a264cf2accd0f..d4de871e7d4d7 100644
6290 +--- a/net/xdp/xdp_umem.c
6291 ++++ b/net/xdp/xdp_umem.c
6292 +@@ -41,13 +41,20 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
6293 + * not know if the device has more tx queues than rx, or the opposite.
6294 + * This might also change during run time.
6295 + */
6296 +-static void xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
6297 +- u16 queue_id)
6298 ++static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
6299 ++ u16 queue_id)
6300 + {
6301 ++ if (queue_id >= max_t(unsigned int,
6302 ++ dev->real_num_rx_queues,
6303 ++ dev->real_num_tx_queues))
6304 ++ return -EINVAL;
6305 ++
6306 + if (queue_id < dev->real_num_rx_queues)
6307 + dev->_rx[queue_id].umem = umem;
6308 + if (queue_id < dev->real_num_tx_queues)
6309 + dev->_tx[queue_id].umem = umem;
6310 ++
6311 ++ return 0;
6312 + }
6313 +
6314 + struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
6315 +@@ -88,7 +95,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
6316 + goto out_rtnl_unlock;
6317 + }
6318 +
6319 +- xdp_reg_umem_at_qid(dev, umem, queue_id);
6320 ++ err = xdp_reg_umem_at_qid(dev, umem, queue_id);
6321 ++ if (err)
6322 ++ goto out_rtnl_unlock;
6323 ++
6324 + umem->dev = dev;
6325 + umem->queue_id = queue_id;
6326 + if (force_copy)
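xdp_reg_umem_at_qid() previously registered nothing, and reported nothing, when queue_id was out of range on both the RX and TX side; it now returns -EINVAL and xdp_umem_assign_dev() propagates that to the caller. The check itself is a bound against the larger of the two queue counts, sketched here with a hypothetical struct:

#include <stdio.h>

struct dev_counts { unsigned int rx, tx; };

static int check_queue_id(const struct dev_counts *d, unsigned int queue_id)
{
	unsigned int max = d->rx > d->tx ? d->rx : d->tx; /* kernel: max_t() */

	if (queue_id >= max)
		return -1;	/* -EINVAL in the kernel */
	return 0;
}

int main(void)
{
	struct dev_counts d = { .rx = 4, .tx = 2 };

	printf("qid 3 -> %d, qid 4 -> %d\n",
	       check_queue_id(&d, 3), check_queue_id(&d, 4));
	return 0;
}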
6327 +diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
6328 +index be0a961450bc2..f5ce993c78e42 100644
6329 +--- a/samples/bpf/Makefile
6330 ++++ b/samples/bpf/Makefile
6331 +@@ -273,6 +273,7 @@ $(obj)/%.o: $(src)/%.c
6332 + -Wno-gnu-variable-sized-type-not-at-end \
6333 + -Wno-address-of-packed-member -Wno-tautological-compare \
6334 + -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
6335 ++ -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \
6336 + -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@
6337 + ifeq ($(DWARF2BTF),y)
6338 + $(BTF_PAHOLE) -J $@
6339 +diff --git a/samples/bpf/asm_goto_workaround.h b/samples/bpf/asm_goto_workaround.h
6340 +new file mode 100644
6341 +index 0000000000000..5cd7c1d1a5d56
6342 +--- /dev/null
6343 ++++ b/samples/bpf/asm_goto_workaround.h
6344 +@@ -0,0 +1,16 @@
6345 ++/* SPDX-License-Identifier: GPL-2.0 */
6346 ++/* Copyright (c) 2019 Facebook */
6347 ++#ifndef __ASM_GOTO_WORKAROUND_H
6348 ++#define __ASM_GOTO_WORKAROUND_H
6349 ++
6350 ++/* this will bring in the asm_volatile_goto macro definition
6351 ++ * if it is enabled by the compiler and config options.
6352 ++ */
6353 ++#include <linux/types.h>
6354 ++
6355 ++#ifdef asm_volatile_goto
6356 ++#undef asm_volatile_goto
6357 ++#define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
6358 ++#endif
6359 ++
6360 ++#endif
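samples/bpf builds with clang, which at this point lacks asm-goto support, so the Makefile change force-feeds asm_goto_workaround.h via -include: the header is textually prepended to every translation unit, sees the kernel's asm_volatile_goto definition, and replaces it with an asm statement that fails to assemble, turning silent miscompilation into a loud build error. The -include mechanism itself is easy to demonstrate with a hypothetical macro; build as "cc -include override.h main.c":

/* override.h (hypothetical) */
#define GREETING "hello from override.h"

/* main.c */
#include <stdio.h>

#ifndef GREETING
#define GREETING "hello from main.c"
#endif

int main(void)
{
	puts(GREETING);	/* prints the override: -include runs first */
	return 0;
}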
6361 +diff --git a/security/keys/key.c b/security/keys/key.c
6362 +index d97c9394b5dd4..249a6da4d2770 100644
6363 +--- a/security/keys/key.c
6364 ++++ b/security/keys/key.c
6365 +@@ -265,8 +265,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
6366 +
6367 + spin_lock(&user->lock);
6368 + if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
6369 +- if (user->qnkeys + 1 >= maxkeys ||
6370 +- user->qnbytes + quotalen >= maxbytes ||
6371 ++ if (user->qnkeys + 1 > maxkeys ||
6372 ++ user->qnbytes + quotalen > maxbytes ||
6373 + user->qnbytes + quotalen < user->qnbytes)
6374 + goto no_quota;
6375 + }
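The key_alloc() quota fix is a textbook off-by-one: with >=, a user whose maxkeys is 200 was refused the 200th key, because 199 existing keys plus the new one already compared equal to the limit. Moving to > lets the quota be reached exactly. In miniature (the kernel's third condition, qnbytes overflow, is omitted):

#include <stdio.h>

static int quota_ok_old(unsigned int have, unsigned int maxkeys)
{
	return have + 1 >= maxkeys ? 0 : 1;	/* rejects at 199 already */
}

static int quota_ok(unsigned int have, unsigned int maxkeys)
{
	return have + 1 > maxkeys ? 0 : 1;	/* after the fix */
}

int main(void)
{
	printf("have=199 max=200: old=%d new=%d\n",
	       quota_ok_old(199, 200), quota_ok(199, 200));
	return 0;
}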
6376 +diff --git a/security/keys/keyring.c b/security/keys/keyring.c
6377 +index 41bcf57e96f21..99a55145ddcd2 100644
6378 +--- a/security/keys/keyring.c
6379 ++++ b/security/keys/keyring.c
6380 +@@ -661,9 +661,6 @@ static bool search_nested_keyrings(struct key *keyring,
6381 + BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
6382 + (ctx->flags & STATE_CHECKS) == STATE_CHECKS);
6383 +
6384 +- if (ctx->index_key.description)
6385 +- ctx->index_key.desc_len = strlen(ctx->index_key.description);
6386 +-
6387 + /* Check to see if this top-level keyring is what we are looking for
6388 + * and whether it is valid or not.
6389 + */
6390 +@@ -914,6 +911,7 @@ key_ref_t keyring_search(key_ref_t keyring,
6391 + struct keyring_search_context ctx = {
6392 + .index_key.type = type,
6393 + .index_key.description = description,
6394 ++ .index_key.desc_len = strlen(description),
6395 + .cred = current_cred(),
6396 + .match_data.cmp = key_default_cmp,
6397 + .match_data.raw_data = description,
6398 +diff --git a/security/keys/proc.c b/security/keys/proc.c
6399 +index 5af2934965d80..d38be9db2cc07 100644
6400 +--- a/security/keys/proc.c
6401 ++++ b/security/keys/proc.c
6402 +@@ -166,8 +166,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
6403 + int rc;
6404 +
6405 + struct keyring_search_context ctx = {
6406 +- .index_key.type = key->type,
6407 +- .index_key.description = key->description,
6408 ++ .index_key = key->index_key,
6409 + .cred = m->file->f_cred,
6410 + .match_data.cmp = lookup_user_key_possessed,
6411 + .match_data.raw_data = key,
6412 +diff --git a/security/keys/request_key.c b/security/keys/request_key.c
6413 +index 114f7408feee6..7385536986497 100644
6414 +--- a/security/keys/request_key.c
6415 ++++ b/security/keys/request_key.c
6416 +@@ -545,6 +545,7 @@ struct key *request_key_and_link(struct key_type *type,
6417 + struct keyring_search_context ctx = {
6418 + .index_key.type = type,
6419 + .index_key.description = description,
6420 ++ .index_key.desc_len = strlen(description),
6421 + .cred = current_cred(),
6422 + .match_data.cmp = key_default_cmp,
6423 + .match_data.raw_data = description,
6424 +diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
6425 +index 424e1d90412ea..6797843154f03 100644
6426 +--- a/security/keys/request_key_auth.c
6427 ++++ b/security/keys/request_key_auth.c
6428 +@@ -246,7 +246,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
6429 + struct key *authkey;
6430 + key_ref_t authkey_ref;
6431 +
6432 +- sprintf(description, "%x", target_id);
6433 ++ ctx.index_key.desc_len = sprintf(description, "%x", target_id);
6434 +
6435 + authkey_ref = search_process_keyrings(&ctx);
6436 +
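The thread running through keyring.c, proc.c, request_key.c and request_key_auth.c above is the same: the description length is computed once, where the search context is built, instead of being recomputed (or forgotten) inside search_nested_keyrings(). The request_key_auth.c hunk leans on sprintf() returning the number of characters written, so formatting and measuring collapse into one assignment. A self-contained illustration with a hypothetical index_key struct:

#include <stdio.h>
#include <string.h>

struct index_key { const char *description; size_t desc_len; };

int main(void)
{
	char description[16];
	struct index_key key;

	key.description = description;
	key.desc_len = sprintf(description, "%x", 0xdeadbeef);

	printf("\"%s\" len=%zu (strlen=%zu)\n",
	       key.description, key.desc_len, strlen(description));
	return 0;
}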
6437 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
6438 +index c1042df5e346e..1bddfa7dc2169 100644
6439 +--- a/sound/pci/hda/patch_realtek.c
6440 ++++ b/sound/pci/hda/patch_realtek.c
6441 +@@ -1855,6 +1855,8 @@ enum {
6442 + ALC887_FIXUP_BASS_CHMAP,
6443 + ALC1220_FIXUP_GB_DUAL_CODECS,
6444 + ALC1220_FIXUP_CLEVO_P950,
6445 ++ ALC1220_FIXUP_SYSTEM76_ORYP5,
6446 ++ ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
6447 + };
6448 +
6449 + static void alc889_fixup_coef(struct hda_codec *codec,
6450 +@@ -2056,6 +2058,17 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
6451 + snd_hda_override_conn_list(codec, 0x1b, 1, conn1);
6452 + }
6453 +
6454 ++static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
6455 ++ const struct hda_fixup *fix, int action);
6456 ++
6457 ++static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
6458 ++ const struct hda_fixup *fix,
6459 ++ int action)
6460 ++{
6461 ++ alc1220_fixup_clevo_p950(codec, fix, action);
6462 ++ alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
6463 ++}
6464 ++
6465 + static const struct hda_fixup alc882_fixups[] = {
6466 + [ALC882_FIXUP_ABIT_AW9D_MAX] = {
6467 + .type = HDA_FIXUP_PINS,
6468 +@@ -2300,6 +2313,19 @@ static const struct hda_fixup alc882_fixups[] = {
6469 + .type = HDA_FIXUP_FUNC,
6470 + .v.func = alc1220_fixup_clevo_p950,
6471 + },
6472 ++ [ALC1220_FIXUP_SYSTEM76_ORYP5] = {
6473 ++ .type = HDA_FIXUP_FUNC,
6474 ++ .v.func = alc1220_fixup_system76_oryp5,
6475 ++ },
6476 ++ [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
6477 ++ .type = HDA_FIXUP_PINS,
6478 ++ .v.pins = (const struct hda_pintbl[]) {
6479 ++ { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
6480 ++ {}
6481 ++ },
6482 ++ .chained = true,
6483 ++ .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
6484 ++ },
6485 + };
6486 +
6487 + static const struct snd_pci_quirk alc882_fixup_tbl[] = {
6488 +@@ -2376,6 +2402,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
6489 + SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
6490 + SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
6491 + SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
6492 ++ SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
6493 ++ SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
6494 + SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
6495 + SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
6496 + SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
6497 +@@ -5573,6 +5601,7 @@ enum {
6498 + ALC294_FIXUP_ASUS_HEADSET_MIC,
6499 + ALC294_FIXUP_ASUS_SPK,
6500 + ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
6501 ++ ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
6502 + };
6503 +
6504 + static const struct hda_fixup alc269_fixups[] = {
6505 +@@ -6506,6 +6535,17 @@ static const struct hda_fixup alc269_fixups[] = {
6506 + .chained = true,
6507 + .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
6508 + },
6509 ++ [ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE] = {
6510 ++ .type = HDA_FIXUP_VERBS,
6511 ++ .v.verbs = (const struct hda_verb[]) {
6512 ++ /* Disable PCBEEP-IN passthrough */
6513 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
6514 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
6515 ++ { }
6516 ++ },
6517 ++ .chained = true,
6518 ++ .chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE
6519 ++ },
6520 + };
6521 +
6522 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6523 +@@ -7187,7 +7227,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
6524 + {0x12, 0x90a60130},
6525 + {0x19, 0x03a11020},
6526 + {0x21, 0x0321101f}),
6527 +- SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
6528 ++ SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
6529 + {0x12, 0x90a60130},
6530 + {0x14, 0x90170110},
6531 + {0x19, 0x04a11040},
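The System76 oryp5 entries show how the HDA fixup tables compose: the PCI quirk points at ALC1220_FIXUP_SYSTEM76_ORYP5_PINS, whose .chained/.chain_id fields pull in ALC1220_FIXUP_SYSTEM76_ORYP5, which in turn calls both the Clevo P950 function fixup and the no-HP-mic headset-mode fixup. A toy version of that chain-walking; the enum values and strings here are hypothetical:

#include <stdio.h>

enum { FIX_PINS, FIX_FUNC, FIX_NONE = -1 };

struct fixup {
	const char *name;
	int chain_id;		/* next fixup in the chain, or FIX_NONE */
};

static const struct fixup fixups[] = {
	[FIX_PINS] = { "apply pin overrides", FIX_FUNC },
	[FIX_FUNC] = { "run codec-specific function", FIX_NONE },
};

static void apply_fixup(int id)
{
	while (id != FIX_NONE) {
		printf("-> %s\n", fixups[id].name);
		id = fixups[id].chain_id;
	}
}

int main(void)
{
	apply_fixup(FIX_PINS);	/* board entry kicks off the whole chain */
	return 0;
}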
6532 +diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
6533 +index b29d0f65611eb..2d49492d60692 100644
6534 +--- a/sound/soc/soc-core.c
6535 ++++ b/sound/soc/soc-core.c
6536 +@@ -1034,17 +1034,18 @@ static int snd_soc_init_platform(struct snd_soc_card *card,
6537 + * this function should be removed in the future
6538 + */
6539 + /* convert Legacy platform link */
6540 +- if (!platform) {
6541 ++ if (!platform || dai_link->legacy_platform) {
6542 + platform = devm_kzalloc(card->dev,
6543 + sizeof(struct snd_soc_dai_link_component),
6544 + GFP_KERNEL);
6545 + if (!platform)
6546 + return -ENOMEM;
6547 +
6548 +- dai_link->platform = platform;
6549 +- platform->name = dai_link->platform_name;
6550 +- platform->of_node = dai_link->platform_of_node;
6551 +- platform->dai_name = NULL;
6552 ++ dai_link->platform = platform;
6553 ++ dai_link->legacy_platform = 1;
6554 ++ platform->name = dai_link->platform_name;
6555 ++ platform->of_node = dai_link->platform_of_node;
6556 ++ platform->dai_name = NULL;
6557 + }
6558 +
6559 + /* if there's no platform we match on the empty platform */
6560 +diff --git a/tools/include/uapi/linux/pkt_sched.h b/tools/include/uapi/linux/pkt_sched.h
6561 +new file mode 100644
6562 +index 0000000000000..0d18b1d1fbbc8
6563 +--- /dev/null
6564 ++++ b/tools/include/uapi/linux/pkt_sched.h
6565 +@@ -0,0 +1,1163 @@
6566 ++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
6567 ++#ifndef __LINUX_PKT_SCHED_H
6568 ++#define __LINUX_PKT_SCHED_H
6569 ++
6570 ++#include <linux/types.h>
6571 ++
6572 ++/* Logical priority bands not depending on specific packet scheduler.
6573 ++ Every scheduler will map them to real traffic classes, if it has
6574 ++ no more precise mechanism to classify packets.
6575 ++
6576 ++ These numbers have no special meaning, though their coincidence
6577 ++ with obsolete IPv6 values is not accidental :-). New IPv6 drafts
6578 ++ preferred full anarchy, inspired by the diffserv group.
6579 ++
6580 ++ Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy
6581 ++ class, actually, as rule it will be handled with more care than
6582 ++ filler or even bulk.
6583 ++ */
6584 ++
6585 ++#define TC_PRIO_BESTEFFORT 0
6586 ++#define TC_PRIO_FILLER 1
6587 ++#define TC_PRIO_BULK 2
6588 ++#define TC_PRIO_INTERACTIVE_BULK 4
6589 ++#define TC_PRIO_INTERACTIVE 6
6590 ++#define TC_PRIO_CONTROL 7
6591 ++
6592 ++#define TC_PRIO_MAX 15
6593 ++
6594 ++/* Generic queue statistics, available for all the elements.
6595 ++ Particular schedulers may also have their own private records.
6596 ++ */
6597 ++
6598 ++struct tc_stats {
6599 ++ __u64 bytes; /* Number of enqueued bytes */
6600 ++ __u32 packets; /* Number of enqueued packets */
6601 ++ __u32 drops; /* Packets dropped because of lack of resources */
6602 ++ __u32 overlimits; /* Number of throttle events when this
6603 ++ * flow goes out of allocated bandwidth */
6604 ++ __u32 bps; /* Current flow byte rate */
6605 ++ __u32 pps; /* Current flow packet rate */
6606 ++ __u32 qlen;
6607 ++ __u32 backlog;
6608 ++};
6609 ++
6610 ++struct tc_estimator {
6611 ++ signed char interval;
6612 ++ unsigned char ewma_log;
6613 ++};
6614 ++
6615 ++/* "Handles"
6616 ++ ---------
6617 ++
6618 ++ All the traffic control objects have 32bit identifiers, or "handles".
6619 ++
6620 ++ They can be considered as opaque numbers from user API viewpoint,
6621 ++ but actually they always consist of two fields: major and
6622 ++ minor numbers, which are interpreted by the kernel specially and
6623 ++ may be used by applications, though this is not recommended.
6624 ++
6625 ++ F.e. qdisc handles always have minor number equal to zero,
6626 ++ classes (or flows) have major equal to parent qdisc major, and
6627 ++ minor uniquely identifying class inside qdisc.
6628 ++
6629 ++ Macros to manipulate handles:
6630 ++ */
6631 ++
6632 ++#define TC_H_MAJ_MASK (0xFFFF0000U)
6633 ++#define TC_H_MIN_MASK (0x0000FFFFU)
6634 ++#define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)
6635 ++#define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
6636 ++#define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))
6637 ++
6638 ++#define TC_H_UNSPEC (0U)
6639 ++#define TC_H_ROOT (0xFFFFFFFFU)
6640 ++#define TC_H_INGRESS (0xFFFFFFF1U)
6641 ++#define TC_H_CLSACT TC_H_INGRESS
6642 ++
6643 ++#define TC_H_MIN_PRIORITY 0xFFE0U
6644 ++#define TC_H_MIN_INGRESS 0xFFF2U
6645 ++#define TC_H_MIN_EGRESS 0xFFF3U
6646 ++
6647 ++/* Needs to correspond to iproute2 tc/tc_core.h "enum link_layer" */
6648 ++enum tc_link_layer {
6649 ++ TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
6650 ++ TC_LINKLAYER_ETHERNET,
6651 ++ TC_LINKLAYER_ATM,
6652 ++};
6653 ++#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
6654 ++
6655 ++struct tc_ratespec {
6656 ++ unsigned char cell_log;
6657 ++ __u8 linklayer; /* lower 4 bits */
6658 ++ unsigned short overhead;
6659 ++ short cell_align;
6660 ++ unsigned short mpu;
6661 ++ __u32 rate;
6662 ++};
6663 ++
6664 ++#define TC_RTAB_SIZE 1024
6665 ++
6666 ++struct tc_sizespec {
6667 ++ unsigned char cell_log;
6668 ++ unsigned char size_log;
6669 ++ short cell_align;
6670 ++ int overhead;
6671 ++ unsigned int linklayer;
6672 ++ unsigned int mpu;
6673 ++ unsigned int mtu;
6674 ++ unsigned int tsize;
6675 ++};
6676 ++
6677 ++enum {
6678 ++ TCA_STAB_UNSPEC,
6679 ++ TCA_STAB_BASE,
6680 ++ TCA_STAB_DATA,
6681 ++ __TCA_STAB_MAX
6682 ++};
6683 ++
6684 ++#define TCA_STAB_MAX (__TCA_STAB_MAX - 1)
6685 ++
6686 ++/* FIFO section */
6687 ++
6688 ++struct tc_fifo_qopt {
6689 ++ __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */
6690 ++};
6691 ++
6692 ++/* SKBPRIO section */
6693 ++
6694 ++/*
6695 ++ * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1).
6696 ++ * SKBPRIO_MAX_PRIORITY should be at least 64 in order for skbprio to be able
6697 ++ * to map one to one the DS field of IPV4 and IPV6 headers.
6698 ++ * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY.
6699 ++ */
6700 ++
6701 ++#define SKBPRIO_MAX_PRIORITY 64
6702 ++
6703 ++struct tc_skbprio_qopt {
6704 ++ __u32 limit; /* Queue length in packets. */
6705 ++};
6706 ++
6707 ++/* PRIO section */
6708 ++
6709 ++#define TCQ_PRIO_BANDS 16
6710 ++#define TCQ_MIN_PRIO_BANDS 2
6711 ++
6712 ++struct tc_prio_qopt {
6713 ++ int bands; /* Number of bands */
6714 ++ __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */
6715 ++};
6716 ++
6717 ++/* MULTIQ section */
6718 ++
6719 ++struct tc_multiq_qopt {
6720 ++ __u16 bands; /* Number of bands */
6721 ++ __u16 max_bands; /* Maximum number of queues */
6722 ++};
6723 ++
6724 ++/* PLUG section */
6725 ++
6726 ++#define TCQ_PLUG_BUFFER 0
6727 ++#define TCQ_PLUG_RELEASE_ONE 1
6728 ++#define TCQ_PLUG_RELEASE_INDEFINITE 2
6729 ++#define TCQ_PLUG_LIMIT 3
6730 ++
6731 ++struct tc_plug_qopt {
6732 ++ /* TCQ_PLUG_BUFFER: Insert a plug into the queue and
6733 ++ * buffer any incoming packets
6734 ++ * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
6735 ++ * to beginning of the next plug.
6736 ++ * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
6737 ++ * Stop buffering packets until the next TCQ_PLUG_BUFFER
6738 ++ * command is received (just act as a pass-thru queue).
6739 ++ * TCQ_PLUG_LIMIT: Increase/decrease queue size
6740 ++ */
6741 ++ int action;
6742 ++ __u32 limit;
6743 ++};
6744 ++
6745 ++/* TBF section */
6746 ++
6747 ++struct tc_tbf_qopt {
6748 ++ struct tc_ratespec rate;
6749 ++ struct tc_ratespec peakrate;
6750 ++ __u32 limit;
6751 ++ __u32 buffer;
6752 ++ __u32 mtu;
6753 ++};
6754 ++
6755 ++enum {
6756 ++ TCA_TBF_UNSPEC,
6757 ++ TCA_TBF_PARMS,
6758 ++ TCA_TBF_RTAB,
6759 ++ TCA_TBF_PTAB,
6760 ++ TCA_TBF_RATE64,
6761 ++ TCA_TBF_PRATE64,
6762 ++ TCA_TBF_BURST,
6763 ++ TCA_TBF_PBURST,
6764 ++ TCA_TBF_PAD,
6765 ++ __TCA_TBF_MAX,
6766 ++};
6767 ++
6768 ++#define TCA_TBF_MAX (__TCA_TBF_MAX - 1)
6769 ++
6770 ++
6771 ++/* TEQL section */
6772 ++
6773 ++/* TEQL does not require any parameters */
6774 ++
6775 ++/* SFQ section */
6776 ++
6777 ++struct tc_sfq_qopt {
6778 ++ unsigned quantum; /* Bytes per round allocated to flow */
6779 ++ int perturb_period; /* Period of hash perturbation */
6780 ++ __u32 limit; /* Maximal packets in queue */
6781 ++ unsigned divisor; /* Hash divisor */
6782 ++ unsigned flows; /* Maximal number of flows */
6783 ++};
6784 ++
6785 ++struct tc_sfqred_stats {
6786 ++ __u32 prob_drop; /* Early drops, below max threshold */
6787 ++ __u32 forced_drop; /* Early drops, after max threshold */
6788 ++ __u32 prob_mark; /* Marked packets, below max threshold */
6789 ++ __u32 forced_mark; /* Marked packets, after max threshold */
6790 ++ __u32 prob_mark_head; /* Marked packets, below max threshold */
6791 ++ __u32 forced_mark_head;/* Marked packets, after max threshold */
6792 ++};
6793 ++
6794 ++struct tc_sfq_qopt_v1 {
6795 ++ struct tc_sfq_qopt v0;
6796 ++ unsigned int depth; /* max number of packets per flow */
6797 ++ unsigned int headdrop;
6798 ++/* SFQRED parameters */
6799 ++ __u32 limit; /* HARD maximal flow queue length (bytes) */
6800 ++ __u32 qth_min; /* Min average length threshold (bytes) */
6801 ++ __u32 qth_max; /* Max average length threshold (bytes) */
6802 ++ unsigned char Wlog; /* log(W) */
6803 ++ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
6804 ++ unsigned char Scell_log; /* cell size for idle damping */
6805 ++ unsigned char flags;
6806 ++ __u32 max_P; /* probability, high resolution */
6807 ++/* SFQRED stats */
6808 ++ struct tc_sfqred_stats stats;
6809 ++};
6810 ++
6811 ++
6812 ++struct tc_sfq_xstats {
6813 ++ __s32 allot;
6814 ++};
6815 ++
6816 ++/* RED section */
6817 ++
6818 ++enum {
6819 ++ TCA_RED_UNSPEC,
6820 ++ TCA_RED_PARMS,
6821 ++ TCA_RED_STAB,
6822 ++ TCA_RED_MAX_P,
6823 ++ __TCA_RED_MAX,
6824 ++};
6825 ++
6826 ++#define TCA_RED_MAX (__TCA_RED_MAX - 1)
6827 ++
6828 ++struct tc_red_qopt {
6829 ++ __u32 limit; /* HARD maximal queue length (bytes) */
6830 ++ __u32 qth_min; /* Min average length threshold (bytes) */
6831 ++ __u32 qth_max; /* Max average length threshold (bytes) */
6832 ++ unsigned char Wlog; /* log(W) */
6833 ++ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
6834 ++ unsigned char Scell_log; /* cell size for idle damping */
6835 ++ unsigned char flags;
6836 ++#define TC_RED_ECN 1
6837 ++#define TC_RED_HARDDROP 2
6838 ++#define TC_RED_ADAPTATIVE 4
6839 ++};
6840 ++
6841 ++struct tc_red_xstats {
6842 ++ __u32 early; /* Early drops */
6843 ++ __u32 pdrop; /* Drops due to queue limits */
6844 ++ __u32 other; /* Drops due to drop() calls */
6845 ++ __u32 marked; /* Marked packets */
6846 ++};
6847 ++
6848 ++/* GRED section */
6849 ++
6850 ++#define MAX_DPs 16
6851 ++
6852 ++enum {
6853 ++ TCA_GRED_UNSPEC,
6854 ++ TCA_GRED_PARMS,
6855 ++ TCA_GRED_STAB,
6856 ++ TCA_GRED_DPS,
6857 ++ TCA_GRED_MAX_P,
6858 ++ TCA_GRED_LIMIT,
6859 ++ TCA_GRED_VQ_LIST, /* nested TCA_GRED_VQ_ENTRY */
6860 ++ __TCA_GRED_MAX,
6861 ++};
6862 ++
6863 ++#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
6864 ++
6865 ++enum {
6866 ++ TCA_GRED_VQ_ENTRY_UNSPEC,
6867 ++ TCA_GRED_VQ_ENTRY, /* nested TCA_GRED_VQ_* */
6868 ++ __TCA_GRED_VQ_ENTRY_MAX,
6869 ++};
6870 ++#define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1)
6871 ++
6872 ++enum {
6873 ++ TCA_GRED_VQ_UNSPEC,
6874 ++ TCA_GRED_VQ_PAD,
6875 ++ TCA_GRED_VQ_DP, /* u32 */
6876 ++ TCA_GRED_VQ_STAT_BYTES, /* u64 */
6877 ++ TCA_GRED_VQ_STAT_PACKETS, /* u32 */
6878 ++ TCA_GRED_VQ_STAT_BACKLOG, /* u32 */
6879 ++ TCA_GRED_VQ_STAT_PROB_DROP, /* u32 */
6880 ++ TCA_GRED_VQ_STAT_PROB_MARK, /* u32 */
6881 ++ TCA_GRED_VQ_STAT_FORCED_DROP, /* u32 */
6882 ++ TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */
6883 ++ TCA_GRED_VQ_STAT_PDROP, /* u32 */
6884 ++ TCA_GRED_VQ_STAT_OTHER, /* u32 */
6885 ++ TCA_GRED_VQ_FLAGS, /* u32 */
6886 ++ __TCA_GRED_VQ_MAX
6887 ++};
6888 ++
6889 ++#define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1)
6890 ++
6891 ++struct tc_gred_qopt {
6892 ++ __u32 limit; /* HARD maximal queue length (bytes) */
6893 ++ __u32 qth_min; /* Min average length threshold (bytes) */
6894 ++ __u32 qth_max; /* Max average length threshold (bytes) */
6895 ++ __u32 DP; /* up to 2^32 DPs */
6896 ++ __u32 backlog;
6897 ++ __u32 qave;
6898 ++ __u32 forced;
6899 ++ __u32 early;
6900 ++ __u32 other;
6901 ++ __u32 pdrop;
6902 ++ __u8 Wlog; /* log(W) */
6903 ++ __u8 Plog; /* log(P_max/(qth_max-qth_min)) */
6904 ++ __u8 Scell_log; /* cell size for idle damping */
6905 ++ __u8 prio; /* prio of this VQ */
6906 ++ __u32 packets;
6907 ++ __u32 bytesin;
6908 ++};
6909 ++
6910 ++/* gred setup */
6911 ++struct tc_gred_sopt {
6912 ++ __u32 DPs;
6913 ++ __u32 def_DP;
6914 ++ __u8 grio;
6915 ++ __u8 flags;
6916 ++ __u16 pad1;
6917 ++};
6918 ++
6919 ++/* CHOKe section */
6920 ++
6921 ++enum {
6922 ++ TCA_CHOKE_UNSPEC,
6923 ++ TCA_CHOKE_PARMS,
6924 ++ TCA_CHOKE_STAB,
6925 ++ TCA_CHOKE_MAX_P,
6926 ++ __TCA_CHOKE_MAX,
6927 ++};
6928 ++
6929 ++#define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)
6930 ++
6931 ++struct tc_choke_qopt {
6932 ++ __u32 limit; /* Hard queue length (packets) */
6933 ++ __u32 qth_min; /* Min average threshold (packets) */
6934 ++ __u32 qth_max; /* Max average threshold (packets) */
6935 ++ unsigned char Wlog; /* log(W) */
6936 ++ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
6937 ++ unsigned char Scell_log; /* cell size for idle damping */
6938 ++ unsigned char flags; /* see RED flags */
6939 ++};
6940 ++
6941 ++struct tc_choke_xstats {
6942 ++ __u32 early; /* Early drops */
6943 ++ __u32 pdrop; /* Drops due to queue limits */
6944 ++ __u32 other; /* Drops due to drop() calls */
6945 ++ __u32 marked; /* Marked packets */
6946 ++ __u32 matched; /* Drops due to flow match */
6947 ++};
6948 ++
6949 ++/* HTB section */
6950 ++#define TC_HTB_NUMPRIO 8
6951 ++#define TC_HTB_MAXDEPTH 8
6952 ++#define TC_HTB_PROTOVER 3 /* the same as HTB and TC's major */
6953 ++
6954 ++struct tc_htb_opt {
6955 ++ struct tc_ratespec rate;
6956 ++ struct tc_ratespec ceil;
6957 ++ __u32 buffer;
6958 ++ __u32 cbuffer;
6959 ++ __u32 quantum;
6960 ++ __u32 level; /* out only */
6961 ++ __u32 prio;
6962 ++};
6963 ++struct tc_htb_glob {
6964 ++ __u32 version; /* to match HTB/TC */
6965 ++ __u32 rate2quantum; /* bps->quantum divisor */
6966 ++ __u32 defcls; /* default class number */
6967 ++ __u32 debug; /* debug flags */
6968 ++
6969 ++ /* stats */
6970 ++ __u32 direct_pkts; /* count of non shaped packets */
6971 ++};
6972 ++enum {
6973 ++ TCA_HTB_UNSPEC,
6974 ++ TCA_HTB_PARMS,
6975 ++ TCA_HTB_INIT,
6976 ++ TCA_HTB_CTAB,
6977 ++ TCA_HTB_RTAB,
6978 ++ TCA_HTB_DIRECT_QLEN,
6979 ++ TCA_HTB_RATE64,
6980 ++ TCA_HTB_CEIL64,
6981 ++ TCA_HTB_PAD,
6982 ++ __TCA_HTB_MAX,
6983 ++};
6984 ++
6985 ++#define TCA_HTB_MAX (__TCA_HTB_MAX - 1)
6986 ++
6987 ++struct tc_htb_xstats {
6988 ++ __u32 lends;
6989 ++ __u32 borrows;
6990 ++ __u32 giants; /* unused since 'Make HTB scheduler work with TSO.' */
6991 ++ __s32 tokens;
6992 ++ __s32 ctokens;
6993 ++};
6994 ++
6995 ++/* HFSC section */
6996 ++
6997 ++struct tc_hfsc_qopt {
6998 ++ __u16 defcls; /* default class */
6999 ++};
7000 ++
7001 ++struct tc_service_curve {
7002 ++ __u32 m1; /* slope of the first segment in bps */
7003 ++ __u32 d; /* x-projection of the first segment in us */
7004 ++ __u32 m2; /* slope of the second segment in bps */
7005 ++};
7006 ++
7007 ++struct tc_hfsc_stats {
7008 ++ __u64 work; /* total work done */
7009 ++ __u64 rtwork; /* work done by real-time criteria */
7010 ++ __u32 period; /* current period */
7011 ++ __u32 level; /* class level in hierarchy */
7012 ++};
7013 ++
7014 ++enum {
7015 ++ TCA_HFSC_UNSPEC,
7016 ++ TCA_HFSC_RSC,
7017 ++ TCA_HFSC_FSC,
7018 ++ TCA_HFSC_USC,
7019 ++ __TCA_HFSC_MAX,
7020 ++};
7021 ++
7022 ++#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
7023 ++
7024 ++
7025 ++/* CBQ section */
7026 ++
7027 ++#define TC_CBQ_MAXPRIO 8
7028 ++#define TC_CBQ_MAXLEVEL 8
7029 ++#define TC_CBQ_DEF_EWMA 5
7030 ++
7031 ++struct tc_cbq_lssopt {
7032 ++ unsigned char change;
7033 ++ unsigned char flags;
7034 ++#define TCF_CBQ_LSS_BOUNDED 1
7035 ++#define TCF_CBQ_LSS_ISOLATED 2
7036 ++ unsigned char ewma_log;
7037 ++ unsigned char level;
7038 ++#define TCF_CBQ_LSS_FLAGS 1
7039 ++#define TCF_CBQ_LSS_EWMA 2
7040 ++#define TCF_CBQ_LSS_MAXIDLE 4
7041 ++#define TCF_CBQ_LSS_MINIDLE 8
7042 ++#define TCF_CBQ_LSS_OFFTIME 0x10
7043 ++#define TCF_CBQ_LSS_AVPKT 0x20
7044 ++ __u32 maxidle;
7045 ++ __u32 minidle;
7046 ++ __u32 offtime;
7047 ++ __u32 avpkt;
7048 ++};
7049 ++
7050 ++struct tc_cbq_wrropt {
7051 ++ unsigned char flags;
7052 ++ unsigned char priority;
7053 ++ unsigned char cpriority;
7054 ++ unsigned char __reserved;
7055 ++ __u32 allot;
7056 ++ __u32 weight;
7057 ++};
7058 ++
7059 ++struct tc_cbq_ovl {
7060 ++ unsigned char strategy;
7061 ++#define TC_CBQ_OVL_CLASSIC 0
7062 ++#define TC_CBQ_OVL_DELAY 1
7063 ++#define TC_CBQ_OVL_LOWPRIO 2
7064 ++#define TC_CBQ_OVL_DROP 3
7065 ++#define TC_CBQ_OVL_RCLASSIC 4
7066 ++ unsigned char priority2;
7067 ++ __u16 pad;
7068 ++ __u32 penalty;
7069 ++};
7070 ++
7071 ++struct tc_cbq_police {
7072 ++ unsigned char police;
7073 ++ unsigned char __res1;
7074 ++ unsigned short __res2;
7075 ++};
7076 ++
7077 ++struct tc_cbq_fopt {
7078 ++ __u32 split;
7079 ++ __u32 defmap;
7080 ++ __u32 defchange;
7081 ++};
7082 ++
7083 ++struct tc_cbq_xstats {
7084 ++ __u32 borrows;
7085 ++ __u32 overactions;
7086 ++ __s32 avgidle;
7087 ++ __s32 undertime;
7088 ++};
7089 ++
7090 ++enum {
7091 ++ TCA_CBQ_UNSPEC,
7092 ++ TCA_CBQ_LSSOPT,
7093 ++ TCA_CBQ_WRROPT,
7094 ++ TCA_CBQ_FOPT,
7095 ++ TCA_CBQ_OVL_STRATEGY,
7096 ++ TCA_CBQ_RATE,
7097 ++ TCA_CBQ_RTAB,
7098 ++ TCA_CBQ_POLICE,
7099 ++ __TCA_CBQ_MAX,
7100 ++};
7101 ++
7102 ++#define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1)
7103 ++
7104 ++/* dsmark section */
7105 ++
7106 ++enum {
7107 ++ TCA_DSMARK_UNSPEC,
7108 ++ TCA_DSMARK_INDICES,
7109 ++ TCA_DSMARK_DEFAULT_INDEX,
7110 ++ TCA_DSMARK_SET_TC_INDEX,
7111 ++ TCA_DSMARK_MASK,
7112 ++ TCA_DSMARK_VALUE,
7113 ++ __TCA_DSMARK_MAX,
7114 ++};
7115 ++
7116 ++#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)
7117 ++
7118 ++/* ATM section */
7119 ++
7120 ++enum {
7121 ++ TCA_ATM_UNSPEC,
7122 ++ TCA_ATM_FD, /* file/socket descriptor */
7123 ++ TCA_ATM_PTR, /* pointer to descriptor - later */
7124 ++ TCA_ATM_HDR, /* LL header */
7125 ++ TCA_ATM_EXCESS, /* excess traffic class (0 for CLP) */
7126 ++ TCA_ATM_ADDR, /* PVC address (for output only) */
7127 ++ TCA_ATM_STATE, /* VC state (ATM_VS_*; for output only) */
7128 ++ __TCA_ATM_MAX,
7129 ++};
7130 ++
7131 ++#define TCA_ATM_MAX (__TCA_ATM_MAX - 1)
7132 ++
7133 ++/* Network emulator */
7134 ++
7135 ++enum {
7136 ++ TCA_NETEM_UNSPEC,
7137 ++ TCA_NETEM_CORR,
7138 ++ TCA_NETEM_DELAY_DIST,
7139 ++ TCA_NETEM_REORDER,
7140 ++ TCA_NETEM_CORRUPT,
7141 ++ TCA_NETEM_LOSS,
7142 ++ TCA_NETEM_RATE,
7143 ++ TCA_NETEM_ECN,
7144 ++ TCA_NETEM_RATE64,
7145 ++ TCA_NETEM_PAD,
7146 ++ TCA_NETEM_LATENCY64,
7147 ++ TCA_NETEM_JITTER64,
7148 ++ TCA_NETEM_SLOT,
7149 ++ TCA_NETEM_SLOT_DIST,
7150 ++ __TCA_NETEM_MAX,
7151 ++};
7152 ++
7153 ++#define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1)
7154 ++
7155 ++struct tc_netem_qopt {
7156 ++ __u32 latency; /* added delay (us) */
7157 ++ __u32 limit; /* fifo limit (packets) */
7158 ++ __u32 loss; /* random packet loss (0=none ~0=100%) */
7159 ++ __u32 gap; /* re-ordering gap (0 for none) */
7160 ++ __u32 duplicate; /* random packet dup (0=none ~0=100%) */
7161 ++ __u32 jitter; /* random jitter in latency (us) */
7162 ++};
7163 ++
7164 ++struct tc_netem_corr {
7165 ++ __u32 delay_corr; /* delay correlation */
7166 ++ __u32 loss_corr; /* packet loss correlation */
7167 ++ __u32 dup_corr; /* duplicate correlation */
7168 ++};
7169 ++
7170 ++struct tc_netem_reorder {
7171 ++ __u32 probability;
7172 ++ __u32 correlation;
7173 ++};
7174 ++
7175 ++struct tc_netem_corrupt {
7176 ++ __u32 probability;
7177 ++ __u32 correlation;
7178 ++};
7179 ++
7180 ++struct tc_netem_rate {
7181 ++ __u32 rate; /* byte/s */
7182 ++ __s32 packet_overhead;
7183 ++ __u32 cell_size;
7184 ++ __s32 cell_overhead;
7185 ++};
7186 ++
7187 ++struct tc_netem_slot {
7188 ++ __s64 min_delay; /* nsec */
7189 ++ __s64 max_delay;
7190 ++ __s32 max_packets;
7191 ++ __s32 max_bytes;
7192 ++ __s64 dist_delay; /* nsec */
7193 ++ __s64 dist_jitter; /* nsec */
7194 ++};
7195 ++
7196 ++enum {
7197 ++ NETEM_LOSS_UNSPEC,
7198 ++ NETEM_LOSS_GI, /* General Intuitive - 4 state model */
7199 ++ NETEM_LOSS_GE, /* Gilbert Elliot models */
7200 ++ __NETEM_LOSS_MAX
7201 ++};
7202 ++#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
7203 ++
7204 ++/* State transition probabilities for 4 state model */
7205 ++struct tc_netem_gimodel {
7206 ++ __u32 p13;
7207 ++ __u32 p31;
7208 ++ __u32 p32;
7209 ++ __u32 p14;
7210 ++ __u32 p23;
7211 ++};
7212 ++
7213 ++/* Gilbert-Elliot models */
7214 ++struct tc_netem_gemodel {
7215 ++ __u32 p;
7216 ++ __u32 r;
7217 ++ __u32 h;
7218 ++ __u32 k1;
7219 ++};
7220 ++
7221 ++#define NETEM_DIST_SCALE 8192
7222 ++#define NETEM_DIST_MAX 16384
7223 ++
7224 ++/* DRR */
7225 ++
7226 ++enum {
7227 ++ TCA_DRR_UNSPEC,
7228 ++ TCA_DRR_QUANTUM,
7229 ++ __TCA_DRR_MAX
7230 ++};
7231 ++
7232 ++#define TCA_DRR_MAX (__TCA_DRR_MAX - 1)
7233 ++
7234 ++struct tc_drr_stats {
7235 ++ __u32 deficit;
7236 ++};
7237 ++
7238 ++/* MQPRIO */
7239 ++#define TC_QOPT_BITMASK 15
7240 ++#define TC_QOPT_MAX_QUEUE 16
7241 ++
7242 ++enum {
7243 ++ TC_MQPRIO_HW_OFFLOAD_NONE, /* no offload requested */
7244 ++ TC_MQPRIO_HW_OFFLOAD_TCS, /* offload TCs, no queue counts */
7245 ++ __TC_MQPRIO_HW_OFFLOAD_MAX
7246 ++};
7247 ++
7248 ++#define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1)
7249 ++
7250 ++enum {
7251 ++ TC_MQPRIO_MODE_DCB,
7252 ++ TC_MQPRIO_MODE_CHANNEL,
7253 ++ __TC_MQPRIO_MODE_MAX
7254 ++};
7255 ++
7256 ++#define __TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1)
7257 ++
7258 ++enum {
7259 ++ TC_MQPRIO_SHAPER_DCB,
7260 ++ TC_MQPRIO_SHAPER_BW_RATE, /* Add new shapers below */
7261 ++ __TC_MQPRIO_SHAPER_MAX
7262 ++};
7263 ++
7264 ++#define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1)
7265 ++
7266 ++struct tc_mqprio_qopt {
7267 ++ __u8 num_tc;
7268 ++ __u8 prio_tc_map[TC_QOPT_BITMASK + 1];
7269 ++ __u8 hw;
7270 ++ __u16 count[TC_QOPT_MAX_QUEUE];
7271 ++ __u16 offset[TC_QOPT_MAX_QUEUE];
7272 ++};
7273 ++
7274 ++#define TC_MQPRIO_F_MODE 0x1
7275 ++#define TC_MQPRIO_F_SHAPER 0x2
7276 ++#define TC_MQPRIO_F_MIN_RATE 0x4
7277 ++#define TC_MQPRIO_F_MAX_RATE 0x8
7278 ++
7279 ++enum {
7280 ++ TCA_MQPRIO_UNSPEC,
7281 ++ TCA_MQPRIO_MODE,
7282 ++ TCA_MQPRIO_SHAPER,
7283 ++ TCA_MQPRIO_MIN_RATE64,
7284 ++ TCA_MQPRIO_MAX_RATE64,
7285 ++ __TCA_MQPRIO_MAX,
7286 ++};
7287 ++
7288 ++#define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1)
7289 ++
7290 ++/* SFB */
7291 ++
7292 ++enum {
7293 ++ TCA_SFB_UNSPEC,
7294 ++ TCA_SFB_PARMS,
7295 ++ __TCA_SFB_MAX,
7296 ++};
7297 ++
7298 ++#define TCA_SFB_MAX (__TCA_SFB_MAX - 1)
7299 ++
7300 ++/*
7301 ++ * Note: increment, decrement are Q0.16 fixed-point values.
7302 ++ */
7303 ++struct tc_sfb_qopt {
7304 ++ __u32 rehash_interval; /* delay between hash move, in ms */
7305 ++ __u32 warmup_time; /* double buffering warmup time in ms (warmup_time < rehash_interval) */
7306 ++ __u32 max; /* max len of qlen_min */
7307 ++ __u32 bin_size; /* maximum queue length per bin */
7308 ++ __u32 increment; /* probability increment, (d1 in Blue) */
7309 ++ __u32 decrement; /* probability decrement, (d2 in Blue) */
7310 ++ __u32 limit; /* max SFB queue length */
7311 ++ __u32 penalty_rate; /* inelastic flows are rate limited to 'rate' pps */
7312 ++ __u32 penalty_burst;
7313 ++};
7314 ++
7315 ++struct tc_sfb_xstats {
7316 ++ __u32 earlydrop;
7317 ++ __u32 penaltydrop;
7318 ++ __u32 bucketdrop;
7319 ++ __u32 queuedrop;
7320 ++ __u32 childdrop; /* drops in child qdisc */
7321 ++ __u32 marked;
7322 ++ __u32 maxqlen;
7323 ++ __u32 maxprob;
7324 ++ __u32 avgprob;
7325 ++};
7326 ++
7327 ++#define SFB_MAX_PROB 0xFFFF
7328 ++
7329 ++/* QFQ */
7330 ++enum {
7331 ++ TCA_QFQ_UNSPEC,
7332 ++ TCA_QFQ_WEIGHT,
7333 ++ TCA_QFQ_LMAX,
7334 ++ __TCA_QFQ_MAX
7335 ++};
7336 ++
7337 ++#define TCA_QFQ_MAX (__TCA_QFQ_MAX - 1)
7338 ++
7339 ++struct tc_qfq_stats {
7340 ++ __u32 weight;
7341 ++ __u32 lmax;
7342 ++};
7343 ++
7344 ++/* CODEL */
7345 ++
7346 ++enum {
7347 ++ TCA_CODEL_UNSPEC,
7348 ++ TCA_CODEL_TARGET,
7349 ++ TCA_CODEL_LIMIT,
7350 ++ TCA_CODEL_INTERVAL,
7351 ++ TCA_CODEL_ECN,
7352 ++ TCA_CODEL_CE_THRESHOLD,
7353 ++ __TCA_CODEL_MAX
7354 ++};
7355 ++
7356 ++#define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1)
7357 ++
7358 ++struct tc_codel_xstats {
7359 ++ __u32 maxpacket; /* largest packet we've seen so far */
7360 ++ __u32 count; /* how many drops we've done since the last time we
7361 ++ * entered dropping state
7362 ++ */
7363 ++ __u32 lastcount; /* count at entry to dropping state */
7364 ++ __u32 ldelay; /* in-queue delay seen by most recently dequeued packet */
7365 ++ __s32 drop_next; /* time to drop next packet */
7366 ++ __u32 drop_overlimit; /* number of times the max qdisc packet limit was hit */
7367 ++ __u32 ecn_mark; /* number of packets we ECN marked instead of dropped */
7368 ++ __u32 dropping; /* are we in dropping state? */
7369 ++ __u32 ce_mark; /* number of CE marked packets because of ce_threshold */
7370 ++};
7371 ++
7372 ++/* FQ_CODEL */
7373 ++
7374 ++enum {
7375 ++ TCA_FQ_CODEL_UNSPEC,
7376 ++ TCA_FQ_CODEL_TARGET,
7377 ++ TCA_FQ_CODEL_LIMIT,
7378 ++ TCA_FQ_CODEL_INTERVAL,
7379 ++ TCA_FQ_CODEL_ECN,
7380 ++ TCA_FQ_CODEL_FLOWS,
7381 ++ TCA_FQ_CODEL_QUANTUM,
7382 ++ TCA_FQ_CODEL_CE_THRESHOLD,
7383 ++ TCA_FQ_CODEL_DROP_BATCH_SIZE,
7384 ++ TCA_FQ_CODEL_MEMORY_LIMIT,
7385 ++ __TCA_FQ_CODEL_MAX
7386 ++};
7387 ++
7388 ++#define TCA_FQ_CODEL_MAX (__TCA_FQ_CODEL_MAX - 1)
7389 ++
7390 ++enum {
7391 ++ TCA_FQ_CODEL_XSTATS_QDISC,
7392 ++ TCA_FQ_CODEL_XSTATS_CLASS,
7393 ++};
7394 ++
7395 ++struct tc_fq_codel_qd_stats {
7396 ++ __u32 maxpacket; /* largest packet we've seen so far */
7397 ++ __u32 drop_overlimit; /* number of times the max qdisc
7398 ++ * packet limit was hit
7399 ++ */
7400 ++ __u32 ecn_mark; /* number of packets we ECN marked
7401 ++ * instead of being dropped
7402 ++ */
7403 ++ __u32 new_flow_count; /* number of times packets
7404 ++ * created a 'new flow'
7405 ++ */
7406 ++ __u32 new_flows_len; /* count of flows in new list */
7407 ++ __u32 old_flows_len; /* count of flows in old list */
7408 ++ __u32 ce_mark; /* packets above ce_threshold */
7409 ++ __u32 memory_usage; /* in bytes */
7410 ++ __u32 drop_overmemory;
7411 ++};
7412 ++
7413 ++struct tc_fq_codel_cl_stats {
7414 ++ __s32 deficit;
7415 ++ __u32 ldelay; /* in-queue delay seen by most recently
7416 ++ * dequeued packet
7417 ++ */
7418 ++ __u32 count;
7419 ++ __u32 lastcount;
7420 ++ __u32 dropping;
7421 ++ __s32 drop_next;
7422 ++};
7423 ++
7424 ++struct tc_fq_codel_xstats {
7425 ++ __u32 type;
7426 ++ union {
7427 ++ struct tc_fq_codel_qd_stats qdisc_stats;
7428 ++ struct tc_fq_codel_cl_stats class_stats;
7429 ++ };
7430 ++};
7431 ++
7432 ++/* FQ */
7433 ++
7434 ++enum {
7435 ++ TCA_FQ_UNSPEC,
7436 ++
7437 ++ TCA_FQ_PLIMIT, /* limit of total number of packets in queue */
7438 ++
7439 ++ TCA_FQ_FLOW_PLIMIT, /* limit of packets per flow */
7440 ++
7441 ++ TCA_FQ_QUANTUM, /* RR quantum */
7442 ++
7443 ++ TCA_FQ_INITIAL_QUANTUM, /* RR quantum for new flow */
7444 ++
7445 ++ TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */
7446 ++
7447 ++ TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */
7448 ++
7449 ++ TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */
7450 ++
7451 ++ TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */
7452 ++
7453 ++ TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */
7454 ++
7455 ++ TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */
7456 ++
7457 ++ TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
7458 ++
7459 ++ TCA_FQ_CE_THRESHOLD, /* DCTCP-like CE-marking threshold */
7460 ++
7461 ++ __TCA_FQ_MAX
7462 ++};
7463 ++
7464 ++#define TCA_FQ_MAX (__TCA_FQ_MAX - 1)
7465 ++
7466 ++struct tc_fq_qd_stats {
7467 ++ __u64 gc_flows;
7468 ++ __u64 highprio_packets;
7469 ++ __u64 tcp_retrans;
7470 ++ __u64 throttled;
7471 ++ __u64 flows_plimit;
7472 ++ __u64 pkts_too_long;
7473 ++ __u64 allocation_errors;
7474 ++ __s64 time_next_delayed_flow;
7475 ++ __u32 flows;
7476 ++ __u32 inactive_flows;
7477 ++ __u32 throttled_flows;
7478 ++ __u32 unthrottle_latency_ns;
7479 ++ __u64 ce_mark; /* packets above ce_threshold */
7480 ++};
7481 ++
7482 ++/* Heavy-Hitter Filter */
7483 ++
7484 ++enum {
7485 ++ TCA_HHF_UNSPEC,
7486 ++ TCA_HHF_BACKLOG_LIMIT,
7487 ++ TCA_HHF_QUANTUM,
7488 ++ TCA_HHF_HH_FLOWS_LIMIT,
7489 ++ TCA_HHF_RESET_TIMEOUT,
7490 ++ TCA_HHF_ADMIT_BYTES,
7491 ++ TCA_HHF_EVICT_TIMEOUT,
7492 ++ TCA_HHF_NON_HH_WEIGHT,
7493 ++ __TCA_HHF_MAX
7494 ++};
7495 ++
7496 ++#define TCA_HHF_MAX (__TCA_HHF_MAX - 1)
7497 ++
7498 ++struct tc_hhf_xstats {
7499 ++ __u32 drop_overlimit; /* number of times max qdisc packet limit
7500 ++ * was hit
7501 ++ */
7502 ++ __u32 hh_overlimit; /* number of times max heavy-hitters was hit */
7503 ++ __u32 hh_tot_count; /* number of captured heavy-hitters so far */
7504 ++ __u32 hh_cur_count; /* number of current heavy-hitters */
7505 ++};
7506 ++
7507 ++/* PIE */
7508 ++enum {
7509 ++ TCA_PIE_UNSPEC,
7510 ++ TCA_PIE_TARGET,
7511 ++ TCA_PIE_LIMIT,
7512 ++ TCA_PIE_TUPDATE,
7513 ++ TCA_PIE_ALPHA,
7514 ++ TCA_PIE_BETA,
7515 ++ TCA_PIE_ECN,
7516 ++ TCA_PIE_BYTEMODE,
7517 ++ __TCA_PIE_MAX
7518 ++};
7519 ++#define TCA_PIE_MAX (__TCA_PIE_MAX - 1)
7520 ++
7521 ++struct tc_pie_xstats {
7522 ++ __u32 prob; /* current probability */
7523 ++ __u32 delay; /* current delay in ms */
7524 ++ __u32 avg_dq_rate; /* current average dq_rate in bits/pie_time */
7525 ++ __u32 packets_in; /* total number of packets enqueued */
7526 ++ __u32 dropped; /* packets dropped due to pie_action */
7527 ++ __u32 overlimit; /* dropped due to lack of space in queue */
7528 ++ __u32 maxq; /* maximum queue size */
7529 ++ __u32 ecn_mark; /* packets marked with ECN */
7530 ++};
7531 ++
7532 ++/* CBS */
7533 ++struct tc_cbs_qopt {
7534 ++ __u8 offload;
7535 ++ __u8 _pad[3];
7536 ++ __s32 hicredit;
7537 ++ __s32 locredit;
7538 ++ __s32 idleslope;
7539 ++ __s32 sendslope;
7540 ++};
7541 ++
7542 ++enum {
7543 ++ TCA_CBS_UNSPEC,
7544 ++ TCA_CBS_PARMS,
7545 ++ __TCA_CBS_MAX,
7546 ++};
7547 ++
7548 ++#define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
7549 ++
7550 ++
7551 ++/* ETF */
7552 ++struct tc_etf_qopt {
7553 ++ __s32 delta;
7554 ++ __s32 clockid;
7555 ++ __u32 flags;
7556 ++#define TC_ETF_DEADLINE_MODE_ON BIT(0)
7557 ++#define TC_ETF_OFFLOAD_ON BIT(1)
7558 ++};
7559 ++
7560 ++enum {
7561 ++ TCA_ETF_UNSPEC,
7562 ++ TCA_ETF_PARMS,
7563 ++ __TCA_ETF_MAX,
7564 ++};
7565 ++
7566 ++#define TCA_ETF_MAX (__TCA_ETF_MAX - 1)
7567 ++
7568 ++
7569 ++/* CAKE */
7570 ++enum {
7571 ++ TCA_CAKE_UNSPEC,
7572 ++ TCA_CAKE_PAD,
7573 ++ TCA_CAKE_BASE_RATE64,
7574 ++ TCA_CAKE_DIFFSERV_MODE,
7575 ++ TCA_CAKE_ATM,
7576 ++ TCA_CAKE_FLOW_MODE,
7577 ++ TCA_CAKE_OVERHEAD,
7578 ++ TCA_CAKE_RTT,
7579 ++ TCA_CAKE_TARGET,
7580 ++ TCA_CAKE_AUTORATE,
7581 ++ TCA_CAKE_MEMORY,
7582 ++ TCA_CAKE_NAT,
7583 ++ TCA_CAKE_RAW,
7584 ++ TCA_CAKE_WASH,
7585 ++ TCA_CAKE_MPU,
7586 ++ TCA_CAKE_INGRESS,
7587 ++ TCA_CAKE_ACK_FILTER,
7588 ++ TCA_CAKE_SPLIT_GSO,
7589 ++ __TCA_CAKE_MAX
7590 ++};
7591 ++#define TCA_CAKE_MAX (__TCA_CAKE_MAX - 1)
7592 ++
7593 ++enum {
7594 ++ __TCA_CAKE_STATS_INVALID,
7595 ++ TCA_CAKE_STATS_PAD,
7596 ++ TCA_CAKE_STATS_CAPACITY_ESTIMATE64,
7597 ++ TCA_CAKE_STATS_MEMORY_LIMIT,
7598 ++ TCA_CAKE_STATS_MEMORY_USED,
7599 ++ TCA_CAKE_STATS_AVG_NETOFF,
7600 ++ TCA_CAKE_STATS_MIN_NETLEN,
7601 ++ TCA_CAKE_STATS_MAX_NETLEN,
7602 ++ TCA_CAKE_STATS_MIN_ADJLEN,
7603 ++ TCA_CAKE_STATS_MAX_ADJLEN,
7604 ++ TCA_CAKE_STATS_TIN_STATS,
7605 ++ TCA_CAKE_STATS_DEFICIT,
7606 ++ TCA_CAKE_STATS_COBALT_COUNT,
7607 ++ TCA_CAKE_STATS_DROPPING,
7608 ++ TCA_CAKE_STATS_DROP_NEXT_US,
7609 ++ TCA_CAKE_STATS_P_DROP,
7610 ++ TCA_CAKE_STATS_BLUE_TIMER_US,
7611 ++ __TCA_CAKE_STATS_MAX
7612 ++};
7613 ++#define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1)
7614 ++
7615 ++enum {
7616 ++ __TCA_CAKE_TIN_STATS_INVALID,
7617 ++ TCA_CAKE_TIN_STATS_PAD,
7618 ++ TCA_CAKE_TIN_STATS_SENT_PACKETS,
7619 ++ TCA_CAKE_TIN_STATS_SENT_BYTES64,
7620 ++ TCA_CAKE_TIN_STATS_DROPPED_PACKETS,
7621 ++ TCA_CAKE_TIN_STATS_DROPPED_BYTES64,
7622 ++ TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS,
7623 ++ TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64,
7624 ++ TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS,
7625 ++ TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64,
7626 ++ TCA_CAKE_TIN_STATS_BACKLOG_PACKETS,
7627 ++ TCA_CAKE_TIN_STATS_BACKLOG_BYTES,
7628 ++ TCA_CAKE_TIN_STATS_THRESHOLD_RATE64,
7629 ++ TCA_CAKE_TIN_STATS_TARGET_US,
7630 ++ TCA_CAKE_TIN_STATS_INTERVAL_US,
7631 ++ TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS,
7632 ++ TCA_CAKE_TIN_STATS_WAY_MISSES,
7633 ++ TCA_CAKE_TIN_STATS_WAY_COLLISIONS,
7634 ++ TCA_CAKE_TIN_STATS_PEAK_DELAY_US,
7635 ++ TCA_CAKE_TIN_STATS_AVG_DELAY_US,
7636 ++ TCA_CAKE_TIN_STATS_BASE_DELAY_US,
7637 ++ TCA_CAKE_TIN_STATS_SPARSE_FLOWS,
7638 ++ TCA_CAKE_TIN_STATS_BULK_FLOWS,
7639 ++ TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS,
7640 ++ TCA_CAKE_TIN_STATS_MAX_SKBLEN,
7641 ++ TCA_CAKE_TIN_STATS_FLOW_QUANTUM,
7642 ++ __TCA_CAKE_TIN_STATS_MAX
7643 ++};
7644 ++#define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1)
7645 ++#define TC_CAKE_MAX_TINS (8)
7646 ++
7647 ++enum {
7648 ++ CAKE_FLOW_NONE = 0,
7649 ++ CAKE_FLOW_SRC_IP,
7650 ++ CAKE_FLOW_DST_IP,
7651 ++ CAKE_FLOW_HOSTS, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */
7652 ++ CAKE_FLOW_FLOWS,
7653 ++ CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */
7654 ++ CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */
7655 ++ CAKE_FLOW_TRIPLE, /* = CAKE_FLOW_HOSTS | CAKE_FLOW_FLOWS */
7656 ++ CAKE_FLOW_MAX,
7657 ++};
7658 ++
7659 ++enum {
7660 ++ CAKE_DIFFSERV_DIFFSERV3 = 0,
7661 ++ CAKE_DIFFSERV_DIFFSERV4,
7662 ++ CAKE_DIFFSERV_DIFFSERV8,
7663 ++ CAKE_DIFFSERV_BESTEFFORT,
7664 ++ CAKE_DIFFSERV_PRECEDENCE,
7665 ++ CAKE_DIFFSERV_MAX
7666 ++};
7667 ++
7668 ++enum {
7669 ++ CAKE_ACK_NONE = 0,
7670 ++ CAKE_ACK_FILTER,
7671 ++ CAKE_ACK_AGGRESSIVE,
7672 ++ CAKE_ACK_MAX
7673 ++};
7674 ++
7675 ++enum {
7676 ++ CAKE_ATM_NONE = 0,
7677 ++ CAKE_ATM_ATM,
7678 ++ CAKE_ATM_PTM,
7679 ++ CAKE_ATM_MAX
7680 ++};
7681 ++
7682 ++
7683 ++/* TAPRIO */
7684 ++enum {
7685 ++ TC_TAPRIO_CMD_SET_GATES = 0x00,
7686 ++ TC_TAPRIO_CMD_SET_AND_HOLD = 0x01,
7687 ++ TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02,
7688 ++};
7689 ++
7690 ++enum {
7691 ++ TCA_TAPRIO_SCHED_ENTRY_UNSPEC,
7692 ++ TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */
7693 ++ TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */
7694 ++ TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */
7695 ++ TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */
7696 ++ __TCA_TAPRIO_SCHED_ENTRY_MAX,
7697 ++};
7698 ++#define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1)
7699 ++
7700 ++/* The format for schedule entry list is:
7701 ++ * [TCA_TAPRIO_SCHED_ENTRY_LIST]
7702 ++ * [TCA_TAPRIO_SCHED_ENTRY]
7703 ++ * [TCA_TAPRIO_SCHED_ENTRY_CMD]
7704 ++ * [TCA_TAPRIO_SCHED_ENTRY_GATES]
7705 ++ * [TCA_TAPRIO_SCHED_ENTRY_INTERVAL]
7706 ++ */
7707 ++enum {
7708 ++ TCA_TAPRIO_SCHED_UNSPEC,
7709 ++ TCA_TAPRIO_SCHED_ENTRY,
7710 ++ __TCA_TAPRIO_SCHED_MAX,
7711 ++};
7712 ++
7713 ++#define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1)
7714 ++
7715 ++enum {
7716 ++ TCA_TAPRIO_ATTR_UNSPEC,
7717 ++ TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
7718 ++ TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */
7719 ++ TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */
7720 ++ TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */
7721 ++ TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */
7722 ++ TCA_TAPRIO_PAD,
7723 ++ __TCA_TAPRIO_ATTR_MAX,
7724 ++};
7725 ++
7726 ++#define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1)
7727 ++
7728 ++#endif
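This header is copied into tools/ so the BPF selftests can build without depending on the system's installed UAPI headers. Its handle macros are worth a moment: a TC handle packs a 16-bit major and a 16-bit minor, so tc's familiar "1:2" notation is just TC_H_MAKE(0x10000, 2). Exercised directly, with the macros repeated verbatim from the header above:

#include <stdio.h>

#define TC_H_MAJ_MASK (0xFFFF0000U)
#define TC_H_MIN_MASK (0x0000FFFFU)
#define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)
#define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
#define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))

int main(void)
{
	unsigned int qdisc = TC_H_MAKE(0x10000U, 0);	/* "1:"  */
	unsigned int cls   = TC_H_MAKE(qdisc, 2);	/* "1:2" */

	printf("qdisc %x:%x class %x:%x\n",
	       TC_H_MAJ(qdisc) >> 16, TC_H_MIN(qdisc),
	       TC_H_MAJ(cls) >> 16, TC_H_MIN(cls));
	return 0;
}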
7729 +diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
7730 +index ecd79b7fb1073..d5e992f7c7dd5 100644
7731 +--- a/tools/testing/selftests/bpf/Makefile
7732 ++++ b/tools/testing/selftests/bpf/Makefile
7733 +@@ -53,7 +53,10 @@ TEST_PROGS := test_kmod.sh \
7734 + test_flow_dissector.sh \
7735 + test_xdp_vlan.sh
7736 +
7737 +-TEST_PROGS_EXTENDED := with_addr.sh
7738 ++TEST_PROGS_EXTENDED := with_addr.sh \
7739 ++ with_tunnels.sh \
7740 ++ tcp_client.py \
7741 ++ tcp_server.py
7742 +
7743 + # Compile but not part of 'make run_tests'
7744 + TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
7745 +diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
7746 +index 6ac7232b0fdeb..3ec4ce156074c 100644
7747 +--- a/tools/testing/selftests/bpf/test_progs.c
7748 ++++ b/tools/testing/selftests/bpf/test_progs.c
7749 +@@ -1136,7 +1136,9 @@ static void test_stacktrace_build_id(void)
7750 + int i, j;
7751 + struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
7752 + int build_id_matches = 0;
7753 ++ int retry = 1;
7754 +
7755 ++retry:
7756 + err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
7757 + if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
7758 + goto out;
7759 +@@ -1249,6 +1251,19 @@ static void test_stacktrace_build_id(void)
7760 + previous_key = key;
7761 + } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
7762 +
7763 ++ /* stack_map_get_build_id_offset() is racy and sometimes can return
7764 ++ * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
7765 ++ * try it one more time.
7766 ++ */
7767 ++ if (build_id_matches < 1 && retry--) {
7768 ++ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
7769 ++ close(pmu_fd);
7770 ++ bpf_object__close(obj);
7771 ++ printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
7772 ++ __func__);
7773 ++ goto retry;
7774 ++ }
7775 ++
7776 + if (CHECK(build_id_matches < 1, "build id match",
7777 + "Didn't find expected build ID from the map\n"))
7778 + goto disable_pmu;
7779 +@@ -1289,7 +1304,9 @@ static void test_stacktrace_build_id_nmi(void)
7780 + int i, j;
7781 + struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
7782 + int build_id_matches = 0;
7783 ++ int retry = 1;
7784 +
7785 ++retry:
7786 + err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
7787 + if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
7788 + return;
7789 +@@ -1384,6 +1401,19 @@ static void test_stacktrace_build_id_nmi(void)
7790 + previous_key = key;
7791 + } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
7792 +
7793 ++ /* stack_map_get_build_id_offset() is racy and sometimes can return
7794 ++ * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
7795 ++ * try it one more time.
7796 ++ */
7797 ++ if (build_id_matches < 1 && retry--) {
7798 ++ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
7799 ++ close(pmu_fd);
7800 ++ bpf_object__close(obj);
7801 ++ printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
7802 ++ __func__);
7803 ++ goto retry;
7804 ++ }
7805 ++
7806 + if (CHECK(build_id_matches < 1, "build id match",
7807 + "Didn't find expected build ID from the map\n"))
7808 + goto disable_pmu;
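
The retry added to both stacktrace tests above is deliberately bounded: retry starts at 1 and is decremented on use, so the racy build-ID check is re-run at most once instead of being looped on. Stripped of the BPF specifics, the pattern looks like the following sketch (racy_check_ok is a hypothetical stand-in for the map lookup):

    #include <stdio.h>

    /* Hypothetical stand-in for the racy build-ID comparison. */
    static int racy_check_ok(void)
    {
        return 0;
    }

    int main(void)
    {
        int retry = 1;  /* allow exactly one extra attempt */
    again:
        if (!racy_check_ok() && retry--) {
            fprintf(stderr, "WARN: check failed, retrying\n");
            goto again;  /* second and final attempt */
        }
        return 0;
    }

Tearing down and re-creating the resources before the goto, as the tests do with the perf fd and BPF object, keeps the second attempt from inheriting state poisoned by the first.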
7809 +diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
7810 +index aeeb76a54d633..e38f1cb7089d3 100644
7811 +--- a/tools/testing/selftests/bpf/test_sock_addr.c
7812 ++++ b/tools/testing/selftests/bpf/test_sock_addr.c
7813 +@@ -44,6 +44,7 @@
7814 + #define SERV6_V4MAPPED_IP "::ffff:192.168.0.4"
7815 + #define SRC6_IP "::1"
7816 + #define SRC6_REWRITE_IP "::6"
7817 ++#define WILDCARD6_IP "::"
7818 + #define SERV6_PORT 6060
7819 + #define SERV6_REWRITE_PORT 6666
7820 +
7821 +@@ -85,12 +86,14 @@ static int bind4_prog_load(const struct sock_addr_test *test);
7822 + static int bind6_prog_load(const struct sock_addr_test *test);
7823 + static int connect4_prog_load(const struct sock_addr_test *test);
7824 + static int connect6_prog_load(const struct sock_addr_test *test);
7825 ++static int sendmsg_allow_prog_load(const struct sock_addr_test *test);
7826 + static int sendmsg_deny_prog_load(const struct sock_addr_test *test);
7827 + static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
7828 + static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test);
7829 + static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
7830 + static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test);
7831 + static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test);
7832 ++static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test);
7833 +
7834 + static struct sock_addr_test tests[] = {
7835 + /* bind */
7836 +@@ -462,6 +465,34 @@ static struct sock_addr_test tests[] = {
7837 + SRC6_REWRITE_IP,
7838 + SYSCALL_ENOTSUPP,
7839 + },
7840 ++ {
7841 ++ "sendmsg6: set dst IP = [::] (BSD'ism)",
7842 ++ sendmsg6_rw_wildcard_prog_load,
7843 ++ BPF_CGROUP_UDP6_SENDMSG,
7844 ++ BPF_CGROUP_UDP6_SENDMSG,
7845 ++ AF_INET6,
7846 ++ SOCK_DGRAM,
7847 ++ SERV6_IP,
7848 ++ SERV6_PORT,
7849 ++ SERV6_REWRITE_IP,
7850 ++ SERV6_REWRITE_PORT,
7851 ++ SRC6_REWRITE_IP,
7852 ++ SUCCESS,
7853 ++ },
7854 ++ {
7855 ++ "sendmsg6: preserve dst IP = [::] (BSD'ism)",
7856 ++ sendmsg_allow_prog_load,
7857 ++ BPF_CGROUP_UDP6_SENDMSG,
7858 ++ BPF_CGROUP_UDP6_SENDMSG,
7859 ++ AF_INET6,
7860 ++ SOCK_DGRAM,
7861 ++ WILDCARD6_IP,
7862 ++ SERV6_PORT,
7863 ++ SERV6_REWRITE_IP,
7864 ++ SERV6_PORT,
7865 ++ SRC6_IP,
7866 ++ SUCCESS,
7867 ++ },
7868 + {
7869 + "sendmsg6: deny call",
7870 + sendmsg_deny_prog_load,
7871 +@@ -714,16 +745,27 @@ static int connect6_prog_load(const struct sock_addr_test *test)
7872 + return load_path(test, CONNECT6_PROG_PATH);
7873 + }
7874 +
7875 +-static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
7876 ++static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
7877 ++ int32_t rc)
7878 + {
7879 + struct bpf_insn insns[] = {
7880 +- /* return 0 */
7881 +- BPF_MOV64_IMM(BPF_REG_0, 0),
7882 ++ /* return rc */
7883 ++ BPF_MOV64_IMM(BPF_REG_0, rc),
7884 + BPF_EXIT_INSN(),
7885 + };
7886 + return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
7887 + }
7888 +
7889 ++static int sendmsg_allow_prog_load(const struct sock_addr_test *test)
7890 ++{
7891 ++ return sendmsg_ret_only_prog_load(test, /*rc*/ 1);
7892 ++}
7893 ++
7894 ++static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
7895 ++{
7896 ++ return sendmsg_ret_only_prog_load(test, /*rc*/ 0);
7897 ++}
7898 ++
7899 + static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
7900 + {
7901 + struct sockaddr_in dst4_rw_addr;
7902 +@@ -844,6 +886,11 @@ static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test)
7903 + return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP);
7904 + }
7905 +
7906 ++static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test)
7907 ++{
7908 ++ return sendmsg6_rw_dst_asm_prog_load(test, WILDCARD6_IP);
7909 ++}
7910 ++
7911 + static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test)
7912 + {
7913 + return load_path(test, SENDMSG6_PROG_PATH);
7914 +diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
7915 +index d8313d0438b74..b90dff8d3a94b 100755
7916 +--- a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
7917 ++++ b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
7918 +@@ -1,7 +1,7 @@
7919 + #!/bin/bash
7920 + # SPDX-License-Identifier: GPL-2.0
7921 +
7922 +-ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding"
7923 ++ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn"
7924 + NUM_NETIFS=4
7925 + CHECK_TC="yes"
7926 + source lib.sh
7927 +@@ -96,6 +96,51 @@ flooding()
7928 + flood_test $swp2 $h1 $h2
7929 + }
7930 +
7931 ++vlan_deletion()
7932 ++{
7933 ++ # Test that the deletion of a VLAN on a bridge port does not affect
7934 ++ # the PVID VLAN
7935 ++ log_info "Add and delete a VLAN on bridge port $swp1"
7936 ++
7937 ++ bridge vlan add vid 10 dev $swp1
7938 ++ bridge vlan del vid 10 dev $swp1
7939 ++
7940 ++ ping_ipv4
7941 ++ ping_ipv6
7942 ++}
7943 ++
7944 ++extern_learn()
7945 ++{
7946 ++ local mac=de:ad:be:ef:13:37
7947 ++ local ageing_time
7948 ++
7949 ++ # Test that externally learned FDB entries can roam, but not age out
7950 ++ RET=0
7951 ++
7952 ++ bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn vlan 1
7953 ++
7954 ++ bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
7955 ++	check_err $? "Did not find FDB entry when it should be present"
7956 ++
7957 ++ # Wait for 10 seconds after the ageing time to make sure the FDB entry
7958 ++ # was not aged out
7959 ++ ageing_time=$(bridge_ageing_time_get br0)
7960 ++ sleep $((ageing_time + 10))
7961 ++
7962 ++ bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
7963 ++	check_err $? "FDB entry was aged out when it should not have been"
7964 ++
7965 ++ $MZ $h2 -c 1 -p 64 -a $mac -t ip -q
7966 ++
7967 ++ bridge fdb show brport $swp2 | grep -q de:ad:be:ef:13:37
7968 ++	check_err $? "FDB entry did not roam when it should have"
7969 ++
7970 ++ log_test "Externally learned FDB entry - ageing & roaming"
7971 ++
7972 ++ bridge fdb del de:ad:be:ef:13:37 dev $swp2 master vlan 1 &> /dev/null
7973 ++ bridge fdb del de:ad:be:ef:13:37 dev $swp1 master vlan 1 &> /dev/null
7974 ++}
7975 ++
7976 + trap cleanup EXIT
7977 +
7978 + setup_prepare
7979 +diff --git a/tools/testing/selftests/net/ip_defrag.c b/tools/testing/selftests/net/ip_defrag.c
7980 +index 61ae2782388e9..5d56cc0838f62 100644
7981 +--- a/tools/testing/selftests/net/ip_defrag.c
7982 ++++ b/tools/testing/selftests/net/ip_defrag.c
7983 +@@ -203,6 +203,7 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
7984 + {
7985 + struct ip *iphdr = (struct ip *)ip_frame;
7986 + struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame;
7987 ++ const bool ipv4 = !ipv6;
7988 + int res;
7989 + int offset;
7990 + int frag_len;
7991 +@@ -239,19 +240,53 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
7992 + iphdr->ip_sum = 0;
7993 + }
7994 +
7995 ++ /* Occasionally test in-order fragments. */
7996 ++ if (!cfg_overlap && (rand() % 100 < 15)) {
7997 ++ offset = 0;
7998 ++ while (offset < (UDP_HLEN + payload_len)) {
7999 ++ send_fragment(fd_raw, addr, alen, offset, ipv6);
8000 ++ offset += max_frag_len;
8001 ++ }
8002 ++ return;
8003 ++ }
8004 ++
8005 ++ /* Occasionally test IPv4 "runs" (see net/ipv4/ip_fragment.c) */
8006 ++ if (ipv4 && !cfg_overlap && (rand() % 100 < 20) &&
8007 ++ (payload_len > 9 * max_frag_len)) {
8008 ++ offset = 6 * max_frag_len;
8009 ++ while (offset < (UDP_HLEN + payload_len)) {
8010 ++ send_fragment(fd_raw, addr, alen, offset, ipv6);
8011 ++ offset += max_frag_len;
8012 ++ }
8013 ++ offset = 3 * max_frag_len;
8014 ++ while (offset < 6 * max_frag_len) {
8015 ++ send_fragment(fd_raw, addr, alen, offset, ipv6);
8016 ++ offset += max_frag_len;
8017 ++ }
8018 ++ offset = 0;
8019 ++ while (offset < 3 * max_frag_len) {
8020 ++ send_fragment(fd_raw, addr, alen, offset, ipv6);
8021 ++ offset += max_frag_len;
8022 ++ }
8023 ++ return;
8024 ++ }
8025 ++
8026 + /* Odd fragments. */
8027 + offset = max_frag_len;
8028 + while (offset < (UDP_HLEN + payload_len)) {
8029 + send_fragment(fd_raw, addr, alen, offset, ipv6);
8030 ++ /* IPv4 ignores duplicates, so randomly send a duplicate. */
8031 ++ if (ipv4 && (1 == rand() % 100))
8032 ++ send_fragment(fd_raw, addr, alen, offset, ipv6);
8033 + offset += 2 * max_frag_len;
8034 + }
8035 +
8036 + if (cfg_overlap) {
8037 + /* Send an extra random fragment. */
8038 +- offset = rand() % (UDP_HLEN + payload_len - 1);
8039 +- /* sendto() returns EINVAL if offset + frag_len is too small. */
8040 + if (ipv6) {
8041 + struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN);
8042 ++ /* sendto() returns EINVAL if offset + frag_len is too small. */
8043 ++ offset = rand() % (UDP_HLEN + payload_len - 1);
8044 + frag_len = max_frag_len + rand() % 256;
8045 + /* In IPv6 if !!(frag_len % 8), the fragment is dropped. */
8046 + frag_len &= ~0x7;
8047 +@@ -259,13 +294,29 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
8048 + ip6hdr->ip6_plen = htons(frag_len);
8049 + frag_len += IP6_HLEN;
8050 + } else {
8051 +- frag_len = IP4_HLEN + UDP_HLEN + rand() % 256;
8052 ++ /* In IPv4, duplicates and some fragments completely inside
8053 ++	 * previously sent fragments are dropped/ignored, so a
8054 ++	 * random offset and frag_len can result in a dropped
8055 ++	 * fragment instead of a dropped queue/packet. We therefore
8056 ++	 * hard-code offset and frag_len in that case.
8057 ++ *
8058 ++ * See ade446403bfb ("net: ipv4: do not handle duplicate
8059 ++ * fragments as overlapping").
8060 ++ */
8061 ++ if (max_frag_len * 4 < payload_len || max_frag_len < 16) {
8062 ++ /* not enough payload to play with random offset and frag_len. */
8063 ++ offset = 8;
8064 ++ frag_len = IP4_HLEN + UDP_HLEN + max_frag_len;
8065 ++ } else {
8066 ++ offset = rand() % (payload_len / 2);
8067 ++ frag_len = 2 * max_frag_len + 1 + rand() % 256;
8068 ++ }
8069 + iphdr->ip_off = htons(offset / 8 | IP4_MF);
8070 + iphdr->ip_len = htons(frag_len);
8071 + }
8072 + res = sendto(fd_raw, ip_frame, frag_len, 0, addr, alen);
8073 + if (res < 0)
8074 +- error(1, errno, "sendto overlap");
8075 ++ error(1, errno, "sendto overlap: %d", frag_len);
8076 + if (res != frag_len)
8077 + error(1, 0, "sendto overlap: %d vs %d", (int)res, frag_len);
8078 + frag_counter++;
8079 +@@ -275,6 +326,9 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
8080 + offset = 0;
8081 + while (offset < (UDP_HLEN + payload_len)) {
8082 + send_fragment(fd_raw, addr, alen, offset, ipv6);
8083 ++ /* IPv4 ignores duplicates, so randomly send a duplicate. */
8084 ++ if (ipv4 && (1 == rand() % 100))
8085 ++ send_fragment(fd_raw, addr, alen, offset, ipv6);
8086 + offset += 2 * max_frag_len;
8087 + }
8088 + }
8089 +@@ -282,7 +336,11 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
8090 + static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6)
8091 + {
8092 + int fd_tx_raw, fd_rx_udp;
8093 +- struct timeval tv = { .tv_sec = 0, .tv_usec = 10 * 1000 };
8094 ++ /* Frag queue timeout is set to one second in the calling script;
8095 ++ * socket timeout should be just a bit longer to avoid tests interfering
8096 ++ * with each other.
8097 ++ */
8098 ++ struct timeval tv = { .tv_sec = 1, .tv_usec = 10 };
8099 + int idx;
8100 + int min_frag_len = ipv6 ? 1280 : 8;
8101 +
8102 +@@ -308,12 +366,32 @@ static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6)
8103 + payload_len += (rand() % 4096)) {
8104 + if (cfg_verbose)
8105 + printf("payload_len: %d\n", payload_len);
8106 +- max_frag_len = min_frag_len;
8107 +- do {
8108 ++
8109 ++ if (cfg_overlap) {
8110 ++ /* With overlaps, one send/receive pair below takes
8111 ++ * at least one second (== timeout) to run, so there
8112 ++ * is not enough test time to run a nested loop:
8113 ++ * the full overlap test takes 20-30 seconds.
8114 ++ */
8115 ++ max_frag_len = min_frag_len +
8116 ++ rand() % (1500 - FRAG_HLEN - min_frag_len);
8117 + send_udp_frags(fd_tx_raw, addr, alen, ipv6);
8118 + recv_validate_udp(fd_rx_udp);
8119 +- max_frag_len += 8 * (rand() % 8);
8120 +- } while (max_frag_len < (1500 - FRAG_HLEN) && max_frag_len <= payload_len);
8121 ++ } else {
8122 ++ /* Without overlaps, each packet reassembly (== one
8123 ++ * send/receive pair below) takes very little time to
8124 ++			 * run, so we can easily afford more thorough testing
8125 ++			 * with a nested loop: the full non-overlap test takes
8126 ++			 * less than one second.
8127 ++ */
8128 ++ max_frag_len = min_frag_len;
8129 ++ do {
8130 ++ send_udp_frags(fd_tx_raw, addr, alen, ipv6);
8131 ++ recv_validate_udp(fd_rx_udp);
8132 ++ max_frag_len += 8 * (rand() % 8);
8133 ++ } while (max_frag_len < (1500 - FRAG_HLEN) &&
8134 ++ max_frag_len <= payload_len);
8135 ++ }
8136 + }
8137 +
8138 + /* Cleanup. */
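
Both the new in-order/"runs" logic and the hard-coded overlap case above pivot on how the IPv4 fragment offset is encoded: ip_off carries the offset in 8-byte units and shares its 16 bits with the flags, which is why offsets are divided by 8 and OR'd with IP4_MF. A stand-alone sketch of that encoding (IP4_MF assumed to be the standard 0x2000 more-fragments flag, matching the test's definition):

    #include <arpa/inet.h>
    #include <stdint.h>

    #define IP4_MF 0x2000  /* assumed: standard "more fragments" flag */

    /* Encode a byte offset into the on-wire ip_off field. */
    static uint16_t encode_ip_off(unsigned int byte_offset, int more_frags)
    {
        uint16_t off = byte_offset / 8;  /* offset counts 8-byte units */

        if (more_frags)
            off |= IP4_MF;
        return htons(off);
    }

The 8-byte granularity is also why the test masks IPv6 fragment lengths with ~0x7: a fragment whose length is not a multiple of 8 (other than the last) is dropped.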
8139 +diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh
8140 +index f346727960449..7dd79a9efb177 100755
8141 +--- a/tools/testing/selftests/net/ip_defrag.sh
8142 ++++ b/tools/testing/selftests/net/ip_defrag.sh
8143 +@@ -11,10 +11,17 @@ readonly NETNS="ns-$(mktemp -u XXXXXX)"
8144 + setup() {
8145 + ip netns add "${NETNS}"
8146 + ip -netns "${NETNS}" link set lo up
8147 ++
8148 + ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_high_thresh=9000000 >/dev/null 2>&1
8149 + ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_low_thresh=7000000 >/dev/null 2>&1
8150 ++ ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_time=1 >/dev/null 2>&1
8151 ++
8152 + ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_high_thresh=9000000 >/dev/null 2>&1
8153 + ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_low_thresh=7000000 >/dev/null 2>&1
8154 ++ ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_time=1 >/dev/null 2>&1
8155 ++
8156 ++ # DST cache can get full with a lot of frags, with GC not keeping up with the test.
8157 ++ ip netns exec "${NETNS}" sysctl -w net.ipv6.route.max_size=65536 >/dev/null 2>&1
8158 + }
8159 +
8160 + cleanup() {
8161 +@@ -27,7 +34,6 @@ setup
8162 + echo "ipv4 defrag"
8163 + ip netns exec "${NETNS}" ./ip_defrag -4
8164 +
8165 +-
8166 + echo "ipv4 defrag with overlaps"
8167 + ip netns exec "${NETNS}" ./ip_defrag -4o
8168 +
8169 +@@ -37,3 +43,4 @@ ip netns exec "${NETNS}" ./ip_defrag -6
8170 + echo "ipv6 defrag with overlaps"
8171 + ip netns exec "${NETNS}" ./ip_defrag -6o
8172 +
8173 ++echo "all tests done"
8174 +diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
8175 +index 637ea0219617f..0da3545cabdb6 100644
8176 +--- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
8177 ++++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
8178 +@@ -17,7 +17,7 @@
8179 + "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 2",
8180 + "expExitCode": "0",
8181 + "verifyCmd": "$TC actions get action ife index 2",
8182 +- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 2",
8183 ++ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 2",
8184 + "matchCount": "1",
8185 + "teardown": [
8186 + "$TC actions flush action ife"
8187 +@@ -41,7 +41,7 @@
8188 + "cmdUnderTest": "$TC actions add action ife encode use mark 10 pipe index 2",
8189 + "expExitCode": "0",
8190 + "verifyCmd": "$TC actions get action ife index 2",
8191 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark.*index 2",
8192 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark.*index 2",
8193 + "matchCount": "1",
8194 + "teardown": [
8195 + "$TC actions flush action ife"
8196 +@@ -65,7 +65,7 @@
8197 + "cmdUnderTest": "$TC actions add action ife encode allow mark continue index 2",
8198 + "expExitCode": "0",
8199 + "verifyCmd": "$TC actions get action ife index 2",
8200 +- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*allow mark.*index 2",
8201 ++ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*allow mark.*index 2",
8202 + "matchCount": "1",
8203 + "teardown": [
8204 + "$TC actions flush action ife"
8205 +@@ -89,7 +89,7 @@
8206 + "cmdUnderTest": "$TC actions add action ife encode use mark 789 drop index 2",
8207 + "expExitCode": "0",
8208 + "verifyCmd": "$TC actions get action ife index 2",
8209 +- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*use mark 789.*index 2",
8210 ++ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*use mark 789.*index 2",
8211 + "matchCount": "1",
8212 + "teardown": [
8213 + "$TC actions flush action ife"
8214 +@@ -113,7 +113,7 @@
8215 + "cmdUnderTest": "$TC actions add action ife encode use mark 656768 reclassify index 2",
8216 + "expExitCode": "0",
8217 + "verifyCmd": "$TC actions get action ife index 2",
8218 +- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 656768.*index 2",
8219 ++ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 656768.*index 2",
8220 + "matchCount": "1",
8221 + "teardown": [
8222 + "$TC actions flush action ife"
8223 +@@ -137,7 +137,7 @@
8224 + "cmdUnderTest": "$TC actions add action ife encode use mark 65 jump 1 index 2",
8225 + "expExitCode": "0",
8226 + "verifyCmd": "$TC actions get action ife index 2",
8227 +- "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0xED3E.*use mark 65.*index 2",
8228 ++ "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0[xX]ED3E.*use mark 65.*index 2",
8229 + "matchCount": "1",
8230 + "teardown": [
8231 + "$TC actions flush action ife"
8232 +@@ -161,7 +161,7 @@
8233 + "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295 reclassify index 90",
8234 + "expExitCode": "0",
8235 + "verifyCmd": "$TC actions get action ife index 90",
8236 +- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 4294967295.*index 90",
8237 ++ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 4294967295.*index 90",
8238 + "matchCount": "1",
8239 + "teardown": [
8240 + "$TC actions flush action ife"
8241 +@@ -185,7 +185,7 @@
8242 + "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295999 pipe index 90",
8243 + "expExitCode": "255",
8244 + "verifyCmd": "$TC actions get action ife index 90",
8245 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark 4294967295999.*index 90",
8246 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark 4294967295999.*index 90",
8247 + "matchCount": "0",
8248 + "teardown": []
8249 + },
8250 +@@ -207,7 +207,7 @@
8251 + "cmdUnderTest": "$TC actions add action ife encode allow prio pass index 9",
8252 + "expExitCode": "0",
8253 + "verifyCmd": "$TC actions get action ife index 9",
8254 +- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow prio.*index 9",
8255 ++ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow prio.*index 9",
8256 + "matchCount": "1",
8257 + "teardown": [
8258 + "$TC actions flush action ife"
8259 +@@ -231,7 +231,7 @@
8260 + "cmdUnderTest": "$TC actions add action ife encode use prio 7 pipe index 9",
8261 + "expExitCode": "0",
8262 + "verifyCmd": "$TC actions get action ife index 9",
8263 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 7.*index 9",
8264 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 7.*index 9",
8265 + "matchCount": "1",
8266 + "teardown": [
8267 + "$TC actions flush action ife"
8268 +@@ -255,7 +255,7 @@
8269 + "cmdUnderTest": "$TC actions add action ife encode use prio 3 continue index 9",
8270 + "expExitCode": "0",
8271 + "verifyCmd": "$TC actions get action ife index 9",
8272 +- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use prio 3.*index 9",
8273 ++ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use prio 3.*index 9",
8274 + "matchCount": "1",
8275 + "teardown": [
8276 + "$TC actions flush action ife"
8277 +@@ -279,7 +279,7 @@
8278 + "cmdUnderTest": "$TC actions add action ife encode allow prio drop index 9",
8279 + "expExitCode": "0",
8280 + "verifyCmd": "$TC actions get action ife index 9",
8281 +- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow prio.*index 9",
8282 ++ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow prio.*index 9",
8283 + "matchCount": "1",
8284 + "teardown": [
8285 + "$TC actions flush action ife"
8286 +@@ -303,7 +303,7 @@
8287 + "cmdUnderTest": "$TC actions add action ife encode use prio 998877 reclassify index 9",
8288 + "expExitCode": "0",
8289 + "verifyCmd": "$TC actions get action ife index 9",
8290 +- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 998877.*index 9",
8291 ++ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 998877.*index 9",
8292 + "matchCount": "1",
8293 + "teardown": [
8294 + "$TC actions flush action ife"
8295 +@@ -327,7 +327,7 @@
8296 + "cmdUnderTest": "$TC actions add action ife encode use prio 998877 jump 10 index 9",
8297 + "expExitCode": "0",
8298 + "verifyCmd": "$TC actions get action ife index 9",
8299 +- "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0xED3E.*use prio 998877.*index 9",
8300 ++ "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0[xX]ED3E.*use prio 998877.*index 9",
8301 + "matchCount": "1",
8302 + "teardown": [
8303 + "$TC actions flush action ife"
8304 +@@ -351,7 +351,7 @@
8305 + "cmdUnderTest": "$TC actions add action ife encode use prio 4294967295 reclassify index 99",
8306 + "expExitCode": "0",
8307 + "verifyCmd": "$TC actions get action ife index 99",
8308 +- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 4294967295.*index 99",
8309 ++ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 4294967295.*index 99",
8310 + "matchCount": "1",
8311 + "teardown": [
8312 + "$TC actions flush action ife"
8313 +@@ -375,7 +375,7 @@
8314 + "cmdUnderTest": "$TC actions add action ife encode use prio 4294967298 pipe index 99",
8315 + "expExitCode": "255",
8316 + "verifyCmd": "$TC actions get action ife index 99",
8317 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 4294967298.*index 99",
8318 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 4294967298.*index 99",
8319 + "matchCount": "0",
8320 + "teardown": []
8321 + },
8322 +@@ -397,7 +397,7 @@
8323 + "cmdUnderTest": "$TC actions add action ife encode allow tcindex pass index 1",
8324 + "expExitCode": "0",
8325 + "verifyCmd": "$TC actions get action ife index 1",
8326 +- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex.*index 1",
8327 ++ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex.*index 1",
8328 + "matchCount": "1",
8329 + "teardown": [
8330 + "$TC actions flush action ife"
8331 +@@ -421,7 +421,7 @@
8332 + "cmdUnderTest": "$TC actions add action ife encode use tcindex 111 pipe index 1",
8333 + "expExitCode": "0",
8334 + "verifyCmd": "$TC actions get action ife index 1",
8335 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 111.*index 1",
8336 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 111.*index 1",
8337 + "matchCount": "1",
8338 + "teardown": [
8339 + "$TC actions flush action ife"
8340 +@@ -445,7 +445,7 @@
8341 + "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
8342 + "expExitCode": "0",
8343 + "verifyCmd": "$TC actions get action ife index 1",
8344 +- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
8345 ++ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
8346 + "matchCount": "1",
8347 + "teardown": [
8348 + "$TC actions flush action ife"
8349 +@@ -469,7 +469,7 @@
8350 + "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
8351 + "expExitCode": "0",
8352 + "verifyCmd": "$TC actions get action ife index 1",
8353 +- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
8354 ++ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
8355 + "matchCount": "1",
8356 + "teardown": [
8357 + "$TC actions flush action ife"
8358 +@@ -493,7 +493,7 @@
8359 + "cmdUnderTest": "$TC actions add action ife encode allow tcindex drop index 77",
8360 + "expExitCode": "0",
8361 + "verifyCmd": "$TC actions get action ife index 77",
8362 +- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow tcindex.*index 77",
8363 ++ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow tcindex.*index 77",
8364 + "matchCount": "1",
8365 + "teardown": [
8366 + "$TC actions flush action ife"
8367 +@@ -517,7 +517,7 @@
8368 + "cmdUnderTest": "$TC actions add action ife encode allow tcindex reclassify index 77",
8369 + "expExitCode": "0",
8370 + "verifyCmd": "$TC actions get action ife index 77",
8371 +- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*allow tcindex.*index 77",
8372 ++ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*allow tcindex.*index 77",
8373 + "matchCount": "1",
8374 + "teardown": [
8375 + "$TC actions flush action ife"
8376 +@@ -541,7 +541,7 @@
8377 + "cmdUnderTest": "$TC actions add action ife encode allow tcindex jump 999 index 77",
8378 + "expExitCode": "0",
8379 + "verifyCmd": "$TC actions get action ife index 77",
8380 +- "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0xED3E.*allow tcindex.*index 77",
8381 ++ "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0[xX]ED3E.*allow tcindex.*index 77",
8382 + "matchCount": "1",
8383 + "teardown": [
8384 + "$TC actions flush action ife"
8385 +@@ -565,7 +565,7 @@
8386 + "cmdUnderTest": "$TC actions add action ife encode use tcindex 65535 pass index 1",
8387 + "expExitCode": "0",
8388 + "verifyCmd": "$TC actions get action ife index 1",
8389 +- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1",
8390 ++ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*use tcindex 65535.*index 1",
8391 + "matchCount": "1",
8392 + "teardown": [
8393 + "$TC actions flush action ife"
8394 +@@ -589,7 +589,7 @@
8395 + "cmdUnderTest": "$TC actions add action ife encode use tcindex 65539 pipe index 1",
8396 + "expExitCode": "255",
8397 + "verifyCmd": "$TC actions get action ife index 1",
8398 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 65539.*index 1",
8399 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 65539.*index 1",
8400 + "matchCount": "0",
8401 + "teardown": []
8402 + },
8403 +@@ -611,7 +611,7 @@
8404 + "cmdUnderTest": "$TC actions add action ife encode allow mark src 00:11:22:33:44:55 pipe index 1",
8405 + "expExitCode": "0",
8406 + "verifyCmd": "$TC actions get action ife index 1",
8407 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow mark src 00:11:22:33:44:55.*index 1",
8408 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow mark src 00:11:22:33:44:55.*index 1",
8409 + "matchCount": "1",
8410 + "teardown": [
8411 + "$TC actions flush action ife"
8412 +@@ -635,7 +635,7 @@
8413 + "cmdUnderTest": "$TC actions add action ife encode use prio 9876 dst 00:11:22:33:44:55 reclassify index 1",
8414 + "expExitCode": "0",
8415 + "verifyCmd": "$TC actions get action ife index 1",
8416 +- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
8417 ++ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
8418 + "matchCount": "1",
8419 + "teardown": [
8420 + "$TC actions flush action ife"
8421 +@@ -659,7 +659,7 @@
8422 + "cmdUnderTest": "$TC actions add action ife encode allow tcindex src 00:aa:bb:cc:dd:ee dst 00:11:22:33:44:55 pass index 11",
8423 + "expExitCode": "0",
8424 + "verifyCmd": "$TC actions get action ife index 11",
8425 +- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
8426 ++ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
8427 + "matchCount": "1",
8428 + "teardown": [
8429 + "$TC actions flush action ife"
8430 +@@ -683,7 +683,7 @@
8431 + "cmdUnderTest": "$TC actions add action ife encode use mark 7 type 0xfefe pass index 1",
8432 + "expExitCode": "0",
8433 + "verifyCmd": "$TC actions get action ife index 1",
8434 +- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xFEFE.*use mark 7.*index 1",
8435 ++ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]FEFE.*use mark 7.*index 1",
8436 + "matchCount": "1",
8437 + "teardown": [
8438 + "$TC actions flush action ife"
8439 +@@ -707,7 +707,7 @@
8440 + "cmdUnderTest": "$TC actions add action ife encode use prio 444 type 0xabba pipe index 21",
8441 + "expExitCode": "0",
8442 + "verifyCmd": "$TC actions get action ife index 21",
8443 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xABBA.*use prio 444.*index 21",
8444 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ABBA.*use prio 444.*index 21",
8445 + "matchCount": "1",
8446 + "teardown": [
8447 + "$TC actions flush action ife"
8448 +@@ -731,7 +731,7 @@
8449 + "cmdUnderTest": "$TC actions add action ife encode use tcindex 5000 type 0xabcd reclassify index 21",
8450 + "expExitCode": "0",
8451 + "verifyCmd": "$TC actions get action ife index 21",
8452 +- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xABCD.*use tcindex 5000.*index 21",
8453 ++ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ABCD.*use tcindex 5000.*index 21",
8454 + "matchCount": "1",
8455 + "teardown": [
8456 + "$TC actions flush action ife"
8457 +@@ -739,7 +739,7 @@
8458 + },
8459 + {
8460 + "id": "fac3",
8461 +- "name": "Create valid ife encode action with index at 32-bit maximnum",
8462 ++        "name": "Create valid ife encode action with index at 32-bit maximum",
8463 + "category": [
8464 + "actions",
8465 + "ife"
8466 +@@ -755,7 +755,7 @@
8467 + "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295",
8468 + "expExitCode": "0",
8469 + "verifyCmd": "$TC actions get action ife index 4294967295",
8470 +- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295",
8471 ++ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295",
8472 + "matchCount": "1",
8473 + "teardown": [
8474 + "$TC actions flush action ife"
8475 +@@ -779,7 +779,7 @@
8476 + "cmdUnderTest": "$TC actions add action ife decode pass index 1",
8477 + "expExitCode": "0",
8478 + "verifyCmd": "$TC actions get action ife index 1",
8479 +- "matchPattern": "action order [0-9]*: ife decode action pass.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
8480 ++ "matchPattern": "action order [0-9]*: ife decode action pass.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
8481 + "matchCount": "1",
8482 + "teardown": [
8483 + "$TC actions flush action ife"
8484 +@@ -803,7 +803,7 @@
8485 + "cmdUnderTest": "$TC actions add action ife decode pipe index 1",
8486 + "expExitCode": "0",
8487 + "verifyCmd": "$TC actions get action ife index 1",
8488 +- "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
8489 ++ "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
8490 + "matchCount": "1",
8491 + "teardown": [
8492 + "$TC actions flush action ife"
8493 +@@ -827,7 +827,7 @@
8494 + "cmdUnderTest": "$TC actions add action ife decode continue index 1",
8495 + "expExitCode": "0",
8496 + "verifyCmd": "$TC actions get action ife index 1",
8497 +- "matchPattern": "action order [0-9]*: ife decode action continue.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
8498 ++ "matchPattern": "action order [0-9]*: ife decode action continue.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
8499 + "matchCount": "1",
8500 + "teardown": [
8501 + "$TC actions flush action ife"
8502 +@@ -851,7 +851,7 @@
8503 + "cmdUnderTest": "$TC actions add action ife decode drop index 1",
8504 + "expExitCode": "0",
8505 + "verifyCmd": "$TC actions get action ife index 1",
8506 +- "matchPattern": "action order [0-9]*: ife decode action drop.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
8507 ++ "matchPattern": "action order [0-9]*: ife decode action drop.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
8508 + "matchCount": "1",
8509 + "teardown": [
8510 + "$TC actions flush action ife"
8511 +@@ -875,7 +875,7 @@
8512 + "cmdUnderTest": "$TC actions add action ife decode reclassify index 1",
8513 + "expExitCode": "0",
8514 + "verifyCmd": "$TC actions get action ife index 1",
8515 +- "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
8516 ++ "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
8517 + "matchCount": "1",
8518 + "teardown": [
8519 + "$TC actions flush action ife"
8520 +@@ -899,7 +899,7 @@
8521 + "cmdUnderTest": "$TC actions add action ife decode jump 10 index 1",
8522 + "expExitCode": "0",
8523 + "verifyCmd": "$TC actions get action ife index 1",
8524 +- "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
8525 ++ "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
8526 + "matchCount": "1",
8527 + "teardown": [
8528 + "$TC actions flush action ife"
8529 +@@ -923,7 +923,7 @@
8530 + "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295999",
8531 + "expExitCode": "255",
8532 + "verifyCmd": "$TC actions get action ife index 4294967295999",
8533 +- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295999",
8534 ++ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295999",
8535 + "matchCount": "0",
8536 + "teardown": []
8537 + },
8538 +@@ -945,7 +945,7 @@
8539 + "cmdUnderTest": "$TC actions add action ife encode allow mark kuka index 4",
8540 + "expExitCode": "255",
8541 + "verifyCmd": "$TC actions get action ife index 4",
8542 +- "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0xED3E.*allow mark.*index 4",
8543 ++ "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0[xX]ED3E.*allow mark.*index 4",
8544 + "matchCount": "0",
8545 + "teardown": []
8546 + },
8547 +@@ -967,7 +967,7 @@
8548 + "cmdUnderTest": "$TC actions add action ife encode allow prio pipe index 4 cookie aabbccddeeff112233445566778800a1",
8549 + "expExitCode": "0",
8550 + "verifyCmd": "$TC actions get action ife index 4",
8551 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
8552 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
8553 + "matchCount": "1",
8554 + "teardown": [
8555 + "$TC actions flush action ife"
8556 +@@ -991,7 +991,7 @@
8557 + "cmdUnderTest": "$TC actions add action ife encode allow foo pipe index 4",
8558 + "expExitCode": "255",
8559 + "verifyCmd": "$TC actions get action ife index 4",
8560 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow foo.*index 4",
8561 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow foo.*index 4",
8562 + "matchCount": "0",
8563 + "teardown": []
8564 + },
8565 +@@ -1013,7 +1013,7 @@
8566 + "cmdUnderTest": "$TC actions add action ife encode allow prio type 70000 pipe index 4",
8567 + "expExitCode": "255",
8568 + "verifyCmd": "$TC actions get action ife index 4",
8569 +- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0x11170.*allow prio.*index 4",
8570 ++ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]11170.*allow prio.*index 4",
8571 + "matchCount": "0",
8572 + "teardown": []
8573 + },
8574 +diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
8575 +index 10b2d894e4362..e7e15a7336b6d 100644
8576 +--- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
8577 ++++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
8578 +@@ -81,35 +81,6 @@
8579 + ]
8580 + ]
8581 + },
8582 +- {
8583 +- "id": "ba4e",
8584 +- "name": "Add tunnel_key set action with missing mandatory id parameter",
8585 +- "category": [
8586 +- "actions",
8587 +- "tunnel_key"
8588 +- ],
8589 +- "setup": [
8590 +- [
8591 +- "$TC actions flush action tunnel_key",
8592 +- 0,
8593 +- 1,
8594 +- 255
8595 +- ]
8596 +- ],
8597 +- "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2",
8598 +- "expExitCode": "255",
8599 +- "verifyCmd": "$TC actions list action tunnel_key",
8600 +- "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2",
8601 +- "matchCount": "0",
8602 +- "teardown": [
8603 +- [
8604 +- "$TC actions flush action tunnel_key",
8605 +- 0,
8606 +- 1,
8607 +- 255
8608 +- ]
8609 +- ]
8610 +- },
8611 + {
8612 + "id": "a5e0",
8613 + "name": "Add tunnel_key set action with invalid src_ip parameter",
8614 +@@ -634,7 +605,7 @@
8615 + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
8616 + "expExitCode": "0",
8617 + "verifyCmd": "$TC actions get action tunnel_key index 4",
8618 +- "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
8619 ++ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
8620 + "matchCount": "1",
8621 + "teardown": [
8622 + "$TC actions flush action tunnel_key"