From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.2 commit in: /
Date: Sun, 25 Aug 2019 17:38:37
Message-Id: 1566754695.4a453651128d111aace968223e7ed8dc5a8132b4.mpagano@gentoo
commit: 4a453651128d111aace968223e7ed8dc5a8132b4
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Aug 25 17:38:15 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Aug 25 17:38:15 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4a453651

Linux patch 5.2.10

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1009_linux-5.2.10.patch | 5449 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 5453 insertions(+)

diff --git a/0000_README b/0000_README
index 04259bc..2056b84 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch: 1008_linux-5.2.9.patch
From: https://www.kernel.org
Desc: Linux 5.2.9

+Patch: 1009_linux-5.2.10.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.10
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1009_linux-5.2.10.patch b/1009_linux-5.2.10.patch
new file mode 100644
index 0000000..883c14a
--- /dev/null
+++ b/1009_linux-5.2.10.patch
@@ -0,0 +1,5449 @@
+diff --git a/Documentation/networking/tls-offload.rst b/Documentation/networking/tls-offload.rst
+index cb85af559dff..178f4104f5cf 100644
+--- a/Documentation/networking/tls-offload.rst
++++ b/Documentation/networking/tls-offload.rst
+@@ -445,24 +445,6 @@ These flags will be acted upon accordingly by the core ``ktls`` code.
+ TLS device feature flags only control adding of new TLS connection
+ offloads, old connections will remain active after flags are cleared.
+
+-Known bugs
+-==========
+-
+-skb_orphan() leaks clear text
+------------------------------
+-
+-Currently drivers depend on the :c:member:`sk` member of
+-:c:type:`struct sk_buff <sk_buff>` to identify segments requiring
+-encryption. Any operation which removes or does not preserve the socket
+-association such as :c:func:`skb_orphan` or :c:func:`skb_clone`
+-will cause the driver to miss the packets and lead to clear text leaks.
+-
+-Redirects leak clear text
+--------------------------
+-
+-In the RX direction, if segment has already been decrypted by the device
+-and it gets redirected or mirrored - clear text will be transmitted out.
+-
+ .. _pre_tls_data:
+
+ Transmission of pre-TLS data
+diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
+index 7cdf7282e022..65b6c1109cc8 100644
+--- a/Documentation/vm/hmm.rst
++++ b/Documentation/vm/hmm.rst
+@@ -231,7 +231,7 @@ respect in order to keep things properly synchronized. The usage pattern is::
+ ret = hmm_range_snapshot(&range);
+ if (ret) {
+ up_read(&mm->mmap_sem);
+- if (ret == -EAGAIN) {
++ if (ret == -EBUSY) {
+ /*
+ * No need to check hmm_range_wait_until_valid() return value
+ * on retry we will get proper error with hmm_range_snapshot()
+diff --git a/Makefile b/Makefile
+index cfc667fe9959..35fee16d5006 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
+index 79155a8cfe7c..89e4c8b79349 100644
+--- a/arch/arm64/include/asm/arch_gicv3.h
++++ b/arch/arm64/include/asm/arch_gicv3.h
+@@ -155,6 +155,12 @@ static inline void gic_pmr_mask_irqs(void)
+ BUILD_BUG_ON(GICD_INT_DEF_PRI < (GIC_PRIO_IRQOFF |
+ GIC_PRIO_PSR_I_SET));
+ BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON);
++ /*
++ * Need to make sure IRQON allows IRQs when SCR_EL3.FIQ is cleared
++ * and non-secure PMR accesses are not subject to the shifts that
++ * are applied to IRQ priorities
++ */
++ BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) >= GIC_PRIO_IRQON);
+ gic_write_pmr(GIC_PRIO_IRQOFF);
+ }
+
+diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
+index ae7e605085d7..9c0e0178ea29 100644
+--- a/arch/arm64/include/asm/daifflags.h
++++ b/arch/arm64/include/asm/daifflags.h
+@@ -13,6 +13,8 @@
+ #define DAIF_PROCCTX 0
+ #define DAIF_PROCCTX_NOIRQ PSR_I_BIT
+ #define DAIF_ERRCTX (PSR_I_BIT | PSR_A_BIT)
++#define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
++
+
+ /* mask/save/unmask/restore all exceptions, including interrupts. */
+ static inline void local_daif_mask(void)
+diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
+index c9e9a6978e73..d3cb42fd51ec 100644
+--- a/arch/arm64/include/asm/efi.h
++++ b/arch/arm64/include/asm/efi.h
+@@ -105,7 +105,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
+ ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
+
+ #define alloc_screen_info(x...) &screen_info
+-#define free_screen_info(x...)
++
++static inline void free_screen_info(efi_system_table_t *sys_table_arg,
++ struct screen_info *si)
++{
++}
+
+ /* redeclare as 'hidden' so the compiler will generate relative references */
+ extern struct screen_info screen_info __attribute__((__visibility__("hidden")));
+diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
+index b7ba75809751..fb04f10a78ab 100644
+--- a/arch/arm64/include/asm/memory.h
++++ b/arch/arm64/include/asm/memory.h
+@@ -210,7 +210,11 @@ extern u64 vabits_user;
+ #define __tag_reset(addr) untagged_addr(addr)
+ #define __tag_get(addr) (__u8)((u64)(addr) >> 56)
+ #else
+-#define __tag_set(addr, tag) (addr)
++static inline const void *__tag_set(const void *addr, u8 tag)
++{
++ return addr;
++}
++
+ #define __tag_reset(addr) (addr)
+ #define __tag_get(addr) 0
+ #endif
+@@ -301,8 +305,8 @@ static inline void *phys_to_virt(phys_addr_t x)
+ #define page_to_virt(page) ({ \
+ unsigned long __addr = \
+ ((__page_to_voff(page)) | PAGE_OFFSET); \
+- unsigned long __addr_tag = \
+- __tag_set(__addr, page_kasan_tag(page)); \
++ const void *__addr_tag = \
++ __tag_set((void *)__addr, page_kasan_tag(page)); \
+ ((void *)__addr_tag); \
+ })
+
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index fca26759081a..b9574d850f14 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -419,8 +419,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ PMD_TYPE_SECT)
+
+ #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
+-#define pud_sect(pud) (0)
+-#define pud_table(pud) (1)
++static inline bool pud_sect(pud_t pud) { return false; }
++static inline bool pud_table(pud_t pud) { return true; }
+ #else
+ #define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
+ PUD_TYPE_SECT)
+diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
+index 81693244f58d..701eaa738187 100644
+--- a/arch/arm64/include/asm/ptrace.h
++++ b/arch/arm64/include/asm/ptrace.h
+@@ -30,7 +30,7 @@
+ * in the the priority mask, it indicates that PSR.I should be set and
+ * interrupt disabling temporarily does not rely on IRQ priorities.
+ */
+-#define GIC_PRIO_IRQON 0xc0
++#define GIC_PRIO_IRQON 0xe0
+ #define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80)
+ #define GIC_PRIO_PSR_I_SET (1 << 4)
+
+diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
+index 1285c7b2947f..171773257974 100644
+--- a/arch/arm64/kernel/ftrace.c
++++ b/arch/arm64/kernel/ftrace.c
+@@ -73,7 +73,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+
+ if (offset < -SZ_128M || offset >= SZ_128M) {
+ #ifdef CONFIG_ARM64_MODULE_PLTS
+- struct plt_entry trampoline;
++ struct plt_entry trampoline, *dst;
+ struct module *mod;
+
+ /*
+@@ -106,23 +106,27 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+ * to check if the actual opcodes are in fact identical,
+ * regardless of the offset in memory so use memcmp() instead.
+ */
+- trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
+- if (memcmp(mod->arch.ftrace_trampoline, &trampoline,
+- sizeof(trampoline))) {
+- if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
++ dst = mod->arch.ftrace_trampoline;
++ trampoline = get_plt_entry(addr, dst);
++ if (memcmp(dst, &trampoline, sizeof(trampoline))) {
++ if (plt_entry_is_initialized(dst)) {
+ pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
+ return -EINVAL;
+ }
+
+ /* point the trampoline to our ftrace entry point */
+ module_disable_ro(mod);
+- *mod->arch.ftrace_trampoline = trampoline;
++ *dst = trampoline;
+ module_enable_ro(mod, true);
+
+- /* update trampoline before patching in the branch */
+- smp_wmb();
++ /*
++ * Ensure updated trampoline is visible to instruction
++ * fetch before we patch in the branch.
++ */
++ __flush_icache_range((unsigned long)&dst[0],
++ (unsigned long)&dst[1]);
+ }
+- addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
++ addr = (unsigned long)dst;
+ #else /* CONFIG_ARM64_MODULE_PLTS */
+ return -EINVAL;
+ #endif /* CONFIG_ARM64_MODULE_PLTS */
+diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
+index 88ce502c8e6f..624f2501f3f8 100644
+--- a/arch/arm64/kernel/probes/kprobes.c
++++ b/arch/arm64/kernel/probes/kprobes.c
+@@ -21,6 +21,7 @@
+ #include <asm/ptrace.h>
+ #include <asm/cacheflush.h>
+ #include <asm/debug-monitors.h>
++#include <asm/daifflags.h>
+ #include <asm/system_misc.h>
+ #include <asm/insn.h>
+ #include <linux/uaccess.h>
+@@ -165,33 +166,6 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
+ __this_cpu_write(current_kprobe, p);
+ }
+
+-/*
+- * When PSTATE.D is set (masked), then software step exceptions can not be
+- * generated.
+- * SPSR's D bit shows the value of PSTATE.D immediately before the
+- * exception was taken. PSTATE.D is set while entering into any exception
+- * mode, however software clears it for any normal (none-debug-exception)
+- * mode in the exception entry. Therefore, when we are entering into kprobe
+- * breakpoint handler from any normal mode then SPSR.D bit is already
+- * cleared, however it is set when we are entering from any debug exception
+- * mode.
+- * Since we always need to generate single step exception after a kprobe
+- * breakpoint exception therefore we need to clear it unconditionally, when
+- * we become sure that the current breakpoint exception is for kprobe.
+- */
+-static void __kprobes
+-spsr_set_debug_flag(struct pt_regs *regs, int mask)
+-{
+- unsigned long spsr = regs->pstate;
+-
+- if (mask)
+- spsr |= PSR_D_BIT;
+- else
+- spsr &= ~PSR_D_BIT;
+-
+- regs->pstate = spsr;
+-}
+-
+ /*
+ * Interrupts need to be disabled before single-step mode is set, and not
+ * reenabled until after single-step mode ends.
+@@ -203,17 +177,17 @@ spsr_set_debug_flag(struct pt_regs *regs, int mask)
+ static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+ {
+- kcb->saved_irqflag = regs->pstate;
++ kcb->saved_irqflag = regs->pstate & DAIF_MASK;
+ regs->pstate |= PSR_I_BIT;
++ /* Unmask PSTATE.D for enabling software step exceptions. */
++ regs->pstate &= ~PSR_D_BIT;
+ }
+
+ static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+ {
+- if (kcb->saved_irqflag & PSR_I_BIT)
+- regs->pstate |= PSR_I_BIT;
+- else
+- regs->pstate &= ~PSR_I_BIT;
++ regs->pstate &= ~DAIF_MASK;
++ regs->pstate |= kcb->saved_irqflag;
+ }
+
+ static void __kprobes
+@@ -250,8 +224,6 @@ static void __kprobes setup_singlestep(struct kprobe *p,
+
+ set_ss_context(kcb, slot); /* mark pending ss */
+
+- spsr_set_debug_flag(regs, 0);
+-
+ /* IRQs and single stepping do not mix well. */
+ kprobes_save_local_irqflag(kcb, regs);
+ kernel_enable_single_step(regs);
+diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
+index b21cba90f82d..491184a9f081 100644
+--- a/arch/arm64/kernel/return_address.c
++++ b/arch/arm64/kernel/return_address.c
+@@ -8,6 +8,7 @@
+
+ #include <linux/export.h>
+ #include <linux/ftrace.h>
++#include <linux/kprobes.h>
+
+ #include <asm/stack_pointer.h>
+ #include <asm/stacktrace.h>
+@@ -29,6 +30,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
+ return 0;
+ }
+ }
++NOKPROBE_SYMBOL(save_return_addr);
+
+ void *return_address(unsigned int level)
+ {
+@@ -52,3 +54,4 @@ void *return_address(unsigned int level)
+ return NULL;
+ }
+ EXPORT_SYMBOL_GPL(return_address);
++NOKPROBE_SYMBOL(return_address);
+diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
+index 62d395151abe..cd7dab54d17b 100644
+--- a/arch/arm64/kernel/stacktrace.c
++++ b/arch/arm64/kernel/stacktrace.c
+@@ -7,6 +7,7 @@
+ #include <linux/kernel.h>
+ #include <linux/export.h>
+ #include <linux/ftrace.h>
++#include <linux/kprobes.h>
+ #include <linux/sched.h>
+ #include <linux/sched/debug.h>
+ #include <linux/sched/task_stack.h>
+@@ -73,6 +74,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
+
+ return 0;
+ }
++NOKPROBE_SYMBOL(unwind_frame);
+
+ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
+ int (*fn)(struct stackframe *, void *), void *data)
+@@ -87,6 +89,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
+ break;
+ }
+ }
++NOKPROBE_SYMBOL(walk_stackframe);
+
+ #ifdef CONFIG_STACKTRACE
+ struct stack_trace_data {
+diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c
+index d66613e6ad08..8a38ccf8dc02 100644
+--- a/arch/arm64/kvm/regmap.c
++++ b/arch/arm64/kvm/regmap.c
+@@ -178,13 +178,18 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
+ switch (spsr_idx) {
+ case KVM_SPSR_SVC:
+ write_sysreg_el1(v, spsr);
++ break;
+ case KVM_SPSR_ABT:
+ write_sysreg(v, spsr_abt);
++ break;
+ case KVM_SPSR_UND:
+ write_sysreg(v, spsr_und);
++ break;
+ case KVM_SPSR_IRQ:
+ write_sysreg(v, spsr_irq);
++ break;
+ case KVM_SPSR_FIQ:
+ write_sysreg(v, spsr_fiq);
++ break;
+ }
+ }
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 2d115016feb4..414b8e0f19e0 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -800,6 +800,53 @@ void __init hook_debug_fault_code(int nr,
+ debug_fault_info[nr].name = name;
+ }
+
++/*
++ * In debug exception context, we explicitly disable preemption despite
++ * having interrupts disabled.
++ * This serves two purposes: it makes it much less likely that we would
++ * accidentally schedule in exception context and it will force a warning
++ * if we somehow manage to schedule by accident.
++ */
++static void debug_exception_enter(struct pt_regs *regs)
++{
++ /*
++ * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
++ * already disabled to preserve the last enabled/disabled addresses.
++ */
++ if (interrupts_enabled(regs))
++ trace_hardirqs_off();
++
++ if (user_mode(regs)) {
++ RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
++ } else {
++ /*
++ * We might have interrupted pretty much anything. In
++ * fact, if we're a debug exception, we can even interrupt
++ * NMI processing. We don't want this code makes in_nmi()
++ * to return true, but we need to notify RCU.
++ */
++ rcu_nmi_enter();
++ }
++
++ preempt_disable();
++
++ /* This code is a bit fragile. Test it. */
++ RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
++}
++NOKPROBE_SYMBOL(debug_exception_enter);
++
++static void debug_exception_exit(struct pt_regs *regs)
++{
++ preempt_enable_no_resched();
++
++ if (!user_mode(regs))
++ rcu_nmi_exit();
++
++ if (interrupts_enabled(regs))
++ trace_hardirqs_on();
++}
++NOKPROBE_SYMBOL(debug_exception_exit);
++
+ #ifdef CONFIG_ARM64_ERRATUM_1463225
+ DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+@@ -840,12 +887,7 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
+ if (cortex_a76_erratum_1463225_debug_handler(regs))
+ return;
+
+- /*
+- * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
+- * already disabled to preserve the last enabled/disabled addresses.
+- */
+- if (interrupts_enabled(regs))
+- trace_hardirqs_off();
++ debug_exception_enter(regs);
+
+ if (user_mode(regs) && !is_ttbr0_addr(pc))
+ arm64_apply_bp_hardening();
+@@ -855,7 +897,6 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
+ inf->sig, inf->code, (void __user *)pc, esr);
+ }
+
+- if (interrupts_enabled(regs))
+- trace_hardirqs_on();
++ debug_exception_exit(regs);
+ }
+ NOKPROBE_SYMBOL(do_debug_exception);
+diff --git a/arch/mips/vdso/vdso.h b/arch/mips/vdso/vdso.h
+index 14b1931be69c..b65b169778e3 100644
+--- a/arch/mips/vdso/vdso.h
++++ b/arch/mips/vdso/vdso.h
+@@ -9,6 +9,7 @@
+ #if _MIPS_SIM != _MIPS_SIM_ABI64 && defined(CONFIG_64BIT)
+
+ /* Building 32-bit VDSO for the 64-bit kernel. Fake a 32-bit Kconfig. */
++#define BUILD_VDSO32_64
+ #undef CONFIG_64BIT
+ #define CONFIG_32BIT 1
+ #ifndef __ASSEMBLY__
+diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
+index dad9825e4087..3c17fc7c2b93 100644
+--- a/arch/powerpc/platforms/pseries/papr_scm.c
++++ b/arch/powerpc/platforms/pseries/papr_scm.c
+@@ -199,12 +199,32 @@ static const struct attribute_group *papr_scm_dimm_groups[] = {
+ NULL,
+ };
+
++static inline int papr_scm_node(int node)
++{
++ int min_dist = INT_MAX, dist;
++ int nid, min_node;
++
++ if ((node == NUMA_NO_NODE) || node_online(node))
++ return node;
++
++ min_node = first_online_node;
++ for_each_online_node(nid) {
++ dist = node_distance(node, nid);
++ if (dist < min_dist) {
++ min_dist = dist;
++ min_node = nid;
++ }
++ }
++ return min_node;
++}
++
+ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
+ {
+ struct device *dev = &p->pdev->dev;
+ struct nd_mapping_desc mapping;
+ struct nd_region_desc ndr_desc;
+ unsigned long dimm_flags;
++ int target_nid, online_nid;
+
+ p->bus_desc.ndctl = papr_scm_ndctl;
+ p->bus_desc.module = THIS_MODULE;
+@@ -243,8 +263,10 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
+
+ memset(&ndr_desc, 0, sizeof(ndr_desc));
+ ndr_desc.attr_groups = region_attr_groups;
+- ndr_desc.numa_node = dev_to_node(&p->pdev->dev);
+- ndr_desc.target_node = ndr_desc.numa_node;
++ target_nid = dev_to_node(&p->pdev->dev);
++ online_nid = papr_scm_node(target_nid);
++ ndr_desc.numa_node = online_nid;
++ ndr_desc.target_node = target_nid;
+ ndr_desc.res = &p->res;
+ ndr_desc.of_node = p->dn;
+ ndr_desc.provider_data = p;
+@@ -259,6 +281,9 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
+ ndr_desc.res, p->dn);
+ goto err;
+ }
++ if (target_nid != online_nid)
++ dev_info(dev, "Region registered with target node %d and online node %d",
++ target_nid, online_nid);
+
+ return 0;
+
+diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
+index 853b65ef656d..f0227bdce0f0 100644
+--- a/arch/riscv/include/asm/switch_to.h
++++ b/arch/riscv/include/asm/switch_to.h
+@@ -16,7 +16,13 @@ extern void __fstate_restore(struct task_struct *restore_from);
+
+ static inline void __fstate_clean(struct pt_regs *regs)
+ {
+- regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN;
++ regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
++}
++
++static inline void fstate_off(struct task_struct *task,
++ struct pt_regs *regs)
++{
++ regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_OFF;
+ }
+
+ static inline void fstate_save(struct task_struct *task,
+diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
+index f23794bd1e90..fb3a082362eb 100644
+--- a/arch/riscv/kernel/process.c
++++ b/arch/riscv/kernel/process.c
+@@ -64,8 +64,14 @@ void start_thread(struct pt_regs *regs, unsigned long pc,
+ unsigned long sp)
+ {
+ regs->sstatus = SR_SPIE;
+- if (has_fpu)
++ if (has_fpu) {
+ regs->sstatus |= SR_FS_INITIAL;
++ /*
++ * Restore the initial value to the FP register
++ * before starting the user program.
++ */
++ fstate_restore(current, regs);
++ }
+ regs->sepc = pc;
+ regs->sp = sp;
+ set_fs(USER_DS);
+@@ -75,10 +81,11 @@ void flush_thread(void)
+ {
+ #ifdef CONFIG_FPU
+ /*
+- * Reset FPU context
++ * Reset FPU state and context
+ * frm: round to nearest, ties to even (IEEE default)
+ * fflags: accrued exceptions cleared
+ */
++ fstate_off(current, task_pt_regs(current));
+ memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
+ #endif
+ }
+diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
+index f1d6ffe43e42..49a5852fd07d 100644
+--- a/arch/riscv/kernel/vdso/Makefile
++++ b/arch/riscv/kernel/vdso/Makefile
+@@ -37,7 +37,7 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
+ # these symbols in the kernel code rather than hand-coded addresses.
+
+ SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
+- -Wl,--hash-style=both
++ -Wl,--build-id -Wl,--hash-style=both
+ $(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
+ $(call if_changed,vdsold)
+
+diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
+index bc96b16288c1..af6a65ac04cf 100644
+--- a/arch/sh/kernel/hw_breakpoint.c
++++ b/arch/sh/kernel/hw_breakpoint.c
+@@ -157,6 +157,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
+ switch (sh_type) {
+ case SH_BREAKPOINT_READ:
+ *gen_type = HW_BREAKPOINT_R;
++ break;
+ case SH_BREAKPOINT_WRITE:
+ *gen_type = HW_BREAKPOINT_W;
+ break;
+diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
+index 176cb46bcf12..0634bfb82a0b 100644
+--- a/arch/xtensa/kernel/setup.c
++++ b/arch/xtensa/kernel/setup.c
+@@ -515,6 +515,7 @@ void cpu_reset(void)
+ "add %2, %2, %7\n\t"
+ "addi %0, %0, -1\n\t"
+ "bnez %0, 1b\n\t"
++ "isync\n\t"
+ /* Jump to identity mapping */
+ "jx %3\n"
+ "2:\n\t"
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index ce0f5f4ede70..68106a41f90d 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2674,8 +2674,6 @@ void blk_mq_release(struct request_queue *q)
+ struct blk_mq_hw_ctx *hctx, *next;
+ int i;
+
+- cancel_delayed_work_sync(&q->requeue_work);
+-
+ queue_for_each_hw_ctx(q, hctx, i)
+ WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
+
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index 977c659dcd18..9bfa3ea4ed63 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -892,6 +892,9 @@ static void __blk_release_queue(struct work_struct *work)
+
+ blk_free_queue_stats(q->stats);
+
++ if (queue_is_mq(q))
++ cancel_delayed_work_sync(&q->requeue_work);
++
+ blk_exit_queue(q);
+
+ blk_queue_free_zone_bitmaps(q);
+diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
+index 72312ad2e142..c25cdbf817f1 100644
+--- a/drivers/ata/libahci_platform.c
++++ b/drivers/ata/libahci_platform.c
+@@ -338,6 +338,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
+ hpriv->phys[port] = NULL;
+ rc = 0;
+ break;
++ case -EPROBE_DEFER:
++ /* Do not complain yet */
++ break;
+
+ default:
+ dev_err(dev,
+diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
+index 173e6f2dd9af..eefda51f97d3 100644
+--- a/drivers/ata/libata-zpodd.c
++++ b/drivers/ata/libata-zpodd.c
+@@ -56,7 +56,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
+ unsigned int ret;
+ struct rm_feature_desc *desc;
+ struct ata_taskfile tf;
+- static const char cdb[] = { GPCMD_GET_CONFIGURATION,
++ static const char cdb[ATAPI_CDB_LEN] = { GPCMD_GET_CONFIGURATION,
+ 2, /* only 1 feature descriptor requested */
+ 0, 3, /* 3, removable medium feature */
+ 0, 0, 0,/* reserved */
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index bf868260f435..4838c6a9f0f2 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -554,6 +554,20 @@ static int tpm_add_hwrng(struct tpm_chip *chip)
+ return hwrng_register(&chip->hwrng);
+ }
+
++static int tpm_get_pcr_allocation(struct tpm_chip *chip)
++{
++ int rc;
++
++ rc = (chip->flags & TPM_CHIP_FLAG_TPM2) ?
++ tpm2_get_pcr_allocation(chip) :
++ tpm1_get_pcr_allocation(chip);
++
++ if (rc > 0)
++ return -ENODEV;
++
++ return rc;
++}
++
+ /*
+ * tpm_chip_register() - create a character device for the TPM chip
+ * @chip: TPM chip to use.
+@@ -573,6 +587,12 @@ int tpm_chip_register(struct tpm_chip *chip)
+ if (rc)
+ return rc;
+ rc = tpm_auto_startup(chip);
++ if (rc) {
++ tpm_chip_stop(chip);
++ return rc;
++ }
++
++ rc = tpm_get_pcr_allocation(chip);
+ tpm_chip_stop(chip);
+ if (rc)
+ return rc;
+diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
+index e503ffc3aa39..a7fea3e0ca86 100644
+--- a/drivers/char/tpm/tpm.h
++++ b/drivers/char/tpm/tpm.h
+@@ -394,6 +394,7 @@ int tpm1_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf);
+ ssize_t tpm1_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
+ const char *desc, size_t min_cap_length);
+ int tpm1_get_random(struct tpm_chip *chip, u8 *out, size_t max);
++int tpm1_get_pcr_allocation(struct tpm_chip *chip);
+ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
+ int tpm_pm_suspend(struct device *dev);
+ int tpm_pm_resume(struct device *dev);
+@@ -449,6 +450,7 @@ int tpm2_unseal_trusted(struct tpm_chip *chip,
+ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
+ u32 *value, const char *desc);
+
++ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip);
+ int tpm2_auto_startup(struct tpm_chip *chip);
+ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
+ unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
+diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
+index faacbe1ffa1a..149e953ca369 100644
+--- a/drivers/char/tpm/tpm1-cmd.c
++++ b/drivers/char/tpm/tpm1-cmd.c
+@@ -699,18 +699,6 @@ int tpm1_auto_startup(struct tpm_chip *chip)
+ goto out;
+ }
+
+- chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
+- GFP_KERNEL);
+- if (!chip->allocated_banks) {
+- rc = -ENOMEM;
+- goto out;
+- }
+-
+- chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
+- chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
+- chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;
+- chip->nr_allocated_banks = 1;
+-
+ return rc;
+ out:
+ if (rc > 0)
+@@ -779,3 +767,27 @@ int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr)
+ return rc;
+ }
+
++/**
++ * tpm1_get_pcr_allocation() - initialize the allocated bank
++ * @chip: TPM chip to use.
++ *
++ * The function initializes the SHA1 allocated bank to extend PCR
++ *
++ * Return:
++ * * 0 on success,
++ * * < 0 on error.
++ */
++int tpm1_get_pcr_allocation(struct tpm_chip *chip)
++{
++ chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
++ GFP_KERNEL);
++ if (!chip->allocated_banks)
++ return -ENOMEM;
++
++ chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
++ chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
++ chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;
++ chip->nr_allocated_banks = 1;
++
++ return 0;
++}
+diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
+index d103545e4055..ba9acae83bff 100644
+--- a/drivers/char/tpm/tpm2-cmd.c
++++ b/drivers/char/tpm/tpm2-cmd.c
+@@ -840,7 +840,7 @@ struct tpm2_pcr_selection {
+ u8 pcr_select[3];
+ } __packed;
+
+-static ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
++ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
+ {
+ struct tpm2_pcr_selection pcr_selection;
+ struct tpm_buf buf;
+@@ -1040,10 +1040,6 @@ int tpm2_auto_startup(struct tpm_chip *chip)
+ goto out;
+ }
+
+- rc = tpm2_get_pcr_allocation(chip);
+- if (rc)
+- goto out;
+-
+ rc = tpm2_get_cc_attrs_tbl(chip);
+
+ out:
+diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
+index 44db83a6d01c..44a46dcc0518 100644
+--- a/drivers/clk/at91/clk-generated.c
++++ b/drivers/clk/at91/clk-generated.c
+@@ -141,6 +141,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
+ continue;
+
+ div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
++ if (div > GENERATED_MAX_DIV + 1)
++ div = GENERATED_MAX_DIV + 1;
+
+ clk_generated_best_diff(req, parent, parent_rate, div,
+ &best_diff, &best_rate);
+diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
+index 0201809bbd37..9dfa28d6fd9f 100644
+--- a/drivers/clk/renesas/renesas-cpg-mssr.c
++++ b/drivers/clk/renesas/renesas-cpg-mssr.c
+@@ -576,17 +576,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
+ unsigned int reg = id / 32;
+ unsigned int bit = id % 32;
+ u32 bitmask = BIT(bit);
+- unsigned long flags;
+- u32 value;
+
+ dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
+
+ /* Reset module */
+- spin_lock_irqsave(&priv->rmw_lock, flags);
+- value = readl(priv->base + SRCR(reg));
+- value |= bitmask;
+- writel(value, priv->base + SRCR(reg));
+- spin_unlock_irqrestore(&priv->rmw_lock, flags);
++ writel(bitmask, priv->base + SRCR(reg));
+
+ /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
+ udelay(35);
+@@ -603,16 +597,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
+ unsigned int reg = id / 32;
+ unsigned int bit = id % 32;
+ u32 bitmask = BIT(bit);
+- unsigned long flags;
+- u32 value;
+
+ dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
+
+- spin_lock_irqsave(&priv->rmw_lock, flags);
+- value = readl(priv->base + SRCR(reg));
+- value |= bitmask;
+- writel(value, priv->base + SRCR(reg));
+- spin_unlock_irqrestore(&priv->rmw_lock, flags);
++ writel(bitmask, priv->base + SRCR(reg));
+ return 0;
+ }
+
+diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig
+index 91d3d721c801..3c219af25100 100644
+--- a/drivers/clk/sprd/Kconfig
++++ b/drivers/clk/sprd/Kconfig
+@@ -3,6 +3,7 @@ config SPRD_COMMON_CLK
+ tristate "Clock support for Spreadtrum SoCs"
+ depends on ARCH_SPRD || COMPILE_TEST
+ default ARCH_SPRD
++ select REGMAP_MMIO
+
+ if SPRD_COMMON_CLK
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 4b192e0ce92f..ed7977d0dd01 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1148,7 +1148,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ adev->asic_type != CHIP_FIJI &&
+ adev->asic_type != CHIP_POLARIS10 &&
+ adev->asic_type != CHIP_POLARIS11 &&
+- adev->asic_type != CHIP_POLARIS12) ?
++ adev->asic_type != CHIP_POLARIS12 &&
++ adev->asic_type != CHIP_VEGAM) ?
+ VI_BO_SIZE_ALIGN : 1;
+
+ mapping_flags = AMDGPU_VM_PAGE_READABLE;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 2f6239b6be6f..fe028561dc0e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1093,29 +1093,27 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
+ return r;
+ }
+
+- fence = amdgpu_ctx_get_fence(ctx, entity,
+- deps[i].handle);
++ fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
++ amdgpu_ctx_put(ctx);
++
++ if (IS_ERR(fence))
++ return PTR_ERR(fence);
++ else if (!fence)
++ continue;
+
+ if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
+- struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
++ struct drm_sched_fence *s_fence;
+ struct dma_fence *old = fence;
+
++ s_fence = to_drm_sched_fence(fence);
+ fence = dma_fence_get(&s_fence->scheduled);
+ dma_fence_put(old);
+ }
+
+- if (IS_ERR(fence)) {
+- r = PTR_ERR(fence);
+- amdgpu_ctx_put(ctx);
++ r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
++ dma_fence_put(fence);
++ if (r)
+ return r;
+- } else if (fence) {
+- r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
+- true);
+- dma_fence_put(fence);
+- amdgpu_ctx_put(ctx);
+- if (r)
+- return r;
+- }
+ }
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index 8930d66f2204..91bfb24f963e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -703,7 +703,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
+ thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
+ bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
+
+- data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
++ data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index abeaab4bf1bc..d55519bc34e5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -144,12 +144,16 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
+ struct amdgpu_device *adev = ddev->dev_private;
+ enum amd_pm_state_type pm;
+
+- if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state)
+- pm = amdgpu_smu_get_current_power_state(adev);
+- else if (adev->powerplay.pp_funcs->get_current_power_state)
++ if (is_support_sw_smu(adev)) {
++ if (adev->smu.ppt_funcs->get_current_power_state)
++ pm = amdgpu_smu_get_current_power_state(adev);
++ else
++ pm = adev->pm.dpm.user_state;
++ } else if (adev->powerplay.pp_funcs->get_current_power_state) {
+ pm = amdgpu_dpm_get_current_power_state(adev);
+- else
++ } else {
+ pm = adev->pm.dpm.user_state;
++ }
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
+@@ -176,7 +180,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
+ goto fail;
+ }
+
+- if (adev->powerplay.pp_funcs->dispatch_tasks) {
++ if (is_support_sw_smu(adev)) {
++ mutex_lock(&adev->pm.mutex);
++ adev->pm.dpm.user_state = state;
++ mutex_unlock(&adev->pm.mutex);
++ } else if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
+ } else {
+ mutex_lock(&adev->pm.mutex);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 2f18c64d531f..2f7f0a2e4a6c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -4553,7 +4553,7 @@ static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
+ value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
+ value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
+ value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+- WREG32(mmSQ_CMD, value);
++ WREG32_SOC15(GC, 0, mmSQ_CMD, value);
+ }
+
+ static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+index eec329ab6037..61a6d183c153 100644
+--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+@@ -63,7 +63,8 @@ int smu_get_power_num_states(struct smu_context *smu,
+
+ /* not support power state */
+ memset(state_info, 0, sizeof(struct pp_states_info));
+- state_info->nums = 0;
++ state_info->nums = 1;
++ state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
+index ee777469293a..e4e22bbae2a7 100644
+--- a/drivers/gpu/drm/bridge/Kconfig
++++ b/drivers/gpu/drm/bridge/Kconfig
+@@ -48,6 +48,7 @@ config DRM_DUMB_VGA_DAC
+ config DRM_LVDS_ENCODER
+ tristate "Transparent parallel to LVDS encoder support"
+ depends on OF
++ select DRM_KMS_HELPER
+ select DRM_PANEL_BRIDGE
+ help
+ Support for transparent parallel to LVDS encoders that don't require
+@@ -116,9 +117,10 @@ config DRM_THINE_THC63LVD1024
+
+ config DRM_TOSHIBA_TC358764
+ tristate "TC358764 DSI/LVDS bridge"
+- depends on DRM && DRM_PANEL
+ depends on OF
+ select DRM_MIPI_DSI
++ select DRM_KMS_HELPER
++ select DRM_PANEL
+ help
+ Toshiba TC358764 DSI/LVDS bridge driver.
+
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+index ec9c1b7d3103..8989f8af716b 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+@@ -94,12 +94,12 @@ static inline int scaler_reset(struct scaler_context *scaler)
+ scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
+ do {
+ cpu_relax();
+- } while (retry > 1 &&
++ } while (--retry > 1 &&
+ scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
+ do {
+ cpu_relax();
+ scaler_write(1, SCALER_INT_EN);
+- } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1);
++ } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
+
+ return retry ? 0 : -EIO;
+ }
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 4a0fe8a25ad7..a56eef3cfee7 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -1267,7 +1267,8 @@ static int add_gpu_components(struct device *dev,
+ if (!np)
+ return 0;
+
+- drm_of_component_match_add(dev, matchptr, compare_of, np);
++ if (of_device_is_available(np))
++ drm_of_component_match_add(dev, matchptr, compare_of, np);
+
+ of_node_put(np);
+
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+index 847b7866137d..bdaf5ffd2504 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -766,16 +766,20 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
+ struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
+ int slots;
+
+- /* When restoring duplicated states, we need to make sure that the
+- * bw remains the same and avoid recalculating it, as the connector's
+- * bpc may have changed after the state was duplicated
+- */
+- if (!state->duplicated)
+- asyh->dp.pbn =
+- drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
+- connector->display_info.bpc * 3);
++ if (crtc_state->mode_changed || crtc_state->connectors_changed) {
++ /*
++ * When restoring duplicated states, we need to make sure that
++ * the bw remains the same and avoid recalculating it, as the
++ * connector's bpc may have changed after the state was
++ * duplicated
++ */
++ if (!state->duplicated) {
++ const int bpp = connector->display_info.bpc * 3;
++ const int clock = crtc_state->adjusted_mode.clock;
++
++ asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp);
++ }
+
+- if (crtc_state->mode_changed) {
+ slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
+ mstc->port,
+ asyh->dp.pbn);
+diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
+index b3d502421b79..0a38e8e9bc78 100644
+--- a/drivers/hid/hid-holtek-kbd.c
++++ b/drivers/hid/hid-holtek-kbd.c
+@@ -123,9 +123,14 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
+
+ /* Locate the boot interface, to receive the LED change events */
+ struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0);
++ struct hid_device *boot_hid;
++ struct hid_input *boot_hid_input;
+
+- struct hid_device *boot_hid = usb_get_intfdata(boot_interface);
+- struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs,
++ if (unlikely(boot_interface == NULL))
++ return -ENODEV;
++
++ boot_hid = usb_get_intfdata(boot_interface);
++ boot_hid_input = list_first_entry(&boot_hid->inputs,
+ struct hid_input, list);
+
+ return boot_hid_input->input->event(boot_hid_input->input, type, code,
+diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
+index 55b72573066b..4e11cc6fc34b 100644
+--- a/drivers/hid/usbhid/hiddev.c
++++ b/drivers/hid/usbhid/hiddev.c
+@@ -284,6 +284,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
+ spin_unlock_irq(&list->hiddev->list_lock);
+
+ mutex_lock(&hiddev->existancelock);
++ /*
++ * recheck exist with existance lock held to
++ * avoid opening a disconnected device
++ */
++ if (!list->hiddev->exist) {
++ res = -ENODEV;
++ goto bail_unlock;
++ }
+ if (!list->hiddev->open++)
+ if (list->hiddev->exist) {
+ struct hid_device *hid = hiddev->hid;
+@@ -300,6 +308,10 @@ bail_normal_power:
+ hid_hw_power(hid, PM_HINT_NORMAL);
+ bail_unlock:
+ mutex_unlock(&hiddev->existancelock);
++
++ spin_lock_irq(&list->hiddev->list_lock);
++ list_del(&list->node);
++ spin_unlock_irq(&list->hiddev->list_lock);
+ bail:
+ file->private_data = NULL;
+ vfree(list);
+diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
+index fd70b110e8f4..87564010ddbe 100644
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -273,8 +273,8 @@ static inline unsigned char imx_i2c_read_reg(struct imx_i2c_struct *i2c_imx,
+ }
+
+ /* Functions for DMA support */
+-static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
+- dma_addr_t phy_addr)
++static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
++ dma_addr_t phy_addr)
+ {
+ struct imx_i2c_dma *dma;
+ struct dma_slave_config dma_sconfig;
+@@ -283,7 +283,7 @@ static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
+
+ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+- return -ENOMEM;
++ return;
+
+ dma->chan_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(dma->chan_tx)) {
+@@ -328,7 +328,7 @@ static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
+ dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n",
+ dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
+
+- return 0;
++ return;
+
+ fail_rx:
+ dma_release_channel(dma->chan_rx);
+@@ -336,8 +336,6 @@ fail_tx:
+ dma_release_channel(dma->chan_tx);
+ fail_al:
+ devm_kfree(dev, dma);
+- /* return successfully if there is no dma support */
+- return ret == -ENODEV ? 0 : ret;
+ }
+
+ static void i2c_imx_dma_callback(void *arg)
+@@ -1165,17 +1163,13 @@ static int i2c_imx_probe(struct platform_device *pdev)
+ dev_dbg(&i2c_imx->adapter.dev, "device resources: %pR\n", res);
+ dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n",
+ i2c_imx->adapter.name);
++ dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
+
+ /* Init DMA config if supported */
+- ret = i2c_imx_dma_request(i2c_imx, phy_addr);
+- if (ret < 0)
+- goto del_adapter;
++ i2c_imx_dma_request(i2c_imx, phy_addr);
+
+- dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
+ return 0; /* Return OK */
+
+-del_adapter:
+- i2c_del_adapter(&i2c_imx->adapter);
+ clk_notifier_unregister:
+ clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
+ rpm_disable:
+diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
+index 0e3c6529fc4c..da073d72f649 100644
+--- a/drivers/iio/adc/max9611.c
++++ b/drivers/iio/adc/max9611.c
+@@ -480,7 +480,7 @@ static int max9611_init(struct max9611_dev *max9611)
+ if (ret)
+ return ret;
+
+- regval = ret & MAX9611_TEMP_MASK;
++ regval &= MAX9611_TEMP_MASK;
+
+ if ((regval > MAX9611_TEMP_MAX_POS &&
+ regval < MAX9611_TEMP_MIN_NEG) ||
+diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
+index ff40a450b5d2..ff9e0d7fb4f3 100644
+--- a/drivers/infiniband/core/core_priv.h
++++ b/drivers/infiniband/core/core_priv.h
+@@ -292,7 +292,9 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
+ struct ib_udata *udata,
+ struct ib_uobject *uobj)
+ {
++ enum ib_qp_type qp_type = attr->qp_type;
+ struct ib_qp *qp;
++ bool is_xrc;
+
+ if (!dev->ops.create_qp)
+ return ERR_PTR(-EOPNOTSUPP);
+@@ -310,7 +312,8 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
+ * and more importantly they are created internaly by driver,
+ * see mlx5 create_dev_resources() as an example.
+ */
+- if (attr->qp_type < IB_QPT_XRC_INI) {
++ is_xrc = qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT;
++ if ((qp_type < IB_QPT_MAX && !is_xrc) || qp_type == IB_QPT_DRIVER) {
+ qp->res.type = RDMA_RESTRACK_QP;
+ if (uobj)
+ rdma_restrack_uadd(&qp->res);
+diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
+index cc99479b2c09..9947d16edef2 100644
+--- a/drivers/infiniband/core/mad.c
++++ b/drivers/infiniband/core/mad.c
+@@ -3224,18 +3224,18 @@ static int ib_mad_port_open(struct ib_device *device,
+ if (has_smi)
+ cq_size *= 2;
+
++ port_priv->pd = ib_alloc_pd(device, 0);
++ if (IS_ERR(port_priv->pd)) {
++ dev_err(&device->dev, "Couldn't create ib_mad PD\n");
++ ret = PTR_ERR(port_priv->pd);
++ goto error3;
++ }
++
+ port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
+ IB_POLL_UNBOUND_WORKQUEUE);
+ if (IS_ERR(port_priv->cq)) {
+ dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
+ ret = PTR_ERR(port_priv->cq);
+- goto error3;
+- }
+-
+- port_priv->pd = ib_alloc_pd(device, 0);
+- if (IS_ERR(port_priv->pd)) {
+- dev_err(&device->dev, "Couldn't create ib_mad PD\n");
+- ret = PTR_ERR(port_priv->pd);
+ goto error4;
+ }
+
+@@ -3278,11 +3278,11 @@ error8:
+ error7:
+ destroy_mad_qp(&port_priv->qp_info[0]);
+ error6:
+- ib_dealloc_pd(port_priv->pd);
+-error4:
+ ib_free_cq(port_priv->cq);
+ cleanup_recv_queue(&port_priv->qp_info[1]);
+ cleanup_recv_queue(&port_priv->qp_info[0]);
++error4:
++ ib_dealloc_pd(port_priv->pd);
+ error3:
+ kfree(port_priv);
+
+@@ -3312,8 +3312,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
+ destroy_workqueue(port_priv->wq);
+ destroy_mad_qp(&port_priv->qp_info[1]);
+ destroy_mad_qp(&port_priv->qp_info[0]);
+- ib_dealloc_pd(port_priv->pd);
+ ib_free_cq(port_priv->cq);
++ ib_dealloc_pd(port_priv->pd);
+ cleanup_recv_queue(&port_priv->qp_info[1]);
+ cleanup_recv_queue(&port_priv->qp_info[0]);
+ /* XXX: Handle deallocation of MAD registration tables */
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index 671f07ba1fad..025b6d86a61f 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -49,6 +49,7 @@
+ #include <linux/sched.h>
+ #include <linux/semaphore.h>
+ #include <linux/slab.h>
++#include <linux/nospec.h>
+
+ #include <linux/uaccess.h>
+
+@@ -883,11 +884,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
+
+ if (get_user(id, arg))
+ return -EFAULT;
++ if (id >= IB_UMAD_MAX_AGENTS)
++ return -EINVAL;
+
+ mutex_lock(&file->port->file_mutex);
+ mutex_lock(&file->mutex);
+
+- if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
++ id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
++ if (!__get_agent(file, id)) {
+ ret = -EINVAL;
+ goto out;
+ }
+diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
+index 0c6c1fe87705..d60453e98db7 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_db.c
++++ b/drivers/infiniband/hw/hns/hns_roce_db.c
+@@ -12,13 +12,15 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
+ struct ib_udata *udata, unsigned long virt,
+ struct hns_roce_db *db)
+ {
++ unsigned long page_addr = virt & PAGE_MASK;
+ struct hns_roce_user_db_page *page;
++ unsigned int offset;
+ int ret = 0;
+
+ mutex_lock(&context->page_mutex);
+
+ list_for_each_entry(page, &context->page_list, list)
+- if (page->user_virt == (virt & PAGE_MASK))
++ if (page->user_virt == page_addr)
+ goto found;
+
+ page = kmalloc(sizeof(*page), GFP_KERNEL);
+@@ -28,8 +30,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
+ }
+
+ refcount_set(&page->refcount, 1);
+- page->user_virt = (virt & PAGE_MASK);
+- page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
++ page->user_virt = page_addr;
++ page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0);
+ if (IS_ERR(page->umem)) {
+ ret = PTR_ERR(page->umem);
+ kfree(page);
+@@ -39,10 +41,9 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
+ list_add(&page->list, &context->page_list);
+
+ found:
+- db->dma = sg_dma_address(page->umem->sg_head.sgl) +
+- (virt & ~PAGE_MASK);
+- page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK;
+- db->virt_addr = sg_virt(page->umem->sg_head.sgl);
++ offset = virt - page_addr;
++ db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
++ db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
+ db->u.user_page = page;
+ refcount_inc(&page->refcount);
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+index e068a02122f5..9496c69fff3a 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+@@ -745,8 +745,10 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
+
+ ibdev = &hr_dev->ib_dev;
+ pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
+- if (!pd)
++ if (!pd) {
++ ret = -ENOMEM;
+ goto alloc_mem_failed;
++ }
+
+ pd->device = ibdev;
+ ret = hns_roce_alloc_pd(pd, NULL);
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index a6713a3b6c80..9ab276a8bc81 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -5687,13 +5687,12 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
+ return;
+ }
+
+- if (mpi->mdev_events.notifier_call)
+- mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
+- mpi->mdev_events.notifier_call = NULL;
+-
+ mpi->ibdev = NULL;
+
+ spin_unlock(&port->mp.mpi_lock);
++ if (mpi->mdev_events.notifier_call)
++ mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
++ mpi->mdev_events.notifier_call = NULL;
+ mlx5_remove_netdev_notifier(ibdev, port_num);
+ spin_lock(&port->mp.mpi_lock);
+
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index e54bec2c2965..d239fc58c002 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+ static int mr_cache_max_order(struct mlx5_ib_dev *dev);
+ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+-static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
+-{
+- return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
+-}
+
+ static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
+ {
+ return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
+ }
+
+-static bool use_umr(struct mlx5_ib_dev *dev, int order)
+-{
+- return order <= mr_cache_max_order(dev) &&
+- umr_can_modify_entity_size(dev);
+-}
+-
+ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+ {
+ int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
+@@ -1271,7 +1261,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ {
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_mr *mr = NULL;
+- bool populate_mtts = false;
++ bool use_umr;
+ struct ib_umem *umem;
+ int page_shift;
+ int npages;
+@@ -1303,29 +1293,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ if (err < 0)
+ return ERR_PTR(err);
+
+- if (use_umr(dev, order)) {
++ use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
++ (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
++ !MLX5_CAP_GEN(dev->mdev, atomic));
++
++ if (order <= mr_cache_max_order(dev) && use_umr) {
+ mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
+ page_shift, order, access_flags);
+ if (PTR_ERR(mr) == -EAGAIN) {
+ mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
+ mr = NULL;
+ }
+- populate_mtts = false;
+ } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
+ if (access_flags & IB_ACCESS_ON_DEMAND) {
+ err = -EINVAL;
+ pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
+ goto error;
+ }
+- populate_mtts = true;
++ use_umr = false;
+ }
+
+ if (!mr) {
+- if (!umr_can_modify_entity_size(dev))
+- populate_mtts = true;
+ mutex_lock(&dev->slow_path_mutex);
+ mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
+- page_shift, access_flags, populate_mtts);
++ page_shift, access_flags, !use_umr);
+ mutex_unlock(&dev->slow_path_mutex);
+ }
+
+@@ -1341,7 +1332,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+
+ update_odp_mr(mr);
+
+- if (!populate_mtts) {
++ if (use_umr) {
+ int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
+
+ if (access_flags & IB_ACCESS_ON_DEMAND)
+diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
+index 91507a2e9290..f6e5351ba4d5 100644
+--- a/drivers/infiniband/hw/mlx5/odp.c
++++ b/drivers/infiniband/hw/mlx5/odp.c
+@@ -1765,7 +1765,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
+
+ num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list,
+ w->num_sge, 0);
+- kfree(w);
++ kvfree(w);
+ }
+
+ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
+@@ -1807,7 +1807,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
+ if (valid_req)
+ queue_work(system_unbound_wq, &work->work);
+ else
+- kfree(work);
++ kvfree(work);
+
+ srcu_read_unlock(&dev->mr_srcu, srcu_key);
+
+diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
+index 083c2c00a8e9..dfdd1e16de7f 100644
+--- a/drivers/infiniband/hw/qedr/main.c
++++ b/drivers/infiniband/hw/qedr/main.c
+@@ -125,14 +125,20 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
+ struct qedr_dev *dev =
+ rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
+
+- return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
++ return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->attr.hw_ver);
+ }
+ static DEVICE_ATTR_RO(hw_rev);
+
+ static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+ {
+- return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
++ struct qedr_dev *dev =
++ rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
++
++ return scnprintf(buf, PAGE_SIZE, "FastLinQ QL%x %s\n",
++ dev->pdev->device,
++ rdma_protocol_iwarp(&dev->ibdev, 1) ?
++ "iWARP" : "RoCE");
+ }
+ static DEVICE_ATTR_RO(hca_type);
+
1569 +diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
1570 +index f1569ae8381b..a0a686f56ac4 100644
1571 +--- a/drivers/input/joystick/iforce/iforce-usb.c
1572 ++++ b/drivers/input/joystick/iforce/iforce-usb.c
1573 +@@ -129,7 +129,12 @@ static int iforce_usb_probe(struct usb_interface *intf,
1574 + return -ENODEV;
1575 +
1576 + epirq = &interface->endpoint[0].desc;
1577 ++ if (!usb_endpoint_is_int_in(epirq))
1578 ++ return -ENODEV;
1579 ++
1580 + epout = &interface->endpoint[1].desc;
1581 ++ if (!usb_endpoint_is_int_out(epout))
1582 ++ return -ENODEV;
1583 +
1584 + if (!(iforce = kzalloc(sizeof(struct iforce) + 32, GFP_KERNEL)))
1585 + goto fail;
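
This hunk and the kbtab change below apply the same hardening: verify an endpoint's type and direction at probe time before wiring it into an interrupt URB, so devices whose descriptors do not match expectations are rejected early. A minimal probe-time sketch under those assumptions (hypothetical driver, error paths trimmed):

    #include <linux/usb.h>

    static int example_probe(struct usb_interface *intf,
                             const struct usb_device_id *id)
    {
            struct usb_host_interface *alt = intf->cur_altsetting;
            const struct usb_endpoint_descriptor *ep;

            if (alt->desc.bNumEndpoints < 1)
                    return -ENODEV;

            /* Never trust descriptor layout: confirm this really is an
             * interrupt-IN endpoint before building a URB on it. */
            ep = &alt->endpoint[0].desc;
            if (!usb_endpoint_is_int_in(ep))
                    return -ENODEV;

            /* ... safe to usb_fill_int_urb() against ep from here ... */
            return 0;
    }
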
1586 +diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
1587 +index 0afffe8d824f..77110f3ec21d 100644
1588 +--- a/drivers/input/mouse/trackpoint.h
1589 ++++ b/drivers/input/mouse/trackpoint.h
1590 +@@ -158,7 +158,8 @@ struct trackpoint_data {
1591 + #ifdef CONFIG_MOUSE_PS2_TRACKPOINT
1592 + int trackpoint_detect(struct psmouse *psmouse, bool set_properties);
1593 + #else
1594 +-inline int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
1595 ++static inline int trackpoint_detect(struct psmouse *psmouse,
1596 ++ bool set_properties)
1597 + {
1598 + return -ENOSYS;
1599 + }
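
The trackpoint.h change is the standard header-stub idiom: a compiled-out fallback defined in a header must be static inline, because a plain inline function pulled into several translation units either emits no out-of-line definition or conflicting ones, depending on which inline semantics apply. A generic sketch of the pattern, with hypothetical names:

    #include <linux/errno.h>

    struct foo;                             /* hypothetical subsystem handle */

    #ifdef CONFIG_FOO
    int foo_detect(struct foo *f);          /* real implementation elsewhere */
    #else
    static inline int foo_detect(struct foo *f)
    {
            /* static: each includer gets a private no-op copy, so the
             * linker never needs an external definition. */
            return -ENOSYS;
    }
    #endif
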
1600 +diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
1601 +index 04b85571f41e..aa577898e952 100644
1602 +--- a/drivers/input/tablet/kbtab.c
1603 ++++ b/drivers/input/tablet/kbtab.c
1604 +@@ -117,6 +117,10 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
1605 + if (intf->cur_altsetting->desc.bNumEndpoints < 1)
1606 + return -ENODEV;
1607 +
1608 ++ endpoint = &intf->cur_altsetting->endpoint[0].desc;
1609 ++ if (!usb_endpoint_is_int_in(endpoint))
1610 ++ return -ENODEV;
1611 ++
1612 + kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
1613 + input_dev = input_allocate_device();
1614 + if (!kbtab || !input_dev)
1615 +@@ -155,8 +159,6 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
1616 + input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0);
1617 + input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0);
1618 +
1619 +- endpoint = &intf->cur_altsetting->endpoint[0].desc;
1620 +-
1621 + usb_fill_int_urb(kbtab->irq, dev,
1622 + usb_rcvintpipe(dev, endpoint->bEndpointAddress),
1623 + kbtab->data, 8,
1624 +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
1625 +index 35500801dc2b..20e5482d91b9 100644
1626 +--- a/drivers/irqchip/irq-gic-v3-its.c
1627 ++++ b/drivers/irqchip/irq-gic-v3-its.c
1628 +@@ -3010,7 +3010,7 @@ static int its_vpe_init(struct its_vpe *vpe)
1629 +
1630 + if (!its_alloc_vpe_table(vpe_id)) {
1631 + its_vpe_id_free(vpe_id);
1632 +- its_free_pending_table(vpe->vpt_page);
1633 ++ its_free_pending_table(vpt_page);
1634 + return -ENOMEM;
1635 + }
1636 +
1637 +diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
1638 +index bf2237ac5d09..4f74c15c4755 100644
1639 +--- a/drivers/irqchip/irq-imx-gpcv2.c
1640 ++++ b/drivers/irqchip/irq-imx-gpcv2.c
1641 +@@ -131,6 +131,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = {
1642 + .irq_unmask = imx_gpcv2_irq_unmask,
1643 + .irq_set_wake = imx_gpcv2_irq_set_wake,
1644 + .irq_retrigger = irq_chip_retrigger_hierarchy,
1645 ++ .irq_set_type = irq_chip_set_type_parent,
1646 + #ifdef CONFIG_SMP
1647 + .irq_set_affinity = irq_chip_set_affinity_parent,
1648 + #endif
1649 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1650 +index b0aab3a0a1bf..f183cadd14e3 100644
1651 +--- a/drivers/net/bonding/bond_main.c
1652 ++++ b/drivers/net/bonding/bond_main.c
1653 +@@ -1113,6 +1113,8 @@ static void bond_compute_features(struct bonding *bond)
1654 + done:
1655 + bond_dev->vlan_features = vlan_features;
1656 + bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
1657 ++ NETIF_F_HW_VLAN_CTAG_TX |
1658 ++ NETIF_F_HW_VLAN_STAG_TX |
1659 + NETIF_F_GSO_UDP_L4;
1660 + bond_dev->gso_max_segs = gso_max_segs;
1661 + netif_set_gso_max_size(bond_dev, gso_max_size);
1662 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
1663 +index 4039a9599d79..9d582b3ebc88 100644
1664 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
1665 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
1666 +@@ -3057,12 +3057,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
1667 + /* if VF indicate to PF this function is going down (PF will delete sp
1668 + * elements and clear initializations
1669 + */
1670 +- if (IS_VF(bp))
1671 ++ if (IS_VF(bp)) {
1672 ++ bnx2x_clear_vlan_info(bp);
1673 + bnx2x_vfpf_close_vf(bp);
1674 +- else if (unload_mode != UNLOAD_RECOVERY)
1675 ++ } else if (unload_mode != UNLOAD_RECOVERY) {
1676 + /* if this is a normal/close unload need to clean up chip*/
1677 + bnx2x_chip_cleanup(bp, unload_mode, keep_link);
1678 +- else {
1679 ++ } else {
1680 + /* Send the UNLOAD_REQUEST to the MCP */
1681 + bnx2x_send_unload_req(bp, unload_mode);
1682 +
1683 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
1684 +index c2f6e44e9a3f..8b08cb18e363 100644
1685 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
1686 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
1687 +@@ -425,6 +425,8 @@ void bnx2x_set_reset_global(struct bnx2x *bp);
1688 + void bnx2x_disable_close_the_gate(struct bnx2x *bp);
1689 + int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
1690 +
1691 ++void bnx2x_clear_vlan_info(struct bnx2x *bp);
1692 ++
1693 + /**
1694 + * bnx2x_sp_event - handle ramrods completion.
1695 + *
1696 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1697 +index 2cc14db8f0ec..192ff8d5da32 100644
1698 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1699 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1700 +@@ -8482,11 +8482,21 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
1701 + return rc;
1702 + }
1703 +
1704 ++void bnx2x_clear_vlan_info(struct bnx2x *bp)
1705 ++{
1706 ++ struct bnx2x_vlan_entry *vlan;
1707 ++
1708 ++ /* Mark that hw forgot all entries */
1709 ++ list_for_each_entry(vlan, &bp->vlan_reg, link)
1710 ++ vlan->hw = false;
1711 ++
1712 ++ bp->vlan_cnt = 0;
1713 ++}
1714 ++
1715 + static int bnx2x_del_all_vlans(struct bnx2x *bp)
1716 + {
1717 + struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
1718 + unsigned long ramrod_flags = 0, vlan_flags = 0;
1719 +- struct bnx2x_vlan_entry *vlan;
1720 + int rc;
1721 +
1722 + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1723 +@@ -8495,10 +8505,7 @@ static int bnx2x_del_all_vlans(struct bnx2x *bp)
1724 + if (rc)
1725 + return rc;
1726 +
1727 +- /* Mark that hw forgot all entries */
1728 +- list_for_each_entry(vlan, &bp->vlan_reg, link)
1729 +- vlan->hw = false;
1730 +- bp->vlan_cnt = 0;
1731 ++ bnx2x_clear_vlan_info(bp);
1732 +
1733 + return 0;
1734 + }
1735 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1736 +index 7afae9d80e75..36fe4f161cf1 100644
1737 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1738 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1739 +@@ -2015,9 +2015,9 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
1740 + if (bnapi->events & BNXT_RX_EVENT) {
1741 + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1742 +
1743 +- bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
1744 + if (bnapi->events & BNXT_AGG_EVENT)
1745 + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
1746 ++ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
1747 + }
1748 + bnapi->events = 0;
1749 + }
1750 +@@ -5011,6 +5011,7 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
1751 +
1752 + static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
1753 + {
1754 ++ bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
1755 + int i, rc = 0;
1756 + u32 type;
1757 +
1758 +@@ -5086,7 +5087,9 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
1759 + if (rc)
1760 + goto err_out;
1761 + bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
1762 +- bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
1763 ++ /* If we have agg rings, post agg buffers first. */
1764 ++ if (!agg_rings)
1765 ++ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
1766 + bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
1767 + if (bp->flags & BNXT_FLAG_CHIP_P5) {
1768 + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1769 +@@ -5105,7 +5108,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
1770 + }
1771 + }
1772 +
1773 +- if (bp->flags & BNXT_FLAG_AGG_RINGS) {
1774 ++ if (agg_rings) {
1775 + type = HWRM_RING_ALLOC_AGG;
1776 + for (i = 0; i < bp->rx_nr_rings; i++) {
1777 + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
1778 +@@ -5121,6 +5124,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
1779 + bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
1780 + ring->fw_ring_id);
1781 + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
1782 ++ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
1783 + bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
1784 + }
1785 + }
1786 +@@ -6963,19 +6967,29 @@ static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
1787 + bnxt_hwrm_vnic_set_rss(bp, i, false);
1788 + }
1789 +
1790 +-static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
1791 +- bool irq_re_init)
1792 ++static void bnxt_clear_vnic(struct bnxt *bp)
1793 + {
1794 +- if (bp->vnic_info) {
1795 +- bnxt_hwrm_clear_vnic_filter(bp);
1796 ++ if (!bp->vnic_info)
1797 ++ return;
1798 ++
1799 ++ bnxt_hwrm_clear_vnic_filter(bp);
1800 ++ if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
1801 + /* clear all RSS setting before free vnic ctx */
1802 + bnxt_hwrm_clear_vnic_rss(bp);
1803 + bnxt_hwrm_vnic_ctx_free(bp);
1804 +- /* before free the vnic, undo the vnic tpa settings */
1805 +- if (bp->flags & BNXT_FLAG_TPA)
1806 +- bnxt_set_tpa(bp, false);
1807 +- bnxt_hwrm_vnic_free(bp);
1808 + }
1809 ++ /* before free the vnic, undo the vnic tpa settings */
1810 ++ if (bp->flags & BNXT_FLAG_TPA)
1811 ++ bnxt_set_tpa(bp, false);
1812 ++ bnxt_hwrm_vnic_free(bp);
1813 ++ if (bp->flags & BNXT_FLAG_CHIP_P5)
1814 ++ bnxt_hwrm_vnic_ctx_free(bp);
1815 ++}
1816 ++
1817 ++static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
1818 ++ bool irq_re_init)
1819 ++{
1820 ++ bnxt_clear_vnic(bp);
1821 + bnxt_hwrm_ring_free(bp, close_path);
1822 + bnxt_hwrm_ring_grp_free(bp);
1823 + if (irq_re_init) {
1824 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
1825 +index 549c90d3e465..c05d663212b2 100644
1826 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
1827 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
1828 +@@ -98,10 +98,13 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
1829 + if (idx)
1830 + req->dimensions = cpu_to_le16(1);
1831 +
1832 +- if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE))
1833 ++ if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
1834 + memcpy(data_addr, buf, bytesize);
1835 +-
1836 +- rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
1837 ++ rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
1838 ++ } else {
1839 ++ rc = hwrm_send_message_silent(bp, msg, msg_len,
1840 ++ HWRM_CMD_TIMEOUT);
1841 ++ }
1842 + if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
1843 + memcpy(buf, data_addr, bytesize);
1844 +
1845 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1846 +index a6c7baf38036..b761a2e28a10 100644
1847 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1848 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1849 +@@ -2016,21 +2016,19 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
1850 + mutex_lock(&bp->hwrm_cmd_lock);
1851 + hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
1852 + INSTALL_PACKAGE_TIMEOUT);
1853 +- if (hwrm_err)
1854 +- goto flash_pkg_exit;
1855 +-
1856 +- if (resp->error_code) {
1857 ++ if (hwrm_err) {
1858 + u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
1859 +
1860 +- if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
1861 ++ if (resp->error_code && error_code ==
1862 ++ NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
1863 + install.flags |= cpu_to_le16(
1864 + NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
1865 + hwrm_err = _hwrm_send_message(bp, &install,
1866 + sizeof(install),
1867 + INSTALL_PACKAGE_TIMEOUT);
1868 +- if (hwrm_err)
1869 +- goto flash_pkg_exit;
1870 + }
1871 ++ if (hwrm_err)
1872 ++ goto flash_pkg_exit;
1873 + }
1874 +
1875 + if (resp->result) {
1876 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
1877 +index 44d6c5743fb9..434470a6b9f3 100644
1878 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
1879 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
1880 +@@ -1236,7 +1236,7 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
1881 + static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow,
1882 + u16 src_fid)
1883 + {
1884 +- flow->dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
1885 ++ flow->l2_key.dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
1886 + }
1887 +
1888 + static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
1889 +@@ -1285,9 +1285,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
1890 + goto free_node;
1891 +
1892 + bnxt_tc_set_src_fid(bp, flow, src_fid);
1893 +-
1894 +- if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
1895 +- bnxt_tc_set_flow_dir(bp, flow, src_fid);
1896 ++ bnxt_tc_set_flow_dir(bp, flow, flow->src_fid);
1897 +
1898 + if (!bnxt_tc_can_offload(bp, flow)) {
1899 + rc = -EOPNOTSUPP;
1900 +@@ -1407,7 +1405,7 @@ static void bnxt_fill_cfa_stats_req(struct bnxt *bp,
1901 + * 2. 15th bit of flow_handle must specify the flow
1902 + * direction (TX/RX).
1903 + */
1904 +- if (flow_node->flow.dir == BNXT_DIR_RX)
1905 ++ if (flow_node->flow.l2_key.dir == BNXT_DIR_RX)
1906 + handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX |
1907 + CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
1908 + else
1909 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
1910 +index 8a0968967bc5..8b0f1510bdc4 100644
1911 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
1912 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
1913 +@@ -23,6 +23,9 @@ struct bnxt_tc_l2_key {
1914 + __be16 inner_vlan_tci;
1915 + __be16 ether_type;
1916 + u8 num_vlans;
1917 ++ u8 dir;
1918 ++#define BNXT_DIR_RX 1
1919 ++#define BNXT_DIR_TX 0
1920 + };
1921 +
1922 + struct bnxt_tc_l3_key {
1923 +@@ -98,9 +101,6 @@ struct bnxt_tc_flow {
1924 +
1925 + /* flow applicable to pkts ingressing on this fid */
1926 + u16 src_fid;
1927 +- u8 dir;
1928 +-#define BNXT_DIR_RX 1
1929 +-#define BNXT_DIR_TX 0
1930 + struct bnxt_tc_l2_key l2_key;
1931 + struct bnxt_tc_l2_key l2_mask;
1932 + struct bnxt_tc_l3_key l3_key;
1933 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1934 +index 6c01314e87b0..db3552f2d087 100644
1935 +--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1936 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1937 +@@ -1187,7 +1187,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
1938 + err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
1939 + if (err) {
1940 + en_err(priv, "Failed to allocate RSS indirection QP\n");
1941 +- goto rss_err;
1942 ++ goto qp_alloc_err;
1943 + }
1944 +
1945 + rss_map->indir_qp->event = mlx4_en_sqp_event;
1946 +@@ -1241,6 +1241,7 @@ indir_err:
1947 + MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
1948 + mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
1949 + mlx4_qp_free(mdev->dev, rss_map->indir_qp);
1950 ++qp_alloc_err:
1951 + kfree(rss_map->indir_qp);
1952 + rss_map->indir_qp = NULL;
1953 + rss_err:
1954 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
1955 +index f3d98748b211..c1caf14bc334 100644
1956 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
1957 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
1958 +@@ -76,9 +76,6 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
1959 + u8 state;
1960 + int err;
1961 +
1962 +- if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
1963 +- return 0;
1964 +-
1965 + err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
1966 + if (err) {
1967 + netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
1968 +@@ -86,10 +83,8 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
1969 + return err;
1970 + }
1971 +
1972 +- if (state != MLX5_SQC_STATE_ERR) {
1973 +- netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn);
1974 +- return -EINVAL;
1975 +- }
1976 ++ if (state != MLX5_SQC_STATE_ERR)
1977 ++ return 0;
1978 +
1979 + mlx5e_tx_disable_queue(sq->txq);
1980 +
1981 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
1982 +index 8657e0f26995..2c75b2752f58 100644
1983 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
1984 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
1985 +@@ -437,12 +437,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
1986 + return &arfs_t->rules_hash[bucket_idx];
1987 + }
1988 +
1989 +-static u8 arfs_get_ip_proto(const struct sk_buff *skb)
1990 +-{
1991 +- return (skb->protocol == htons(ETH_P_IP)) ?
1992 +- ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
1993 +-}
1994 +-
1995 + static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
1996 + u8 ip_proto, __be16 etype)
1997 + {
1998 +@@ -602,31 +596,9 @@ out:
1999 + arfs_may_expire_flow(priv);
2000 + }
2001 +
2002 +-/* return L4 destination port from ip4/6 packets */
2003 +-static __be16 arfs_get_dst_port(const struct sk_buff *skb)
2004 +-{
2005 +- char *transport_header;
2006 +-
2007 +- transport_header = skb_transport_header(skb);
2008 +- if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
2009 +- return ((struct tcphdr *)transport_header)->dest;
2010 +- return ((struct udphdr *)transport_header)->dest;
2011 +-}
2012 +-
2013 +-/* return L4 source port from ip4/6 packets */
2014 +-static __be16 arfs_get_src_port(const struct sk_buff *skb)
2015 +-{
2016 +- char *transport_header;
2017 +-
2018 +- transport_header = skb_transport_header(skb);
2019 +- if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
2020 +- return ((struct tcphdr *)transport_header)->source;
2021 +- return ((struct udphdr *)transport_header)->source;
2022 +-}
2023 +-
2024 + static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
2025 + struct arfs_table *arfs_t,
2026 +- const struct sk_buff *skb,
2027 ++ const struct flow_keys *fk,
2028 + u16 rxq, u32 flow_id)
2029 + {
2030 + struct arfs_rule *rule;
2031 +@@ -641,19 +613,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
2032 + INIT_WORK(&rule->arfs_work, arfs_handle_work);
2033 +
2034 + tuple = &rule->tuple;
2035 +- tuple->etype = skb->protocol;
2036 ++ tuple->etype = fk->basic.n_proto;
2037 ++ tuple->ip_proto = fk->basic.ip_proto;
2038 + if (tuple->etype == htons(ETH_P_IP)) {
2039 +- tuple->src_ipv4 = ip_hdr(skb)->saddr;
2040 +- tuple->dst_ipv4 = ip_hdr(skb)->daddr;
2041 ++ tuple->src_ipv4 = fk->addrs.v4addrs.src;
2042 ++ tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
2043 + } else {
2044 +- memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
2045 ++ memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
2046 + sizeof(struct in6_addr));
2047 +- memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
2048 ++ memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
2049 + sizeof(struct in6_addr));
2050 + }
2051 +- tuple->ip_proto = arfs_get_ip_proto(skb);
2052 +- tuple->src_port = arfs_get_src_port(skb);
2053 +- tuple->dst_port = arfs_get_dst_port(skb);
2054 ++ tuple->src_port = fk->ports.src;
2055 ++ tuple->dst_port = fk->ports.dst;
2056 +
2057 + rule->flow_id = flow_id;
2058 + rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
2059 +@@ -664,37 +636,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
2060 + return rule;
2061 + }
2062 +
2063 +-static bool arfs_cmp_ips(struct arfs_tuple *tuple,
2064 +- const struct sk_buff *skb)
2065 ++static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
2066 + {
2067 +- if (tuple->etype == htons(ETH_P_IP) &&
2068 +- tuple->src_ipv4 == ip_hdr(skb)->saddr &&
2069 +- tuple->dst_ipv4 == ip_hdr(skb)->daddr)
2070 +- return true;
2071 +- if (tuple->etype == htons(ETH_P_IPV6) &&
2072 +- (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
2073 +- sizeof(struct in6_addr))) &&
2074 +- (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
2075 +- sizeof(struct in6_addr))))
2076 +- return true;
2077 ++ if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
2078 ++ return false;
2079 ++ if (tuple->etype != fk->basic.n_proto)
2080 ++ return false;
2081 ++ if (tuple->etype == htons(ETH_P_IP))
2082 ++ return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
2083 ++ tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
2084 ++ if (tuple->etype == htons(ETH_P_IPV6))
2085 ++ return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
2086 ++ sizeof(struct in6_addr)) &&
2087 ++ !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
2088 ++ sizeof(struct in6_addr));
2089 + return false;
2090 + }
2091 +
2092 + static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
2093 +- const struct sk_buff *skb)
2094 ++ const struct flow_keys *fk)
2095 + {
2096 + struct arfs_rule *arfs_rule;
2097 + struct hlist_head *head;
2098 +- __be16 src_port = arfs_get_src_port(skb);
2099 +- __be16 dst_port = arfs_get_dst_port(skb);
2100 +
2101 +- head = arfs_hash_bucket(arfs_t, src_port, dst_port);
2102 ++ head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
2103 + hlist_for_each_entry(arfs_rule, head, hlist) {
2104 +- if (arfs_rule->tuple.src_port == src_port &&
2105 +- arfs_rule->tuple.dst_port == dst_port &&
2106 +- arfs_cmp_ips(&arfs_rule->tuple, skb)) {
2107 ++ if (arfs_cmp(&arfs_rule->tuple, fk))
2108 + return arfs_rule;
2109 +- }
2110 + }
2111 +
2112 + return NULL;
2113 +@@ -707,20 +675,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
2114 + struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
2115 + struct arfs_table *arfs_t;
2116 + struct arfs_rule *arfs_rule;
2117 ++ struct flow_keys fk;
2118 ++
2119 ++ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
2120 ++ return -EPROTONOSUPPORT;
2121 +
2122 +- if (skb->protocol != htons(ETH_P_IP) &&
2123 +- skb->protocol != htons(ETH_P_IPV6))
2124 ++ if (fk.basic.n_proto != htons(ETH_P_IP) &&
2125 ++ fk.basic.n_proto != htons(ETH_P_IPV6))
2126 + return -EPROTONOSUPPORT;
2127 +
2128 + if (skb->encapsulation)
2129 + return -EPROTONOSUPPORT;
2130 +
2131 +- arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
2132 ++ arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
2133 + if (!arfs_t)
2134 + return -EPROTONOSUPPORT;
2135 +
2136 + spin_lock_bh(&arfs->arfs_lock);
2137 +- arfs_rule = arfs_find_rule(arfs_t, skb);
2138 ++ arfs_rule = arfs_find_rule(arfs_t, &fk);
2139 + if (arfs_rule) {
2140 + if (arfs_rule->rxq == rxq_index) {
2141 + spin_unlock_bh(&arfs->arfs_lock);
2142 +@@ -728,8 +700,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
2143 + }
2144 + arfs_rule->rxq = rxq_index;
2145 + } else {
2146 +- arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
2147 +- rxq_index, flow_id);
2148 ++ arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
2149 + if (!arfs_rule) {
2150 + spin_unlock_bh(&arfs->arfs_lock);
2151 + return -ENOMEM;
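
The en_arfs.c rework above stops hand-parsing IP/TCP/UDP headers and lets the core flow dissector produce the tuple, which both fixes the fragile header walking and makes the v4/v6 comparisons uniform. A condensed sketch of consuming struct flow_keys the same way (receive path, no encapsulation assumed):

    #include <linux/skbuff.h>
    #include <linux/if_ether.h>
    #include <net/flow_dissector.h>

    static int example_classify(const struct sk_buff *skb)
    {
            struct flow_keys fk;

            /* One pass fills n_proto, ip_proto, addresses and ports. */
            if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
                    return -EPROTONOSUPPORT;

            if (fk.basic.n_proto != htons(ETH_P_IP) &&
                fk.basic.n_proto != htons(ETH_P_IPV6))
                    return -EPROTONOSUPPORT;

            /* fk.ports.src/dst and fk.addrs.* are now the canonical
             * tuple for hashing or rule lookup. */
            return 0;
    }
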
2152 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2153 +index f637d81f08bc..06f9bd6a45e3 100644
2154 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2155 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2156 +@@ -1060,6 +1060,14 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
2157 + link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
2158 + mlx5e_port_speed2linkmodes(mdev, speed, !ext);
2159 +
2160 ++ if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
2161 ++ autoneg != AUTONEG_ENABLE) {
2162 ++ netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
2163 ++ __func__);
2164 ++ err = -EINVAL;
2165 ++ goto out;
2166 ++ }
2167 ++
2168 + link_modes = link_modes & eproto.cap;
2169 + if (!link_modes) {
2170 + netdev_err(priv->netdev, "%s: Not supported link mode(s) requested",
2171 +@@ -1317,6 +1325,9 @@ int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
2172 + struct mlx5_core_dev *mdev = priv->mdev;
2173 + int err;
2174 +
2175 ++ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
2176 ++ return -EOPNOTSUPP;
2177 ++
2178 + if (pauseparam->autoneg)
2179 + return -EINVAL;
2180 +
2181 +diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
2182 +index b509b941d5ca..6825254eb882 100644
2183 +--- a/drivers/net/netdevsim/dev.c
2184 ++++ b/drivers/net/netdevsim/dev.c
2185 +@@ -71,46 +71,47 @@ static void nsim_dev_port_debugfs_exit(struct nsim_dev_port *nsim_dev_port)
2186 + debugfs_remove_recursive(nsim_dev_port->ddir);
2187 + }
2188 +
2189 ++static struct net *nsim_devlink_net(struct devlink *devlink)
2190 ++{
2191 ++ return &init_net;
2192 ++}
2193 ++
2194 + static u64 nsim_dev_ipv4_fib_resource_occ_get(void *priv)
2195 + {
2196 +- struct nsim_dev *nsim_dev = priv;
2197 ++ struct net *net = priv;
2198 +
2199 +- return nsim_fib_get_val(nsim_dev->fib_data,
2200 +- NSIM_RESOURCE_IPV4_FIB, false);
2201 ++ return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false);
2202 + }
2203 +
2204 + static u64 nsim_dev_ipv4_fib_rules_res_occ_get(void *priv)
2205 + {
2206 +- struct nsim_dev *nsim_dev = priv;
2207 ++ struct net *net = priv;
2208 +
2209 +- return nsim_fib_get_val(nsim_dev->fib_data,
2210 +- NSIM_RESOURCE_IPV4_FIB_RULES, false);
2211 ++ return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false);
2212 + }
2213 +
2214 + static u64 nsim_dev_ipv6_fib_resource_occ_get(void *priv)
2215 + {
2216 +- struct nsim_dev *nsim_dev = priv;
2217 ++ struct net *net = priv;
2218 +
2219 +- return nsim_fib_get_val(nsim_dev->fib_data,
2220 +- NSIM_RESOURCE_IPV6_FIB, false);
2221 ++ return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false);
2222 + }
2223 +
2224 + static u64 nsim_dev_ipv6_fib_rules_res_occ_get(void *priv)
2225 + {
2226 +- struct nsim_dev *nsim_dev = priv;
2227 ++ struct net *net = priv;
2228 +
2229 +- return nsim_fib_get_val(nsim_dev->fib_data,
2230 +- NSIM_RESOURCE_IPV6_FIB_RULES, false);
2231 ++ return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false);
2232 + }
2233 +
2234 + static int nsim_dev_resources_register(struct devlink *devlink)
2235 + {
2236 +- struct nsim_dev *nsim_dev = devlink_priv(devlink);
2237 + struct devlink_resource_size_params params = {
2238 + .size_max = (u64)-1,
2239 + .size_granularity = 1,
2240 + .unit = DEVLINK_RESOURCE_UNIT_ENTRY
2241 + };
2242 ++ struct net *net = nsim_devlink_net(devlink);
2243 + int err;
2244 + u64 n;
2245 +
2246 +@@ -124,8 +125,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
2247 + goto out;
2248 + }
2249 +
2250 +- n = nsim_fib_get_val(nsim_dev->fib_data,
2251 +- NSIM_RESOURCE_IPV4_FIB, true);
2252 ++ n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true);
2253 + err = devlink_resource_register(devlink, "fib", n,
2254 + NSIM_RESOURCE_IPV4_FIB,
2255 + NSIM_RESOURCE_IPV4, &params);
2256 +@@ -134,8 +134,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
2257 + return err;
2258 + }
2259 +
2260 +- n = nsim_fib_get_val(nsim_dev->fib_data,
2261 +- NSIM_RESOURCE_IPV4_FIB_RULES, true);
2262 ++ n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true);
2263 + err = devlink_resource_register(devlink, "fib-rules", n,
2264 + NSIM_RESOURCE_IPV4_FIB_RULES,
2265 + NSIM_RESOURCE_IPV4, &params);
2266 +@@ -154,8 +153,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
2267 + goto out;
2268 + }
2269 +
2270 +- n = nsim_fib_get_val(nsim_dev->fib_data,
2271 +- NSIM_RESOURCE_IPV6_FIB, true);
2272 ++ n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true);
2273 + err = devlink_resource_register(devlink, "fib", n,
2274 + NSIM_RESOURCE_IPV6_FIB,
2275 + NSIM_RESOURCE_IPV6, &params);
2276 +@@ -164,8 +162,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
2277 + return err;
2278 + }
2279 +
2280 +- n = nsim_fib_get_val(nsim_dev->fib_data,
2281 +- NSIM_RESOURCE_IPV6_FIB_RULES, true);
2282 ++ n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true);
2283 + err = devlink_resource_register(devlink, "fib-rules", n,
2284 + NSIM_RESOURCE_IPV6_FIB_RULES,
2285 + NSIM_RESOURCE_IPV6, &params);
2286 +@@ -177,19 +174,19 @@ static int nsim_dev_resources_register(struct devlink *devlink)
2287 + devlink_resource_occ_get_register(devlink,
2288 + NSIM_RESOURCE_IPV4_FIB,
2289 + nsim_dev_ipv4_fib_resource_occ_get,
2290 +- nsim_dev);
2291 ++ net);
2292 + devlink_resource_occ_get_register(devlink,
2293 + NSIM_RESOURCE_IPV4_FIB_RULES,
2294 + nsim_dev_ipv4_fib_rules_res_occ_get,
2295 +- nsim_dev);
2296 ++ net);
2297 + devlink_resource_occ_get_register(devlink,
2298 + NSIM_RESOURCE_IPV6_FIB,
2299 + nsim_dev_ipv6_fib_resource_occ_get,
2300 +- nsim_dev);
2301 ++ net);
2302 + devlink_resource_occ_get_register(devlink,
2303 + NSIM_RESOURCE_IPV6_FIB_RULES,
2304 + nsim_dev_ipv6_fib_rules_res_occ_get,
2305 +- nsim_dev);
2306 ++ net);
2307 + out:
2308 + return err;
2309 + }
2310 +@@ -197,11 +194,11 @@ out:
2311 + static int nsim_dev_reload(struct devlink *devlink,
2312 + struct netlink_ext_ack *extack)
2313 + {
2314 +- struct nsim_dev *nsim_dev = devlink_priv(devlink);
2315 + enum nsim_resource_id res_ids[] = {
2316 + NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
2317 + NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
2318 + };
2319 ++ struct net *net = nsim_devlink_net(devlink);
2320 + int i;
2321 +
2322 + for (i = 0; i < ARRAY_SIZE(res_ids); ++i) {
2323 +@@ -210,8 +207,7 @@ static int nsim_dev_reload(struct devlink *devlink,
2324 +
2325 + err = devlink_resource_size_get(devlink, res_ids[i], &val);
2326 + if (!err) {
2327 +- err = nsim_fib_set_max(nsim_dev->fib_data,
2328 +- res_ids[i], val, extack);
2329 ++ err = nsim_fib_set_max(net, res_ids[i], val, extack);
2330 + if (err)
2331 + return err;
2332 + }
2333 +@@ -241,15 +237,9 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
2334 + INIT_LIST_HEAD(&nsim_dev->port_list);
2335 + mutex_init(&nsim_dev->port_list_lock);
2336 +
2337 +- nsim_dev->fib_data = nsim_fib_create();
2338 +- if (IS_ERR(nsim_dev->fib_data)) {
2339 +- err = PTR_ERR(nsim_dev->fib_data);
2340 +- goto err_devlink_free;
2341 +- }
2342 +-
2343 + err = nsim_dev_resources_register(devlink);
2344 + if (err)
2345 +- goto err_fib_destroy;
2346 ++ goto err_devlink_free;
2347 +
2348 + err = devlink_register(devlink, &nsim_bus_dev->dev);
2349 + if (err)
2350 +@@ -271,8 +261,6 @@ err_dl_unregister:
2351 + devlink_unregister(devlink);
2352 + err_resources_unregister:
2353 + devlink_resources_unregister(devlink, NULL);
2354 +-err_fib_destroy:
2355 +- nsim_fib_destroy(nsim_dev->fib_data);
2356 + err_devlink_free:
2357 + devlink_free(devlink);
2358 + return ERR_PTR(err);
2359 +@@ -286,7 +274,6 @@ static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
2360 + nsim_dev_debugfs_exit(nsim_dev);
2361 + devlink_unregister(devlink);
2362 + devlink_resources_unregister(devlink, NULL);
2363 +- nsim_fib_destroy(nsim_dev->fib_data);
2364 + mutex_destroy(&nsim_dev->port_list_lock);
2365 + devlink_free(devlink);
2366 + }
2367 +diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
2368 +index 8c57ba747772..f61d094746c0 100644
2369 +--- a/drivers/net/netdevsim/fib.c
2370 ++++ b/drivers/net/netdevsim/fib.c
2371 +@@ -18,6 +18,7 @@
2372 + #include <net/ip_fib.h>
2373 + #include <net/ip6_fib.h>
2374 + #include <net/fib_rules.h>
2375 ++#include <net/netns/generic.h>
2376 +
2377 + #include "netdevsim.h"
2378 +
2379 +@@ -32,14 +33,15 @@ struct nsim_per_fib_data {
2380 + };
2381 +
2382 + struct nsim_fib_data {
2383 +- struct notifier_block fib_nb;
2384 + struct nsim_per_fib_data ipv4;
2385 + struct nsim_per_fib_data ipv6;
2386 + };
2387 +
2388 +-u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
2389 +- enum nsim_resource_id res_id, bool max)
2390 ++static unsigned int nsim_fib_net_id;
2391 ++
2392 ++u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max)
2393 + {
2394 ++ struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
2395 + struct nsim_fib_entry *entry;
2396 +
2397 + switch (res_id) {
2398 +@@ -62,10 +64,10 @@ u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
2399 + return max ? entry->max : entry->num;
2400 + }
2401 +
2402 +-int nsim_fib_set_max(struct nsim_fib_data *fib_data,
2403 +- enum nsim_resource_id res_id, u64 val,
2404 ++int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
2405 + struct netlink_ext_ack *extack)
2406 + {
2407 ++ struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
2408 + struct nsim_fib_entry *entry;
2409 + int err = 0;
2410 +
2411 +@@ -118,9 +120,9 @@ static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add,
2412 + return err;
2413 + }
2414 +
2415 +-static int nsim_fib_rule_event(struct nsim_fib_data *data,
2416 +- struct fib_notifier_info *info, bool add)
2417 ++static int nsim_fib_rule_event(struct fib_notifier_info *info, bool add)
2418 + {
2419 ++ struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
2420 + struct netlink_ext_ack *extack = info->extack;
2421 + int err = 0;
2422 +
2423 +@@ -155,9 +157,9 @@ static int nsim_fib_account(struct nsim_fib_entry *entry, bool add,
2424 + return err;
2425 + }
2426 +
2427 +-static int nsim_fib_event(struct nsim_fib_data *data,
2428 +- struct fib_notifier_info *info, bool add)
2429 ++static int nsim_fib_event(struct fib_notifier_info *info, bool add)
2430 + {
2431 ++ struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
2432 + struct netlink_ext_ack *extack = info->extack;
2433 + int err = 0;
2434 +
2435 +@@ -176,22 +178,18 @@ static int nsim_fib_event(struct nsim_fib_data *data,
2436 + static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
2437 + void *ptr)
2438 + {
2439 +- struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
2440 +- fib_nb);
2441 + struct fib_notifier_info *info = ptr;
2442 + int err = 0;
2443 +
2444 + switch (event) {
2445 + case FIB_EVENT_RULE_ADD: /* fall through */
2446 + case FIB_EVENT_RULE_DEL:
2447 +- err = nsim_fib_rule_event(data, info,
2448 +- event == FIB_EVENT_RULE_ADD);
2449 ++ err = nsim_fib_rule_event(info, event == FIB_EVENT_RULE_ADD);
2450 + break;
2451 +
2452 + case FIB_EVENT_ENTRY_ADD: /* fall through */
2453 + case FIB_EVENT_ENTRY_DEL:
2454 +- err = nsim_fib_event(data, info,
2455 +- event == FIB_EVENT_ENTRY_ADD);
2456 ++ err = nsim_fib_event(info, event == FIB_EVENT_ENTRY_ADD);
2457 + break;
2458 + }
2459 +
2460 +@@ -201,23 +199,30 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
2461 + /* inconsistent dump, trying again */
2462 + static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
2463 + {
2464 +- struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
2465 +- fib_nb);
2466 ++ struct nsim_fib_data *data;
2467 ++ struct net *net;
2468 ++
2469 ++ rcu_read_lock();
2470 ++ for_each_net_rcu(net) {
2471 ++ data = net_generic(net, nsim_fib_net_id);
2472 ++
2473 ++ data->ipv4.fib.num = 0ULL;
2474 ++ data->ipv4.rules.num = 0ULL;
2475 +
2476 +- data->ipv4.fib.num = 0ULL;
2477 +- data->ipv4.rules.num = 0ULL;
2478 +- data->ipv6.fib.num = 0ULL;
2479 +- data->ipv6.rules.num = 0ULL;
2480 ++ data->ipv6.fib.num = 0ULL;
2481 ++ data->ipv6.rules.num = 0ULL;
2482 ++ }
2483 ++ rcu_read_unlock();
2484 + }
2485 +
2486 +-struct nsim_fib_data *nsim_fib_create(void)
2487 +-{
2488 +- struct nsim_fib_data *data;
2489 +- int err;
2490 ++static struct notifier_block nsim_fib_nb = {
2491 ++ .notifier_call = nsim_fib_event_nb,
2492 ++};
2493 +
2494 +- data = kzalloc(sizeof(*data), GFP_KERNEL);
2495 +- if (!data)
2496 +- return ERR_PTR(-ENOMEM);
2497 ++/* Initialize per network namespace state */
2498 ++static int __net_init nsim_fib_netns_init(struct net *net)
2499 ++{
2500 ++ struct nsim_fib_data *data = net_generic(net, nsim_fib_net_id);
2501 +
2502 + data->ipv4.fib.max = (u64)-1;
2503 + data->ipv4.rules.max = (u64)-1;
2504 +@@ -225,22 +230,37 @@ struct nsim_fib_data *nsim_fib_create(void)
2505 + data->ipv6.fib.max = (u64)-1;
2506 + data->ipv6.rules.max = (u64)-1;
2507 +
2508 +- data->fib_nb.notifier_call = nsim_fib_event_nb;
2509 +- err = register_fib_notifier(&data->fib_nb, nsim_fib_dump_inconsistent);
2510 +- if (err) {
2511 +- pr_err("Failed to register fib notifier\n");
2512 +- goto err_out;
2513 +- }
2514 ++ return 0;
2515 ++}
2516 +
2517 +- return data;
2518 ++static struct pernet_operations nsim_fib_net_ops = {
2519 ++ .init = nsim_fib_netns_init,
2520 ++ .id = &nsim_fib_net_id,
2521 ++ .size = sizeof(struct nsim_fib_data),
2522 ++};
2523 +
2524 +-err_out:
2525 +- kfree(data);
2526 +- return ERR_PTR(err);
2527 ++void nsim_fib_exit(void)
2528 ++{
2529 ++ unregister_pernet_subsys(&nsim_fib_net_ops);
2530 ++ unregister_fib_notifier(&nsim_fib_nb);
2531 + }
2532 +
2533 +-void nsim_fib_destroy(struct nsim_fib_data *data)
2534 ++int nsim_fib_init(void)
2535 + {
2536 +- unregister_fib_notifier(&data->fib_nb);
2537 +- kfree(data);
2538 ++ int err;
2539 ++
2540 ++ err = register_pernet_subsys(&nsim_fib_net_ops);
2541 ++ if (err < 0) {
2542 ++ pr_err("Failed to register pernet subsystem\n");
2543 ++ goto err_out;
2544 ++ }
2545 ++
2546 ++ err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent);
2547 ++ if (err < 0) {
2548 ++ pr_err("Failed to register fib notifier\n");
2549 ++ goto err_out;
2550 ++ }
2551 ++
2552 ++err_out:
2553 ++ return err;
2554 + }
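
The fib.c conversion above is the canonical per-network-namespace state pattern: pernet_operations with .id and .size makes the core allocate a zeroed private blob for every netns, retrievable anywhere via net_generic(), so the module no longer needs a single global nsim_fib_data. A condensed sketch of the registration half, with hypothetical names:

    #include <net/net_namespace.h>
    #include <net/netns/generic.h>

    struct example_net {                    /* hypothetical per-netns state */
            u64 counter;
    };

    static unsigned int example_net_id;

    static int __net_init example_net_init(struct net *net)
    {
            struct example_net *en = net_generic(net, example_net_id);

            en->counter = 0;                /* blob arrives zero-allocated */
            return 0;
    }

    static struct pernet_operations example_net_ops = {
            .init = example_net_init,
            .id   = &example_net_id,
            .size = sizeof(struct example_net),
    };

    /* register_pernet_subsys(&example_net_ops) from module init,
     * unregister_pernet_subsys(&example_net_ops) on exit. */
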
2555 +diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
2556 +index e5c8aa08e1cd..533a182eefca 100644
2557 +--- a/drivers/net/netdevsim/netdev.c
2558 ++++ b/drivers/net/netdevsim/netdev.c
2559 +@@ -370,12 +370,18 @@ static int __init nsim_module_init(void)
2560 + if (err)
2561 + goto err_dev_exit;
2562 +
2563 +- err = rtnl_link_register(&nsim_link_ops);
2564 ++ err = nsim_fib_init();
2565 + if (err)
2566 + goto err_bus_exit;
2567 +
2568 ++ err = rtnl_link_register(&nsim_link_ops);
2569 ++ if (err)
2570 ++ goto err_fib_exit;
2571 ++
2572 + return 0;
2573 +
2574 ++err_fib_exit:
2575 ++ nsim_fib_exit();
2576 + err_bus_exit:
2577 + nsim_bus_exit();
2578 + err_dev_exit:
2579 +@@ -386,6 +392,7 @@ err_dev_exit:
2580 + static void __exit nsim_module_exit(void)
2581 + {
2582 + rtnl_link_unregister(&nsim_link_ops);
2583 ++ nsim_fib_exit();
2584 + nsim_bus_exit();
2585 + nsim_dev_exit();
2586 + }
2587 +diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
2588 +index 3f398797c2bc..f9253fe68c31 100644
2589 +--- a/drivers/net/netdevsim/netdevsim.h
2590 ++++ b/drivers/net/netdevsim/netdevsim.h
2591 +@@ -168,12 +168,10 @@ int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev,
2592 + int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
2593 + unsigned int port_index);
2594 +
2595 +-struct nsim_fib_data *nsim_fib_create(void);
2596 +-void nsim_fib_destroy(struct nsim_fib_data *fib_data);
2597 +-u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
2598 +- enum nsim_resource_id res_id, bool max);
2599 +-int nsim_fib_set_max(struct nsim_fib_data *fib_data,
2600 +- enum nsim_resource_id res_id, u64 val,
2601 ++int nsim_fib_init(void);
2602 ++void nsim_fib_exit(void);
2603 ++u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max);
2604 ++int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
2605 + struct netlink_ext_ack *extack);
2606 +
2607 + #if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
2608 +diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
2609 +index b9d4145781ca..58bb25e4af10 100644
2610 +--- a/drivers/net/phy/phy-c45.c
2611 ++++ b/drivers/net/phy/phy-c45.c
2612 +@@ -219,6 +219,20 @@ int genphy_c45_read_link(struct phy_device *phydev)
2613 + int val, devad;
2614 + bool link = true;
2615 +
2616 ++ if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) {
2617 ++ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
2618 ++ if (val < 0)
2619 ++ return val;
2620 ++
2621 ++ /* Autoneg is being started, therefore disregard current
2622 ++ * link status and report link as down.
2623 ++ */
2624 ++ if (val & MDIO_AN_CTRL1_RESTART) {
2625 ++ phydev->link = 0;
2626 ++ return 0;
2627 ++ }
2628 ++ }
2629 ++
2630 + while (mmd_mask && link) {
2631 + devad = __ffs(mmd_mask);
2632 + mmd_mask &= ~BIT(devad);
2633 +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
2634 +index ffa402732aea..3af0af495cf1 100644
2635 +--- a/drivers/net/phy/phy_device.c
2636 ++++ b/drivers/net/phy/phy_device.c
2637 +@@ -1708,7 +1708,17 @@ EXPORT_SYMBOL(genphy_aneg_done);
2638 + */
2639 + int genphy_update_link(struct phy_device *phydev)
2640 + {
2641 +- int status;
2642 ++ int status = 0, bmcr;
2643 ++
2644 ++ bmcr = phy_read(phydev, MII_BMCR);
2645 ++ if (bmcr < 0)
2646 ++ return bmcr;
2647 ++
2648 ++ /* Autoneg is being started, therefore disregard BMSR value and
2649 ++ * report link as down.
2650 ++ */
2651 ++ if (bmcr & BMCR_ANRESTART)
2652 ++ goto done;
2653 +
2654 + /* The link state is latched low so that momentary link
2655 + * drops can be detected. Do not double-read the status
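
Both PHY hunks (phy-c45.c earlier and phy_device.c here) encode the same rule: while the autonegotiation-restart bit is still set, the latched status registers are not yet meaningful, so report the link as down rather than trust a stale reading. A simplified clause-22 sketch of the check, mirroring the hunk's logic:

    #include <linux/mii.h>
    #include <linux/phy.h>

    static int example_update_link(struct phy_device *phydev)
    {
            int bmcr = phy_read(phydev, MII_BMCR);

            if (bmcr < 0)
                    return bmcr;

            /* Autoneg restart still pending: BMSR would be stale, so
             * treat the link as down for now. */
            if (bmcr & BMCR_ANRESTART) {
                    phydev->link = 0;
                    return 0;
            }

            /* ... otherwise proceed with the usual latched-BMSR read ... */
            return 0;
    }
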
2656 +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
2657 +index 36916bf51ee6..d1b4c7d8e2bc 100644
2658 +--- a/drivers/net/team/team.c
2659 ++++ b/drivers/net/team/team.c
2660 +@@ -1004,6 +1004,8 @@ static void __team_compute_features(struct team *team)
2661 +
2662 + team->dev->vlan_features = vlan_features;
2663 + team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
2664 ++ NETIF_F_HW_VLAN_CTAG_TX |
2665 ++ NETIF_F_HW_VLAN_STAG_TX |
2666 + NETIF_F_GSO_UDP_L4;
2667 + team->dev->hard_header_len = max_hard_header_len;
2668 +
2669 +diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
2670 +index 6d25dea5ad4b..f7d117d80cfb 100644
2671 +--- a/drivers/net/usb/pegasus.c
2672 ++++ b/drivers/net/usb/pegasus.c
2673 +@@ -282,7 +282,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
2674 + static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
2675 + {
2676 + int i;
2677 +- __u8 tmp;
2678 ++ __u8 tmp = 0;
2679 + __le16 retdatai;
2680 + int ret;
2681 +
2682 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
2683 +index 405038ce98d6..7573af2d88ce 100644
2684 +--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
2685 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
2686 +@@ -97,7 +97,7 @@ IWL_EXPORT_SYMBOL(iwl_acpi_get_object);
2687 +
2688 + union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
2689 + union acpi_object *data,
2690 +- int data_size)
2691 ++ int data_size, int *tbl_rev)
2692 + {
2693 + int i;
2694 + union acpi_object *wifi_pkg;
2695 +@@ -113,16 +113,19 @@ union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
2696 + /*
2697 + * We need at least two packages, one for the revision and one
2698 + * for the data itself. Also check that the revision is valid
2699 +- * (i.e. it is an integer set to 0).
2700 ++ * (i.e. it is an integer smaller than 2, as we currently support only
2701 ++ * 2 revisions).
2702 + */
2703 + if (data->type != ACPI_TYPE_PACKAGE ||
2704 + data->package.count < 2 ||
2705 + data->package.elements[0].type != ACPI_TYPE_INTEGER ||
2706 +- data->package.elements[0].integer.value != 0) {
2707 ++ data->package.elements[0].integer.value > 1) {
2708 + IWL_DEBUG_DEV_RADIO(dev, "Unsupported packages structure\n");
2709 + return ERR_PTR(-EINVAL);
2710 + }
2711 +
2712 ++ *tbl_rev = data->package.elements[0].integer.value;
2713 ++
2714 + /* loop through all the packages to find the one for WiFi */
2715 + for (i = 1; i < data->package.count; i++) {
2716 + union acpi_object *domain;
2717 +@@ -151,14 +154,15 @@ int iwl_acpi_get_mcc(struct device *dev, char *mcc)
2718 + {
2719 + union acpi_object *wifi_pkg, *data;
2720 + u32 mcc_val;
2721 +- int ret;
2722 ++ int ret, tbl_rev;
2723 +
2724 + data = iwl_acpi_get_object(dev, ACPI_WRDD_METHOD);
2725 + if (IS_ERR(data))
2726 + return PTR_ERR(data);
2727 +
2728 +- wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE);
2729 +- if (IS_ERR(wifi_pkg)) {
2730 ++ wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE,
2731 ++ &tbl_rev);
2732 ++ if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
2733 + ret = PTR_ERR(wifi_pkg);
2734 + goto out_free;
2735 + }
2736 +@@ -185,6 +189,7 @@ u64 iwl_acpi_get_pwr_limit(struct device *dev)
2737 + {
2738 + union acpi_object *data, *wifi_pkg;
2739 + u64 dflt_pwr_limit;
2740 ++ int tbl_rev;
2741 +
2742 + data = iwl_acpi_get_object(dev, ACPI_SPLC_METHOD);
2743 + if (IS_ERR(data)) {
2744 +@@ -193,8 +198,8 @@ u64 iwl_acpi_get_pwr_limit(struct device *dev)
2745 + }
2746 +
2747 + wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data,
2748 +- ACPI_SPLC_WIFI_DATA_SIZE);
2749 +- if (IS_ERR(wifi_pkg) ||
2750 ++ ACPI_SPLC_WIFI_DATA_SIZE, &tbl_rev);
2751 ++ if (IS_ERR(wifi_pkg) || tbl_rev != 0 ||
2752 + wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER) {
2753 + dflt_pwr_limit = 0;
2754 + goto out_free;
2755 +@@ -211,14 +216,15 @@ IWL_EXPORT_SYMBOL(iwl_acpi_get_pwr_limit);
2756 + int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
2757 + {
2758 + union acpi_object *wifi_pkg, *data;
2759 +- int ret;
2760 ++ int ret, tbl_rev;
2761 +
2762 + data = iwl_acpi_get_object(dev, ACPI_ECKV_METHOD);
2763 + if (IS_ERR(data))
2764 + return PTR_ERR(data);
2765 +
2766 +- wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE);
2767 +- if (IS_ERR(wifi_pkg)) {
2768 ++ wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE,
2769 ++ &tbl_rev);
2770 ++ if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
2771 + ret = PTR_ERR(wifi_pkg);
2772 + goto out_free;
2773 + }
2774 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
2775 +index f5704e16643f..991a23450999 100644
2776 +--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
2777 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
2778 +@@ -97,7 +97,7 @@
2779 + void *iwl_acpi_get_object(struct device *dev, acpi_string method);
2780 + union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
2781 + union acpi_object *data,
2782 +- int data_size);
2783 ++ int data_size, int *tbl_rev);
2784 +
2785 + /**
2786 + * iwl_acpi_get_mcc - read MCC from ACPI, if available
2787 +@@ -131,7 +131,8 @@ static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
2788 +
2789 + static inline union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
2790 + union acpi_object *data,
2791 +- int data_size)
2792 ++ int data_size,
2793 ++ int *tbl_rev)
2794 + {
2795 + return ERR_PTR(-ENOENT);
2796 + }
2797 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
2798 +index 01f003c6cff9..f195db398bed 100644
2799 +--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
2800 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
2801 +@@ -419,14 +419,26 @@ struct iwl_per_chain_offset_group {
2802 + struct iwl_per_chain_offset hb;
2803 + } __packed; /* PER_CHAIN_LIMIT_OFFSET_GROUP_S_VER_1 */
2804 +
2805 ++/**
2806 ++ * struct iwl_geo_tx_power_profile_cmd_v1 - struct for GEO_TX_POWER_LIMIT cmd.
2807 ++ * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
2808 ++ * @table: offset profile per band.
2809 ++ */
2810 ++struct iwl_geo_tx_power_profiles_cmd_v1 {
2811 ++ __le32 ops;
2812 ++ struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
2813 ++} __packed; /* GEO_TX_POWER_LIMIT_VER_1 */
2814 ++
2815 + /**
2816 + * struct iwl_geo_tx_power_profile_cmd - struct for GEO_TX_POWER_LIMIT cmd.
2817 + * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
2818 + * @table: offset profile per band.
2819 ++ * @table_revision: BIOS table revision.
2820 + */
2821 + struct iwl_geo_tx_power_profiles_cmd {
2822 + __le32 ops;
2823 + struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
2824 ++ __le32 table_revision;
2825 + } __packed; /* GEO_TX_POWER_LIMIT */
2826 +
2827 + /**
2828 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
2829 +index de9243d30135..a74f34a8dffb 100644
2830 +--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
2831 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
2832 +@@ -286,6 +286,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
2833 + * SCAN_OFFLOAD_PROFILES_QUERY_RSP_S.
2834 + * @IWL_UCODE_TLV_API_MBSSID_HE: This ucode supports v2 of
2835 + * STA_CONTEXT_DOT11AX_API_S
2836 ++ * @IWL_UCODE_TLV_CAPA_SAR_TABLE_VER: This ucode supports different sar
2837 ++ * version tables.
2838 + *
2839 + * @NUM_IWL_UCODE_TLV_API: number of bits used
2840 + */
2841 +@@ -318,6 +320,7 @@ enum iwl_ucode_tlv_api {
2842 + IWL_UCODE_TLV_API_MBSSID_HE = (__force iwl_ucode_tlv_api_t)52,
2843 + IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE = (__force iwl_ucode_tlv_api_t)53,
2844 + IWL_UCODE_TLV_API_FTM_RTT_ACCURACY = (__force iwl_ucode_tlv_api_t)54,
2845 ++ IWL_UCODE_TLV_API_SAR_TABLE_VER = (__force iwl_ucode_tlv_api_t)55,
2846 +
2847 + NUM_IWL_UCODE_TLV_API
2848 + #ifdef __CHECKER__
2849 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
2850 +index 5af9959d05e5..8892707050d5 100644
2851 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
2852 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
2853 +@@ -682,15 +682,15 @@ static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
2854 + {
2855 + union acpi_object *wifi_pkg, *table, *data;
2856 + bool enabled;
2857 +- int ret;
2858 ++ int ret, tbl_rev;
2859 +
2860 + data = iwl_acpi_get_object(mvm->dev, ACPI_WRDS_METHOD);
2861 + if (IS_ERR(data))
2862 + return PTR_ERR(data);
2863 +
2864 + wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
2865 +- ACPI_WRDS_WIFI_DATA_SIZE);
2866 +- if (IS_ERR(wifi_pkg)) {
2867 ++ ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev);
2868 ++ if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
2869 + ret = PTR_ERR(wifi_pkg);
2870 + goto out_free;
2871 + }
2872 +@@ -719,15 +719,15 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
2873 + {
2874 + union acpi_object *wifi_pkg, *data;
2875 + bool enabled;
2876 +- int i, n_profiles, ret;
2877 ++ int i, n_profiles, ret, tbl_rev;
2878 +
2879 + data = iwl_acpi_get_object(mvm->dev, ACPI_EWRD_METHOD);
2880 + if (IS_ERR(data))
2881 + return PTR_ERR(data);
2882 +
2883 + wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
2884 +- ACPI_EWRD_WIFI_DATA_SIZE);
2885 +- if (IS_ERR(wifi_pkg)) {
2886 ++ ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev);
2887 ++ if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
2888 + ret = PTR_ERR(wifi_pkg);
2889 + goto out_free;
2890 + }
2891 +@@ -778,7 +778,7 @@ out_free:
2892 + static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
2893 + {
2894 + union acpi_object *wifi_pkg, *data;
2895 +- int i, j, ret;
2896 ++ int i, j, ret, tbl_rev;
2897 + int idx = 1;
2898 +
2899 + data = iwl_acpi_get_object(mvm->dev, ACPI_WGDS_METHOD);
2900 +@@ -786,12 +786,13 @@ static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
2901 + return PTR_ERR(data);
2902 +
2903 + wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
2904 +- ACPI_WGDS_WIFI_DATA_SIZE);
2905 +- if (IS_ERR(wifi_pkg)) {
2906 ++ ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev);
2907 ++ if (IS_ERR(wifi_pkg) || tbl_rev > 1) {
2908 + ret = PTR_ERR(wifi_pkg);
2909 + goto out_free;
2910 + }
2911 +
2912 ++ mvm->geo_rev = tbl_rev;
2913 + for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
2914 + for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
2915 + union acpi_object *entry;
2916 +@@ -894,15 +895,29 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
2917 + {
2918 + struct iwl_geo_tx_power_profiles_resp *resp;
2919 + int ret;
2920 ++ u16 len;
2921 ++ void *data;
2922 ++ struct iwl_geo_tx_power_profiles_cmd geo_cmd;
2923 ++ struct iwl_geo_tx_power_profiles_cmd_v1 geo_cmd_v1;
2924 ++ struct iwl_host_cmd cmd;
2925 ++
2926 ++ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
2927 ++ geo_cmd.ops =
2928 ++ cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
2929 ++ len = sizeof(geo_cmd);
2930 ++ data = &geo_cmd;
2931 ++ } else {
2932 ++ geo_cmd_v1.ops =
2933 ++ cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
2934 ++ len = sizeof(geo_cmd_v1);
2935 ++ data = &geo_cmd_v1;
2936 ++ }
2937 +
2938 +- struct iwl_geo_tx_power_profiles_cmd geo_cmd = {
2939 +- .ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE),
2940 +- };
2941 +- struct iwl_host_cmd cmd = {
2942 ++ cmd = (struct iwl_host_cmd){
2943 + .id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
2944 +- .len = { sizeof(geo_cmd), },
2945 ++ .len = { len, },
2946 + .flags = CMD_WANT_SKB,
2947 +- .data = { &geo_cmd },
2948 ++ .data = { data },
2949 + };
2950 +
2951 + if (!iwl_mvm_sar_geo_support(mvm))
2952 +@@ -969,6 +984,16 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
2953 + i, j, value[1], value[2], value[0]);
2954 + }
2955 + }
2956 ++
2957 ++ cmd.table_revision = cpu_to_le32(mvm->geo_rev);
2958 ++
2959 ++ if (!fw_has_api(&mvm->fw->ucode_capa,
2960 ++ IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
2961 ++ return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0,
2962 ++ sizeof(struct iwl_geo_tx_power_profiles_cmd_v1),
2963 ++ &cmd);
2964 ++ }
2965 ++
2966 + return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd);
2967 + }
2968 +
2969 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
2970 +index 88af1f0ba3f0..ed8fc9a9204c 100644
2971 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
2972 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
2973 +@@ -1184,6 +1184,7 @@ struct iwl_mvm {
2974 + #ifdef CONFIG_ACPI
2975 + struct iwl_mvm_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
2976 + struct iwl_mvm_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES];
2977 ++ u32 geo_rev;
2978 + #endif
2979 + };
2980 +
2981 +diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
2982 +index 1d9940d4e8c7..c9262ffeefe4 100644
2983 +--- a/drivers/net/xen-netback/netback.c
2984 ++++ b/drivers/net/xen-netback/netback.c
2985 +@@ -925,6 +925,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
2986 + skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
2987 + nskb = xenvif_alloc_skb(0);
2988 + if (unlikely(nskb == NULL)) {
2989 ++ skb_shinfo(skb)->nr_frags = 0;
2990 + kfree_skb(skb);
2991 + xenvif_tx_err(queue, &txreq, extra_count, idx);
2992 + if (net_ratelimit())
2993 +@@ -940,6 +941,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
2994 +
2995 + if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
2996 + /* Failure in xenvif_set_skb_gso is fatal. */
2997 ++ skb_shinfo(skb)->nr_frags = 0;
2998 + kfree_skb(skb);
2999 + kfree_skb(nskb);
3000 + break;
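Both hunks above plug the same leak: nr_frags was raised speculatively before any frag slot was populated, so freeing the skb on these error paths made kfree_skb() walk uninitialized skb_frag_t entries and drop page references that were never taken. A minimal sketch of the corrected pattern (the helper name is hypothetical, not the driver's code):

    #include <linux/skbuff.h>

    /* Forget the speculative frag count before freeing, so kfree_skb()
     * releases only pages that were actually attached. */
    static void drop_partially_built_skb(struct sk_buff *skb)
    {
            skb_shinfo(skb)->nr_frags = 0;
            kfree_skb(skb);
    }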
3001 +diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
3002 +index 1d902230ba61..be6cda89dcf5 100644
3003 +--- a/drivers/platform/x86/intel_pmc_core.c
3004 ++++ b/drivers/platform/x86/intel_pmc_core.c
3005 +@@ -815,6 +815,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
3006 + INTEL_CPU_FAM6(KABYLAKE_DESKTOP, spt_reg_map),
3007 + INTEL_CPU_FAM6(CANNONLAKE_MOBILE, cnp_reg_map),
3008 + INTEL_CPU_FAM6(ICELAKE_MOBILE, icl_reg_map),
3009 ++ INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
3010 + {}
3011 + };
3012 +
3013 +diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c
3014 +index c1ca931e1fab..7a8cbfb5d213 100644
3015 +--- a/drivers/platform/x86/pcengines-apuv2.c
3016 ++++ b/drivers/platform/x86/pcengines-apuv2.c
3017 +@@ -255,6 +255,4 @@ MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LED/keys driver");
3018 + MODULE_LICENSE("GPL");
3019 + MODULE_DEVICE_TABLE(dmi, apu_gpio_dmi_table);
3020 + MODULE_ALIAS("platform:pcengines-apuv2");
3021 +-MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME);
3022 +-MODULE_SOFTDEP("pre: platform:leds-gpio");
3023 +-MODULE_SOFTDEP("pre: platform:gpio_keys_polled");
3024 ++MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME " platform:leds-gpio platform:gpio_keys_polled");
3025 +diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
3026 +index 8068520cf89e..152de392f9aa 100644
3027 +--- a/drivers/scsi/hpsa.c
3028 ++++ b/drivers/scsi/hpsa.c
3029 +@@ -2320,6 +2320,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
3030 + case IOACCEL2_SERV_RESPONSE_COMPLETE:
3031 + switch (c2->error_data.status) {
3032 + case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
3033 ++ if (cmd)
3034 ++ cmd->result = 0;
3035 + break;
3036 + case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
3037 + cmd->result |= SAM_STAT_CHECK_CONDITION;
3038 +@@ -2479,8 +2481,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
3039 +
3040 + /* check for good status */
3041 + if (likely(c2->error_data.serv_response == 0 &&
3042 +- c2->error_data.status == 0))
3043 ++ c2->error_data.status == 0)) {
3044 ++ cmd->result = 0;
3045 + return hpsa_cmd_free_and_done(h, c, cmd);
3046 ++ }
3047 +
3048 + /*
3049 + * Any RAID offload error results in retry which will use
3050 +@@ -5638,6 +5642,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
3051 + }
3052 + c = cmd_tagged_alloc(h, cmd);
3053 +
3054 ++ /*
3055 ++ * This is necessary because the SML doesn't zero out this field during
3056 ++ * error recovery.
3057 ++ */
3058 ++ cmd->result = 0;
3059 ++
3060 + /*
3061 + * Call alternate submit routine for I/O accelerated commands.
3062 + * Retries always go down the normal I/O path.
3063 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
3064 +index 54772d4c377f..6a4c719497ca 100644
3065 +--- a/drivers/scsi/qla2xxx/qla_init.c
3066 ++++ b/drivers/scsi/qla2xxx/qla_init.c
3067 +@@ -4877,7 +4877,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
3068 + ql_log(ql_log_warn, vha, 0xd049,
3069 + "Failed to allocate ct_sns request.\n");
3070 + kfree(fcport);
3071 +- fcport = NULL;
3072 ++ return NULL;
3073 + }
3074 +
3075 + INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
3076 +diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
3077 +index 2edf3ee91300..caf4d4df4bd3 100644
3078 +--- a/drivers/staging/comedi/drivers/dt3000.c
3079 ++++ b/drivers/staging/comedi/drivers/dt3000.c
3080 +@@ -342,9 +342,9 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
3081 + static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
3082 + unsigned int flags)
3083 + {
3084 +- int divider, base, prescale;
3085 ++ unsigned int divider, base, prescale;
3086 +
3087 +- /* This function needs improvment */
3088 ++ /* This function needs improvement */
3089 + /* Don't know if divider==0 works. */
3090 +
3091 + for (prescale = 0; prescale < 16; prescale++) {
3092 +@@ -358,7 +358,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
3093 + divider = (*nanosec) / base;
3094 + break;
3095 + case CMDF_ROUND_UP:
3096 +- divider = (*nanosec) / base;
3097 ++ divider = DIV_ROUND_UP(*nanosec, base);
3098 + break;
3099 + }
3100 + if (divider < 65536) {
3101 +@@ -368,7 +368,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
3102 + }
3103 +
3104 + prescale = 15;
3105 +- base = timer_base * (1 << prescale);
3106 ++ base = timer_base * (prescale + 1);
3107 + divider = 65535;
3108 + *nanosec = divider * base;
3109 + return (prescale << 16) | (divider);
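Two distinct fixes land in this function. The CMDF_ROUND_UP change matters because C integer division truncates toward zero; a worked example with illustrative values:

    /* base = 100 ns, requested period = 1001 ns:
     *   rounding down: 1001 / 100              -> 10 ticks = 1000 ns (short)
     *   rounding up:   DIV_ROUND_UP(1001, 100) -> 11 ticks = 1100 ns (>= request)
     * DIV_ROUND_UP(n, d) expands to (n + d - 1) / d, from <linux/kernel.h>.
     */

The fallback change assumes the loop above derives base as timer_base * (prescale + 1): the old 1 << prescale gave 32768 * timer_base at prescale 15, inconsistent with that linear formula and prone to overflow; switching the locals to unsigned also avoids signed-overflow undefined behaviour in the divider * base products.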
3110 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
3111 +index 183b41753c98..62f4fb9b362f 100644
3112 +--- a/drivers/usb/class/cdc-acm.c
3113 ++++ b/drivers/usb/class/cdc-acm.c
3114 +@@ -1301,10 +1301,6 @@ made_compressed_probe:
3115 + tty_port_init(&acm->port);
3116 + acm->port.ops = &acm_port_ops;
3117 +
3118 +- minor = acm_alloc_minor(acm);
3119 +- if (minor < 0)
3120 +- goto alloc_fail1;
3121 +-
3122 + ctrlsize = usb_endpoint_maxp(epctrl);
3123 + readsize = usb_endpoint_maxp(epread) *
3124 + (quirks == SINGLE_RX_URB ? 1 : 2);
3125 +@@ -1312,6 +1308,13 @@ made_compressed_probe:
3126 + acm->writesize = usb_endpoint_maxp(epwrite) * 20;
3127 + acm->control = control_interface;
3128 + acm->data = data_interface;
3129 ++
3130 ++ usb_get_intf(acm->control); /* undone in destruct() */
3131 ++
3132 ++ minor = acm_alloc_minor(acm);
3133 ++ if (minor < 0)
3134 ++ goto alloc_fail1;
3135 ++
3136 + acm->minor = minor;
3137 + acm->dev = usb_dev;
3138 + if (h.usb_cdc_acm_descriptor)
3139 +@@ -1458,7 +1461,6 @@ skip_countries:
3140 + usb_driver_claim_interface(&acm_driver, data_interface, acm);
3141 + usb_set_intfdata(data_interface, acm);
3142 +
3143 +- usb_get_intf(control_interface);
3144 + tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
3145 + &control_interface->dev);
3146 + if (IS_ERR(tty_dev)) {
3147 +diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
3148 +index 65de6f73b672..558890ada0e5 100644
3149 +--- a/drivers/usb/core/file.c
3150 ++++ b/drivers/usb/core/file.c
3151 +@@ -193,9 +193,10 @@ int usb_register_dev(struct usb_interface *intf,
3152 + intf->minor = minor;
3153 + break;
3154 + }
3155 +- up_write(&minor_rwsem);
3156 +- if (intf->minor < 0)
3157 ++ if (intf->minor < 0) {
3158 ++ up_write(&minor_rwsem);
3159 + return -EXFULL;
3160 ++ }
3161 +
3162 + /* create a usb class device for this usb interface */
3163 + snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
3164 +@@ -203,12 +204,11 @@ int usb_register_dev(struct usb_interface *intf,
3165 + MKDEV(USB_MAJOR, minor), class_driver,
3166 + "%s", kbasename(name));
3167 + if (IS_ERR(intf->usb_dev)) {
3168 +- down_write(&minor_rwsem);
3169 + usb_minors[minor] = NULL;
3170 + intf->minor = -1;
3171 +- up_write(&minor_rwsem);
3172 + retval = PTR_ERR(intf->usb_dev);
3173 + }
3174 ++ up_write(&minor_rwsem);
3175 + return retval;
3176 + }
3177 + EXPORT_SYMBOL_GPL(usb_register_dev);
3178 +@@ -234,12 +234,12 @@ void usb_deregister_dev(struct usb_interface *intf,
3179 + return;
3180 +
3181 + dev_dbg(&intf->dev, "removing %d minor\n", intf->minor);
3182 ++ device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
3183 +
3184 + down_write(&minor_rwsem);
3185 + usb_minors[intf->minor] = NULL;
3186 + up_write(&minor_rwsem);
3187 +
3188 +- device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
3189 + intf->usb_dev = NULL;
3190 + intf->minor = -1;
3191 + destroy_usb_class();
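The locking changes above follow a publish-under-lock pattern: the usb_minors[] entry must not become visible to open() before the class device exists, it must be unpublished under the same write lock if device_create() fails, and on teardown device_destroy() now runs before the entry is cleared so the minor cannot be reused while the old device lingers. A sketch of the pattern with hypothetical names (not the core's actual code):

    #include <linux/rwsem.h>

    static DECLARE_RWSEM(table_rwsem);
    static void *table[16];

    int finish_construction(void *obj);     /* hypothetical, may fail */

    static int publish_entry(int slot, void *obj)
    {
            int err;

            down_write(&table_rwsem);
            table[slot] = obj;              /* publish */
            err = finish_construction(obj);
            if (err)
                    table[slot] = NULL;     /* unpublish before unlock */
            up_write(&table_rwsem);
            return err;
    }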
3192 +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
3193 +index 94d22551fc1b..82e41179fb2d 100644
3194 +--- a/drivers/usb/core/hcd.c
3195 ++++ b/drivers/usb/core/hcd.c
3196 +@@ -101,11 +101,6 @@ static DEFINE_SPINLOCK(hcd_urb_unlink_lock);
3197 + /* wait queue for synchronous unlinks */
3198 + DECLARE_WAIT_QUEUE_HEAD(usb_kill_urb_queue);
3199 +
3200 +-static inline int is_root_hub(struct usb_device *udev)
3201 +-{
3202 +- return (udev->parent == NULL);
3203 +-}
3204 +-
3205 + /*-------------------------------------------------------------------------*/
3206 +
3207 + /*
3208 +@@ -878,101 +873,6 @@ static int usb_rh_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
3209 + }
3210 +
3211 +
3212 +-
3213 +-/*
3214 +- * Show & store the current value of authorized_default
3215 +- */
3216 +-static ssize_t authorized_default_show(struct device *dev,
3217 +- struct device_attribute *attr, char *buf)
3218 +-{
3219 +- struct usb_device *rh_usb_dev = to_usb_device(dev);
3220 +- struct usb_bus *usb_bus = rh_usb_dev->bus;
3221 +- struct usb_hcd *hcd;
3222 +-
3223 +- hcd = bus_to_hcd(usb_bus);
3224 +- return snprintf(buf, PAGE_SIZE, "%u\n", hcd->dev_policy);
3225 +-}
3226 +-
3227 +-static ssize_t authorized_default_store(struct device *dev,
3228 +- struct device_attribute *attr,
3229 +- const char *buf, size_t size)
3230 +-{
3231 +- ssize_t result;
3232 +- unsigned val;
3233 +- struct usb_device *rh_usb_dev = to_usb_device(dev);
3234 +- struct usb_bus *usb_bus = rh_usb_dev->bus;
3235 +- struct usb_hcd *hcd;
3236 +-
3237 +- hcd = bus_to_hcd(usb_bus);
3238 +- result = sscanf(buf, "%u\n", &val);
3239 +- if (result == 1) {
3240 +- hcd->dev_policy = val <= USB_DEVICE_AUTHORIZE_INTERNAL ?
3241 +- val : USB_DEVICE_AUTHORIZE_ALL;
3242 +- result = size;
3243 +- } else {
3244 +- result = -EINVAL;
3245 +- }
3246 +- return result;
3247 +-}
3248 +-static DEVICE_ATTR_RW(authorized_default);
3249 +-
3250 +-/*
3251 +- * interface_authorized_default_show - show default authorization status
3252 +- * for USB interfaces
3253 +- *
3254 +- * note: interface_authorized_default is the default value
3255 +- * for initializing the authorized attribute of interfaces
3256 +- */
3257 +-static ssize_t interface_authorized_default_show(struct device *dev,
3258 +- struct device_attribute *attr, char *buf)
3259 +-{
3260 +- struct usb_device *usb_dev = to_usb_device(dev);
3261 +- struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus);
3262 +-
3263 +- return sprintf(buf, "%u\n", !!HCD_INTF_AUTHORIZED(hcd));
3264 +-}
3265 +-
3266 +-/*
3267 +- * interface_authorized_default_store - store default authorization status
3268 +- * for USB interfaces
3269 +- *
3270 +- * note: interface_authorized_default is the default value
3271 +- * for initializing the authorized attribute of interfaces
3272 +- */
3273 +-static ssize_t interface_authorized_default_store(struct device *dev,
3274 +- struct device_attribute *attr, const char *buf, size_t count)
3275 +-{
3276 +- struct usb_device *usb_dev = to_usb_device(dev);
3277 +- struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus);
3278 +- int rc = count;
3279 +- bool val;
3280 +-
3281 +- if (strtobool(buf, &val) != 0)
3282 +- return -EINVAL;
3283 +-
3284 +- if (val)
3285 +- set_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
3286 +- else
3287 +- clear_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
3288 +-
3289 +- return rc;
3290 +-}
3291 +-static DEVICE_ATTR_RW(interface_authorized_default);
3292 +-
3293 +-/* Group all the USB bus attributes */
3294 +-static struct attribute *usb_bus_attrs[] = {
3295 +- &dev_attr_authorized_default.attr,
3296 +- &dev_attr_interface_authorized_default.attr,
3297 +- NULL,
3298 +-};
3299 +-
3300 +-static const struct attribute_group usb_bus_attr_group = {
3301 +- .name = NULL, /* we want them in the same directory */
3302 +- .attrs = usb_bus_attrs,
3303 +-};
3304 +-
3305 +-
3306 +-
3307 + /*-------------------------------------------------------------------------*/
3308 +
3309 + /**
3310 +@@ -2895,32 +2795,11 @@ int usb_add_hcd(struct usb_hcd *hcd,
3311 + if (retval != 0)
3312 + goto err_register_root_hub;
3313 +
3314 +- retval = sysfs_create_group(&rhdev->dev.kobj, &usb_bus_attr_group);
3315 +- if (retval < 0) {
3316 +- printk(KERN_ERR "Cannot register USB bus sysfs attributes: %d\n",
3317 +- retval);
3318 +- goto error_create_attr_group;
3319 +- }
3320 + if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
3321 + usb_hcd_poll_rh_status(hcd);
3322 +
3323 + return retval;
3324 +
3325 +-error_create_attr_group:
3326 +- clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
3327 +- if (HC_IS_RUNNING(hcd->state))
3328 +- hcd->state = HC_STATE_QUIESCING;
3329 +- spin_lock_irq(&hcd_root_hub_lock);
3330 +- hcd->rh_registered = 0;
3331 +- spin_unlock_irq(&hcd_root_hub_lock);
3332 +-
3333 +-#ifdef CONFIG_PM
3334 +- cancel_work_sync(&hcd->wakeup_work);
3335 +-#endif
3336 +- cancel_work_sync(&hcd->died_work);
3337 +- mutex_lock(&usb_bus_idr_lock);
3338 +- usb_disconnect(&rhdev); /* Sets rhdev to NULL */
3339 +- mutex_unlock(&usb_bus_idr_lock);
3340 + err_register_root_hub:
3341 + hcd->rh_pollable = 0;
3342 + clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
3343 +@@ -2964,8 +2843,6 @@ void usb_remove_hcd(struct usb_hcd *hcd)
3344 + dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
3345 +
3346 + usb_get_dev(rhdev);
3347 +- sysfs_remove_group(&rhdev->dev.kobj, &usb_bus_attr_group);
3348 +-
3349 + clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
3350 + if (HC_IS_RUNNING (hcd->state))
3351 + hcd->state = HC_STATE_QUIESCING;
3352 +diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
3353 +index e844bb7b5676..5adf489428aa 100644
3354 +--- a/drivers/usb/core/message.c
3355 ++++ b/drivers/usb/core/message.c
3356 +@@ -2218,14 +2218,14 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
3357 + (struct usb_cdc_dmm_desc *)buffer;
3358 + break;
3359 + case USB_CDC_MDLM_TYPE:
3360 +- if (elength < sizeof(struct usb_cdc_mdlm_desc *))
3361 ++ if (elength < sizeof(struct usb_cdc_mdlm_desc))
3362 + goto next_desc;
3363 + if (desc)
3364 + return -EINVAL;
3365 + desc = (struct usb_cdc_mdlm_desc *)buffer;
3366 + break;
3367 + case USB_CDC_MDLM_DETAIL_TYPE:
3368 +- if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
3369 ++ if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
3370 + goto next_desc;
3371 + if (detail)
3372 + return -EINVAL;
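Both sizeof fixes above correct the same bug class: sizeof applied to a pointer type measures the pointer, not the structure it points to.

    /* On a 64-bit build:
     *   sizeof(struct usb_cdc_mdlm_desc *) == 8 (width of a pointer)
     *   sizeof(struct usb_cdc_mdlm_desc)   == the full descriptor size
     * The old check therefore accepted truncated descriptors, and the
     * cast that follows could read past the end of the buffer.
     */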
3373 +diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
3374 +index 7e88fdfe3cf5..f19694e69f5c 100644
3375 +--- a/drivers/usb/core/sysfs.c
3376 ++++ b/drivers/usb/core/sysfs.c
3377 +@@ -15,6 +15,7 @@
3378 + #include <linux/kernel.h>
3379 + #include <linux/string.h>
3380 + #include <linux/usb.h>
3381 ++#include <linux/usb/hcd.h>
3382 + #include <linux/usb/quirks.h>
3383 + #include <linux/of.h>
3384 + #include "usb.h"
3385 +@@ -922,6 +923,116 @@ static struct bin_attribute dev_bin_attr_descriptors = {
3386 + .size = 18 + 65535, /* dev descr + max-size raw descriptor */
3387 + };
3388 +
3389 ++/*
3390 ++ * Show & store the current value of authorized_default
3391 ++ */
3392 ++static ssize_t authorized_default_show(struct device *dev,
3393 ++ struct device_attribute *attr, char *buf)
3394 ++{
3395 ++ struct usb_device *rh_usb_dev = to_usb_device(dev);
3396 ++ struct usb_bus *usb_bus = rh_usb_dev->bus;
3397 ++ struct usb_hcd *hcd;
3398 ++
3399 ++ hcd = bus_to_hcd(usb_bus);
3400 ++ return snprintf(buf, PAGE_SIZE, "%u\n", hcd->dev_policy);
3401 ++}
3402 ++
3403 ++static ssize_t authorized_default_store(struct device *dev,
3404 ++ struct device_attribute *attr,
3405 ++ const char *buf, size_t size)
3406 ++{
3407 ++ ssize_t result;
3408 ++ unsigned int val;
3409 ++ struct usb_device *rh_usb_dev = to_usb_device(dev);
3410 ++ struct usb_bus *usb_bus = rh_usb_dev->bus;
3411 ++ struct usb_hcd *hcd;
3412 ++
3413 ++ hcd = bus_to_hcd(usb_bus);
3414 ++ result = sscanf(buf, "%u\n", &val);
3415 ++ if (result == 1) {
3416 ++ hcd->dev_policy = val <= USB_DEVICE_AUTHORIZE_INTERNAL ?
3417 ++ val : USB_DEVICE_AUTHORIZE_ALL;
3418 ++ result = size;
3419 ++ } else {
3420 ++ result = -EINVAL;
3421 ++ }
3422 ++ return result;
3423 ++}
3424 ++static DEVICE_ATTR_RW(authorized_default);
3425 ++
3426 ++/*
3427 ++ * interface_authorized_default_show - show default authorization status
3428 ++ * for USB interfaces
3429 ++ *
3430 ++ * note: interface_authorized_default is the default value
3431 ++ * for initializing the authorized attribute of interfaces
3432 ++ */
3433 ++static ssize_t interface_authorized_default_show(struct device *dev,
3434 ++ struct device_attribute *attr, char *buf)
3435 ++{
3436 ++ struct usb_device *usb_dev = to_usb_device(dev);
3437 ++ struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus);
3438 ++
3439 ++ return sprintf(buf, "%u\n", !!HCD_INTF_AUTHORIZED(hcd));
3440 ++}
3441 ++
3442 ++/*
3443 ++ * interface_authorized_default_store - store default authorization status
3444 ++ * for USB interfaces
3445 ++ *
3446 ++ * note: interface_authorized_default is the default value
3447 ++ * for initializing the authorized attribute of interfaces
3448 ++ */
3449 ++static ssize_t interface_authorized_default_store(struct device *dev,
3450 ++ struct device_attribute *attr, const char *buf, size_t count)
3451 ++{
3452 ++ struct usb_device *usb_dev = to_usb_device(dev);
3453 ++ struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus);
3454 ++ int rc = count;
3455 ++ bool val;
3456 ++
3457 ++ if (strtobool(buf, &val) != 0)
3458 ++ return -EINVAL;
3459 ++
3460 ++ if (val)
3461 ++ set_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
3462 ++ else
3463 ++ clear_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
3464 ++
3465 ++ return rc;
3466 ++}
3467 ++static DEVICE_ATTR_RW(interface_authorized_default);
3468 ++
3469 ++/* Group all the USB bus attributes */
3470 ++static struct attribute *usb_bus_attrs[] = {
3471 ++ &dev_attr_authorized_default.attr,
3472 ++ &dev_attr_interface_authorized_default.attr,
3473 ++ NULL,
3474 ++};
3475 ++
3476 ++static const struct attribute_group usb_bus_attr_group = {
3477 ++ .name = NULL, /* we want them in the same directory */
3478 ++ .attrs = usb_bus_attrs,
3479 ++};
3480 ++
3481 ++
3482 ++static int add_default_authorized_attributes(struct device *dev)
3483 ++{
3484 ++ int rc = 0;
3485 ++
3486 ++ if (is_usb_device(dev))
3487 ++ rc = sysfs_create_group(&dev->kobj, &usb_bus_attr_group);
3488 ++
3489 ++ return rc;
3490 ++}
3491 ++
3492 ++static void remove_default_authorized_attributes(struct device *dev)
3493 ++{
3494 ++ if (is_usb_device(dev)) {
3495 ++ sysfs_remove_group(&dev->kobj, &usb_bus_attr_group);
3496 ++ }
3497 ++}
3498 ++
3499 + int usb_create_sysfs_dev_files(struct usb_device *udev)
3500 + {
3501 + struct device *dev = &udev->dev;
3502 +@@ -938,7 +1049,14 @@ int usb_create_sysfs_dev_files(struct usb_device *udev)
3503 + retval = add_power_attributes(dev);
3504 + if (retval)
3505 + goto error;
3506 ++
3507 ++ if (is_root_hub(udev)) {
3508 ++ retval = add_default_authorized_attributes(dev);
3509 ++ if (retval)
3510 ++ goto error;
3511 ++ }
3512 + return retval;
3513 ++
3514 + error:
3515 + usb_remove_sysfs_dev_files(udev);
3516 + return retval;
3517 +@@ -948,6 +1066,9 @@ void usb_remove_sysfs_dev_files(struct usb_device *udev)
3518 + {
3519 + struct device *dev = &udev->dev;
3520 +
3521 ++ if (is_root_hub(udev))
3522 ++ remove_default_authorized_attributes(dev);
3523 ++
3524 + remove_power_attributes(dev);
3525 + remove_persist_attributes(dev);
3526 + device_remove_bin_file(dev, &dev_bin_attr_descriptors);
3527 +diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
3528 +index d95a5358f73d..d5ac492f441b 100644
3529 +--- a/drivers/usb/core/usb.h
3530 ++++ b/drivers/usb/core/usb.h
3531 +@@ -153,6 +153,11 @@ static inline int is_usb_port(const struct device *dev)
3532 + return dev->type == &usb_port_device_type;
3533 + }
3534 +
3535 ++static inline int is_root_hub(struct usb_device *udev)
3536 ++{
3537 ++ return (udev->parent == NULL);
3538 ++}
3539 ++
3540 + /* Do the same for device drivers and interface drivers. */
3541 +
3542 + static inline int is_usb_device_driver(struct device_driver *drv)
3543 +diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
3544 +index 7dc248546fd4..b6eec81b6a40 100644
3545 +--- a/drivers/usb/gadget/udc/renesas_usb3.c
3546 ++++ b/drivers/usb/gadget/udc/renesas_usb3.c
3547 +@@ -19,6 +19,7 @@
3548 + #include <linux/pm_runtime.h>
3549 + #include <linux/sizes.h>
3550 + #include <linux/slab.h>
3551 ++#include <linux/string.h>
3552 + #include <linux/sys_soc.h>
3553 + #include <linux/uaccess.h>
3554 + #include <linux/usb/ch9.h>
3555 +@@ -2378,9 +2379,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
3556 + if (usb3->forced_b_device)
3557 + return -EBUSY;
3558 +
3559 +- if (!strncmp(buf, "host", strlen("host")))
3560 ++ if (sysfs_streq(buf, "host"))
3561 + new_mode_is_host = true;
3562 +- else if (!strncmp(buf, "peripheral", strlen("peripheral")))
3563 ++ else if (sysfs_streq(buf, "peripheral"))
3564 + new_mode_is_host = false;
3565 + else
3566 + return -EINVAL;
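sysfs_streq() is the right comparison for a sysfs store callback: it demands a whole-token match and tolerates the trailing newline that echo appends, whereas the old prefix comparison was too permissive:

    /* strncmp(buf, "host", strlen("host")) == 0 for all of:
     *   "host", "host\n", "hostile", "hostname"
     * sysfs_streq(buf, "host") is true only for:
     *   "host", "host\n"
     */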
3567 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3568 +index c1582fbd1150..38e920ac7f82 100644
3569 +--- a/drivers/usb/serial/option.c
3570 ++++ b/drivers/usb/serial/option.c
3571 +@@ -968,6 +968,11 @@ static const struct usb_device_id option_ids[] = {
3572 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
3573 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
3574 +
3575 ++ /* Motorola devices */
3576 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) }, /* mdm6600 */
3577 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) }, /* mdm9600 */
3578 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) }, /* mdm ram dl */
3579 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) }, /* mdm qc dl */
3580 +
3581 + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
3582 + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
3583 +@@ -1549,6 +1554,7 @@ static const struct usb_device_id option_ids[] = {
3584 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
3585 + .driver_info = RSVD(2) },
3586 + { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
3587 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
3588 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
3589 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
3590 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
3591 +@@ -1952,11 +1958,15 @@ static const struct usb_device_id option_ids[] = {
3592 + .driver_info = RSVD(4) },
3593 + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
3594 + .driver_info = RSVD(4) },
3595 ++ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff), /* D-Link DWM-222 A2 */
3596 ++ .driver_info = RSVD(4) },
3597 + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
3598 + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
3599 + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
3600 + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
3601 + .driver_info = RSVD(4) },
3602 ++ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */
3603 ++ .driver_info = RSVD(4) },
3604 + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
3605 + { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
3606 + { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
3607 +diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
3608 +index 73427d8e0116..e5694133ebe5 100644
3609 +--- a/drivers/xen/xen-pciback/conf_space_capability.c
3610 ++++ b/drivers/xen/xen-pciback/conf_space_capability.c
3611 +@@ -116,13 +116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
3612 + {
3613 + int err;
3614 + u16 old_value;
3615 +- pci_power_t new_state, old_state;
3616 ++ pci_power_t new_state;
3617 +
3618 + err = pci_read_config_word(dev, offset, &old_value);
3619 + if (err)
3620 + goto out;
3621 +
3622 +- old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
3623 + new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
3624 +
3625 + new_value &= PM_OK_BITS;
3626 +diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
3627 +index 982152d3f920..69f8ab4d91f2 100644
3628 +--- a/fs/btrfs/backref.c
3629 ++++ b/fs/btrfs/backref.c
3630 +@@ -1488,7 +1488,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
3631 + goto out;
3632 + }
3633 +
3634 +- trans = btrfs_attach_transaction(root);
3635 ++ trans = btrfs_join_transaction_nostart(root);
3636 + if (IS_ERR(trans)) {
3637 + if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
3638 + ret = PTR_ERR(trans);
3639 +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
3640 +index 1aa3f6d6d775..2db14fdd6bff 100644
3641 +--- a/fs/btrfs/transaction.c
3642 ++++ b/fs/btrfs/transaction.c
3643 +@@ -28,15 +28,18 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
3644 + [TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH),
3645 + [TRANS_STATE_COMMIT_DOING] = (__TRANS_START |
3646 + __TRANS_ATTACH |
3647 +- __TRANS_JOIN),
3648 ++ __TRANS_JOIN |
3649 ++ __TRANS_JOIN_NOSTART),
3650 + [TRANS_STATE_UNBLOCKED] = (__TRANS_START |
3651 + __TRANS_ATTACH |
3652 + __TRANS_JOIN |
3653 +- __TRANS_JOIN_NOLOCK),
3654 ++ __TRANS_JOIN_NOLOCK |
3655 ++ __TRANS_JOIN_NOSTART),
3656 + [TRANS_STATE_COMPLETED] = (__TRANS_START |
3657 + __TRANS_ATTACH |
3658 + __TRANS_JOIN |
3659 +- __TRANS_JOIN_NOLOCK),
3660 ++ __TRANS_JOIN_NOLOCK |
3661 ++ __TRANS_JOIN_NOSTART),
3662 + };
3663 +
3664 + void btrfs_put_transaction(struct btrfs_transaction *transaction)
3665 +@@ -525,7 +528,8 @@ again:
3666 + ret = join_transaction(fs_info, type);
3667 + if (ret == -EBUSY) {
3668 + wait_current_trans(fs_info);
3669 +- if (unlikely(type == TRANS_ATTACH))
3670 ++ if (unlikely(type == TRANS_ATTACH ||
3671 ++ type == TRANS_JOIN_NOSTART))
3672 + ret = -ENOENT;
3673 + }
3674 + } while (ret == -EBUSY);
3675 +@@ -641,6 +645,16 @@ struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root
3676 + BTRFS_RESERVE_NO_FLUSH, true);
3677 + }
3678 +
3679 ++/*
3680 ++ * Similar to regular join but it never starts a transaction when none is
3681 ++ * running or after waiting for the current one to finish.
3682 ++ */
3683 ++struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
3684 ++{
3685 ++ return start_transaction(root, 0, TRANS_JOIN_NOSTART,
3686 ++ BTRFS_RESERVE_NO_FLUSH, true);
3687 ++}
3688 ++
3689 + /*
3690 + * btrfs_attach_transaction() - catch the running transaction
3691 + *
3692 +diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
3693 +index 78c446c222b7..2f695587f828 100644
3694 +--- a/fs/btrfs/transaction.h
3695 ++++ b/fs/btrfs/transaction.h
3696 +@@ -94,11 +94,13 @@ struct btrfs_transaction {
3697 + #define __TRANS_JOIN (1U << 11)
3698 + #define __TRANS_JOIN_NOLOCK (1U << 12)
3699 + #define __TRANS_DUMMY (1U << 13)
3700 ++#define __TRANS_JOIN_NOSTART (1U << 14)
3701 +
3702 + #define TRANS_START (__TRANS_START | __TRANS_FREEZABLE)
3703 + #define TRANS_ATTACH (__TRANS_ATTACH)
3704 + #define TRANS_JOIN (__TRANS_JOIN | __TRANS_FREEZABLE)
3705 + #define TRANS_JOIN_NOLOCK (__TRANS_JOIN_NOLOCK)
3706 ++#define TRANS_JOIN_NOSTART (__TRANS_JOIN_NOSTART)
3707 +
3708 + #define TRANS_EXTWRITERS (__TRANS_START | __TRANS_ATTACH)
3709 +
3710 +@@ -183,6 +185,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
3711 + int min_factor);
3712 + struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
3713 + struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
3714 ++struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
3715 + struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
3716 + struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
3717 + struct btrfs_root *root);
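A hedged usage sketch of the new join flavour, mirroring the backref.c caller earlier in this patch: it joins a transaction that is already running but, unlike a plain join, reports -ENOENT instead of starting one when nothing is running.

    trans = btrfs_join_transaction_nostart(root);
    if (IS_ERR(trans)) {
            /* -ENOENT means no transaction was running; callers that
             * only need to synchronise with a commit can treat that
             * as success. */
            if (PTR_ERR(trans) == -ENOENT)
                    trans = NULL;
            else
                    return PTR_ERR(trans);
    }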
3718 +diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
3719 +index 963fb4571fd9..bb6fd5a506d3 100644
3720 +--- a/fs/f2fs/gc.c
3721 ++++ b/fs/f2fs/gc.c
3722 +@@ -794,6 +794,29 @@ static int move_data_block(struct inode *inode, block_t bidx,
3723 + if (lfs_mode)
3724 + down_write(&fio.sbi->io_order_lock);
3725 +
3726 ++ mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
3727 ++ fio.old_blkaddr, false);
3728 ++ if (!mpage)
3729 ++ goto up_out;
3730 ++
3731 ++ fio.encrypted_page = mpage;
3732 ++
3733 ++ /* read source block in mpage */
3734 ++ if (!PageUptodate(mpage)) {
3735 ++ err = f2fs_submit_page_bio(&fio);
3736 ++ if (err) {
3737 ++ f2fs_put_page(mpage, 1);
3738 ++ goto up_out;
3739 ++ }
3740 ++ lock_page(mpage);
3741 ++ if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
3742 ++ !PageUptodate(mpage))) {
3743 ++ err = -EIO;
3744 ++ f2fs_put_page(mpage, 1);
3745 ++ goto up_out;
3746 ++ }
3747 ++ }
3748 ++
3749 + f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
3750 + &sum, CURSEG_COLD_DATA, NULL, false);
3751 +
3752 +@@ -801,44 +824,18 @@ static int move_data_block(struct inode *inode, block_t bidx,
3753 + newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
3754 + if (!fio.encrypted_page) {
3755 + err = -ENOMEM;
3756 +- goto recover_block;
3757 +- }
3758 +-
3759 +- mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
3760 +- fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
3761 +- if (mpage) {
3762 +- bool updated = false;
3763 +-
3764 +- if (PageUptodate(mpage)) {
3765 +- memcpy(page_address(fio.encrypted_page),
3766 +- page_address(mpage), PAGE_SIZE);
3767 +- updated = true;
3768 +- }
3769 + f2fs_put_page(mpage, 1);
3770 +- invalidate_mapping_pages(META_MAPPING(fio.sbi),
3771 +- fio.old_blkaddr, fio.old_blkaddr);
3772 +- if (updated)
3773 +- goto write_page;
3774 +- }
3775 +-
3776 +- err = f2fs_submit_page_bio(&fio);
3777 +- if (err)
3778 +- goto put_page_out;
3779 +-
3780 +- /* write page */
3781 +- lock_page(fio.encrypted_page);
3782 +-
3783 +- if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
3784 +- err = -EIO;
3785 +- goto put_page_out;
3786 +- }
3787 +- if (unlikely(!PageUptodate(fio.encrypted_page))) {
3788 +- err = -EIO;
3789 +- goto put_page_out;
3790 ++ goto recover_block;
3791 + }
3792 +
3793 +-write_page:
3794 ++ /* write target block */
3795 + f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
3796 ++ memcpy(page_address(fio.encrypted_page),
3797 ++ page_address(mpage), PAGE_SIZE);
3798 ++ f2fs_put_page(mpage, 1);
3799 ++ invalidate_mapping_pages(META_MAPPING(fio.sbi),
3800 ++ fio.old_blkaddr, fio.old_blkaddr);
3801 ++
3802 + set_page_dirty(fio.encrypted_page);
3803 + if (clear_page_dirty_for_io(fio.encrypted_page))
3804 + dec_page_count(fio.sbi, F2FS_DIRTY_META);
3805 +@@ -869,11 +866,12 @@ write_page:
3806 + put_page_out:
3807 + f2fs_put_page(fio.encrypted_page, 1);
3808 + recover_block:
3809 +- if (lfs_mode)
3810 +- up_write(&fio.sbi->io_order_lock);
3811 + if (err)
3812 + f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
3813 + true, true);
3814 ++up_out:
3815 ++ if (lfs_mode)
3816 ++ up_write(&fio.sbi->io_order_lock);
3817 + put_out:
3818 + f2fs_put_dnode(&dn);
3819 + out:
3820 +diff --git a/fs/io_uring.c b/fs/io_uring.c
3821 +index 3e887a09533b..61018559e8fe 100644
3822 +--- a/fs/io_uring.c
3823 ++++ b/fs/io_uring.c
3824 +@@ -1032,10 +1032,8 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
3825 +
3826 + iter->bvec = bvec + seg_skip;
3827 + iter->nr_segs -= seg_skip;
3828 +- iter->count -= (seg_skip << PAGE_SHIFT);
3829 ++ iter->count -= bvec->bv_len + offset;
3830 + iter->iov_offset = offset & ~PAGE_MASK;
3831 +- if (iter->iov_offset)
3832 +- iter->count -= iter->iov_offset;
3833 + }
3834 + }
3835 +
3836 +diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
3837 +index 385f3aaa2448..90c830e3758e 100644
3838 +--- a/fs/ocfs2/xattr.c
3839 ++++ b/fs/ocfs2/xattr.c
3840 +@@ -3825,7 +3825,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
3841 + u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
3842 + int low_bucket = 0, bucket, high_bucket;
3843 + struct ocfs2_xattr_bucket *search;
3844 +- u32 last_hash;
3845 + u64 blkno, lower_blkno = 0;
3846 +
3847 + search = ocfs2_xattr_bucket_new(inode);
3848 +@@ -3869,8 +3868,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
3849 + if (xh->xh_count)
3850 + xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
3851 +
3852 +- last_hash = le32_to_cpu(xe->xe_name_hash);
3853 +-
3854 + /* record lower_blkno which may be the insert place. */
3855 + lower_blkno = blkno;
3856 +
3857 +diff --git a/fs/seq_file.c b/fs/seq_file.c
3858 +index abe27ec43176..225bf9239b32 100644
3859 +--- a/fs/seq_file.c
3860 ++++ b/fs/seq_file.c
3861 +@@ -119,6 +119,7 @@ static int traverse(struct seq_file *m, loff_t offset)
3862 + }
3863 + if (seq_has_overflowed(m))
3864 + goto Eoverflow;
3865 ++ p = m->op->next(m, p, &m->index);
3866 + if (pos + m->count > offset) {
3867 + m->from = offset - pos;
3868 + m->count -= m->from;
3869 +@@ -126,7 +127,6 @@ static int traverse(struct seq_file *m, loff_t offset)
3870 + }
3871 + pos += m->count;
3872 + m->count = 0;
3873 +- p = m->op->next(m, p, &m->index);
3874 + if (pos == offset)
3875 + break;
3876 + }
3877 +diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
3878 +index c64bea7a52be..e9f20b813a69 100644
3879 +--- a/include/asm-generic/getorder.h
3880 ++++ b/include/asm-generic/getorder.h
3881 +@@ -7,24 +7,6 @@
3882 + #include <linux/compiler.h>
3883 + #include <linux/log2.h>
3884 +
3885 +-/*
3886 +- * Runtime evaluation of get_order()
3887 +- */
3888 +-static inline __attribute_const__
3889 +-int __get_order(unsigned long size)
3890 +-{
3891 +- int order;
3892 +-
3893 +- size--;
3894 +- size >>= PAGE_SHIFT;
3895 +-#if BITS_PER_LONG == 32
3896 +- order = fls(size);
3897 +-#else
3898 +- order = fls64(size);
3899 +-#endif
3900 +- return order;
3901 +-}
3902 +-
3903 + /**
3904 + * get_order - Determine the allocation order of a memory size
3905 + * @size: The size for which to get the order
3906 +@@ -43,19 +25,27 @@ int __get_order(unsigned long size)
3907 + * to hold an object of the specified size.
3908 + *
3909 + * The result is undefined if the size is 0.
3910 +- *
3911 +- * This function may be used to initialise variables with compile time
3912 +- * evaluations of constants.
3913 + */
3914 +-#define get_order(n) \
3915 +-( \
3916 +- __builtin_constant_p(n) ? ( \
3917 +- ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \
3918 +- (((n) < (1UL << PAGE_SHIFT)) ? 0 : \
3919 +- ilog2((n) - 1) - PAGE_SHIFT + 1) \
3920 +- ) : \
3921 +- __get_order(n) \
3922 +-)
3923 ++static inline __attribute_const__ int get_order(unsigned long size)
3924 ++{
3925 ++ if (__builtin_constant_p(size)) {
3926 ++ if (!size)
3927 ++ return BITS_PER_LONG - PAGE_SHIFT;
3928 ++
3929 ++ if (size < (1UL << PAGE_SHIFT))
3930 ++ return 0;
3931 ++
3932 ++ return ilog2((size) - 1) - PAGE_SHIFT + 1;
3933 ++ }
3934 ++
3935 ++ size--;
3936 ++ size >>= PAGE_SHIFT;
3937 ++#if BITS_PER_LONG == 32
3938 ++ return fls(size);
3939 ++#else
3940 ++ return fls64(size);
3941 ++#endif
3942 ++}
3943 +
3944 + #endif /* __ASSEMBLY__ */
3945 +
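A worked example of the rewritten helper, assuming PAGE_SHIFT == 12 (4 KiB pages):

    /*   get_order(1)    == 0   (fits in one page)
     *   get_order(4096) == 0
     *   get_order(4097) == 1   (needs two pages)
     *   get_order(8192) == 1
     *   get_order(8193) == 2   (needs three pages, rounded up to four)
     * With a constant argument the __builtin_constant_p() branch folds
     * at compile time, so get_order() can still initialise static data.
     */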
3946 +diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
3947 +index 1dda31825ec4..71283739ffd2 100644
3948 +--- a/include/linux/page-flags-layout.h
3949 ++++ b/include/linux/page-flags-layout.h
3950 +@@ -32,6 +32,7 @@
3951 +
3952 + #endif /* CONFIG_SPARSEMEM */
3953 +
3954 ++#ifndef BUILD_VDSO32_64
3955 + /*
3956 + * page->flags layout:
3957 + *
3958 +@@ -76,20 +77,22 @@
3959 + #define LAST_CPUPID_SHIFT 0
3960 + #endif
3961 +
3962 +-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
3963 ++#ifdef CONFIG_KASAN_SW_TAGS
3964 ++#define KASAN_TAG_WIDTH 8
3965 ++#else
3966 ++#define KASAN_TAG_WIDTH 0
3967 ++#endif
3968 ++
3969 ++#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \
3970 ++ <= BITS_PER_LONG - NR_PAGEFLAGS
3971 + #define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
3972 + #else
3973 + #define LAST_CPUPID_WIDTH 0
3974 + #endif
3975 +
3976 +-#ifdef CONFIG_KASAN_SW_TAGS
3977 +-#define KASAN_TAG_WIDTH 8
3978 + #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
3979 + > BITS_PER_LONG - NR_PAGEFLAGS
3980 +-#error "KASAN: not enough bits in page flags for tag"
3981 +-#endif
3982 +-#else
3983 +-#define KASAN_TAG_WIDTH 0
3984 ++#error "Not enough bits in page flags"
3985 + #endif
3986 +
3987 + /*
3988 +@@ -104,4 +107,5 @@
3989 + #define LAST_CPUPID_NOT_IN_PAGE_FLAGS
3990 + #endif
3991 +
3992 ++#endif
3993 + #endif /* _LINUX_PAGE_FLAGS_LAYOUT */
3994 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
3995 +index 056f557d5194..64fa59b2c8d5 100644
3996 +--- a/include/linux/skbuff.h
3997 ++++ b/include/linux/skbuff.h
3998 +@@ -1358,6 +1358,14 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
3999 + to->l4_hash = from->l4_hash;
4000 + };
4001 +
4002 ++static inline void skb_copy_decrypted(struct sk_buff *to,
4003 ++ const struct sk_buff *from)
4004 ++{
4005 ++#ifdef CONFIG_TLS_DEVICE
4006 ++ to->decrypted = from->decrypted;
4007 ++#endif
4008 ++}
4009 ++
4010 + #ifdef NET_SKBUFF_DATA_USES_OFFSET
4011 + static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
4012 + {
4013 +diff --git a/include/linux/socket.h b/include/linux/socket.h
4014 +index b57cd8bf96e2..810d5ec0ada3 100644
4015 +--- a/include/linux/socket.h
4016 ++++ b/include/linux/socket.h
4017 +@@ -291,6 +291,9 @@ struct ucred {
4018 + #define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
4019 + #define MSG_EOF MSG_FIN
4020 + #define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */
4021 ++#define MSG_SENDPAGE_DECRYPTED 0x100000 /* sendpage() internal : page may carry
4022 ++ * plain text and require encryption
4023 ++ */
4024 +
4025 + #define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */
4026 + #define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
4027 +diff --git a/include/net/netlink.h b/include/net/netlink.h
4028 +index 395b4406f4b0..222af2046086 100644
4029 +--- a/include/net/netlink.h
4030 ++++ b/include/net/netlink.h
4031 +@@ -680,9 +680,8 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
4032 + const struct nla_policy *policy,
4033 + struct netlink_ext_ack *extack)
4034 + {
4035 +- return __nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
4036 +- nlmsg_attrlen(nlh, hdrlen), policy,
4037 +- NL_VALIDATE_STRICT, extack);
4038 ++ return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy,
4039 ++ NL_VALIDATE_STRICT, extack);
4040 + }
4041 +
4042 + /**
4043 +diff --git a/include/net/sock.h b/include/net/sock.h
4044 +index 6cbc16136357..526de911cd91 100644
4045 +--- a/include/net/sock.h
4046 ++++ b/include/net/sock.h
4047 +@@ -2482,6 +2482,7 @@ static inline bool sk_fullsock(const struct sock *sk)
4048 +
4049 + /* Checks if this SKB belongs to an HW offloaded socket
4050 + * and whether any SW fallbacks are required based on dev.
4051 ++ * Check decrypted mark in case skb_orphan() cleared socket.
4052 + */
4053 + static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
4054 + struct net_device *dev)
4055 +@@ -2489,8 +2490,15 @@ static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
4056 + #ifdef CONFIG_SOCK_VALIDATE_XMIT
4057 + struct sock *sk = skb->sk;
4058 +
4059 +- if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb)
4060 ++ if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
4061 + skb = sk->sk_validate_xmit_skb(sk, dev, skb);
4062 ++#ifdef CONFIG_TLS_DEVICE
4063 ++ } else if (unlikely(skb->decrypted)) {
4064 ++ pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
4065 ++ kfree_skb(skb);
4066 ++ skb = NULL;
4067 ++#endif
4068 ++ }
4069 + #endif
4070 +
4071 + return skb;
4072 +diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h
4073 +index 2212adda8f77..64e92d56c6a8 100644
4074 +--- a/include/trace/events/dma_fence.h
4075 ++++ b/include/trace/events/dma_fence.h
4076 +@@ -2,7 +2,7 @@
4077 + #undef TRACE_SYSTEM
4078 + #define TRACE_SYSTEM dma_fence
4079 +
4080 +-#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
4081 ++#if !defined(_TRACE_DMA_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
4082 + #define _TRACE_DMA_FENCE_H
4083 +
4084 + #include <linux/tracepoint.h>
4085 +diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h
4086 +index f3a12566bed0..6678cf8b235b 100644
4087 +--- a/include/trace/events/napi.h
4088 ++++ b/include/trace/events/napi.h
4089 +@@ -3,7 +3,7 @@
4090 + #define TRACE_SYSTEM napi
4091 +
4092 + #if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ)
4093 +-#define _TRACE_NAPI_H_
4094 ++#define _TRACE_NAPI_H
4095 +
4096 + #include <linux/netdevice.h>
4097 + #include <linux/tracepoint.h>
4098 +@@ -38,7 +38,7 @@ TRACE_EVENT(napi_poll,
4099 +
4100 + #undef NO_DEV
4101 +
4102 +-#endif /* _TRACE_NAPI_H_ */
4103 ++#endif /* _TRACE_NAPI_H */
4104 +
4105 + /* This part must be outside protection */
4106 + #include <trace/define_trace.h>
4107 +diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
4108 +index 60d0d8bd336d..0d1a9ebf55ba 100644
4109 +--- a/include/trace/events/qdisc.h
4110 ++++ b/include/trace/events/qdisc.h
4111 +@@ -2,7 +2,7 @@
4112 + #define TRACE_SYSTEM qdisc
4113 +
4114 + #if !defined(_TRACE_QDISC_H) || defined(TRACE_HEADER_MULTI_READ)
4115 +-#define _TRACE_QDISC_H_
4116 ++#define _TRACE_QDISC_H
4117 +
4118 + #include <linux/skbuff.h>
4119 + #include <linux/netdevice.h>
4120 +@@ -44,7 +44,7 @@ TRACE_EVENT(qdisc_dequeue,
4121 + __entry->txq_state, __entry->packets, __entry->skbaddr )
4122 + );
4123 +
4124 +-#endif /* _TRACE_QDISC_H_ */
4125 ++#endif /* _TRACE_QDISC_H */
4126 +
4127 + /* This part must be outside protection */
4128 + #include <trace/define_trace.h>
4129 +diff --git a/include/trace/events/tegra_apb_dma.h b/include/trace/events/tegra_apb_dma.h
4130 +index 0818f6286110..971cd02d2daf 100644
4131 +--- a/include/trace/events/tegra_apb_dma.h
4132 ++++ b/include/trace/events/tegra_apb_dma.h
4133 +@@ -1,5 +1,5 @@
4134 + #if !defined(_TRACE_TEGRA_APB_DMA_H) || defined(TRACE_HEADER_MULTI_READ)
4135 +-#define _TRACE_TEGRA_APM_DMA_H
4136 ++#define _TRACE_TEGRA_APB_DMA_H
4137 +
4138 + #include <linux/tracepoint.h>
4139 + #include <linux/dmaengine.h>
4140 +@@ -55,7 +55,7 @@ TRACE_EVENT(tegra_dma_isr,
4141 + TP_printk("%s: irq %d\n", __get_str(chan), __entry->irq)
4142 + );
4143 +
4144 +-#endif /* _TRACE_TEGRADMA_H */
4145 ++#endif /* _TRACE_TEGRA_APB_DMA_H */
4146 +
4147 + /* This part must be outside protection */
4148 + #include <trace/define_trace.h>
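The four trace-header hunks above (dma_fence, napi, qdisc and tegra_apb_dma) fix the same mistake: the macro tested by an include guard must be exactly the macro the guard defines, or the guard never arms. A minimal sketch of the broken form:

    #if !defined(_TRACE_NAPI_H)     /* tests _TRACE_NAPI_H ...              */
    #define _TRACE_NAPI_H_          /* ... but defines _TRACE_NAPI_H_,      */
    #endif                          /* so the test stays true and the       */
                                    /* guarded body re-expands every time   */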
4149 +diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
4150 +index f7afdadb6770..3401382bbca2 100644
4151 +--- a/kernel/dma/mapping.c
4152 ++++ b/kernel/dma/mapping.c
4153 +@@ -116,11 +116,16 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
4154 + int ret;
4155 +
4156 + if (!dev_is_dma_coherent(dev)) {
4157 ++ unsigned long pfn;
4158 ++
4159 + if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
4160 + return -ENXIO;
4161 +
4162 +- page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr,
4163 +- dma_addr));
4164 ++ /* If the PFN is not valid, we do not have a struct page */
4165 ++ pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
4166 ++ if (!pfn_valid(pfn))
4167 ++ return -ENXIO;
4168 ++ page = pfn_to_page(pfn);
4169 + } else {
4170 + page = virt_to_page(cpu_addr);
4171 + }
4172 +@@ -170,7 +175,11 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
4173 + if (!dev_is_dma_coherent(dev)) {
4174 + if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
4175 + return -ENXIO;
4176 ++
4177 ++ /* If the PFN is not valid, we do not have a struct page */
4178 + pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
4179 ++ if (!pfn_valid(pfn))
4180 ++ return -ENXIO;
4181 + } else {
4182 + pfn = page_to_pfn(virt_to_page(cpu_addr));
4183 + }
4184 +diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
4185 +index 962cf343f798..ae3ec77bb92f 100644
4186 +--- a/kernel/sched/cpufreq_schedutil.c
4187 ++++ b/kernel/sched/cpufreq_schedutil.c
4188 +@@ -40,6 +40,7 @@ struct sugov_policy {
4189 + struct task_struct *thread;
4190 + bool work_in_progress;
4191 +
4192 ++ bool limits_changed;
4193 + bool need_freq_update;
4194 + };
4195 +
4196 +@@ -89,8 +90,11 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
4197 + !cpufreq_this_cpu_can_update(sg_policy->policy))
4198 + return false;
4199 +
4200 +- if (unlikely(sg_policy->need_freq_update))
4201 ++ if (unlikely(sg_policy->limits_changed)) {
4202 ++ sg_policy->limits_changed = false;
4203 ++ sg_policy->need_freq_update = true;
4204 + return true;
4205 ++ }
4206 +
4207 + delta_ns = time - sg_policy->last_freq_update_time;
4208 +
4209 +@@ -427,7 +431,7 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
4210 + static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
4211 + {
4212 + if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
4213 +- sg_policy->need_freq_update = true;
4214 ++ sg_policy->limits_changed = true;
4215 + }
4216 +
4217 + static void sugov_update_single(struct update_util_data *hook, u64 time,
4218 +@@ -447,7 +451,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
4219 + if (!sugov_should_update_freq(sg_policy, time))
4220 + return;
4221 +
4222 +- busy = sugov_cpu_is_busy(sg_cpu);
4223 ++ /* Limits may have changed, don't skip frequency update */
4224 ++ busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);
4225 +
4226 + util = sugov_get_util(sg_cpu);
4227 + max = sg_cpu->max;
4228 +@@ -821,6 +826,7 @@ static int sugov_start(struct cpufreq_policy *policy)
4229 + sg_policy->last_freq_update_time = 0;
4230 + sg_policy->next_freq = 0;
4231 + sg_policy->work_in_progress = false;
4232 ++ sg_policy->limits_changed = false;
4233 + sg_policy->need_freq_update = false;
4234 + sg_policy->cached_raw_freq = 0;
4235 +
4236 +@@ -869,7 +875,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
4237 + mutex_unlock(&sg_policy->work_lock);
4238 + }
4239 +
4240 +- sg_policy->need_freq_update = true;
4241 ++ sg_policy->limits_changed = true;
4242 + }
4243 +
4244 + struct cpufreq_governor schedutil_gov = {
4245 +diff --git a/mm/hmm.c b/mm/hmm.c
4246 +index 4c405dfbd2b3..27dd9a881627 100644
4247 +--- a/mm/hmm.c
4248 ++++ b/mm/hmm.c
4249 +@@ -995,7 +995,7 @@ EXPORT_SYMBOL(hmm_range_unregister);
4250 + * @range: range
4251 + * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
4252 + * permission (for instance asking for write and range is read only),
4253 +- * -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid
4254 ++ * -EBUSY if you need to retry, -EFAULT invalid (ie either no valid
4255 + * vma or it is illegal to access that range), number of valid pages
4256 + * in range->pfns[] (from range start address).
4257 + *
4258 +@@ -1019,7 +1019,7 @@ long hmm_range_snapshot(struct hmm_range *range)
4259 + do {
4260 + /* If range is no longer valid force retry. */
4261 + if (!range->valid)
4262 +- return -EAGAIN;
4263 ++ return -EBUSY;
4264 +
4265 + vma = find_vma(hmm->mm, start);
4266 + if (vma == NULL || (vma->vm_flags & device_vma))
4267 +@@ -1117,10 +1117,8 @@ long hmm_range_fault(struct hmm_range *range, bool block)
4268 +
4269 + do {
4270 + /* If range is no longer valid force retry. */
4271 +- if (!range->valid) {
4272 +- up_read(&hmm->mm->mmap_sem);
4273 +- return -EAGAIN;
4274 +- }
4275 ++ if (!range->valid)
4276 ++ return -EBUSY;
4277 +
4278 + vma = find_vma(hmm->mm, start);
4279 + if (vma == NULL || (vma->vm_flags & device_vma))
4280 +diff --git a/mm/kmemleak.c b/mm/kmemleak.c
4281 +index 3e147ea83182..3afb01bce736 100644
4282 +--- a/mm/kmemleak.c
4283 ++++ b/mm/kmemleak.c
4284 +@@ -114,7 +114,7 @@
4285 + /* GFP bitmask for kmemleak internal allocations */
4286 + #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
4287 + __GFP_NORETRY | __GFP_NOMEMALLOC | \
4288 +- __GFP_NOWARN | __GFP_NOFAIL)
4289 ++ __GFP_NOWARN)
4290 +
4291 + /* scanning area inside a memory block */
4292 + struct kmemleak_scan_area {
4293 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4294 +index 902d020aa70e..8f5dabfaf94d 100644
4295 +--- a/mm/memcontrol.c
4296 ++++ b/mm/memcontrol.c
4297 +@@ -1126,26 +1126,45 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
4298 + css_put(&prev->css);
4299 + }
4300 +
4301 +-static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
4302 ++static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
4303 ++ struct mem_cgroup *dead_memcg)
4304 + {
4305 +- struct mem_cgroup *memcg = dead_memcg;
4306 + struct mem_cgroup_reclaim_iter *iter;
4307 + struct mem_cgroup_per_node *mz;
4308 + int nid;
4309 + int i;
4310 +
4311 +- for (; memcg; memcg = parent_mem_cgroup(memcg)) {
4312 +- for_each_node(nid) {
4313 +- mz = mem_cgroup_nodeinfo(memcg, nid);
4314 +- for (i = 0; i <= DEF_PRIORITY; i++) {
4315 +- iter = &mz->iter[i];
4316 +- cmpxchg(&iter->position,
4317 +- dead_memcg, NULL);
4318 +- }
4319 ++ for_each_node(nid) {
4320 ++ mz = mem_cgroup_nodeinfo(from, nid);
4321 ++ for (i = 0; i <= DEF_PRIORITY; i++) {
4322 ++ iter = &mz->iter[i];
4323 ++ cmpxchg(&iter->position,
4324 ++ dead_memcg, NULL);
4325 + }
4326 + }
4327 + }
4328 +
4329 ++static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
4330 ++{
4331 ++ struct mem_cgroup *memcg = dead_memcg;
4332 ++ struct mem_cgroup *last;
4333 ++
4334 ++ do {
4335 ++ __invalidate_reclaim_iterators(memcg, dead_memcg);
4336 ++ last = memcg;
4337 ++ } while ((memcg = parent_mem_cgroup(memcg)));
4338 ++
4339 ++ /*
4340 ++ * When cgroup1 non-hierarchy mode is used,
4341 ++ * parent_mem_cgroup() does not walk all the way up to the
4342 ++ * cgroup root (root_mem_cgroup). So we have to handle
4343 ++ * dead_memcg from cgroup root separately.
4344 ++ */
4345 ++ if (last != root_mem_cgroup)
4346 ++ __invalidate_reclaim_iterators(root_mem_cgroup,
4347 ++ dead_memcg);
4348 ++}
4349 ++
4350 + /**
4351 + * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
4352 + * @memcg: hierarchy root
4353 +diff --git a/mm/mempolicy.c b/mm/mempolicy.c
4354 +index fdcb73536319..ca3f443c8fc1 100644
4355 +--- a/mm/mempolicy.c
4356 ++++ b/mm/mempolicy.c
4357 +@@ -403,7 +403,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
4358 + },
4359 + };
4360 +
4361 +-static void migrate_page_add(struct page *page, struct list_head *pagelist,
4362 ++static int migrate_page_add(struct page *page, struct list_head *pagelist,
4363 + unsigned long flags);
4364 +
4365 + struct queue_pages {
4366 +@@ -429,11 +429,14 @@ static inline bool queue_pages_required(struct page *page,
4367 + }
4368 +
4369 + /*
4370 +- * queue_pages_pmd() has three possible return values:
4371 +- * 1 - pages are placed on the right node or queued successfully.
4372 +- * 0 - THP was split.
4373 +- * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing
4374 +- * page was already on a node that does not follow the policy.
4375 ++ * queue_pages_pmd() has four possible return values:
4376 ++ * 0 - pages are placed on the right node or queued successfully.
4377 ++ * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
4378 ++ * specified.
4379 ++ * 2 - THP was split.
4380 ++ * -EIO - the page is a migration entry, or only MPOL_MF_STRICT was
4381 ++ * specified and an existing page was already on a node that does
4382 ++ * not follow the policy.
4383 + */
4384 + static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
4385 + unsigned long end, struct mm_walk *walk)
4386 +@@ -451,23 +454,20 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
4387 + if (is_huge_zero_page(page)) {
4388 + spin_unlock(ptl);
4389 + __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
4390 ++ ret = 2;
4391 + goto out;
4392 + }
4393 +- if (!queue_pages_required(page, qp)) {
4394 +- ret = 1;
4395 ++ if (!queue_pages_required(page, qp))
4396 + goto unlock;
4397 +- }
4398 +
4399 +- ret = 1;
4400 + flags = qp->flags;
4401 + /* go to thp migration */
4402 + if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
4403 +- if (!vma_migratable(walk->vma)) {
4404 +- ret = -EIO;
4405 ++ if (!vma_migratable(walk->vma) ||
4406 ++ migrate_page_add(page, qp->pagelist, flags)) {
4407 ++ ret = 1;
4408 + goto unlock;
4409 + }
4410 +-
4411 +- migrate_page_add(page, qp->pagelist, flags);
4412 + } else
4413 + ret = -EIO;
4414 + unlock:
4415 +@@ -479,6 +479,13 @@ out:
4416 + /*
4417 + * Scan through pages checking if pages follow certain conditions,
4418 + * and move them to the pagelist if they do.
4419 ++ *
4420 ++ * queue_pages_pte_range() has three possible return values:
4421 ++ * 0 - pages are placed on the right node or queued successfully.
4422 ++ * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
4423 ++ * specified.
4424 ++ * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
4425 ++ * on a node that does not follow the policy.
4426 + */
4427 + static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
4428 + unsigned long end, struct mm_walk *walk)
4429 +@@ -488,17 +495,17 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
4430 + struct queue_pages *qp = walk->private;
4431 + unsigned long flags = qp->flags;
4432 + int ret;
4433 ++ bool has_unmovable = false;
4434 + pte_t *pte;
4435 + spinlock_t *ptl;
4436 +
4437 + ptl = pmd_trans_huge_lock(pmd, vma);
4438 + if (ptl) {
4439 + ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
4440 +- if (ret > 0)
4441 +- return 0;
4442 +- else if (ret < 0)
4443 ++ if (ret != 2)
4444 + return ret;
4445 + }
4446 ++ /* THP was split, fall through to pte walk */
4447 +
4448 + if (pmd_trans_unstable(pmd))
4449 + return 0;
4450 +@@ -519,14 +526,28 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
4451 + if (!queue_pages_required(page, qp))
4452 + continue;
4453 + if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
4454 +- if (!vma_migratable(vma))
4455 ++ /* MPOL_MF_STRICT must be specified if we get here */
4456 ++ if (!vma_migratable(vma)) {
4457 ++ has_unmovable = true;
4458 + break;
4459 +- migrate_page_add(page, qp->pagelist, flags);
4460 ++ }
4461 ++
4462 ++ /*
4463 ++ * Do not abort immediately since there may be
4464 ++ * pages temporarily off the LRU in the range. We
4465 ++ * still need to migrate the other LRU pages.
4466 ++ */
4467 ++ if (migrate_page_add(page, qp->pagelist, flags))
4468 ++ has_unmovable = true;
4469 + } else
4470 + break;
4471 + }
4472 + pte_unmap_unlock(pte - 1, ptl);
4473 + cond_resched();
4474 ++
4475 ++ if (has_unmovable)
4476 ++ return 1;
4477 ++
4478 + return addr != end ? -EIO : 0;
4479 + }
4480 +
4481 +@@ -639,7 +660,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
4482 + *
4483 + * If pages found in a given range are on a set of nodes (determined by
4484 + * @nodes and @flags,) it's isolated and queued to the pagelist which is
4485 +- * passed via @private.)
4486 ++ * passed via @private.
4487 ++ *
4488 ++ * queue_pages_range() has three possible return values:
4489 ++ * 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
4490 ++ * specified.
4491 ++ * 0 - pages queued successfully, or no misplaced page.
4492 ++ * -EIO - there is a misplaced page and only MPOL_MF_STRICT was specified.
4493 + */
4494 + static int
4495 + queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
4496 +@@ -940,7 +967,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
4497 + /*
4498 + * page migration, thp tail pages can be passed.
4499 + */
4500 +-static void migrate_page_add(struct page *page, struct list_head *pagelist,
4501 ++static int migrate_page_add(struct page *page, struct list_head *pagelist,
4502 + unsigned long flags)
4503 + {
4504 + struct page *head = compound_head(page);
4505 +@@ -953,8 +980,19 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
4506 + mod_node_page_state(page_pgdat(head),
4507 + NR_ISOLATED_ANON + page_is_file_cache(head),
4508 + hpage_nr_pages(head));
4509 ++ } else if (flags & MPOL_MF_STRICT) {
4510 ++ /*
4511 ++ * A non-movable page may reach here. There may also be
4512 ++ * pages temporarily off the LRU or non-LRU movable pages.
4513 ++ * Treat them as unmovable pages, since they can't be
4514 ++ * isolated and thus can't be moved at the moment; return
4515 ++ * -EIO for this case too.
4516 ++ */
4517 ++ return -EIO;
4518 + }
4519 + }
4520 ++
4521 ++ return 0;
4522 + }
4523 +
4524 + /* page allocation callback for NUMA node migration */
4525 +@@ -1157,9 +1195,10 @@ static struct page *new_page(struct page *page, unsigned long start)
4526 + }
4527 + #else
4528 +
4529 +-static void migrate_page_add(struct page *page, struct list_head *pagelist,
4530 ++static int migrate_page_add(struct page *page, struct list_head *pagelist,
4531 + unsigned long flags)
4532 + {
4533 ++ return -EIO;
4534 + }
4535 +
4536 + int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
4537 +@@ -1182,6 +1221,7 @@ static long do_mbind(unsigned long start, unsigned long len,
4538 + struct mempolicy *new;
4539 + unsigned long end;
4540 + int err;
4541 ++ int ret;
4542 + LIST_HEAD(pagelist);
4543 +
4544 + if (flags & ~(unsigned long)MPOL_MF_VALID)
4545 +@@ -1243,10 +1283,15 @@ static long do_mbind(unsigned long start, unsigned long len,
4546 + if (err)
4547 + goto mpol_out;
4548 +
4549 +- err = queue_pages_range(mm, start, end, nmask,
4550 ++ ret = queue_pages_range(mm, start, end, nmask,
4551 + flags | MPOL_MF_INVERT, &pagelist);
4552 +- if (!err)
4553 +- err = mbind_range(mm, start, end, new);
4554 ++
4555 ++ if (ret < 0) {
4556 ++ err = -EIO;
4557 ++ goto up_out;
4558 ++ }
4559 ++
4560 ++ err = mbind_range(mm, start, end, new);
4561 +
4562 + if (!err) {
4563 + int nr_failed = 0;
4564 +@@ -1259,13 +1304,14 @@ static long do_mbind(unsigned long start, unsigned long len,
4565 + putback_movable_pages(&pagelist);
4566 + }
4567 +
4568 +- if (nr_failed && (flags & MPOL_MF_STRICT))
4569 ++ if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
4570 + err = -EIO;
4571 + } else
4572 + putback_movable_pages(&pagelist);
4573 +
4574 ++up_out:
4575 + up_write(&mm->mmap_sem);
4576 +- mpol_out:
4577 ++mpol_out:
4578 + mpol_put(new);
4579 + return err;
4580 + }
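
The mm/mempolicy.c hunks above rework the walkers' return-value contract: an unmovable page found under MPOL_MF_MOVE* & MPOL_MF_STRICT is now reported as 1 without aborting the walk, while -EIO stays the hard failure for MPOL_MF_STRICT alone, and do_mbind() converts ret > 0 into -EIO only after the migration attempt. A minimal userspace model of that control flow (the fake_page type and the walk loop are invented for illustration; only the return semantics mirror the patch):

    #include <stdio.h>
    #include <stdbool.h>
    #include <errno.h>

    #define MPOL_MF_STRICT  (1 << 0)
    #define MPOL_MF_MOVE    (1 << 1)

    struct fake_page { bool on_right_node; bool movable; };

    /* Models the new queue_pages_pte_range() contract: 0 = everything
     * queued or already placed, 1 = an unmovable page was seen under
     * MPOL_MF_MOVE* & MPOL_MF_STRICT, -EIO = misplaced page with
     * MPOL_MF_STRICT alone. */
    static int walk_range(const struct fake_page *pages, int n, unsigned flags)
    {
        bool has_unmovable = false;

        for (int i = 0; i < n; i++) {
            if (pages[i].on_right_node)
                continue;
            if (flags & MPOL_MF_MOVE) {
                /* Keep walking: later pages may still be queued. */
                if (!pages[i].movable)
                    has_unmovable = true;
            } else {
                return -EIO;    /* strict-only: hard failure */
            }
        }
        return has_unmovable ? 1 : 0;
    }

    int main(void)
    {
        const struct fake_page pages[] = {
            { .on_right_node = false, .movable = true },
            { .on_right_node = false, .movable = false },
        };
        int ret = walk_range(pages, 2, MPOL_MF_MOVE | MPOL_MF_STRICT);

        /* do_mbind() turns ret > 0 into -EIO after trying the migration. */
        printf("walk returned %d -> mbind err %d\n", ret, ret > 0 ? -EIO : 0);
        return 0;
    }
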
4581 +diff --git a/mm/rmap.c b/mm/rmap.c
4582 +index e5dfe2ae6b0d..003377e24232 100644
4583 +--- a/mm/rmap.c
4584 ++++ b/mm/rmap.c
4585 +@@ -1475,7 +1475,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
4586 + /*
4587 + * No need to invalidate here; it will synchronize
4588 + * against the special swap migration pte.
4589 ++ *
4590 ++ * The subpage computed above comes from a
4591 ++ * swap PTE, which results in an invalid pointer.
4592 ++ * Since only PAGE_SIZE pages can currently be
4593 ++ * migrated, just set it to page. This will need to be
4594 ++ * changed when hugepage migrations to device private
4595 ++ * memory are supported.
4596 + */
4597 ++ subpage = page;
4598 + goto discard;
4599 + }
4600 +
4601 +diff --git a/mm/usercopy.c b/mm/usercopy.c
4602 +index 2a09796edef8..98e924864554 100644
4603 +--- a/mm/usercopy.c
4604 ++++ b/mm/usercopy.c
4605 +@@ -147,7 +147,7 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
4606 + bool to_user)
4607 + {
4608 + /* Reject if object wraps past end of memory. */
4609 +- if (ptr + n < ptr)
4610 ++ if (ptr + (n - 1) < ptr)
4611 + usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);
4612 +
4613 + /* Reject if NULL or ZERO-allocation. */
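
The mm/usercopy.c change fixes an off-by-one in the wrap test: the last byte of an n-byte object is ptr + n - 1, so an object ending exactly at the top of the address space was falsely rejected by ptr + n < ptr. A standalone sketch of the two checks (it assumes n >= 1, which the kernel guarantees by rejecting zero-length copies earlier):

    #include <stdio.h>

    static int wraps_old(unsigned long ptr, unsigned long n)
    {
        return ptr + n < ptr;           /* off by one at the very top */
    }

    static int wraps_new(unsigned long ptr, unsigned long n)
    {
        return ptr + (n - 1) < ptr;     /* last byte is ptr + n - 1 */
    }

    int main(void)
    {
        unsigned long ptr = ~0UL - 7;   /* 8-byte object ending at ~0UL */
        unsigned long n = 8;

        printf("old check rejects: %d\n", wraps_old(ptr, n)); /* 1: false positive */
        printf("new check rejects: %d\n", wraps_new(ptr, n)); /* 0: accepted */
        return 0;
    }
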
4614 +diff --git a/mm/vmscan.c b/mm/vmscan.c
4615 +index 4ebf20152328..c8f58f5695a9 100644
4616 +--- a/mm/vmscan.c
4617 ++++ b/mm/vmscan.c
4618 +@@ -88,9 +88,6 @@ struct scan_control {
4619 + /* Can pages be swapped as part of reclaim? */
4620 + unsigned int may_swap:1;
4621 +
4622 +- /* e.g. boosted watermark reclaim leaves slabs alone */
4623 +- unsigned int may_shrinkslab:1;
4624 +-
4625 + /*
4626 + * Cgroups are not reclaimed below their configured memory.low,
4627 + * unless we threaten to OOM. If any cgroups are skipped due to
4628 +@@ -2669,10 +2666,8 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
4629 + shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
4630 + node_lru_pages += lru_pages;
4631 +
4632 +- if (sc->may_shrinkslab) {
4633 +- shrink_slab(sc->gfp_mask, pgdat->node_id,
4634 +- memcg, sc->priority);
4635 +- }
4636 ++ shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
4637 ++ sc->priority);
4638 +
4639 + /* Record the group's reclaim efficiency */
4640 + vmpressure(sc->gfp_mask, memcg, false,
4641 +@@ -3149,7 +3144,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
4642 + .may_writepage = !laptop_mode,
4643 + .may_unmap = 1,
4644 + .may_swap = 1,
4645 +- .may_shrinkslab = 1,
4646 + };
4647 +
4648 + /*
4649 +@@ -3191,7 +3185,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
4650 + .may_unmap = 1,
4651 + .reclaim_idx = MAX_NR_ZONES - 1,
4652 + .may_swap = !noswap,
4653 +- .may_shrinkslab = 1,
4654 + };
4655 + unsigned long lru_pages;
4656 +
4657 +@@ -3236,7 +3229,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
4658 + .may_writepage = !laptop_mode,
4659 + .may_unmap = 1,
4660 + .may_swap = may_swap,
4661 +- .may_shrinkslab = 1,
4662 + };
4663 +
4664 + /*
4665 +@@ -3545,7 +3537,6 @@ restart:
4666 + */
4667 + sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
4668 + sc.may_swap = !nr_boost_reclaim;
4669 +- sc.may_shrinkslab = !nr_boost_reclaim;
4670 +
4671 + /*
4672 + * Do some background aging of the anon list, to give
4673 +diff --git a/mm/z3fold.c b/mm/z3fold.c
4674 +index 3b27094dc42e..c4debbe683eb 100644
4675 +--- a/mm/z3fold.c
4676 ++++ b/mm/z3fold.c
4677 +@@ -819,9 +819,19 @@ out:
4678 + static void z3fold_destroy_pool(struct z3fold_pool *pool)
4679 + {
4680 + kmem_cache_destroy(pool->c_handle);
4681 +- z3fold_unregister_migration(pool);
4682 +- destroy_workqueue(pool->release_wq);
4683 ++
4684 ++ /*
4685 ++ * We need to destroy pool->compact_wq before pool->release_wq,
4686 ++ * as any pending work on pool->compact_wq will call
4687 ++ * queue_work(pool->release_wq, &pool->work).
4688 ++ *
4689 ++ * There are still outstanding pages until both workqueues are drained,
4690 ++ * so we cannot unregister migration until then.
4691 ++ */
4692 ++
4693 + destroy_workqueue(pool->compact_wq);
4694 ++ destroy_workqueue(pool->release_wq);
4695 ++ z3fold_unregister_migration(pool);
4696 + kfree(pool);
4697 + }
4698 +
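
The z3fold teardown fix is purely about ordering: work running on compact_wq can still queue_work() onto release_wq, so release_wq must outlive compact_wq, and migration can only be unregistered once both queues are drained. A toy model of that dependency (the wq struct and drain() are invented; this is not the z3fold code):

    #include <stdio.h>
    #include <stdbool.h>

    struct wq { const char *name; bool alive; };

    static void queue_work_model(struct wq *wq)
    {
        if (!wq->alive)
            printf("BUG: queueing on destroyed %s\n", wq->name);
        else
            printf("queued work on %s\n", wq->name);
    }

    /* Pending "compact" work re-queues a release on release_wq. */
    static void drain(struct wq *wq, struct wq *release_wq)
    {
        printf("draining %s\n", wq->name);
        if (release_wq)
            queue_work_model(release_wq);
        wq->alive = false;
    }

    int main(void)
    {
        struct wq compact = { "compact_wq", true };
        struct wq release = { "release_wq", true };

        /* Correct order, as in the patch: compact_wq first. Destroying
         * release_wq first would trigger the BUG path above. */
        drain(&compact, &release);
        drain(&release, NULL);
        return 0;
    }
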
4699 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
4700 +index 963dfdc14827..1fa9ac483173 100644
4701 +--- a/net/bridge/netfilter/ebtables.c
4702 ++++ b/net/bridge/netfilter/ebtables.c
4703 +@@ -1770,20 +1770,28 @@ static int compat_calc_entry(const struct ebt_entry *e,
4704 + return 0;
4705 + }
4706 +
4707 ++static int ebt_compat_init_offsets(unsigned int number)
4708 ++{
4709 ++ if (number > INT_MAX)
4710 ++ return -EINVAL;
4711 ++
4712 ++ /* also count the base chain policies */
4713 ++ number += NF_BR_NUMHOOKS;
4714 ++
4715 ++ return xt_compat_init_offsets(NFPROTO_BRIDGE, number);
4716 ++}
4717 +
4718 + static int compat_table_info(const struct ebt_table_info *info,
4719 + struct compat_ebt_replace *newinfo)
4720 + {
4721 + unsigned int size = info->entries_size;
4722 + const void *entries = info->entries;
4723 ++ int ret;
4724 +
4725 + newinfo->entries_size = size;
4726 +- if (info->nentries) {
4727 +- int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
4728 +- info->nentries);
4729 +- if (ret)
4730 +- return ret;
4731 +- }
4732 ++ ret = ebt_compat_init_offsets(info->nentries);
4733 ++ if (ret)
4734 ++ return ret;
4735 +
4736 + return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
4737 + entries, newinfo);
4738 +@@ -2234,11 +2242,9 @@ static int compat_do_replace(struct net *net, void __user *user,
4739 +
4740 + xt_compat_lock(NFPROTO_BRIDGE);
4741 +
4742 +- if (tmp.nentries) {
4743 +- ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
4744 +- if (ret < 0)
4745 +- goto out_unlock;
4746 +- }
4747 ++ ret = ebt_compat_init_offsets(tmp.nentries);
4748 ++ if (ret < 0)
4749 ++ goto out_unlock;
4750 +
4751 + ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
4752 + if (ret < 0)
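
Both ebtables call sites are now funneled through one helper that rejects entry counts above INT_MAX before adding the base chain policies, closing an integer-overflow window. A self-contained sketch of the guard (NF_BR_NUMHOOKS copied from the kernel's value of 6; the printf stands in for the real xt_compat_init_offsets()):

    #include <stdio.h>
    #include <limits.h>
    #include <errno.h>

    #define NF_BR_NUMHOOKS 6

    static int ebt_compat_init_offsets_model(unsigned int number)
    {
        if (number > INT_MAX)
            return -EINVAL;     /* would overflow below */

        /* also count the base chain policies */
        number += NF_BR_NUMHOOKS;

        printf("allocating offsets for %u entries\n", number);
        return 0;
    }

    int main(void)
    {
        printf("%d\n", ebt_compat_init_offsets_model(10));          /* 0 */
        printf("%d\n", ebt_compat_init_offsets_model(0x80000000u)); /* -EINVAL */
        return 0;
    }
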
4753 +diff --git a/net/core/filter.c b/net/core/filter.c
4754 +index f681fb772940..534c310bb089 100644
4755 +--- a/net/core/filter.c
4756 ++++ b/net/core/filter.c
4757 +@@ -7325,12 +7325,12 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
4758 + case offsetof(struct __sk_buff, gso_segs):
4759 + /* si->dst_reg = skb_shinfo(SKB); */
4760 + #ifdef NET_SKBUFF_DATA_USES_OFFSET
4761 +- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
4762 +- si->dst_reg, si->src_reg,
4763 +- offsetof(struct sk_buff, head));
4764 + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
4765 + BPF_REG_AX, si->src_reg,
4766 + offsetof(struct sk_buff, end));
4767 ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
4768 ++ si->dst_reg, si->src_reg,
4769 ++ offsetof(struct sk_buff, head));
4770 + *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
4771 + #else
4772 + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
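
The gso_segs rewrite in net/core/filter.c is an instruction-ordering fix: dst_reg may be the very same register as src_reg, so head must not be loaded into it before end has been read through it. A minimal userspace sketch of the aliasing hazard (an invented register-array model, not BPF):

    #include <stdio.h>
    #include <stdint.h>

    struct obj { intptr_t head; intptr_t end; };

    /* Wrong order, left uncalled: if dst == src, the first load clobbers
     * the object pointer that the second load still needs. */
    static void convert_bad(intptr_t *regs, int dst, int src, int ax)
    {
        regs[dst] = ((struct obj *)regs[src])->head;  /* clobbers src */
        regs[ax]  = ((struct obj *)regs[src])->end;   /* reads garbage */
    }

    /* Fixed order, as in the patch: read end into the scratch register
     * AX first, and only then overwrite dst (which may alias src). */
    static void convert_good(intptr_t *regs, int dst, int src, int ax)
    {
        regs[ax]  = ((struct obj *)regs[src])->end;
        regs[dst] = ((struct obj *)regs[src])->head;
    }

    int main(void)
    {
        struct obj o = { .head = 1, .end = 2 };
        intptr_t regs[3] = { (intptr_t)&o, 0, 0 };

        (void)convert_bad;              /* would fault with dst == src */
        convert_good(regs, 0, 0, 2);    /* dst aliases src */
        printf("head=%ld end=%ld\n", (long)regs[0], (long)regs[2]);
        return 0;
    }
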
4773 +diff --git a/net/core/sock.c b/net/core/sock.c
4774 +index aa4a00d381e3..df7b38b60164 100644
4775 +--- a/net/core/sock.c
4776 ++++ b/net/core/sock.c
4777 +@@ -1988,6 +1988,19 @@ void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
4778 + }
4779 + EXPORT_SYMBOL(skb_set_owner_w);
4780 +
4781 ++static bool can_skb_orphan_partial(const struct sk_buff *skb)
4782 ++{
4783 ++#ifdef CONFIG_TLS_DEVICE
4784 ++ /* Drivers depend on in-order delivery for crypto offload;
4785 ++ * a partial orphan breaks the out-of-order-OK logic.
4786 ++ */
4787 ++ if (skb->decrypted)
4788 ++ return false;
4789 ++#endif
4790 ++ return (skb->destructor == sock_wfree ||
4791 ++ (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
4792 ++}
4793 ++
4794 + /* This helper is used by netem, as it can hold packets in its
4795 + * delay queue. We want to allow the owner socket to send more
4796 + * packets, as if they were already TX completed by a typical driver.
4797 +@@ -1999,11 +2012,7 @@ void skb_orphan_partial(struct sk_buff *skb)
4798 + if (skb_is_tcp_pure_ack(skb))
4799 + return;
4800 +
4801 +- if (skb->destructor == sock_wfree
4802 +-#ifdef CONFIG_INET
4803 +- || skb->destructor == tcp_wfree
4804 +-#endif
4805 +- ) {
4806 ++ if (can_skb_orphan_partial(skb)) {
4807 + struct sock *sk = skb->sk;
4808 +
4809 + if (refcount_inc_not_zero(&sk->sk_refcnt)) {
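
The net/core/sock.c hunk factors the destructor test into can_skb_orphan_partial() and additionally refuses to orphan skbs marked decrypted, since TLS offload drivers rely on the socket association for in-order crypto. A small sketch of the guard's shape (fake_skb and the destructor constants are invented stand-ins):

    #include <stdio.h>
    #include <stdbool.h>

    enum destructor { SOCK_WFREE, TCP_WFREE, OTHER };

    struct fake_skb {
        bool decrypted;                 /* set for TLS-offload payloads */
        enum destructor destructor;
    };

    static bool can_orphan_partial(const struct fake_skb *skb)
    {
        /* TLS offload needs the socket association kept intact. */
        if (skb->decrypted)
            return false;
        return skb->destructor == SOCK_WFREE ||
               skb->destructor == TCP_WFREE;
    }

    int main(void)
    {
        struct fake_skb tls = { .decrypted = true,  .destructor = TCP_WFREE };
        struct fake_skb tcp = { .decrypted = false, .destructor = TCP_WFREE };

        printf("tls skb: %d, plain tcp skb: %d\n",
               can_orphan_partial(&tls), can_orphan_partial(&tcp));
        return 0;
    }
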
4810 +diff --git a/net/dsa/switch.c b/net/dsa/switch.c
4811 +index 4ec5b7f85d51..09d9286b27cc 100644
4812 +--- a/net/dsa/switch.c
4813 ++++ b/net/dsa/switch.c
4814 +@@ -153,6 +153,9 @@ static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds,
4815 + {
4816 + int port;
4817 +
4818 ++ if (!ds->ops->port_mdb_add)
4819 ++ return;
4820 ++
4821 + for_each_set_bit(port, bitmap, ds->num_ports)
4822 + ds->ops->port_mdb_add(ds, port, mdb);
4823 + }
4824 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
4825 +index 5264f064a87e..b30f7f877181 100644
4826 +--- a/net/ipv4/tcp.c
4827 ++++ b/net/ipv4/tcp.c
4828 +@@ -984,6 +984,9 @@ new_segment:
4829 + if (!skb)
4830 + goto wait_for_memory;
4831 +
4832 ++#ifdef CONFIG_TLS_DEVICE
4833 ++ skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
4834 ++#endif
4835 + skb_entail(sk, skb);
4836 + copy = size_goal;
4837 + }
4838 +diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
4839 +index 3d1e15401384..8a56e09cfb0e 100644
4840 +--- a/net/ipv4/tcp_bpf.c
4841 ++++ b/net/ipv4/tcp_bpf.c
4842 +@@ -398,10 +398,14 @@ more_data:
4843 + static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
4844 + {
4845 + struct sk_msg tmp, *msg_tx = NULL;
4846 +- int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
4847 + int copied = 0, err = 0;
4848 + struct sk_psock *psock;
4849 + long timeo;
4850 ++ int flags;
4851 ++
4852 ++ /* Don't let internal do_tcp_sendpages() flags through */
4853 ++ flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
4854 ++ flags |= MSG_NO_SHARED_FRAGS;
4855 +
4856 + psock = sk_psock_get(sk);
4857 + if (unlikely(!psock))
4858 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
4859 +index 7d0be046cbc1..359d298348c7 100644
4860 +--- a/net/ipv4/tcp_output.c
4861 ++++ b/net/ipv4/tcp_output.c
4862 +@@ -1318,6 +1318,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
4863 + buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
4864 + if (!buff)
4865 + return -ENOMEM; /* We'll just try again later. */
4866 ++ skb_copy_decrypted(buff, skb);
4867 +
4868 + sk->sk_wmem_queued += buff->truesize;
4869 + sk_mem_charge(sk, buff->truesize);
4870 +@@ -1872,6 +1873,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
4871 + buff = sk_stream_alloc_skb(sk, 0, gfp, true);
4872 + if (unlikely(!buff))
4873 + return -ENOMEM;
4874 ++ skb_copy_decrypted(buff, skb);
4875 +
4876 + sk->sk_wmem_queued += buff->truesize;
4877 + sk_mem_charge(sk, buff->truesize);
4878 +@@ -2141,6 +2143,7 @@ static int tcp_mtu_probe(struct sock *sk)
4879 + sk_mem_charge(sk, nskb->truesize);
4880 +
4881 + skb = tcp_send_head(sk);
4882 ++ skb_copy_decrypted(nskb, skb);
4883 +
4884 + TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
4885 + TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
4886 +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
4887 +index f4f9b8344a32..e343a030ec26 100644
4888 +--- a/net/netfilter/nf_conntrack_core.c
4889 ++++ b/net/netfilter/nf_conntrack_core.c
4890 +@@ -453,13 +453,12 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
4891 + * table location, we assume id gets exposed to userspace.
4892 + *
4893 + * Following nf_conn items do not change throughout lifetime
4894 +- * of the nf_conn after it has been committed to main hash table:
4895 ++ * of the nf_conn:
4896 + *
4897 + * 1. nf_conn address
4898 +- * 2. nf_conn->ext address
4899 +- * 3. nf_conn->master address (normally NULL)
4900 +- * 4. tuple
4901 +- * 5. the associated net namespace
4902 ++ * 2. nf_conn->master address (normally NULL)
4903 ++ * 3. the associated net namespace
4904 ++ * 4. the original direction tuple
4905 + */
4906 + u32 nf_ct_get_id(const struct nf_conn *ct)
4907 + {
4908 +@@ -469,9 +468,10 @@ u32 nf_ct_get_id(const struct nf_conn *ct)
4909 + net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
4910 +
4911 + a = (unsigned long)ct;
4912 +- b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
4913 +- c = (unsigned long)ct->ext;
4914 +- d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
4915 ++ b = (unsigned long)ct->master;
4916 ++ c = (unsigned long)nf_ct_net(ct);
4917 ++ d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
4918 ++ sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
4919 + &ct_id_seed);
4920 + #ifdef CONFIG_64BIT
4921 + return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
4922 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
4923 +index 5f78df080573..bad144dfabc5 100644
4924 +--- a/net/packet/af_packet.c
4925 ++++ b/net/packet/af_packet.c
4926 +@@ -2607,6 +2607,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
4927 +
4928 + mutex_lock(&po->pg_vec_lock);
4929 +
4930 ++ /* The packet_sendmsg() check on tx_ring.pg_vec was lockless;
4931 ++ * we need to confirm it under the protection of pg_vec_lock.
4932 ++ */
4933 ++ if (unlikely(!po->tx_ring.pg_vec)) {
4934 ++ err = -EBUSY;
4935 ++ goto out;
4936 ++ }
4937 + if (likely(saddr == NULL)) {
4938 + dev = packet_cached_dev_get(po);
4939 + proto = po->num;
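
The af_packet fix is the classic re-check-under-lock pattern: packet_sendmsg() tested tx_ring.pg_vec without the lock, so tpacket_snd() must repeat the test once pg_vec_lock is held, because the ring may have been torn down in between. A pthread-based sketch of the pattern (compile with -pthread; all names are invented):

    #include <stdio.h>
    #include <stddef.h>
    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t pg_vec_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *pg_vec;    /* NULL once the ring is torn down */

    static int tpacket_snd_sketch(void)
    {
        pthread_mutex_lock(&pg_vec_lock);

        /* The earlier lockless check may be stale by now. */
        if (pg_vec == NULL) {
            pthread_mutex_unlock(&pg_vec_lock);
            return -EBUSY;
        }

        /* ... safe to use the ring here ... */
        pthread_mutex_unlock(&pg_vec_lock);
        return 0;
    }

    int main(void)
    {
        printf("ret = %d\n", tpacket_snd_sketch()); /* -EBUSY: ring gone */
        return 0;
    }
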
4940 +diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
4941 +index b100870f02a6..37dced00b63d 100644
4942 +--- a/net/sched/act_skbedit.c
4943 ++++ b/net/sched/act_skbedit.c
4944 +@@ -307,6 +307,17 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index)
4945 + return tcf_idr_search(tn, a, index);
4946 + }
4947 +
4948 ++static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
4949 ++{
4950 ++ return nla_total_size(sizeof(struct tc_skbedit))
4951 ++ + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */
4952 ++ + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */
4953 ++ + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */
4954 ++ + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */
4955 ++ + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */
4956 ++ + nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */
4957 ++}
4958 ++
4959 + static struct tc_action_ops act_skbedit_ops = {
4960 + .kind = "skbedit",
4961 + .id = TCA_ID_SKBEDIT,
4962 +@@ -316,6 +327,7 @@ static struct tc_action_ops act_skbedit_ops = {
4963 + .init = tcf_skbedit_init,
4964 + .cleanup = tcf_skbedit_cleanup,
4965 + .walk = tcf_skbedit_walker,
4966 ++ .get_fill_size = tcf_skbedit_get_fill_size,
4967 + .lookup = tcf_skbedit_search,
4968 + .size = sizeof(struct tcf_skbedit),
4969 + };
4970 +diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
4971 +index 9ecfb8f5902a..8be89aa52b6e 100644
4972 +--- a/net/sched/sch_taprio.c
4973 ++++ b/net/sched/sch_taprio.c
4974 +@@ -849,7 +849,8 @@ unlock:
4975 + spin_unlock_bh(qdisc_lock(sch));
4976 +
4977 + free_sched:
4978 +- kfree(new_admin);
4979 ++ if (new_admin)
4980 ++ call_rcu(&new_admin->rcu, taprio_free_sched_cb);
4981 +
4982 + return err;
4983 + }
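
In taprio, the error path now releases the admin schedule through call_rcu() with taprio_free_sched_cb() rather than a bare kfree(), both to free the schedule's entry list and to stay safe if the schedule was already visible to readers. A toy model of deferring the free behind a grace period (a single-slot callback list stands in for real RCU; all names are invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct sched_entry_list { int nentries; };

    struct admin_sched {
        struct sched_entry_list entries;
        void (*free_cb)(struct admin_sched *);
    };

    static struct admin_sched *pending_cb;  /* single-slot "callback list" */

    static void free_sched_cb_model(struct admin_sched *s)
    {
        /* Frees the entry list too, which a bare free() would leak. */
        printf("freeing schedule with %d entries after grace period\n",
               s->entries.nentries);
        free(s);
    }

    static void call_rcu_model(struct admin_sched *s,
                               void (*cb)(struct admin_sched *))
    {
        s->free_cb = cb;
        pending_cb = s;                 /* deferred, not freed immediately */
    }

    static void grace_period_model(void)
    {
        if (pending_cb) {
            pending_cb->free_cb(pending_cb);
            pending_cb = NULL;
        }
    }

    int main(void)
    {
        struct admin_sched *new_admin = calloc(1, sizeof(*new_admin));

        new_admin->entries.nentries = 3;
        call_rcu_model(new_admin, free_sched_cb_model); /* not free() */
        grace_period_model();
        return 0;
    }
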
4984 +diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
4985 +index a554d6d15d1b..1cf5bb5b73c4 100644
4986 +--- a/net/sctp/sm_sideeffect.c
4987 ++++ b/net/sctp/sm_sideeffect.c
4988 +@@ -546,7 +546,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
4989 + */
4990 + if (net->sctp.pf_enable &&
4991 + (transport->state == SCTP_ACTIVE) &&
4992 +- (asoc->pf_retrans < transport->pathmaxrxt) &&
4993 ++ (transport->error_count < transport->pathmaxrxt) &&
4994 + (transport->error_count > asoc->pf_retrans)) {
4995 +
4996 + sctp_assoc_control_transport(asoc, transport,
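
The sctp change corrects which counter is compared against pathmaxrxt: the transport enters the potentially-failed state while its own error_count lies strictly between the association's pf_retrans and the transport's pathmaxrxt, not while pf_retrans happens to be below pathmaxrxt. A tiny sketch of the corrected window (the values are invented):

    #include <stdio.h>
    #include <stdbool.h>

    static bool enters_pf_state(unsigned error_count,
                                unsigned pf_retrans, unsigned pathmaxrxt)
    {
        return error_count < pathmaxrxt && error_count > pf_retrans;
    }

    int main(void)
    {
        /* pf_retrans = 2, pathmaxrxt = 5: PF only for counts 3 and 4 */
        for (unsigned ec = 1; ec <= 5; ec++)
            printf("error_count=%u -> PF=%d\n",
                   ec, enters_pf_state(ec, 2, 5));
        return 0;
    }
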
4997 +diff --git a/net/sctp/stream.c b/net/sctp/stream.c
4998 +index 25946604af85..e83cdaa2ab76 100644
4999 +--- a/net/sctp/stream.c
5000 ++++ b/net/sctp/stream.c
5001 +@@ -316,6 +316,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
5002 + nstr_list[i] = htons(str_list[i]);
5003 +
5004 + if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
5005 ++ kfree(nstr_list);
5006 + retval = -EAGAIN;
5007 + goto out;
5008 + }
5009 +diff --git a/net/tipc/addr.c b/net/tipc/addr.c
5010 +index b88d48d00913..0f1eaed1bd1b 100644
5011 +--- a/net/tipc/addr.c
5012 ++++ b/net/tipc/addr.c
5013 +@@ -75,6 +75,7 @@ void tipc_set_node_addr(struct net *net, u32 addr)
5014 + tipc_set_node_id(net, node_id);
5015 + }
5016 + tn->trial_addr = addr;
5017 ++ tn->addr_trial_end = jiffies;
5018 + pr_info("32-bit node address hash set to %x\n", addr);
5019 + }
5020 +
5021 +diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
5022 +index eb8f24f420f0..4cfcce211c2f 100644
5023 +--- a/net/tls/tls_device.c
5024 ++++ b/net/tls/tls_device.c
5025 +@@ -342,9 +342,9 @@ static int tls_push_data(struct sock *sk,
5026 + struct tls_context *tls_ctx = tls_get_ctx(sk);
5027 + struct tls_prot_info *prot = &tls_ctx->prot_info;
5028 + struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
5029 +- int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
5030 + int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
5031 + struct tls_record_info *record = ctx->open_record;
5032 ++ int tls_push_record_flags;
5033 + struct page_frag *pfrag;
5034 + size_t orig_size = size;
5035 + u32 max_open_record_len;
5036 +@@ -359,6 +359,9 @@ static int tls_push_data(struct sock *sk,
5037 + if (sk->sk_err)
5038 + return -sk->sk_err;
5039 +
5040 ++ flags |= MSG_SENDPAGE_DECRYPTED;
5041 ++ tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
5042 ++
5043 + timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
5044 + if (tls_is_partially_sent_record(tls_ctx)) {
5045 + rc = tls_push_partial_record(sk, tls_ctx, flags);
5046 +@@ -545,7 +548,9 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
5047 + gfp_t sk_allocation = sk->sk_allocation;
5048 +
5049 + sk->sk_allocation = GFP_ATOMIC;
5050 +- tls_push_partial_record(sk, ctx, MSG_DONTWAIT | MSG_NOSIGNAL);
5051 ++ tls_push_partial_record(sk, ctx,
5052 ++ MSG_DONTWAIT | MSG_NOSIGNAL |
5053 ++ MSG_SENDPAGE_DECRYPTED);
5054 + sk->sk_allocation = sk_allocation;
5055 + }
5056 + }
5057 +diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
5058 +index 8a5c4d645eb1..4bbf4fc163a2 100644
5059 +--- a/scripts/Kconfig.include
5060 ++++ b/scripts/Kconfig.include
5061 +@@ -25,7 +25,7 @@ failure = $(if-success,$(1),n,y)
5062 +
5063 + # $(cc-option,<flag>)
5064 + # Return y if the compiler supports <flag>, n otherwise
5065 +-cc-option = $(success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null)
5066 ++cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -E -x c /dev/null -o /dev/null)
5067 +
5068 + # $(ld-option,<flag>)
5069 + # Return y if the linker supports <flag>, n otherwise
5070 +diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
5071 +index 38d77353c66a..cea276955147 100644
5072 +--- a/scripts/Makefile.modpost
5073 ++++ b/scripts/Makefile.modpost
5074 +@@ -75,7 +75,7 @@ modpost = scripts/mod/modpost \
5075 + $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,) \
5076 + $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \
5077 + $(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \
5078 +- $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \
5079 ++ $(if $(KBUILD_EXTMOD),$(addprefix -e ,$(KBUILD_EXTRA_SYMBOLS))) \
5080 + $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
5081 + $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \
5082 + $(if $(KBUILD_MODPOST_WARN),-w)
5083 +diff --git a/security/keys/trusted.c b/security/keys/trusted.c
5084 +index 9a94672e7adc..ade699131065 100644
5085 +--- a/security/keys/trusted.c
5086 ++++ b/security/keys/trusted.c
5087 +@@ -1228,24 +1228,11 @@ hashalg_fail:
5088 +
5089 + static int __init init_digests(void)
5090 + {
5091 +- u8 digest[TPM_MAX_DIGEST_SIZE];
5092 +- int ret;
5093 +- int i;
5094 +-
5095 +- ret = tpm_get_random(chip, digest, TPM_MAX_DIGEST_SIZE);
5096 +- if (ret < 0)
5097 +- return ret;
5098 +- if (ret < TPM_MAX_DIGEST_SIZE)
5099 +- return -EFAULT;
5100 +-
5101 + digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
5102 + GFP_KERNEL);
5103 + if (!digests)
5104 + return -ENOMEM;
5105 +
5106 +- for (i = 0; i < chip->nr_allocated_banks; i++)
5107 +- memcpy(digests[i].digest, digest, TPM_MAX_DIGEST_SIZE);
5108 +-
5109 + return 0;
5110 + }
5111 +
5112 +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
5113 +index 12dd9b318db1..703857aab00f 100644
5114 +--- a/sound/core/pcm_native.c
5115 ++++ b/sound/core/pcm_native.c
5116 +@@ -1873,6 +1873,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
5117 + if (!to_check)
5118 + break; /* all drained */
5119 + init_waitqueue_entry(&wait, current);
5120 ++ set_current_state(TASK_INTERRUPTIBLE);
5121 + add_wait_queue(&to_check->sleep, &wait);
5122 + snd_pcm_stream_unlock_irq(substream);
5123 + if (runtime->no_period_wakeup)
5124 +@@ -1885,7 +1886,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
5125 + }
5126 + tout = msecs_to_jiffies(tout * 1000);
5127 + }
5128 +- tout = schedule_timeout_interruptible(tout);
5129 ++ tout = schedule_timeout(tout);
5130 +
5131 + snd_pcm_stream_lock_irq(substream);
5132 + group = snd_pcm_stream_group_ref(substream);
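
The snd_pcm_drain() fix follows the canonical sleep/wakeup discipline: mark the task sleeping with set_current_state(TASK_INTERRUPTIBLE) before add_wait_queue(), then call plain schedule_timeout(), so a wakeup arriving between the state check and the sleep cannot be lost. A userspace analogy with a condition variable, where checking the predicate under the lock plays the same race-closing role (compile with -pthread):

    #include <stdio.h>
    #include <stdbool.h>
    #include <unistd.h>
    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool drained;

    static void *completer(void *arg)
    {
        (void)arg;
        usleep(1000);                   /* the "stream drains" */
        pthread_mutex_lock(&lock);
        drained = true;
        pthread_cond_signal(&cond);     /* the wakeup */
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, completer, NULL);

        pthread_mutex_lock(&lock);
        /* Check the predicate and go to sleep atomically: a wakeup
         * cannot fire between the test and the wait, just as marking
         * the task sleeping before add_wait_queue() closes the window
         * in snd_pcm_drain(). */
        while (!drained)
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        printf("drained\n");
        return 0;
    }
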
5133 +diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
5134 +index 485edaba0037..5bf24fb819d2 100644
5135 +--- a/sound/pci/hda/hda_generic.c
5136 ++++ b/sound/pci/hda/hda_generic.c
5137 +@@ -6051,6 +6051,24 @@ void snd_hda_gen_free(struct hda_codec *codec)
5138 + }
5139 + EXPORT_SYMBOL_GPL(snd_hda_gen_free);
5140 +
5141 ++/**
5142 ++ * snd_hda_gen_reboot_notify - Make codec enter D3 before rebooting
5143 ++ * @codec: the HDA codec
5144 ++ *
5145 ++ * This can be put as patch_ops reboot_notify function.
5146 ++ */
5147 ++void snd_hda_gen_reboot_notify(struct hda_codec *codec)
5148 ++{
5149 ++ /* Make the codec enter D3 to avoid spurious noises from the internal
5150 ++ * speaker during (and after) reboot
5151 ++ */
5152 ++ snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
5153 ++ snd_hda_codec_write(codec, codec->core.afg, 0,
5154 ++ AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
5155 ++ msleep(10);
5156 ++}
5157 ++EXPORT_SYMBOL_GPL(snd_hda_gen_reboot_notify);
5158 ++
5159 + #ifdef CONFIG_PM
5160 + /**
5161 + * snd_hda_gen_check_power_status - check the loopback power save state
5162 +@@ -6078,6 +6096,7 @@ static const struct hda_codec_ops generic_patch_ops = {
5163 + .init = snd_hda_gen_init,
5164 + .free = snd_hda_gen_free,
5165 + .unsol_event = snd_hda_jack_unsol_event,
5166 ++ .reboot_notify = snd_hda_gen_reboot_notify,
5167 + #ifdef CONFIG_PM
5168 + .check_power_status = snd_hda_gen_check_power_status,
5169 + #endif
5170 +@@ -6100,7 +6119,7 @@ static int snd_hda_parse_generic_codec(struct hda_codec *codec)
5171 +
5172 + err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0);
5173 + if (err < 0)
5174 +- return err;
5175 ++ goto error;
5176 +
5177 + err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg);
5178 + if (err < 0)
5179 +diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
5180 +index 35a670a71c42..5f199dcb0d18 100644
5181 +--- a/sound/pci/hda/hda_generic.h
5182 ++++ b/sound/pci/hda/hda_generic.h
5183 +@@ -332,6 +332,7 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
5184 + struct auto_pin_cfg *cfg);
5185 + int snd_hda_gen_build_controls(struct hda_codec *codec);
5186 + int snd_hda_gen_build_pcms(struct hda_codec *codec);
5187 ++void snd_hda_gen_reboot_notify(struct hda_codec *codec);
5188 +
5189 + /* standard jack event callbacks */
5190 + void snd_hda_gen_hp_automute(struct hda_codec *codec,
5191 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
5192 +index fb8f452a1c78..5732c31c4167 100644
5193 +--- a/sound/pci/hda/hda_intel.c
5194 ++++ b/sound/pci/hda/hda_intel.c
5195 +@@ -2505,6 +2505,9 @@ static const struct pci_device_id azx_ids[] = {
5196 + /* AMD, X370 & co */
5197 + { PCI_DEVICE(0x1022, 0x1457),
5198 + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
5199 ++ /* AMD, X570 & co */
5200 ++ { PCI_DEVICE(0x1022, 0x1487),
5201 ++ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
5202 + /* AMD Stoney */
5203 + { PCI_DEVICE(0x1022, 0x157a),
5204 + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
5205 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
5206 +index f299f137eaea..14298ef45b21 100644
5207 +--- a/sound/pci/hda/patch_conexant.c
5208 ++++ b/sound/pci/hda/patch_conexant.c
5209 +@@ -163,23 +163,10 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
5210 + {
5211 + struct conexant_spec *spec = codec->spec;
5212 +
5213 +- switch (codec->core.vendor_id) {
5214 +- case 0x14f12008: /* CX8200 */
5215 +- case 0x14f150f2: /* CX20722 */
5216 +- case 0x14f150f4: /* CX20724 */
5217 +- break;
5218 +- default:
5219 +- return;
5220 +- }
5221 +-
5222 + /* Turn the problematic codec into D3 to avoid spurious noises
5223 + from the internal speaker during (and after) reboot */
5224 + cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
5225 +-
5226 +- snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
5227 +- snd_hda_codec_write(codec, codec->core.afg, 0,
5228 +- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
5229 +- msleep(10);
5230 ++ snd_hda_gen_reboot_notify(codec);
5231 + }
5232 +
5233 + static void cx_auto_free(struct hda_codec *codec)
5234 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5235 +index de224cbea7a0..e333b3e30e31 100644
5236 +--- a/sound/pci/hda/patch_realtek.c
5237 ++++ b/sound/pci/hda/patch_realtek.c
5238 +@@ -869,15 +869,6 @@ static void alc_reboot_notify(struct hda_codec *codec)
5239 + alc_shutup(codec);
5240 + }
5241 +
5242 +-/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
5243 +-static void alc_d3_at_reboot(struct hda_codec *codec)
5244 +-{
5245 +- snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
5246 +- snd_hda_codec_write(codec, codec->core.afg, 0,
5247 +- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
5248 +- msleep(10);
5249 +-}
5250 +-
5251 + #define alc_free snd_hda_gen_free
5252 +
5253 + #ifdef CONFIG_PM
5254 +@@ -5152,7 +5143,7 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
5255 + struct alc_spec *spec = codec->spec;
5256 +
5257 + if (action == HDA_FIXUP_ACT_PRE_PROBE) {
5258 +- spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
5259 ++ spec->reboot_notify = snd_hda_gen_reboot_notify; /* reduce noise */
5260 + spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
5261 + codec->power_save_node = 0; /* avoid click noises */
5262 + snd_hda_apply_pincfgs(codec, pincfgs);
5263 +@@ -6987,6 +6978,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5264 + SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
5265 + SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
5266 + SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
5267 ++ SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
5268 + SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
5269 + SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
5270 + SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5271 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
5272 +index 7498b5191b68..b5927c3d5bc0 100644
5273 +--- a/sound/usb/mixer.c
5274 ++++ b/sound/usb/mixer.c
5275 +@@ -68,6 +68,7 @@ struct mixer_build {
5276 + unsigned char *buffer;
5277 + unsigned int buflen;
5278 + DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
5279 ++ DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
5280 + struct usb_audio_term oterm;
5281 + const struct usbmix_name_map *map;
5282 + const struct usbmix_selector_map *selector_map;
5283 +@@ -744,6 +745,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
5284 + return -EINVAL;
5285 + if (!desc->bNrInPins)
5286 + return -EINVAL;
5287 ++ if (desc->bLength < sizeof(*desc) + desc->bNrInPins)
5288 ++ return -EINVAL;
5289 +
5290 + switch (state->mixer->protocol) {
5291 + case UAC_VERSION_1:
5292 +@@ -773,16 +776,25 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
5293 + * parse the source unit recursively until it reaches to a terminal
5294 + * or a branched unit.
5295 + */
5296 +-static int check_input_term(struct mixer_build *state, int id,
5297 ++static int __check_input_term(struct mixer_build *state, int id,
5298 + struct usb_audio_term *term)
5299 + {
5300 + int protocol = state->mixer->protocol;
5301 + int err;
5302 + void *p1;
5303 ++ unsigned char *hdr;
5304 +
5305 + memset(term, 0, sizeof(*term));
5306 +- while ((p1 = find_audio_control_unit(state, id)) != NULL) {
5307 +- unsigned char *hdr = p1;
5308 ++ for (;;) {
5309 ++ /* a loop in the terminal chain? */
5310 ++ if (test_and_set_bit(id, state->termbitmap))
5311 ++ return -EINVAL;
5312 ++
5313 ++ p1 = find_audio_control_unit(state, id);
5314 ++ if (!p1)
5315 ++ break;
5316 ++
5317 ++ hdr = p1;
5318 + term->id = id;
5319 +
5320 + if (protocol == UAC_VERSION_1 || protocol == UAC_VERSION_2) {
5321 +@@ -800,7 +812,7 @@ static int check_input_term(struct mixer_build *state, int id,
5322 +
5323 + /* call recursively to verify that the
5324 + * referenced clock entity is valid */
5325 +- err = check_input_term(state, d->bCSourceID, term);
5326 ++ err = __check_input_term(state, d->bCSourceID, term);
5327 + if (err < 0)
5328 + return err;
5329 +
5330 +@@ -834,7 +846,7 @@ static int check_input_term(struct mixer_build *state, int id,
5331 + case UAC2_CLOCK_SELECTOR: {
5332 + struct uac_selector_unit_descriptor *d = p1;
5333 + /* call recursively to retrieve the channel info */
5334 +- err = check_input_term(state, d->baSourceID[0], term);
5335 ++ err = __check_input_term(state, d->baSourceID[0], term);
5336 + if (err < 0)
5337 + return err;
5338 + term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
5339 +@@ -897,7 +909,7 @@ static int check_input_term(struct mixer_build *state, int id,
5340 +
5341 + /* call recursively to verify that the
5342 + * referenced clock entity is valid */
5343 +- err = check_input_term(state, d->bCSourceID, term);
5344 ++ err = __check_input_term(state, d->bCSourceID, term);
5345 + if (err < 0)
5346 + return err;
5347 +
5348 +@@ -948,7 +960,7 @@ static int check_input_term(struct mixer_build *state, int id,
5349 + case UAC3_CLOCK_SELECTOR: {
5350 + struct uac_selector_unit_descriptor *d = p1;
5351 + /* call recursively to retrieve the channel info */
5352 +- err = check_input_term(state, d->baSourceID[0], term);
5353 ++ err = __check_input_term(state, d->baSourceID[0], term);
5354 + if (err < 0)
5355 + return err;
5356 + term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
5357 +@@ -964,7 +976,7 @@ static int check_input_term(struct mixer_build *state, int id,
5358 + return -EINVAL;
5359 +
5360 + /* call recursively to retrieve the channel info */
5361 +- err = check_input_term(state, d->baSourceID[0], term);
5362 ++ err = __check_input_term(state, d->baSourceID[0], term);
5363 + if (err < 0)
5364 + return err;
5365 +
5366 +@@ -982,6 +994,15 @@ static int check_input_term(struct mixer_build *state, int id,
5367 + return -ENODEV;
5368 + }
5369 +
5370 ++
5371 ++static int check_input_term(struct mixer_build *state, int id,
5372 ++ struct usb_audio_term *term)
5373 ++{
5374 ++ memset(term, 0, sizeof(*term));
5375 ++ memset(state->termbitmap, 0, sizeof(state->termbitmap));
5376 ++ return __check_input_term(state, id, term);
5377 ++}
5378 ++
5379 + /*
5380 + * Feature Unit
5381 + */
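
The mixer changes guard the recursive descriptor walk in two ways: a length check so the bNrInPins sources actually fit inside the descriptor, and a termbitmap that fails with -EINVAL when a unit id repeats, i.e. when malicious descriptors form a loop. A standalone sketch of the loop guard (a plain array of next-unit ids stands in for the descriptor graph):

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    #define MAX_ID 16

    static int next_unit[MAX_ID];       /* -1 terminates the chain */

    static int follow_chain(int id)
    {
        unsigned char seen[MAX_ID] = { 0 };

        while (id >= 0) {
            if (seen[id])
                return -EINVAL;         /* a loop in the terminal chain */
            seen[id] = 1;
            id = next_unit[id];
        }
        return 0;
    }

    int main(void)
    {
        memset(next_unit, -1, sizeof(next_unit));
        next_unit[1] = 2;
        next_unit[2] = 3;
        printf("acyclic: %d\n", follow_chain(1));   /* 0 */

        next_unit[3] = 1;                           /* 1 -> 2 -> 3 -> 1 */
        printf("cyclic:  %d\n", follow_chain(1));   /* -EINVAL */
        return 0;
    }
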
5382 +diff --git a/tools/perf/trace/beauty/usbdevfs_ioctl.sh b/tools/perf/trace/beauty/usbdevfs_ioctl.sh
5383 +index 930b80f422e8..aa597ae53747 100755
5384 +--- a/tools/perf/trace/beauty/usbdevfs_ioctl.sh
5385 ++++ b/tools/perf/trace/beauty/usbdevfs_ioctl.sh
5386 +@@ -3,10 +3,13 @@
5387 +
5388 + [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
5389 +
5390 ++# also as:
5391 ++# #define USBDEVFS_CONNINFO_EX(len) _IOC(_IOC_READ, 'U', 32, len)
5392 ++
5393 + printf "static const char *usbdevfs_ioctl_cmds[] = {\n"
5394 +-regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)[[:space:]]+_IO[WR]{0,2}\([[:space:]]*'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*"
5395 +-egrep $regex ${header_dir}/usbdevice_fs.h | egrep -v 'USBDEVFS_\w+32[[:space:]]' | \
5396 +- sed -r "s/$regex/\2 \1/g" | \
5397 ++regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)(\(\w+\))?[[:space:]]+_IO[CWR]{0,2}\([[:space:]]*(_IOC_\w+,[[:space:]]*)?'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*"
5398 ++egrep "$regex" ${header_dir}/usbdevice_fs.h | egrep -v 'USBDEVFS_\w+32[[:space:]]' | \
5399 ++ sed -r "s/$regex/\4 \1/g" | \
5400 + sort | xargs printf "\t[%s] = \"%s\",\n"
5401 + printf "};\n\n"
5402 + printf "#if 0\n"
5403 +diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
5404 +index e84b70be3fc1..abe9af867967 100644
5405 +--- a/tools/perf/util/header.c
5406 ++++ b/tools/perf/util/header.c
5407 +@@ -3478,6 +3478,13 @@ int perf_session__read_header(struct perf_session *session)
5408 + data->file.path);
5409 + }
5410 +
5411 ++ if (f_header.attr_size == 0) {
5412 ++ pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
5413 ++ "Was the 'perf record' command properly terminated?\n",
5414 ++ data->file.path);
5415 ++ return -EINVAL;
5416 ++ }
5417 ++
5418 + nr_attrs = f_header.attrs.size / f_header.attr_size;
5419 + lseek(fd, f_header.attrs.offset, SEEK_SET);
5420 +
5421 +@@ -3558,7 +3565,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
5422 + size += sizeof(struct perf_event_header);
5423 + size += ids * sizeof(u64);
5424 +
5425 +- ev = malloc(size);
5426 ++ ev = zalloc(size);
5427 +
5428 + if (ev == NULL)
5429 + return -ENOMEM;
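
The perf header fix rejects attr_size == 0 before it is used as a divisor when computing nr_attrs, turning a crash on a truncated perf.data file into a clean error (the zalloc() change separately avoids leaking uninitialized bytes into a synthesized event). A minimal sketch of the guard (field names shortened; illustrative only):

    #include <stdio.h>
    #include <stdint.h>
    #include <errno.h>

    static int read_header_model(uint64_t attrs_size, uint64_t attr_size)
    {
        if (attr_size == 0)
            return -EINVAL;             /* truncated or garbled file */

        uint64_t nr_attrs = attrs_size / attr_size;
        printf("nr_attrs = %llu\n", (unsigned long long)nr_attrs);
        return 0;
    }

    int main(void)
    {
        printf("ok:  %d\n", read_header_model(256, 128));
        printf("bad: %d\n", read_header_model(256, 0));
        return 0;
    }
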
5430 +diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
5431 +index ecd96eda7f6a..e11b7c1efda3 100644
5432 +--- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
5433 ++++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
5434 +@@ -509,5 +509,52 @@
5435 + "teardown": [
5436 + "$TC actions flush action skbedit"
5437 + ]
5438 ++ },
5439 ++ {
5440 ++ "id": "630c",
5441 ++ "name": "Add batch of 32 skbedit actions with all parameters and cookie",
5442 ++ "category": [
5443 ++ "actions",
5444 ++ "skbedit"
5445 ++ ],
5446 ++ "setup": [
5447 ++ [
5448 ++ "$TC actions flush action skbedit",
5449 ++ 0,
5450 ++ 1,
5451 ++ 255
5452 ++ ]
5453 ++ ],
5454 ++ "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit queue_mapping 2 priority 10 mark 7/0xaabbccdd ptype host inheritdsfield index \\$i cookie aabbccddeeff112233445566778800a1 \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
5455 ++ "expExitCode": "0",
5456 ++ "verifyCmd": "$TC actions list action skbedit",
5457 ++ "matchPattern": "^[ \t]+index [0-9]+ ref",
5458 ++ "matchCount": "32",
5459 ++ "teardown": [
5460 ++ "$TC actions flush action skbedit"
5461 ++ ]
5462 ++ },
5463 ++ {
5464 ++ "id": "706d",
5465 ++ "name": "Delete batch of 32 skbedit actions with all parameters",
5466 ++ "category": [
5467 ++ "actions",
5468 ++ "skbedit"
5469 ++ ],
5470 ++ "setup": [
5471 ++ [
5472 ++ "$TC actions flush action skbedit",
5473 ++ 0,
5474 ++ 1,
5475 ++ 255
5476 ++ ],
5477 ++ "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit queue_mapping 2 priority 10 mark 7/0xaabbccdd ptype host inheritdsfield index \\$i \\\"; args=\\\"\\$args\\$cmd\\\"; done && $TC actions add \\$args\""
5478 ++ ],
5479 ++ "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions del \\$args\"",
5480 ++ "expExitCode": "0",
5481 ++ "verifyCmd": "$TC actions list action skbedit",
5482 ++ "matchPattern": "^[ \t]+index [0-9]+ ref",
5483 ++ "matchCount": "0",
5484 ++ "teardown": []
5485 + }
5486 + ]