Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Thu, 31 Jan 2019 11:28:11
Message-Id: 1548934046.6670ad51f7a31da618109cb0747df2242d01d72d.mpagano@gentoo
1 commit: 6670ad51f7a31da618109cb0747df2242d01d72d
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu Jan 31 11:27:26 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu Jan 31 11:27:26 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6670ad51
7
8 proj/linux-patches: Linux patch 4.19.19
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1018_linux-4.19.19.patch | 4389 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 4393 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 776e758..b459632 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -115,6 +115,10 @@ Patch: 1017_linux-4.19.18.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.19.18
23
24 +Patch: 1018_linux-4.19.19.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.19.19
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1018_linux-4.19.19.patch b/1018_linux-4.19.19.patch
33 new file mode 100644
34 index 0000000..ffdaa82
35 --- /dev/null
36 +++ b/1018_linux-4.19.19.patch
37 @@ -0,0 +1,4389 @@
38 +diff --git a/Makefile b/Makefile
39 +index 9f37a8a9feb9..39c4e7c3c13c 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 19
46 +-SUBLEVEL = 18
47 ++SUBLEVEL = 19
48 + EXTRAVERSION =
49 + NAME = "People's Front"
50 +
51 +diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
52 +index 9185541035cc..6958545390f0 100644
53 +--- a/arch/arc/include/asm/perf_event.h
54 ++++ b/arch/arc/include/asm/perf_event.h
55 +@@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = {
56 +
57 + /* counts condition */
58 + [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
59 +- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
60 ++ /* All jump instructions that are taken */
61 ++ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
62 + [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
63 + #ifdef CONFIG_ISA_ARCV2
64 + [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
65 +diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
66 +index 62ad4bcb841a..f230bb7092fd 100644
67 +--- a/arch/arc/lib/memset-archs.S
68 ++++ b/arch/arc/lib/memset-archs.S
69 +@@ -7,11 +7,39 @@
70 + */
71 +
72 + #include <linux/linkage.h>
73 ++#include <asm/cache.h>
74 +
75 +-#undef PREALLOC_NOT_AVAIL
76 ++/*
77 ++ * The memset implementation below is optimized to use prefetchw and prealloc
78 ++ * instructions in case of a CPU with a 64B L1 data cache line (L1_CACHE_SHIFT == 6).
79 ++ * If you want to implement an optimized memset for other possible L1 data cache
80 ++ * line lengths (32B and 128B) you should rewrite the code, carefully checking
81 ++ * that we don't call any prefetchw/prealloc instruction for L1 cache lines which
82 ++ * don't belong to the memset area.
83 ++ */
84 ++
85 ++#if L1_CACHE_SHIFT == 6
86 ++
87 ++.macro PREALLOC_INSTR reg, off
88 ++ prealloc [\reg, \off]
89 ++.endm
90 ++
91 ++.macro PREFETCHW_INSTR reg, off
92 ++ prefetchw [\reg, \off]
93 ++.endm
94 ++
95 ++#else
96 ++
97 ++.macro PREALLOC_INSTR
98 ++.endm
99 ++
100 ++.macro PREFETCHW_INSTR
101 ++.endm
102 ++
103 ++#endif
104 +
105 + ENTRY_CFI(memset)
106 +- prefetchw [r0] ; Prefetch the write location
107 ++ PREFETCHW_INSTR r0, 0 ; Prefetch the first write location
108 + mov.f 0, r2
109 + ;;; if size is zero
110 + jz.d [blink]
111 +@@ -48,11 +76,8 @@ ENTRY_CFI(memset)
112 +
113 + lpnz @.Lset64bytes
114 + ;; LOOP START
115 +-#ifdef PREALLOC_NOT_AVAIL
116 +- prefetchw [r3, 64] ;Prefetch the next write location
117 +-#else
118 +- prealloc [r3, 64]
119 +-#endif
120 ++ PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching
121 ++
122 + #ifdef CONFIG_ARC_HAS_LL64
123 + std.ab r4, [r3, 8]
124 + std.ab r4, [r3, 8]
125 +@@ -85,7 +110,6 @@ ENTRY_CFI(memset)
126 + lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
127 + lpnz .Lset32bytes
128 + ;; LOOP START
129 +- prefetchw [r3, 32] ;Prefetch the next write location
130 + #ifdef CONFIG_ARC_HAS_LL64
131 + std.ab r4, [r3, 8]
132 + std.ab r4, [r3, 8]
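
The memset hunks above only emit prefetchw/prealloc when L1_CACHE_SHIFT == 6, because prealloc allocates a whole 64B line without fetching it and would destroy neighbouring data if that line extended past the buffer being set. A minimal C sketch of the underlying safety condition, assuming a 64B line (the helper name is hypothetical, not from the kernel tree):

    #include <stddef.h>
    #include <stdint.h>

    #define CACHE_LINE 64 /* assumes L1_CACHE_SHIFT == 6 */

    /* Nonzero when the cache line holding (p + off) lies entirely inside
     * [p, p + len), i.e. a destructive "allocate without fetch" hint on
     * that line cannot clobber data outside the memset area. */
    static int line_inside(const char *p, size_t off, size_t len)
    {
            uintptr_t line = ((uintptr_t)p + off) & ~(uintptr_t)(CACHE_LINE - 1);

            return line >= (uintptr_t)p &&
                   line + CACHE_LINE <= (uintptr_t)p + len;
    }
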
133 +diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
134 +index ba145065c579..f890b2f9f82f 100644
135 +--- a/arch/arc/mm/init.c
136 ++++ b/arch/arc/mm/init.c
137 +@@ -138,7 +138,8 @@ void __init setup_arch_memory(void)
138 + */
139 +
140 + memblock_add_node(low_mem_start, low_mem_sz, 0);
141 +- memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
142 ++ memblock_reserve(CONFIG_LINUX_LINK_BASE,
143 ++ __pa(_end) - CONFIG_LINUX_LINK_BASE);
144 +
145 + #ifdef CONFIG_BLK_DEV_INITRD
146 + if (initrd_start)
147 +diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
148 +index f1ab9420ccfb..09b61d0e491f 100644
149 +--- a/arch/s390/include/asm/mmu_context.h
150 ++++ b/arch/s390/include/asm/mmu_context.h
151 +@@ -89,8 +89,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
152 + {
153 + int cpu = smp_processor_id();
154 +
155 +- if (prev == next)
156 +- return;
157 + S390_lowcore.user_asce = next->context.asce;
158 + cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
159 + /* Clear previous user-ASCE from CR1 and CR7 */
160 +@@ -102,7 +100,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
161 + __ctl_load(S390_lowcore.vdso_asce, 7, 7);
162 + clear_cpu_flag(CIF_ASCE_SECONDARY);
163 + }
164 +- cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
165 ++ if (prev != next)
166 ++ cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
167 + }
168 +
169 + #define finish_arch_post_lock_switch finish_arch_post_lock_switch
170 +diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
171 +index 5b28b434f8a1..e7e6608b996c 100644
172 +--- a/arch/s390/kernel/early.c
173 ++++ b/arch/s390/kernel/early.c
174 +@@ -64,10 +64,10 @@ static noinline __init void detect_machine_type(void)
175 + if (stsi(vmms, 3, 2, 2) || !vmms->count)
176 + return;
177 +
178 +- /* Running under KVM? If not we assume z/VM */
179 ++ /* Detect known hypervisors */
180 + if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
181 + S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
182 +- else
183 ++ else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
184 + S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
185 + }
186 +
187 +diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
188 +index c637c12f9e37..a0097f8bada8 100644
189 +--- a/arch/s390/kernel/setup.c
190 ++++ b/arch/s390/kernel/setup.c
191 +@@ -882,6 +882,8 @@ void __init setup_arch(char **cmdline_p)
192 + pr_info("Linux is running under KVM in 64-bit mode\n");
193 + else if (MACHINE_IS_LPAR)
194 + pr_info("Linux is running natively in 64-bit mode\n");
195 ++ else
196 ++ pr_info("Linux is running as a guest in 64-bit mode\n");
197 +
198 + /* Have one command line that is parsed and saved in /proc/cmdline */
199 + /* boot_command_line has been already set up in early.c */
200 +diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
201 +index 2f8f7d7dd9a8..da02f4087d61 100644
202 +--- a/arch/s390/kernel/smp.c
203 ++++ b/arch/s390/kernel/smp.c
204 +@@ -371,9 +371,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
205 + */
206 + void smp_call_ipl_cpu(void (*func)(void *), void *data)
207 + {
208 ++ struct lowcore *lc = pcpu_devices->lowcore;
209 ++
210 ++ if (pcpu_devices[0].address == stap())
211 ++ lc = &S390_lowcore;
212 ++
213 + pcpu_delegate(&pcpu_devices[0], func, data,
214 +- pcpu_devices->lowcore->panic_stack -
215 +- PANIC_FRAME_OFFSET + PAGE_SIZE);
216 ++ lc->panic_stack - PANIC_FRAME_OFFSET + PAGE_SIZE);
217 + }
218 +
219 + int smp_find_processor_id(u16 address)
220 +@@ -1152,7 +1156,11 @@ static ssize_t __ref rescan_store(struct device *dev,
221 + {
222 + int rc;
223 +
224 ++ rc = lock_device_hotplug_sysfs();
225 ++ if (rc)
226 ++ return rc;
227 + rc = smp_rescan_cpus();
228 ++ unlock_device_hotplug();
229 + return rc ? rc : count;
230 + }
231 + static DEVICE_ATTR_WO(rescan);
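
The rescan_store() hunk takes the device hotplug lock before rescanning so a concurrent CPU hotplug operation cannot race with the sysfs write; lock_device_hotplug_sysfs() returns nonzero when the lock cannot be taken and the syscall should be retried. A sketch of the pattern, with my_rescan() as a hypothetical stand-in for smp_rescan_cpus():

    static ssize_t rescan_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
    {
            int rc;

            rc = lock_device_hotplug_sysfs(); /* nonzero: bail out, retry */
            if (rc)
                    return rc;
            rc = my_rescan();
            unlock_device_hotplug();
            return rc ? rc : count;
    }
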
232 +diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
233 +index 7d0df78db727..40d2834a8101 100644
234 +--- a/arch/x86/entry/entry_64_compat.S
235 ++++ b/arch/x86/entry/entry_64_compat.S
236 +@@ -356,7 +356,8 @@ ENTRY(entry_INT80_compat)
237 +
238 + /* Need to switch before accessing the thread stack. */
239 + SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
240 +- movq %rsp, %rdi
241 ++ /* In the Xen PV case we already run on the thread stack. */
242 ++ ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
243 + movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
244 +
245 + pushq 6*8(%rdi) /* regs->ss */
246 +@@ -365,8 +366,9 @@ ENTRY(entry_INT80_compat)
247 + pushq 3*8(%rdi) /* regs->cs */
248 + pushq 2*8(%rdi) /* regs->ip */
249 + pushq 1*8(%rdi) /* regs->orig_ax */
250 +-
251 + pushq (%rdi) /* pt_regs->di */
252 ++.Lint80_keep_stack:
253 ++
254 + pushq %rsi /* pt_regs->si */
255 + xorl %esi, %esi /* nospec si */
256 + pushq %rdx /* pt_regs->dx */
257 +diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
258 +index eeeb9289c764..2252b63d38b5 100644
259 +--- a/arch/x86/include/asm/mmu_context.h
260 ++++ b/arch/x86/include/asm/mmu_context.h
261 +@@ -178,6 +178,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
262 +
263 + void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
264 +
265 ++/*
266 ++ * Init a new mm. Used on mm copies, like at fork()
267 ++ * and on mm's that are brand-new, like at execve().
268 ++ */
269 + static inline int init_new_context(struct task_struct *tsk,
270 + struct mm_struct *mm)
271 + {
272 +@@ -228,8 +232,22 @@ do { \
273 + } while (0)
274 + #endif
275 +
276 ++static inline void arch_dup_pkeys(struct mm_struct *oldmm,
277 ++ struct mm_struct *mm)
278 ++{
279 ++#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
280 ++ if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
281 ++ return;
282 ++
283 ++ /* Duplicate the oldmm pkey state in mm: */
284 ++ mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
285 ++ mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
286 ++#endif
287 ++}
288 ++
289 + static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
290 + {
291 ++ arch_dup_pkeys(oldmm, mm);
292 + paravirt_arch_dup_mmap(oldmm, mm);
293 + return ldt_dup_context(oldmm, mm);
294 + }
295 +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
296 +index d9b71924c23c..7f89d609095a 100644
297 +--- a/arch/x86/kernel/kvm.c
298 ++++ b/arch/x86/kernel/kvm.c
299 +@@ -457,6 +457,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
300 + #else
301 + u64 ipi_bitmap = 0;
302 + #endif
303 ++ long ret;
304 +
305 + if (cpumask_empty(mask))
306 + return;
307 +@@ -482,8 +483,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
308 + } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
309 + max = apic_id < max ? max : apic_id;
310 + } else {
311 +- kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
312 ++ ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
313 + (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
314 ++ WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
315 + min = max = apic_id;
316 + ipi_bitmap = 0;
317 + }
318 +@@ -491,8 +493,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
319 + }
320 +
321 + if (ipi_bitmap) {
322 +- kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
323 ++ ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
324 + (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
325 ++ WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
326 + }
327 +
328 + local_irq_restore(flags);
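
The __send_ipi_mask() hunks stop discarding the hypercall's return value. WARN_ONCE() prints a backtrace only on the first failing evaluation at a given call site and returns the condition, so it can also gate a recovery path. A minimal sketch (the fallback branch is hypothetical, not part of this patch):

    long ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
                              (unsigned long)(ipi_bitmap >> BITS_PER_LONG),
                              min, icr);
    if (WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret))
            return; /* hypothetical fallback, e.g. to non-PV IPIs */
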
329 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
330 +index 841740045554..39a0e34ff676 100644
331 +--- a/arch/x86/kvm/vmx.c
332 ++++ b/arch/x86/kvm/vmx.c
333 +@@ -8290,11 +8290,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
334 + if (r < 0)
335 + goto out_vmcs02;
336 +
337 +- vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
338 ++ vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
339 + if (!vmx->nested.cached_vmcs12)
340 + goto out_cached_vmcs12;
341 +
342 +- vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
343 ++ vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
344 + if (!vmx->nested.cached_shadow_vmcs12)
345 + goto out_cached_shadow_vmcs12;
346 +
347 +@@ -11733,7 +11733,7 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
348 + !nested_exit_intr_ack_set(vcpu) ||
349 + (vmcs12->posted_intr_nv & 0xff00) ||
350 + (vmcs12->posted_intr_desc_addr & 0x3f) ||
351 +- (!page_address_valid(vcpu, vmcs12->posted_intr_desc_addr))))
352 ++ (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))
353 + return -EINVAL;
354 +
355 + /* tpr shadow is needed by all apicv features. */
356 +@@ -13984,13 +13984,17 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
357 + else if (enable_shadow_vmcs && !vmx->nested.sync_shadow_vmcs)
358 + copy_shadow_to_vmcs12(vmx);
359 +
360 +- if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
361 ++ /*
362 ++ * Copy over the full allocated size of vmcs12 rather than just the size
363 ++ * of the struct.
364 ++ */
365 ++ if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
366 + return -EFAULT;
367 +
368 + if (nested_cpu_has_shadow_vmcs(vmcs12) &&
369 + vmcs12->vmcs_link_pointer != -1ull) {
370 + if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
371 +- get_shadow_vmcs12(vcpu), sizeof(*vmcs12)))
372 ++ get_shadow_vmcs12(vcpu), VMCS12_SIZE))
373 + return -EFAULT;
374 + }
375 +
376 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
377 +index 956eecd227f8..5a9a3ebe8fba 100644
378 +--- a/arch/x86/kvm/x86.c
379 ++++ b/arch/x86/kvm/x86.c
380 +@@ -6277,8 +6277,7 @@ restart:
381 + toggle_interruptibility(vcpu, ctxt->interruptibility);
382 + vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
383 + kvm_rip_write(vcpu, ctxt->eip);
384 +- if (r == EMULATE_DONE &&
385 +- (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
386 ++ if (r == EMULATE_DONE && ctxt->tf)
387 + kvm_vcpu_do_singlestep(vcpu, &r);
388 + if (!ctxt->have_exception ||
389 + exception_type(ctxt->exception.vector) == EXCPT_TRAP)
390 +@@ -6868,10 +6867,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
391 + case KVM_HC_CLOCK_PAIRING:
392 + ret = kvm_pv_clock_pairing(vcpu, a0, a1);
393 + break;
394 ++#endif
395 + case KVM_HC_SEND_IPI:
396 + ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
397 + break;
398 +-#endif
399 + default:
400 + ret = -KVM_ENOSYS;
401 + break;
402 +diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
403 +index 79778ab200e4..a53665116458 100644
404 +--- a/arch/x86/lib/kaslr.c
405 ++++ b/arch/x86/lib/kaslr.c
406 +@@ -36,8 +36,8 @@ static inline u16 i8254(void)
407 + u16 status, timer;
408 +
409 + do {
410 +- outb(I8254_PORT_CONTROL,
411 +- I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
412 ++ outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
413 ++ I8254_PORT_CONTROL);
414 + status = inb(I8254_PORT_COUNTER0);
415 + timer = inb(I8254_PORT_COUNTER0);
416 + timer |= inb(I8254_PORT_COUNTER0) << 8;
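
The kaslr.c fix swaps two arguments that had been passed in the wrong order: Linux's port-I/O helper is outb(u8 value, u16 port), value first, while inb(u16 port) returns the byte read. With the correct order, programming the i8254 read-back command reads:

    outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0, I8254_PORT_CONTROL);
    status = inb(I8254_PORT_COUNTER0); /* latched status byte */
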
417 +diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
418 +index 75b331f8a16a..ea59c01ce8db 100644
419 +--- a/drivers/acpi/nfit/core.c
420 ++++ b/drivers/acpi/nfit/core.c
421 +@@ -391,6 +391,32 @@ static u8 nfit_dsm_revid(unsigned family, unsigned func)
422 + return id;
423 + }
424 +
425 ++static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
426 ++ struct nd_cmd_pkg *call_pkg)
427 ++{
428 ++ if (call_pkg) {
429 ++ int i;
430 ++
431 ++ if (nfit_mem->family != call_pkg->nd_family)
432 ++ return -ENOTTY;
433 ++
434 ++ for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
435 ++ if (call_pkg->nd_reserved2[i])
436 ++ return -EINVAL;
437 ++ return call_pkg->nd_command;
438 ++ }
439 ++
440 ++ /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
441 ++ if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
442 ++ return cmd;
443 ++
444 ++ /*
445 ++ * Force function number validation to fail since 0 is never
446 ++ * published as a valid function in dsm_mask.
447 ++ */
448 ++ return 0;
449 ++}
450 ++
451 + int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
452 + unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
453 + {
454 +@@ -404,30 +430,23 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
455 + unsigned long cmd_mask, dsm_mask;
456 + u32 offset, fw_status = 0;
457 + acpi_handle handle;
458 +- unsigned int func;
459 + const guid_t *guid;
460 +- int rc, i;
461 ++ int func, rc, i;
462 +
463 + if (cmd_rc)
464 + *cmd_rc = -EINVAL;
465 +- func = cmd;
466 +- if (cmd == ND_CMD_CALL) {
467 +- call_pkg = buf;
468 +- func = call_pkg->nd_command;
469 +-
470 +- for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
471 +- if (call_pkg->nd_reserved2[i])
472 +- return -EINVAL;
473 +- }
474 +
475 + if (nvdimm) {
476 + struct acpi_device *adev = nfit_mem->adev;
477 +
478 + if (!adev)
479 + return -ENOTTY;
480 +- if (call_pkg && nfit_mem->family != call_pkg->nd_family)
481 +- return -ENOTTY;
482 +
483 ++ if (cmd == ND_CMD_CALL)
484 ++ call_pkg = buf;
485 ++ func = cmd_to_func(nfit_mem, cmd, call_pkg);
486 ++ if (func < 0)
487 ++ return func;
488 + dimm_name = nvdimm_name(nvdimm);
489 + cmd_name = nvdimm_cmd_name(cmd);
490 + cmd_mask = nvdimm_cmd_mask(nvdimm);
491 +@@ -438,6 +457,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
492 + } else {
493 + struct acpi_device *adev = to_acpi_dev(acpi_desc);
494 +
495 ++ func = cmd;
496 + cmd_name = nvdimm_bus_cmd_name(cmd);
497 + cmd_mask = nd_desc->cmd_mask;
498 + dsm_mask = cmd_mask;
499 +@@ -452,7 +472,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
500 + if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
501 + return -ENOTTY;
502 +
503 +- if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
504 ++ /*
505 ++ * Check for a valid command. For ND_CMD_CALL, we also have to
506 ++ * make sure that the DSM function is supported.
507 ++ */
508 ++ if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
509 ++ return -ENOTTY;
510 ++ else if (!test_bit(cmd, &cmd_mask))
511 + return -ENOTTY;
512 +
513 + in_obj.type = ACPI_TYPE_PACKAGE;
514 +@@ -1764,6 +1790,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
515 + return 0;
516 + }
517 +
518 ++ /*
519 ++ * Function 0 is the command interrogation function; don't
520 ++ * export it for potential userspace use, and enable it to be
521 ++ * used as an error value in acpi_nfit_ctl().
522 ++ */
523 ++ dsm_mask &= ~1UL;
524 ++
525 + guid = to_nfit_uuid(nfit_mem->family);
526 + for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
527 + if (acpi_check_dsm(adev_dimm->handle, guid,
528 +diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
529 +index b5e3103c1175..e43c876a9223 100644
530 +--- a/drivers/char/mwave/mwavedd.c
531 ++++ b/drivers/char/mwave/mwavedd.c
532 +@@ -59,6 +59,7 @@
533 + #include <linux/mutex.h>
534 + #include <linux/delay.h>
535 + #include <linux/serial_8250.h>
536 ++#include <linux/nospec.h>
537 + #include "smapi.h"
538 + #include "mwavedd.h"
539 + #include "3780i.h"
540 +@@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
541 + ipcnum);
542 + return -EINVAL;
543 + }
544 ++ ipcnum = array_index_nospec(ipcnum,
545 ++ ARRAY_SIZE(pDrvData->IPCs));
546 + PRINTK_3(TRACE_MWAVE,
547 + "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
548 + " ipcnum %x entry usIntCount %x\n",
549 +@@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
550 + " Invalid ipcnum %x\n", ipcnum);
551 + return -EINVAL;
552 + }
553 ++ ipcnum = array_index_nospec(ipcnum,
554 ++ ARRAY_SIZE(pDrvData->IPCs));
555 + PRINTK_3(TRACE_MWAVE,
556 + "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
557 + " ipcnum %x, usIntCount %x\n",
558 +@@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
559 + ipcnum);
560 + return -EINVAL;
561 + }
562 ++ ipcnum = array_index_nospec(ipcnum,
563 ++ ARRAY_SIZE(pDrvData->IPCs));
564 + mutex_lock(&mwave_mutex);
565 + if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
566 + pDrvData->IPCs[ipcnum].bIsEnabled = false;
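
Each mwave ioctl hunk follows the same two-step Spectre-v1 hardening: validate the index, then clamp it with array_index_nospec() so that even a mispredicted bounds check cannot use an out-of-range value speculatively. The generic shape, with table as a hypothetical array:

    #include <linux/nospec.h>

    if (idx >= ARRAY_SIZE(table))
            return -EINVAL;
    idx = array_index_nospec(idx, ARRAY_SIZE(table));
    val = table[idx]; /* safe even under speculative execution */
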
567 +diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
568 +index 2d5d8b43727e..c4d0b6f6abf2 100644
569 +--- a/drivers/clk/socfpga/clk-pll-s10.c
570 ++++ b/drivers/clk/socfpga/clk-pll-s10.c
571 +@@ -43,7 +43,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
572 + /* Read mdiv and fdiv from the fdbck register */
573 + reg = readl(socfpgaclk->hw.reg + 0x4);
574 + mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT;
575 +- vco_freq = (unsigned long long)parent_rate * (mdiv + 6);
576 ++ vco_freq = (unsigned long long)vco_freq * (mdiv + 6);
577 +
578 + return (unsigned long)vco_freq;
579 + }
580 +diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
581 +index 5b238fc314ac..8281dfbf38c2 100644
582 +--- a/drivers/clk/socfpga/clk-s10.c
583 ++++ b/drivers/clk/socfpga/clk-s10.c
584 +@@ -12,17 +12,17 @@
585 +
586 + #include "stratix10-clk.h"
587 +
588 +-static const char * const pll_mux[] = { "osc1", "cb_intosc_hs_div2_clk",
589 +- "f2s_free_clk",};
590 ++static const char * const pll_mux[] = { "osc1", "cb-intosc-hs-div2-clk",
591 ++ "f2s-free-clk",};
592 + static const char * const cntr_mux[] = { "main_pll", "periph_pll",
593 +- "osc1", "cb_intosc_hs_div2_clk",
594 +- "f2s_free_clk"};
595 +-static const char * const boot_mux[] = { "osc1", "cb_intosc_hs_div2_clk",};
596 ++ "osc1", "cb-intosc-hs-div2-clk",
597 ++ "f2s-free-clk"};
598 ++static const char * const boot_mux[] = { "osc1", "cb-intosc-hs-div2-clk",};
599 +
600 + static const char * const noc_free_mux[] = {"main_noc_base_clk",
601 + "peri_noc_base_clk",
602 +- "osc1", "cb_intosc_hs_div2_clk",
603 +- "f2s_free_clk"};
604 ++ "osc1", "cb-intosc-hs-div2-clk",
605 ++ "f2s-free-clk"};
606 +
607 + static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"};
608 + static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"};
609 +@@ -33,14 +33,14 @@ static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk"
610 + static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"};
611 + static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",};
612 +
613 +-static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"};
614 ++static const char * const s2f_usr0_mux[] = {"f2s-free-clk", "boot_clk"};
615 + static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"};
616 + static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"};
617 +
618 + static const char * const mpu_free_mux[] = {"main_mpu_base_clk",
619 + "peri_mpu_base_clk",
620 +- "osc1", "cb_intosc_hs_div2_clk",
621 +- "f2s_free_clk"};
622 ++ "osc1", "cb-intosc-hs-div2-clk",
623 ++ "f2s-free-clk"};
624 +
625 + /* clocks in AO (always on) controller */
626 + static const struct stratix10_pll_clock s10_pll_clks[] = {
627 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
628 +index a028661d9e20..92b11de19581 100644
629 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
630 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
631 +@@ -576,6 +576,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
632 + { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
633 + { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
634 + { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
635 ++ { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
636 + { 0, 0, 0, 0, 0 },
637 + };
638 +
639 +diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
640 +index 191b314f9e9e..709475d5cc30 100644
641 +--- a/drivers/gpu/drm/meson/meson_crtc.c
642 ++++ b/drivers/gpu/drm/meson/meson_crtc.c
643 +@@ -45,7 +45,6 @@ struct meson_crtc {
644 + struct drm_crtc base;
645 + struct drm_pending_vblank_event *event;
646 + struct meson_drm *priv;
647 +- bool enabled;
648 + };
649 + #define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
650 +
651 +@@ -81,7 +80,8 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
652 +
653 + };
654 +
655 +-static void meson_crtc_enable(struct drm_crtc *crtc)
656 ++static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
657 ++ struct drm_crtc_state *old_state)
658 + {
659 + struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
660 + struct drm_crtc_state *crtc_state = crtc->state;
661 +@@ -103,20 +103,6 @@ static void meson_crtc_enable(struct drm_crtc *crtc)
662 +
663 + drm_crtc_vblank_on(crtc);
664 +
665 +- meson_crtc->enabled = true;
666 +-}
667 +-
668 +-static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
669 +- struct drm_crtc_state *old_state)
670 +-{
671 +- struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
672 +- struct meson_drm *priv = meson_crtc->priv;
673 +-
674 +- DRM_DEBUG_DRIVER("\n");
675 +-
676 +- if (!meson_crtc->enabled)
677 +- meson_crtc_enable(crtc);
678 +-
679 + priv->viu.osd1_enabled = true;
680 + }
681 +
682 +@@ -142,8 +128,6 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
683 +
684 + crtc->state->event = NULL;
685 + }
686 +-
687 +- meson_crtc->enabled = false;
688 + }
689 +
690 + static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
691 +@@ -152,9 +136,6 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
692 + struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
693 + unsigned long flags;
694 +
695 +- if (crtc->state->enable && !meson_crtc->enabled)
696 +- meson_crtc_enable(crtc);
697 +-
698 + if (crtc->state->event) {
699 + WARN_ON(drm_crtc_vblank_get(crtc) != 0);
700 +
701 +diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
702 +index d3443125e661..bf5f294f172f 100644
703 +--- a/drivers/gpu/drm/meson/meson_drv.c
704 ++++ b/drivers/gpu/drm/meson/meson_drv.c
705 +@@ -82,6 +82,10 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = {
706 + .fb_create = drm_gem_fb_create,
707 + };
708 +
709 ++static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = {
710 ++ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
711 ++};
712 ++
713 + static irqreturn_t meson_irq(int irq, void *arg)
714 + {
715 + struct drm_device *dev = arg;
716 +@@ -246,6 +250,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
717 + drm->mode_config.max_width = 3840;
718 + drm->mode_config.max_height = 2160;
719 + drm->mode_config.funcs = &meson_mode_config_funcs;
720 ++ drm->mode_config.helper_private = &meson_mode_config_helpers;
721 +
722 + /* Hardware Initialization */
723 +
724 +diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
725 +index b1b788082793..d2a735ac9ba1 100644
726 +--- a/drivers/hv/hv_balloon.c
727 ++++ b/drivers/hv/hv_balloon.c
728 +@@ -888,12 +888,14 @@ static unsigned long handle_pg_range(unsigned long pg_start,
729 + pfn_cnt -= pgs_ol;
730 + /*
731 + * Check if the corresponding memory block is already
732 +- * online by checking its last previously backed page.
733 +- * In case it is we need to bring rest (which was not
734 +- * backed previously) online too.
735 ++ * online. It is possible to observe struct pages still
736 ++ * being uninitialized here, so check the section instead.
737 ++ * In case the section is online we need to bring the
738 ++ * rest of pfns (which were not backed previously)
739 ++ * online too.
740 + */
741 + if (start_pfn > has->start_pfn &&
742 +- !PageReserved(pfn_to_page(start_pfn - 1)))
743 ++ online_section_nr(pfn_to_section_nr(start_pfn)))
744 + hv_bring_pgs_online(has, start_pfn, pgs_ol);
745 +
746 + }
747 +diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
748 +index 3e90eb91db45..6cb45f256107 100644
749 +--- a/drivers/hv/ring_buffer.c
750 ++++ b/drivers/hv/ring_buffer.c
751 +@@ -164,26 +164,25 @@ hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
752 + }
753 +
754 + /* Get various debug metrics for the specified ring buffer. */
755 +-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
756 +- struct hv_ring_buffer_debug_info *debug_info)
757 ++int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
758 ++ struct hv_ring_buffer_debug_info *debug_info)
759 + {
760 + u32 bytes_avail_towrite;
761 + u32 bytes_avail_toread;
762 +
763 +- if (ring_info->ring_buffer) {
764 +- hv_get_ringbuffer_availbytes(ring_info,
765 +- &bytes_avail_toread,
766 +- &bytes_avail_towrite);
767 +-
768 +- debug_info->bytes_avail_toread = bytes_avail_toread;
769 +- debug_info->bytes_avail_towrite = bytes_avail_towrite;
770 +- debug_info->current_read_index =
771 +- ring_info->ring_buffer->read_index;
772 +- debug_info->current_write_index =
773 +- ring_info->ring_buffer->write_index;
774 +- debug_info->current_interrupt_mask =
775 +- ring_info->ring_buffer->interrupt_mask;
776 +- }
777 ++ if (!ring_info->ring_buffer)
778 ++ return -EINVAL;
779 ++
780 ++ hv_get_ringbuffer_availbytes(ring_info,
781 ++ &bytes_avail_toread,
782 ++ &bytes_avail_towrite);
783 ++ debug_info->bytes_avail_toread = bytes_avail_toread;
784 ++ debug_info->bytes_avail_towrite = bytes_avail_towrite;
785 ++ debug_info->current_read_index = ring_info->ring_buffer->read_index;
786 ++ debug_info->current_write_index = ring_info->ring_buffer->write_index;
787 ++ debug_info->current_interrupt_mask
788 ++ = ring_info->ring_buffer->interrupt_mask;
789 ++ return 0;
790 + }
791 + EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
792 +
793 +diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
794 +index 2c6d5c7a4445..9aa18f387a34 100644
795 +--- a/drivers/hv/vmbus_drv.c
796 ++++ b/drivers/hv/vmbus_drv.c
797 +@@ -313,12 +313,16 @@ static ssize_t out_intr_mask_show(struct device *dev,
798 + {
799 + struct hv_device *hv_dev = device_to_hv_device(dev);
800 + struct hv_ring_buffer_debug_info outbound;
801 ++ int ret;
802 +
803 + if (!hv_dev->channel)
804 + return -ENODEV;
805 +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
806 +- return -EINVAL;
807 +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
808 ++
809 ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
810 ++ &outbound);
811 ++ if (ret < 0)
812 ++ return ret;
813 ++
814 + return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
815 + }
816 + static DEVICE_ATTR_RO(out_intr_mask);
817 +@@ -328,12 +332,15 @@ static ssize_t out_read_index_show(struct device *dev,
818 + {
819 + struct hv_device *hv_dev = device_to_hv_device(dev);
820 + struct hv_ring_buffer_debug_info outbound;
821 ++ int ret;
822 +
823 + if (!hv_dev->channel)
824 + return -ENODEV;
825 +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
826 +- return -EINVAL;
827 +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
828 ++
829 ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
830 ++ &outbound);
831 ++ if (ret < 0)
832 ++ return ret;
833 + return sprintf(buf, "%d\n", outbound.current_read_index);
834 + }
835 + static DEVICE_ATTR_RO(out_read_index);
836 +@@ -344,12 +351,15 @@ static ssize_t out_write_index_show(struct device *dev,
837 + {
838 + struct hv_device *hv_dev = device_to_hv_device(dev);
839 + struct hv_ring_buffer_debug_info outbound;
840 ++ int ret;
841 +
842 + if (!hv_dev->channel)
843 + return -ENODEV;
844 +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
845 +- return -EINVAL;
846 +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
847 ++
848 ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
849 ++ &outbound);
850 ++ if (ret < 0)
851 ++ return ret;
852 + return sprintf(buf, "%d\n", outbound.current_write_index);
853 + }
854 + static DEVICE_ATTR_RO(out_write_index);
855 +@@ -360,12 +370,15 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
856 + {
857 + struct hv_device *hv_dev = device_to_hv_device(dev);
858 + struct hv_ring_buffer_debug_info outbound;
859 ++ int ret;
860 +
861 + if (!hv_dev->channel)
862 + return -ENODEV;
863 +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
864 +- return -EINVAL;
865 +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
866 ++
867 ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
868 ++ &outbound);
869 ++ if (ret < 0)
870 ++ return ret;
871 + return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
872 + }
873 + static DEVICE_ATTR_RO(out_read_bytes_avail);
874 +@@ -376,12 +389,15 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
875 + {
876 + struct hv_device *hv_dev = device_to_hv_device(dev);
877 + struct hv_ring_buffer_debug_info outbound;
878 ++ int ret;
879 +
880 + if (!hv_dev->channel)
881 + return -ENODEV;
882 +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
883 +- return -EINVAL;
884 +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
885 ++
886 ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
887 ++ &outbound);
888 ++ if (ret < 0)
889 ++ return ret;
890 + return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
891 + }
892 + static DEVICE_ATTR_RO(out_write_bytes_avail);
893 +@@ -391,12 +407,15 @@ static ssize_t in_intr_mask_show(struct device *dev,
894 + {
895 + struct hv_device *hv_dev = device_to_hv_device(dev);
896 + struct hv_ring_buffer_debug_info inbound;
897 ++ int ret;
898 +
899 + if (!hv_dev->channel)
900 + return -ENODEV;
901 +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
902 +- return -EINVAL;
903 +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
904 ++
905 ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
906 ++ if (ret < 0)
907 ++ return ret;
908 ++
909 + return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
910 + }
911 + static DEVICE_ATTR_RO(in_intr_mask);
912 +@@ -406,12 +425,15 @@ static ssize_t in_read_index_show(struct device *dev,
913 + {
914 + struct hv_device *hv_dev = device_to_hv_device(dev);
915 + struct hv_ring_buffer_debug_info inbound;
916 ++ int ret;
917 +
918 + if (!hv_dev->channel)
919 + return -ENODEV;
920 +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
921 +- return -EINVAL;
922 +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
923 ++
924 ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
925 ++ if (ret < 0)
926 ++ return ret;
927 ++
928 + return sprintf(buf, "%d\n", inbound.current_read_index);
929 + }
930 + static DEVICE_ATTR_RO(in_read_index);
931 +@@ -421,12 +443,15 @@ static ssize_t in_write_index_show(struct device *dev,
932 + {
933 + struct hv_device *hv_dev = device_to_hv_device(dev);
934 + struct hv_ring_buffer_debug_info inbound;
935 ++ int ret;
936 +
937 + if (!hv_dev->channel)
938 + return -ENODEV;
939 +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
940 +- return -EINVAL;
941 +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
942 ++
943 ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
944 ++ if (ret < 0)
945 ++ return ret;
946 ++
947 + return sprintf(buf, "%d\n", inbound.current_write_index);
948 + }
949 + static DEVICE_ATTR_RO(in_write_index);
950 +@@ -437,12 +462,15 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
951 + {
952 + struct hv_device *hv_dev = device_to_hv_device(dev);
953 + struct hv_ring_buffer_debug_info inbound;
954 ++ int ret;
955 +
956 + if (!hv_dev->channel)
957 + return -ENODEV;
958 +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
959 +- return -EINVAL;
960 +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
961 ++
962 ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
963 ++ if (ret < 0)
964 ++ return ret;
965 ++
966 + return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
967 + }
968 + static DEVICE_ATTR_RO(in_read_bytes_avail);
969 +@@ -453,12 +481,15 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
970 + {
971 + struct hv_device *hv_dev = device_to_hv_device(dev);
972 + struct hv_ring_buffer_debug_info inbound;
973 ++ int ret;
974 +
975 + if (!hv_dev->channel)
976 + return -ENODEV;
977 +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
978 +- return -EINVAL;
979 +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
980 ++
981 ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
982 ++ if (ret < 0)
983 ++ return ret;
984 ++
985 + return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
986 + }
987 + static DEVICE_ATTR_RO(in_write_bytes_avail);
988 +diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
989 +index 45c997430332..0e51803de0e7 100644
990 +--- a/drivers/ide/ide-proc.c
991 ++++ b/drivers/ide/ide-proc.c
992 +@@ -544,7 +544,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif)
993 + drive->proc = proc_mkdir(drive->name, parent);
994 + if (drive->proc) {
995 + ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
996 +- proc_create_data("setting", S_IFREG|S_IRUSR|S_IWUSR,
997 ++ proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR,
998 + drive->proc, &ide_settings_proc_fops,
999 + drive);
1000 + }
1001 +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
1002 +index cfc8b94527b9..aa4e431cbcd3 100644
1003 +--- a/drivers/input/joystick/xpad.c
1004 ++++ b/drivers/input/joystick/xpad.c
1005 +@@ -252,6 +252,8 @@ static const struct xpad_device {
1006 + { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
1007 + { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
1008 + { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
1009 ++ { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
1010 ++ { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
1011 + { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
1012 + { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
1013 + { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
1014 +@@ -428,6 +430,7 @@ static const struct usb_device_id xpad_table[] = {
1015 + XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */
1016 + XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
1017 + XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */
1018 ++ XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */
1019 + XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */
1020 + XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
1021 + XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
1022 +diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
1023 +index 8ec483e8688b..26ec603fe220 100644
1024 +--- a/drivers/input/misc/uinput.c
1025 ++++ b/drivers/input/misc/uinput.c
1026 +@@ -39,6 +39,7 @@
1027 + #include <linux/init.h>
1028 + #include <linux/fs.h>
1029 + #include <linux/miscdevice.h>
1030 ++#include <linux/overflow.h>
1031 + #include <linux/input/mt.h>
1032 + #include "../input-compat.h"
1033 +
1034 +@@ -405,7 +406,7 @@ static int uinput_open(struct inode *inode, struct file *file)
1035 + static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
1036 + const struct input_absinfo *abs)
1037 + {
1038 +- int min, max;
1039 ++ int min, max, range;
1040 +
1041 + min = abs->minimum;
1042 + max = abs->maximum;
1043 +@@ -417,7 +418,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
1044 + return -EINVAL;
1045 + }
1046 +
1047 +- if (abs->flat > max - min) {
1048 ++ if (!check_sub_overflow(max, min, &range) && abs->flat > range) {
1049 + printk(KERN_DEBUG
1050 + "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n",
1051 + UINPUT_NAME, code, abs->flat, min, max);
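
The uinput hunk replaces abs->flat > max - min, where the subtraction itself can wrap for extreme minimum values, with check_sub_overflow() from <linux/overflow.h>: it stores max - min into range and returns true if the subtraction overflowed, so the comparison only runs on a representable range. In isolation:

    #include <linux/overflow.h>

    int range;

    /* Reject flat only when (max - min) is representable and exceeded. */
    if (!check_sub_overflow(max, min, &range) && flat > range)
            return -EINVAL;
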
1052 +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
1053 +index c2df341ff6fa..cf3abb8d284f 100644
1054 +--- a/drivers/irqchip/irq-gic-v3-its.c
1055 ++++ b/drivers/irqchip/irq-gic-v3-its.c
1056 +@@ -2267,13 +2267,14 @@ static void its_free_device(struct its_device *its_dev)
1057 + kfree(its_dev);
1058 + }
1059 +
1060 +-static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
1061 ++static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
1062 + {
1063 + int idx;
1064 +
1065 +- idx = find_first_zero_bit(dev->event_map.lpi_map,
1066 +- dev->event_map.nr_lpis);
1067 +- if (idx == dev->event_map.nr_lpis)
1068 ++ idx = bitmap_find_free_region(dev->event_map.lpi_map,
1069 ++ dev->event_map.nr_lpis,
1070 ++ get_count_order(nvecs));
1071 ++ if (idx < 0)
1072 + return -ENOSPC;
1073 +
1074 + *hwirq = dev->event_map.lpi_base + idx;
1075 +@@ -2369,21 +2370,21 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1076 + int err;
1077 + int i;
1078 +
1079 +- for (i = 0; i < nr_irqs; i++) {
1080 +- err = its_alloc_device_irq(its_dev, &hwirq);
1081 +- if (err)
1082 +- return err;
1083 ++ err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
1084 ++ if (err)
1085 ++ return err;
1086 +
1087 +- err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
1088 ++ for (i = 0; i < nr_irqs; i++) {
1089 ++ err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
1090 + if (err)
1091 + return err;
1092 +
1093 + irq_domain_set_hwirq_and_chip(domain, virq + i,
1094 +- hwirq, &its_irq_chip, its_dev);
1095 ++ hwirq + i, &its_irq_chip, its_dev);
1096 + irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
1097 + pr_debug("ID:%d pID:%d vID:%d\n",
1098 +- (int)(hwirq - its_dev->event_map.lpi_base),
1099 +- (int) hwirq, virq + i);
1100 ++ (int)(hwirq + i - its_dev->event_map.lpi_base),
1101 ++ (int)(hwirq + i), virq + i);
1102 + }
1103 +
1104 + return 0;
1105 +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
1106 +index f2ec882f96be..5921ecc670c1 100644
1107 +--- a/drivers/md/dm-crypt.c
1108 ++++ b/drivers/md/dm-crypt.c
1109 +@@ -2405,9 +2405,21 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key
1110 + * capi:cipher_api_spec-iv:ivopts
1111 + */
1112 + tmp = &cipher_in[strlen("capi:")];
1113 +- cipher_api = strsep(&tmp, "-");
1114 +- *ivmode = strsep(&tmp, ":");
1115 +- *ivopts = tmp;
1116 ++
1117 ++ /* Separate IV options if present; they can contain another '-' in the hash name */
1118 ++ *ivopts = strrchr(tmp, ':');
1119 ++ if (*ivopts) {
1120 ++ **ivopts = '\0';
1121 ++ (*ivopts)++;
1122 ++ }
1123 ++ /* Parse IV mode */
1124 ++ *ivmode = strrchr(tmp, '-');
1125 ++ if (*ivmode) {
1126 ++ **ivmode = '\0';
1127 ++ (*ivmode)++;
1128 ++ }
1129 ++ /* The rest is crypto API spec */
1130 ++ cipher_api = tmp;
1131 +
1132 + if (*ivmode && !strcmp(*ivmode, "lmk"))
1133 + cc->tfms_count = 64;
1134 +@@ -2477,11 +2489,8 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
1135 + goto bad_mem;
1136 +
1137 + chainmode = strsep(&tmp, "-");
1138 +- *ivopts = strsep(&tmp, "-");
1139 +- *ivmode = strsep(&*ivopts, ":");
1140 +-
1141 +- if (tmp)
1142 +- DMWARN("Ignoring unexpected additional cipher options");
1143 ++ *ivmode = strsep(&tmp, ":");
1144 ++ *ivopts = tmp;
1145 +
1146 + /*
1147 + * For compatibility with the original dm-crypt mapping format, if
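
The dm-crypt hunks fix parsing of the capi: format, cipher_api-ivmode:ivopts: splitting left to right with strsep() on '-' breaks when ivopts itself contains a '-' (hash names can), so the fix splits from the right with strrchr(). A small standalone C sketch of the same logic (the sample string is hypothetical):

    #include <stdio.h>
    #include <string.h>

    /* Split "api-ivmode:ivopts" from the right; modifies tmp in place. */
    static void parse(char *tmp, char **api, char **ivmode, char **ivopts)
    {
            *ivopts = strrchr(tmp, ':');
            if (*ivopts)
                    *(*ivopts)++ = '\0'; /* cut at the last ':' */
            *ivmode = strrchr(tmp, '-');
            if (*ivmode)
                    *(*ivmode)++ = '\0'; /* cut at the last '-' */
            *api = tmp;                  /* the rest is the crypto API spec */
    }

    int main(void)
    {
            char s[] = "essiv(cbc(aes),sha256)-essiv:sha3-256";
            char *api, *ivmode, *ivopts;

            parse(s, &api, &ivmode, &ivopts);
            printf("api=%s ivmode=%s ivopts=%s\n", api,
                   ivmode ? ivmode : "(none)", ivopts ? ivopts : "(none)");
            return 0;
    }
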
1148 +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
1149 +index 20b0776e39ef..ed3caceaed07 100644
1150 +--- a/drivers/md/dm-thin-metadata.c
1151 ++++ b/drivers/md/dm-thin-metadata.c
1152 +@@ -1678,7 +1678,7 @@ int dm_thin_remove_range(struct dm_thin_device *td,
1153 + return r;
1154 + }
1155 +
1156 +-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
1157 ++int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
1158 + {
1159 + int r;
1160 + uint32_t ref_count;
1161 +@@ -1686,7 +1686,7 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *resu
1162 + down_read(&pmd->root_lock);
1163 + r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
1164 + if (!r)
1165 +- *result = (ref_count != 0);
1166 ++ *result = (ref_count > 1);
1167 + up_read(&pmd->root_lock);
1168 +
1169 + return r;
1170 +diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
1171 +index 35e954ea20a9..f6be0d733c20 100644
1172 +--- a/drivers/md/dm-thin-metadata.h
1173 ++++ b/drivers/md/dm-thin-metadata.h
1174 +@@ -195,7 +195,7 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
1175 +
1176 + int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
1177 +
1178 +-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
1179 ++int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
1180 +
1181 + int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
1182 + int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
1183 +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
1184 +index 1f225a1e08dd..c30a7850b2da 100644
1185 +--- a/drivers/md/dm-thin.c
1186 ++++ b/drivers/md/dm-thin.c
1187 +@@ -1048,7 +1048,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
1188 + * passdown we have to check that these blocks are now unused.
1189 + */
1190 + int r = 0;
1191 +- bool used = true;
1192 ++ bool shared = true;
1193 + struct thin_c *tc = m->tc;
1194 + struct pool *pool = tc->pool;
1195 + dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
1196 +@@ -1058,11 +1058,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
1197 + while (b != end) {
1198 + /* find start of unmapped run */
1199 + for (; b < end; b++) {
1200 +- r = dm_pool_block_is_used(pool->pmd, b, &used);
1201 ++ r = dm_pool_block_is_shared(pool->pmd, b, &shared);
1202 + if (r)
1203 + goto out;
1204 +
1205 +- if (!used)
1206 ++ if (!shared)
1207 + break;
1208 + }
1209 +
1210 +@@ -1071,11 +1071,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
1211 +
1212 + /* find end of run */
1213 + for (e = b + 1; e != end; e++) {
1214 +- r = dm_pool_block_is_used(pool->pmd, e, &used);
1215 ++ r = dm_pool_block_is_shared(pool->pmd, e, &shared);
1216 + if (r)
1217 + goto out;
1218 +
1219 +- if (used)
1220 ++ if (shared)
1221 + break;
1222 + }
1223 +
1224 +diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
1225 +index b8aaa684c397..2ed23c99f59f 100644
1226 +--- a/drivers/misc/ibmvmc.c
1227 ++++ b/drivers/misc/ibmvmc.c
1228 +@@ -820,21 +820,24 @@ static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
1229 + *
1230 + * Return:
1231 + * 0 - Success
1232 ++ * Non-zero - Failure
1233 + */
1234 + static int ibmvmc_open(struct inode *inode, struct file *file)
1235 + {
1236 + struct ibmvmc_file_session *session;
1237 +- int rc = 0;
1238 +
1239 + pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
1240 + (unsigned long)inode, (unsigned long)file,
1241 + ibmvmc.state);
1242 +
1243 + session = kzalloc(sizeof(*session), GFP_KERNEL);
1244 ++ if (!session)
1245 ++ return -ENOMEM;
1246 ++
1247 + session->file = file;
1248 + file->private_data = session;
1249 +
1250 +- return rc;
1251 ++ return 0;
1252 + }
1253 +
1254 + /**
1255 +diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
1256 +index e4b10b2d1a08..23739a60517f 100644
1257 +--- a/drivers/misc/mei/hw-me-regs.h
1258 ++++ b/drivers/misc/mei/hw-me-regs.h
1259 +@@ -127,6 +127,8 @@
1260 + #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
1261 + #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
1262 +
1263 ++#define MEI_DEV_ID_DNV_IE 0x19E5 /* Denverton IE */
1264 ++
1265 + #define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */
1266 +
1267 + #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
1268 +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
1269 +index ea4e152270a3..c8e21c894a5f 100644
1270 +--- a/drivers/misc/mei/pci-me.c
1271 ++++ b/drivers/misc/mei/pci-me.c
1272 +@@ -88,11 +88,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
1273 + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
1274 + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
1275 + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
1276 +- {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH8_CFG)},
1277 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)},
1278 +
1279 + {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
1280 + {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
1281 +
1282 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},
1283 ++
1284 + {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
1285 +
1286 + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
1287 +diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c
1288 +index 54c3fbb4a391..db56d4f58aaa 100644
1289 +--- a/drivers/mmc/host/dw_mmc-bluefield.c
1290 ++++ b/drivers/mmc/host/dw_mmc-bluefield.c
1291 +@@ -1,11 +1,6 @@
1292 + // SPDX-License-Identifier: GPL-2.0
1293 + /*
1294 + * Copyright (C) 2018 Mellanox Technologies.
1295 +- *
1296 +- * This program is free software; you can redistribute it and/or modify
1297 +- * it under the terms of the GNU General Public License as published by
1298 +- * the Free Software Foundation; either version 2 of the License, or
1299 +- * (at your option) any later version.
1300 + */
1301 +
1302 + #include <linux/bitfield.h>
1303 +diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
1304 +index c201c378537e..ef9deaa361c7 100644
1305 +--- a/drivers/mmc/host/meson-gx-mmc.c
1306 ++++ b/drivers/mmc/host/meson-gx-mmc.c
1307 +@@ -174,6 +174,8 @@ struct meson_host {
1308 + struct sd_emmc_desc *descs;
1309 + dma_addr_t descs_dma_addr;
1310 +
1311 ++ int irq;
1312 ++
1313 + bool vqmmc_enabled;
1314 + };
1315 +
1316 +@@ -1181,7 +1183,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
1317 + struct resource *res;
1318 + struct meson_host *host;
1319 + struct mmc_host *mmc;
1320 +- int ret, irq;
1321 ++ int ret;
1322 +
1323 + mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
1324 + if (!mmc)
1325 +@@ -1228,8 +1230,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
1326 + goto free_host;
1327 + }
1328 +
1329 +- irq = platform_get_irq(pdev, 0);
1330 +- if (irq <= 0) {
1331 ++ host->irq = platform_get_irq(pdev, 0);
1332 ++ if (host->irq <= 0) {
1333 + dev_err(&pdev->dev, "failed to get interrupt resource.\n");
1334 + ret = -EINVAL;
1335 + goto free_host;
1336 +@@ -1283,9 +1285,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
1337 + writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
1338 + host->regs + SD_EMMC_IRQ_EN);
1339 +
1340 +- ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq,
1341 +- meson_mmc_irq_thread, IRQF_SHARED,
1342 +- NULL, host);
1343 ++ ret = request_threaded_irq(host->irq, meson_mmc_irq,
1344 ++ meson_mmc_irq_thread, IRQF_SHARED, NULL, host);
1345 + if (ret)
1346 + goto err_init_clk;
1347 +
1348 +@@ -1303,7 +1304,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
1349 + if (host->bounce_buf == NULL) {
1350 + dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n");
1351 + ret = -ENOMEM;
1352 +- goto err_init_clk;
1353 ++ goto err_free_irq;
1354 + }
1355 +
1356 + host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
1357 +@@ -1322,6 +1323,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
1358 + err_bounce_buf:
1359 + dma_free_coherent(host->dev, host->bounce_buf_size,
1360 + host->bounce_buf, host->bounce_dma_addr);
1361 ++err_free_irq:
1362 ++ free_irq(host->irq, host);
1363 + err_init_clk:
1364 + clk_disable_unprepare(host->mmc_clk);
1365 + err_core_clk:
1366 +@@ -1339,6 +1342,7 @@ static int meson_mmc_remove(struct platform_device *pdev)
1367 +
1368 + /* disable interrupts */
1369 + writel(0, host->regs + SD_EMMC_IRQ_EN);
1370 ++ free_irq(host->irq, host);
1371 +
1372 + dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
1373 + host->descs, host->descs_dma_addr);
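
The meson-gx-mmc hunks drop devm_request_threaded_irq() in favour of an explicit request_threaded_irq()/free_irq() pair: devm-managed resources are released only after remove() returns, so the shared IRQ could still fire against a torn-down host. Freeing the IRQ inside remove(), right after masking the controller's interrupts, closes that window. The lifetime pairing in outline (my_irq/my_irq_thread are hypothetical handlers):

    /* probe(): */
    host->irq = platform_get_irq(pdev, 0);
    ret = request_threaded_irq(host->irq, my_irq, my_irq_thread,
                               IRQF_SHARED, NULL, host);
    if (ret)
            goto err_init_clk;

    /* remove(): */
    writel(0, host->regs + SD_EMMC_IRQ_EN); /* mask first */
    free_irq(host->irq, host);
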
1374 +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
1375 +index 3b3f88ffab53..c05e4d50d43d 100644
1376 +--- a/drivers/net/can/dev.c
1377 ++++ b/drivers/net/can/dev.c
1378 +@@ -480,8 +480,6 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
1379 + struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
1380 + {
1381 + struct can_priv *priv = netdev_priv(dev);
1382 +- struct sk_buff *skb = priv->echo_skb[idx];
1383 +- struct canfd_frame *cf;
1384 +
1385 + if (idx >= priv->echo_skb_max) {
1386 + netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
1387 +@@ -489,20 +487,21 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8
1388 + return NULL;
1389 + }
1390 +
1391 +- if (!skb) {
1392 +- netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
1393 +- __func__, idx);
1394 +- return NULL;
1395 +- }
1396 ++ if (priv->echo_skb[idx]) {
1397 ++ /* Using "struct canfd_frame::len" for the frame
1398 ++ * length is supported on both CAN and CANFD frames.
1399 ++ */
1400 ++ struct sk_buff *skb = priv->echo_skb[idx];
1401 ++ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
1402 ++ u8 len = cf->len;
1403 +
1404 +- /* Using "struct canfd_frame::len" for the frame
1405 +- * length is supported on both CAN and CANFD frames.
1406 +- */
1407 +- cf = (struct canfd_frame *)skb->data;
1408 +- *len_ptr = cf->len;
1409 +- priv->echo_skb[idx] = NULL;
1410 ++ *len_ptr = len;
1411 ++ priv->echo_skb[idx] = NULL;
1412 +
1413 +- return skb;
1414 ++ return skb;
1415 ++ }
1416 ++
1417 ++ return NULL;
1418 + }
1419 +
1420 + /*
1421 +diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
1422 +index 75ce11395ee8..ae219b8a7754 100644
1423 +--- a/drivers/net/can/flexcan.c
1424 ++++ b/drivers/net/can/flexcan.c
1425 +@@ -1004,7 +1004,7 @@ static int flexcan_chip_start(struct net_device *dev)
1426 + }
1427 + } else {
1428 + /* clear and invalidate unused mailboxes first */
1429 +- for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= ARRAY_SIZE(regs->mb); i++) {
1430 ++ for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < ARRAY_SIZE(regs->mb); i++) {
1431 + priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
1432 + &regs->mb[i].can_ctrl);
1433 + }
1434 +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
1435 +index d272dc6984ac..b40d4377cc71 100644
1436 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
1437 ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
1438 +@@ -431,8 +431,6 @@
1439 + #define MAC_MDIOSCAR_PA_WIDTH 5
1440 + #define MAC_MDIOSCAR_RA_INDEX 0
1441 + #define MAC_MDIOSCAR_RA_WIDTH 16
1442 +-#define MAC_MDIOSCAR_REG_INDEX 0
1443 +-#define MAC_MDIOSCAR_REG_WIDTH 21
1444 + #define MAC_MDIOSCCDR_BUSY_INDEX 22
1445 + #define MAC_MDIOSCCDR_BUSY_WIDTH 1
1446 + #define MAC_MDIOSCCDR_CMD_INDEX 16
1447 +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1448 +index 1e929a1e4ca7..4666084eda16 100644
1449 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1450 ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1451 +@@ -1284,6 +1284,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
1452 + }
1453 + }
1454 +
1455 ++static unsigned int xgbe_create_mdio_sca(int port, int reg)
1456 ++{
1457 ++ unsigned int mdio_sca, da;
1458 ++
1459 ++ da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
1460 ++
1461 ++ mdio_sca = 0;
1462 ++ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
1463 ++ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
1464 ++ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
1465 ++
1466 ++ return mdio_sca;
1467 ++}
1468 ++
1469 + static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1470 + int reg, u16 val)
1471 + {
1472 +@@ -1291,9 +1305,7 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1473 +
1474 + reinit_completion(&pdata->mdio_complete);
1475 +
1476 +- mdio_sca = 0;
1477 +- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
1478 +- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
1479 ++ mdio_sca = xgbe_create_mdio_sca(addr, reg);
1480 + XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
1481 +
1482 + mdio_sccd = 0;
1483 +@@ -1317,9 +1329,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1484 +
1485 + reinit_completion(&pdata->mdio_complete);
1486 +
1487 +- mdio_sca = 0;
1488 +- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
1489 +- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
1490 ++ mdio_sca = xgbe_create_mdio_sca(addr, reg);
1491 + XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
1492 +
1493 + mdio_sccd = 0;
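
xgbe_create_mdio_sca() above centralizes the SCA register packing for both the read and write paths: for a clause-45 register number the device address travels in the upper bits of 'reg' (flagged by MII_ADDR_C45), while clause 22 has no device address. A hedged sketch of that packing; the field positions below are illustrative, not the real XGMAC layout:

  #include <stdint.h>

  #define MII_ADDR_C45 (1u << 30)                 /* assumed flag bit */

  static uint32_t mdio_sca_pack(int port, int reg)
  {
          uint32_t da = (reg & MII_ADDR_C45) ? (uint32_t)reg >> 16 : 0;
          uint32_t sca = 0;

          sca |= (uint32_t)(reg & 0xffff) << 0;   /* register address */
          sca |= (uint32_t)(port & 0x1f) << 16;   /* port address */
          sca |= (da & 0x1f) << 21;               /* device address */
          return sca;
  }
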
1494 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
1495 +index 5890fdfd62c3..c7901a3f2a79 100644
1496 +--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
1497 ++++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
1498 +@@ -604,29 +604,31 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
1499 + u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
1500 + u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
1501 + u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
1502 ++ char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
1503 ++
1504 ++ memcpy(ncqe, cqe, q->elem_size);
1505 ++ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
1506 +
1507 + if (sendq) {
1508 + struct mlxsw_pci_queue *sdq;
1509 +
1510 + sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
1511 + mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
1512 +- wqe_counter, cqe);
1513 ++ wqe_counter, ncqe);
1514 + q->u.cq.comp_sdq_count++;
1515 + } else {
1516 + struct mlxsw_pci_queue *rdq;
1517 +
1518 + rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
1519 + mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
1520 +- wqe_counter, q->u.cq.v, cqe);
1521 ++ wqe_counter, q->u.cq.v, ncqe);
1522 + q->u.cq.comp_rdq_count++;
1523 + }
1524 + if (++items == credits)
1525 + break;
1526 + }
1527 +- if (items) {
1528 +- mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
1529 ++ if (items)
1530 + mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
1531 +- }
1532 + }
1533 +
1534 + static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
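
The mlxsw change copies each completion entry into a local buffer before ringing the consumer doorbell, because the doorbell hands the ring slot back to the device, which may overwrite it while the entry is still being processed. A minimal sketch of the snapshot-then-release pattern (the ring and doorbell names here are invented for illustration):

  #include <string.h>

  #define ELEM_SIZE_MAX 32

  struct ring {
          char *base;
          int elem_size;  /* must be <= ELEM_SIZE_MAX */
          int tail;
          int count;
  };

  extern void ring_doorbell(struct ring *r);  /* returns slot to HW */

  static void consume_one(struct ring *r, void (*handle)(const char *))
  {
          char copy[ELEM_SIZE_MAX];
          const char *elem = r->base + (size_t)r->tail * r->elem_size;

          memcpy(copy, elem, r->elem_size);  /* snapshot first */
          r->tail = (r->tail + 1) % r->count;
          ring_doorbell(r);                  /* slot may be reused now */

          handle(copy);                      /* safe: uses the copy */
  }
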
1535 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
1536 +index 83f452b7ccbb..72cdaa01d56d 100644
1537 +--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
1538 ++++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
1539 +@@ -27,7 +27,7 @@
1540 +
1541 + #define MLXSW_PCI_SW_RESET 0xF0010
1542 + #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
1543 +-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
1544 ++#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000
1545 + #define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
1546 + #define MLXSW_PCI_FW_READY 0xA1844
1547 + #define MLXSW_PCI_FW_READY_MASK 0xFFFF
1548 +@@ -53,6 +53,7 @@
1549 + #define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
1550 + #define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */
1551 + #define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */
1552 ++#define MLXSW_PCI_CQE_SIZE_MAX MLXSW_PCI_CQE2_SIZE
1553 + #define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */
1554 + #define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
1555 + #define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE)
1556 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
1557 +index 715d24ff937e..562c4429eec7 100644
1558 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
1559 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
1560 +@@ -696,8 +696,8 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = {
1561 + static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
1562 + .type = MLXSW_SP_FID_TYPE_DUMMY,
1563 + .fid_size = sizeof(struct mlxsw_sp_fid),
1564 +- .start_index = MLXSW_SP_RFID_BASE - 1,
1565 +- .end_index = MLXSW_SP_RFID_BASE - 1,
1566 ++ .start_index = VLAN_N_VID - 1,
1567 ++ .end_index = VLAN_N_VID - 1,
1568 + .ops = &mlxsw_sp_fid_dummy_ops,
1569 + };
1570 +
1571 +diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
1572 +index 9020b084b953..7ec4eb74fe21 100644
1573 +--- a/drivers/net/ethernet/sun/cassini.c
1574 ++++ b/drivers/net/ethernet/sun/cassini.c
1575 +@@ -1,22 +1,9 @@
1576 +-// SPDX-License-Identifier: GPL-2.0
1577 ++// SPDX-License-Identifier: GPL-2.0+
1578 + /* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
1579 + *
1580 + * Copyright (C) 2004 Sun Microsystems Inc.
1581 + * Copyright (C) 2003 Adrian Sun (asun@×××××××××××××.com)
1582 + *
1583 +- * This program is free software; you can redistribute it and/or
1584 +- * modify it under the terms of the GNU General Public License as
1585 +- * published by the Free Software Foundation; either version 2 of the
1586 +- * License, or (at your option) any later version.
1587 +- *
1588 +- * This program is distributed in the hope that it will be useful,
1589 +- * but WITHOUT ANY WARRANTY; without even the implied warranty of
1590 +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1591 +- * GNU General Public License for more details.
1592 +- *
1593 +- * You should have received a copy of the GNU General Public License
1594 +- * along with this program; if not, see <http://www.gnu.org/licenses/>.
1595 +- *
1596 + * This driver uses the sungem driver (c) David Miller
1597 + * (davem@××××××.com) as its basis.
1598 + *
1599 +diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
1600 +index 13f3860496a8..ae5f05f03f88 100644
1601 +--- a/drivers/net/ethernet/sun/cassini.h
1602 ++++ b/drivers/net/ethernet/sun/cassini.h
1603 +@@ -1,23 +1,10 @@
1604 +-/* SPDX-License-Identifier: GPL-2.0 */
1605 ++/* SPDX-License-Identifier: GPL-2.0+ */
1606 + /* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $
1607 + * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver.
1608 + *
1609 + * Copyright (C) 2004 Sun Microsystems Inc.
1610 + * Copyright (c) 2003 Adrian Sun (asun@×××××××××××××.com)
1611 + *
1612 +- * This program is free software; you can redistribute it and/or
1613 +- * modify it under the terms of the GNU General Public License as
1614 +- * published by the Free Software Foundation; either version 2 of the
1615 +- * License, or (at your option) any later version.
1616 +- *
1617 +- * This program is distributed in the hope that it will be useful,
1618 +- * but WITHOUT ANY WARRANTY; without even the implied warranty of
1619 +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1620 +- * GNU General Public License for more details.
1621 +- *
1622 +- * You should have received a copy of the GNU General Public License
1623 +- * along with this program; if not, see <http://www.gnu.org/licenses/>.
1624 +- *
1625 + * vendor id: 0x108E (Sun Microsystems, Inc.)
1626 + * device id: 0xabba (Cassini)
1627 + * revision ids: 0x01 = Cassini
1628 +diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
1629 +index f7c69ca34056..d71be15c8c69 100644
1630 +--- a/drivers/net/phy/marvell.c
1631 ++++ b/drivers/net/phy/marvell.c
1632 +@@ -1063,6 +1063,39 @@ static int m88e1145_config_init(struct phy_device *phydev)
1633 + return 0;
1634 + }
1635 +
1636 ++/* The VOD can be out of specification on link up. Poke an
1637 ++ * undocumented register, in an undocumented page, with a magic value
1638 ++ * to fix this.
1639 ++ */
1640 ++static int m88e6390_errata(struct phy_device *phydev)
1641 ++{
1642 ++ int err;
1643 ++
1644 ++ err = phy_write(phydev, MII_BMCR,
1645 ++ BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX);
1646 ++ if (err)
1647 ++ return err;
1648 ++
1649 ++ usleep_range(300, 400);
1650 ++
1651 ++ err = phy_write_paged(phydev, 0xf8, 0x08, 0x36);
1652 ++ if (err)
1653 ++ return err;
1654 ++
1655 ++ return genphy_soft_reset(phydev);
1656 ++}
1657 ++
1658 ++static int m88e6390_config_aneg(struct phy_device *phydev)
1659 ++{
1660 ++ int err;
1661 ++
1662 ++ err = m88e6390_errata(phydev);
1663 ++ if (err)
1664 ++ return err;
1665 ++
1666 ++ return m88e1510_config_aneg(phydev);
1667 ++}
1668 ++
1669 + /**
1670 + * fiber_lpa_to_ethtool_lpa_t
1671 + * @lpa: value of the MII_LPA register for fiber link
1672 +@@ -1418,7 +1451,7 @@ static int m88e1318_set_wol(struct phy_device *phydev,
1673 + * before enabling it if !phy_interrupt_is_valid()
1674 + */
1675 + if (!phy_interrupt_is_valid(phydev))
1676 +- phy_read(phydev, MII_M1011_IEVENT);
1677 ++ __phy_read(phydev, MII_M1011_IEVENT);
1678 +
1679 + /* Enable the WOL interrupt */
1680 + err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
1681 +@@ -2313,7 +2346,7 @@ static struct phy_driver marvell_drivers[] = {
1682 + .flags = PHY_HAS_INTERRUPT,
1683 + .probe = m88e6390_probe,
1684 + .config_init = &marvell_config_init,
1685 +- .config_aneg = &m88e1510_config_aneg,
1686 ++ .config_aneg = &m88e6390_config_aneg,
1687 + .read_status = &marvell_read_status,
1688 + .ack_interrupt = &marvell_ack_interrupt,
1689 + .config_intr = &marvell_config_intr,
1690 +diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
1691 +index 98f4b1f706df..15c5586d74ff 100644
1692 +--- a/drivers/net/phy/mdio_bus.c
1693 ++++ b/drivers/net/phy/mdio_bus.c
1694 +@@ -391,6 +391,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
1695 + if (IS_ERR(gpiod)) {
1696 + dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n",
1697 + bus->id);
1698 ++ device_del(&bus->dev);
1699 + return PTR_ERR(gpiod);
1700 + } else if (gpiod) {
1701 + bus->reset_gpiod = gpiod;
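
The one-line mdio_bus fix adds a missing unwind: once the bus device has been added, every later failure path has to undo that with device_del() before returning. The usual C shape for this is goto-based unwinding; a generic sketch with stand-in names:

  extern int step_a(void);    /* e.g. adding the device */
  extern void undo_a(void);   /* e.g. device_del() */
  extern int step_b(void);    /* e.g. acquiring the reset GPIO */

  static int setup(void)
  {
          int err;

          err = step_a();
          if (err)
                  return err;

          err = step_b();
          if (err)
                  goto err_undo_a;  /* this unwind was missing */

          return 0;

  err_undo_a:
          undo_a();
          return err;
  }
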
1702 +diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
1703 +index 62dc564b251d..f22639f0116a 100644
1704 +--- a/drivers/net/ppp/pppoe.c
1705 ++++ b/drivers/net/ppp/pppoe.c
1706 +@@ -445,6 +445,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
1707 + if (pskb_trim_rcsum(skb, len))
1708 + goto drop;
1709 +
1710 ++ ph = pppoe_hdr(skb);
1711 + pn = pppoe_pernet(dev_net(dev));
1712 +
1713 + /* Note that get_item does a sock_hold(), so sk_pppox(po)
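
The pppoe fix re-reads the header pointer because pskb_trim_rcsum() may reallocate the skb data, leaving any pointer taken from skb->data beforehand stale. It is the same rule as with realloc() in plain C; a small sketch:

  #include <stdlib.h>

  struct hdr {
          unsigned short len;
  };

  static struct hdr *shrink(char **buf, size_t new_len)
  {
          char *p = realloc(*buf, new_len);

          if (!p)
                  return NULL;
          *buf = p;               /* old pointer is now invalid */
          return (struct hdr *)p; /* re-derive, like pppoe_hdr() */
  }
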
1714 +diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
1715 +index e57f3902beb3..08f997a390d5 100644
1716 +--- a/drivers/nvme/target/rdma.c
1717 ++++ b/drivers/nvme/target/rdma.c
1718 +@@ -139,6 +139,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
1719 + static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
1720 + static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
1721 + static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
1722 ++static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
1723 ++ struct nvmet_rdma_rsp *r);
1724 ++static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
1725 ++ struct nvmet_rdma_rsp *r);
1726 +
1727 + static const struct nvmet_fabrics_ops nvmet_rdma_ops;
1728 +
1729 +@@ -182,9 +186,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
1730 + spin_unlock_irqrestore(&queue->rsps_lock, flags);
1731 +
1732 + if (unlikely(!rsp)) {
1733 +- rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
1734 ++ int ret;
1735 ++
1736 ++ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
1737 + if (unlikely(!rsp))
1738 + return NULL;
1739 ++ ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
1740 ++ if (unlikely(ret)) {
1741 ++ kfree(rsp);
1742 ++ return NULL;
1743 ++ }
1744 ++
1745 + rsp->allocated = true;
1746 + }
1747 +
1748 +@@ -196,7 +208,8 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
1749 + {
1750 + unsigned long flags;
1751 +
1752 +- if (rsp->allocated) {
1753 ++ if (unlikely(rsp->allocated)) {
1754 ++ nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
1755 + kfree(rsp);
1756 + return;
1757 + }
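
The nvmet-rdma fix pairs the fallback allocation with the same secondary setup and teardown the pooled objects get: a response allocated on demand now runs nvmet_rdma_alloc_rsp(), and the put path frees those resources before kfree(). A generic sketch of pool-with-fallback lifetime handling (all names invented):

  #include <stdlib.h>
  #include <stdbool.h>

  struct rsp {
          bool allocated;  /* true = fallback, not from the pool */
  };

  extern struct rsp *pool_get(void);
  extern void pool_put(struct rsp *r);
  extern int rsp_init_resources(struct rsp *r);
  extern void rsp_free_resources(struct rsp *r);

  static struct rsp *get_rsp(void)
  {
          struct rsp *r = pool_get();

          if (!r) {
                  r = calloc(1, sizeof(*r));
                  if (!r)
                          return NULL;
                  if (rsp_init_resources(r)) {  /* was skipped before */
                          free(r);
                          return NULL;
                  }
                  r->allocated = true;
          }
          return r;
  }

  static void put_rsp(struct rsp *r)
  {
          if (r->allocated) {
                  rsp_free_resources(r);  /* was leaked before */
                  free(r);
                  return;
          }
          pool_put(r);
  }
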
1758 +diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
1759 +index 194ffd5c8580..039b2074db7e 100644
1760 +--- a/drivers/s390/char/sclp_config.c
1761 ++++ b/drivers/s390/char/sclp_config.c
1762 +@@ -60,7 +60,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
1763 +
1764 + static void __ref sclp_cpu_change_notify(struct work_struct *work)
1765 + {
1766 ++ lock_device_hotplug();
1767 + smp_rescan_cpus();
1768 ++ unlock_device_hotplug();
1769 + }
1770 +
1771 + static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
1772 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
1773 +index 0b81d9d03357..12ddb5928a73 100644
1774 +--- a/drivers/scsi/ufs/ufshcd.c
1775 ++++ b/drivers/scsi/ufs/ufshcd.c
1776 +@@ -109,13 +109,19 @@
1777 + int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
1778 + const char *prefix)
1779 + {
1780 +- u8 *regs;
1781 ++ u32 *regs;
1782 ++ size_t pos;
1783 ++
1784 ++ if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
1785 ++ return -EINVAL;
1786 +
1787 + regs = kzalloc(len, GFP_KERNEL);
1788 + if (!regs)
1789 + return -ENOMEM;
1790 +
1791 +- memcpy_fromio(regs, hba->mmio_base + offset, len);
1792 ++ for (pos = 0; pos < len; pos += 4)
1793 ++ regs[pos / 4] = ufshcd_readl(hba, offset + pos);
1794 ++
1795 + ufshcd_hex_dump(prefix, regs, len);
1796 + kfree(regs);
1797 +
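
The ufshcd change replaces a byte-wise memcpy_fromio() with aligned 32-bit reads, since many MMIO regions only tolerate word-sized, word-aligned accesses. A userspace sketch of the same loop, with a volatile pointer standing in for readl():

  #include <stdint.h>
  #include <stddef.h>

  static inline uint32_t mmio_read32(const volatile uint32_t *reg)
  {
          return *reg;  /* stands in for readl() */
  }

  static int dump_regs(const volatile uint32_t *base, size_t off,
                       size_t len, uint32_t *out)
  {
          size_t pos;

          if (off % 4 || len % 4)  /* enforce 32-bit alignment */
                  return -1;

          for (pos = 0; pos < len; pos += 4)
                  out[pos / 4] = mmio_read32(base + (off + pos) / 4);
          return 0;
  }
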
1798 +diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
1799 +index 28cbd6b3d26c..dfee6985efa6 100644
1800 +--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
1801 ++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
1802 +@@ -35,6 +35,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
1803 + {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
1804 + {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
1805 + {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
1806 ++ {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
1807 + {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
1808 + {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
1809 + {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
1810 +diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
1811 +index dabb391909aa..bb63519db7ae 100644
1812 +--- a/drivers/tty/n_hdlc.c
1813 ++++ b/drivers/tty/n_hdlc.c
1814 +@@ -597,6 +597,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
1815 + /* too large for caller's buffer */
1816 + ret = -EOVERFLOW;
1817 + } else {
1818 ++ __set_current_state(TASK_RUNNING);
1819 + if (copy_to_user(buf, rbuf->buf, rbuf->count))
1820 + ret = -EFAULT;
1821 + else
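
The n_hdlc fix resets the task to TASK_RUNNING before copy_to_user(), which can sleep; a task left in a sleeping state around a blocking call trips the kernel's "do not call blocking ops when !TASK_RUNNING" check. The shape of the canonical wait loop, with the kernel primitives stubbed as externs so the sketch stands alone:

  #define TASK_RUNNING       0
  #define TASK_INTERRUPTIBLE 1

  extern void set_current_state(int state);
  extern void schedule(void);
  extern int data_ready(void);
  extern long copy_out(void *dst, const void *src, long n);

  static long read_loop(void *dst, const void *src, long n)
  {
          for (;;) {
                  set_current_state(TASK_INTERRUPTIBLE);
                  if (data_ready())
                          break;
                  schedule();  /* really go to sleep */
          }
          /* we left the loop still in TASK_INTERRUPTIBLE; reset it
           * before any call that may itself sleep */
          set_current_state(TASK_RUNNING);
          return copy_out(dst, src, n);
  }
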
1822 +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
1823 +index ad126f51d549..7fe679413188 100644
1824 +--- a/drivers/tty/serial/serial_core.c
1825 ++++ b/drivers/tty/serial/serial_core.c
1826 +@@ -550,10 +550,12 @@ static int uart_put_char(struct tty_struct *tty, unsigned char c)
1827 + int ret = 0;
1828 +
1829 + circ = &state->xmit;
1830 +- if (!circ->buf)
1831 ++ port = uart_port_lock(state, flags);
1832 ++ if (!circ->buf) {
1833 ++ uart_port_unlock(port, flags);
1834 + return 0;
1835 ++ }
1836 +
1837 +- port = uart_port_lock(state, flags);
1838 + if (port && uart_circ_chars_free(circ) != 0) {
1839 + circ->buf[circ->head] = c;
1840 + circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
1841 +@@ -586,11 +588,13 @@ static int uart_write(struct tty_struct *tty,
1842 + return -EL3HLT;
1843 + }
1844 +
1845 ++ port = uart_port_lock(state, flags);
1846 + circ = &state->xmit;
1847 +- if (!circ->buf)
1848 ++ if (!circ->buf) {
1849 ++ uart_port_unlock(port, flags);
1850 + return 0;
1851 ++ }
1852 +
1853 +- port = uart_port_lock(state, flags);
1854 + while (port) {
1855 + c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
1856 + if (count < c)
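
Both serial_core hunks move the '!circ->buf' test under the port lock: the transmit buffer can be freed by a concurrent close, so checking it before taking the lock is a time-of-check/time-of-use race. A minimal pthread analog:

  #include <pthread.h>
  #include <stddef.h>

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static char *xmit_buf;  /* may be freed by another thread on close */

  static int put_char(char c)
  {
          int ret = 0;

          pthread_mutex_lock(&lock);
          if (xmit_buf) {  /* check only while holding the lock */
                  xmit_buf[0] = c;
                  ret = 1;
          }
          pthread_mutex_unlock(&lock);
          return ret;
  }
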
1857 +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
1858 +index 052ec16a4e84..e7d192ebecd7 100644
1859 +--- a/drivers/tty/tty_io.c
1860 ++++ b/drivers/tty/tty_io.c
1861 +@@ -2188,7 +2188,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
1862 + ld = tty_ldisc_ref_wait(tty);
1863 + if (!ld)
1864 + return -EIO;
1865 +- ld->ops->receive_buf(tty, &ch, &mbz, 1);
1866 ++ if (ld->ops->receive_buf)
1867 ++ ld->ops->receive_buf(tty, &ch, &mbz, 1);
1868 + tty_ldisc_deref(ld);
1869 + return 0;
1870 + }
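
The tiocsti fix is the standard rule for ops structures: an optional method may legitimately be NULL, so it must be tested before dispatch. In miniature:

  #include <stddef.h>

  struct ldisc_ops {
          void (*receive_buf)(const char *cp, size_t count);  /* optional */
  };

  static void inject(const struct ldisc_ops *ops, char ch)
  {
          if (ops->receive_buf)  /* may be NULL for some disciplines */
                  ops->receive_buf(&ch, 1);
  }
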
1871 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
1872 +index 476ec4b1b86c..da335899527b 100644
1873 +--- a/drivers/tty/vt/vt.c
1874 ++++ b/drivers/tty/vt/vt.c
1875 +@@ -1275,6 +1275,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
1876 + if (con_is_visible(vc))
1877 + update_screen(vc);
1878 + vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
1879 ++ notify_update(vc);
1880 + return err;
1881 + }
1882 +
1883 +@@ -2767,8 +2768,8 @@ rescan_last_byte:
1884 + con_flush(vc, draw_from, draw_to, &draw_x);
1885 + vc_uniscr_debug_check(vc);
1886 + console_conditional_schedule();
1887 +- console_unlock();
1888 + notify_update(vc);
1889 ++ console_unlock();
1890 + return n;
1891 + }
1892 +
1893 +@@ -2887,8 +2888,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
1894 + unsigned char c;
1895 + static DEFINE_SPINLOCK(printing_lock);
1896 + const ushort *start;
1897 +- ushort cnt = 0;
1898 +- ushort myx;
1899 ++ ushort start_x, cnt;
1900 + int kmsg_console;
1901 +
1902 + /* console busy or not yet initialized */
1903 +@@ -2901,10 +2901,6 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
1904 + if (kmsg_console && vc_cons_allocated(kmsg_console - 1))
1905 + vc = vc_cons[kmsg_console - 1].d;
1906 +
1907 +- /* read `x' only after setting currcons properly (otherwise
1908 +- the `x' macro will read the x of the foreground console). */
1909 +- myx = vc->vc_x;
1910 +-
1911 + if (!vc_cons_allocated(fg_console)) {
1912 + /* impossible */
1913 + /* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */
1914 +@@ -2919,53 +2915,41 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
1915 + hide_cursor(vc);
1916 +
1917 + start = (ushort *)vc->vc_pos;
1918 +-
1919 +- /* Contrived structure to try to emulate original need_wrap behaviour
1920 +- * Problems caused when we have need_wrap set on '\n' character */
1921 ++ start_x = vc->vc_x;
1922 ++ cnt = 0;
1923 + while (count--) {
1924 + c = *b++;
1925 + if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) {
1926 +- if (cnt > 0) {
1927 +- if (con_is_visible(vc))
1928 +- vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
1929 +- vc->vc_x += cnt;
1930 +- if (vc->vc_need_wrap)
1931 +- vc->vc_x--;
1932 +- cnt = 0;
1933 +- }
1934 ++ if (cnt && con_is_visible(vc))
1935 ++ vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
1936 ++ cnt = 0;
1937 + if (c == 8) { /* backspace */
1938 + bs(vc);
1939 + start = (ushort *)vc->vc_pos;
1940 +- myx = vc->vc_x;
1941 ++ start_x = vc->vc_x;
1942 + continue;
1943 + }
1944 + if (c != 13)
1945 + lf(vc);
1946 + cr(vc);
1947 + start = (ushort *)vc->vc_pos;
1948 +- myx = vc->vc_x;
1949 ++ start_x = vc->vc_x;
1950 + if (c == 10 || c == 13)
1951 + continue;
1952 + }
1953 ++ vc_uniscr_putc(vc, c);
1954 + scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos);
1955 + notify_write(vc, c);
1956 + cnt++;
1957 +- if (myx == vc->vc_cols - 1) {
1958 +- vc->vc_need_wrap = 1;
1959 +- continue;
1960 +- }
1961 +- vc->vc_pos += 2;
1962 +- myx++;
1963 +- }
1964 +- if (cnt > 0) {
1965 +- if (con_is_visible(vc))
1966 +- vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
1967 +- vc->vc_x += cnt;
1968 +- if (vc->vc_x == vc->vc_cols) {
1969 +- vc->vc_x--;
1970 ++ if (vc->vc_x == vc->vc_cols - 1) {
1971 + vc->vc_need_wrap = 1;
1972 ++ } else {
1973 ++ vc->vc_pos += 2;
1974 ++ vc->vc_x++;
1975 + }
1976 + }
1977 ++ if (cnt && con_is_visible(vc))
1978 ++ vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
1979 + set_cursor(vc);
1980 + notify_update(vc);
1981 +
1982 +diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
1983 +index dc7f7fd71684..c12ac56606c3 100644
1984 +--- a/drivers/usb/core/ledtrig-usbport.c
1985 ++++ b/drivers/usb/core/ledtrig-usbport.c
1986 +@@ -119,11 +119,6 @@ static const struct attribute_group ports_group = {
1987 + .attrs = ports_attrs,
1988 + };
1989 +
1990 +-static const struct attribute_group *ports_groups[] = {
1991 +- &ports_group,
1992 +- NULL
1993 +-};
1994 +-
1995 + /***************************************
1996 + * Adding & removing ports
1997 + ***************************************/
1998 +@@ -307,6 +302,7 @@ static int usbport_trig_notify(struct notifier_block *nb, unsigned long action,
1999 + static int usbport_trig_activate(struct led_classdev *led_cdev)
2000 + {
2001 + struct usbport_trig_data *usbport_data;
2002 ++ int err;
2003 +
2004 + usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL);
2005 + if (!usbport_data)
2006 +@@ -315,6 +311,9 @@ static int usbport_trig_activate(struct led_classdev *led_cdev)
2007 +
2008 + /* List of ports */
2009 + INIT_LIST_HEAD(&usbport_data->ports);
2010 ++ err = sysfs_create_group(&led_cdev->dev->kobj, &ports_group);
2011 ++ if (err)
2012 ++ goto err_free;
2013 + usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports);
2014 + usbport_trig_update_count(usbport_data);
2015 +
2016 +@@ -322,8 +321,11 @@ static int usbport_trig_activate(struct led_classdev *led_cdev)
2017 + usbport_data->nb.notifier_call = usbport_trig_notify;
2018 + led_set_trigger_data(led_cdev, usbport_data);
2019 + usb_register_notify(&usbport_data->nb);
2020 +-
2021 + return 0;
2022 ++
2023 ++err_free:
2024 ++ kfree(usbport_data);
2025 ++ return err;
2026 + }
2027 +
2028 + static void usbport_trig_deactivate(struct led_classdev *led_cdev)
2029 +@@ -335,6 +337,8 @@ static void usbport_trig_deactivate(struct led_classdev *led_cdev)
2030 + usbport_trig_remove_port(usbport_data, port);
2031 + }
2032 +
2033 ++ sysfs_remove_group(&led_cdev->dev->kobj, &ports_group);
2034 ++
2035 + usb_unregister_notify(&usbport_data->nb);
2036 +
2037 + kfree(usbport_data);
2038 +@@ -344,7 +348,6 @@ static struct led_trigger usbport_led_trigger = {
2039 + .name = "usbport",
2040 + .activate = usbport_trig_activate,
2041 + .deactivate = usbport_trig_deactivate,
2042 +- .groups = ports_groups,
2043 + };
2044 +
2045 + static int __init usbport_trig_init(void)
2046 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
2047 +index 558949b826d0..d8bf9307901e 100644
2048 +--- a/drivers/usb/dwc3/gadget.c
2049 ++++ b/drivers/usb/dwc3/gadget.c
2050 +@@ -177,6 +177,8 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
2051 + req->started = false;
2052 + list_del(&req->list);
2053 + req->remaining = 0;
2054 ++ req->unaligned = false;
2055 ++ req->zero = false;
2056 +
2057 + if (req->request.status == -EINPROGRESS)
2058 + req->request.status = status;
2059 +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
2060 +index d5b38f096698..5a6df6e9ad57 100644
2061 +--- a/drivers/usb/serial/pl2303.c
2062 ++++ b/drivers/usb/serial/pl2303.c
2063 +@@ -46,6 +46,7 @@ static const struct usb_device_id id_table[] = {
2064 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
2065 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
2066 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
2067 ++ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
2068 + { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
2069 + { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
2070 + { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
2071 +diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
2072 +index 4e2554d55362..559941ca884d 100644
2073 +--- a/drivers/usb/serial/pl2303.h
2074 ++++ b/drivers/usb/serial/pl2303.h
2075 +@@ -8,6 +8,7 @@
2076 +
2077 + #define PL2303_VENDOR_ID 0x067b
2078 + #define PL2303_PRODUCT_ID 0x2303
2079 ++#define PL2303_PRODUCT_ID_TB 0x2304
2080 + #define PL2303_PRODUCT_ID_RSAQ2 0x04bb
2081 + #define PL2303_PRODUCT_ID_DCU11 0x1234
2082 + #define PL2303_PRODUCT_ID_PHAROS 0xaaa0
2083 +@@ -20,6 +21,7 @@
2084 + #define PL2303_PRODUCT_ID_MOTOROLA 0x0307
2085 + #define PL2303_PRODUCT_ID_ZTEK 0xe1f1
2086 +
2087 ++
2088 + #define ATEN_VENDOR_ID 0x0557
2089 + #define ATEN_VENDOR_ID2 0x0547
2090 + #define ATEN_PRODUCT_ID 0x2008
2091 +diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
2092 +index 4d0273508043..edbbb13d6de6 100644
2093 +--- a/drivers/usb/serial/usb-serial-simple.c
2094 ++++ b/drivers/usb/serial/usb-serial-simple.c
2095 +@@ -85,7 +85,8 @@ DEVICE(moto_modem, MOTO_IDS);
2096 + /* Motorola Tetra driver */
2097 + #define MOTOROLA_TETRA_IDS() \
2098 + { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
2099 +- { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */
2100 ++ { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
2101 ++ { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */
2102 + DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
2103 +
2104 + /* Novatel Wireless GPS driver */
2105 +diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
2106 +index 4e656f89cb22..a86aa65ad66d 100644
2107 +--- a/drivers/vhost/net.c
2108 ++++ b/drivers/vhost/net.c
2109 +@@ -1024,7 +1024,8 @@ static void handle_rx(struct vhost_net *net)
2110 + if (nvq->done_idx > VHOST_NET_BATCH)
2111 + vhost_net_signal_used(nvq);
2112 + if (unlikely(vq_log))
2113 +- vhost_log_write(vq, vq_log, log, vhost_len);
2114 ++ vhost_log_write(vq, vq_log, log, vhost_len,
2115 ++ vq->iov, in);
2116 + total_len += vhost_len;
2117 + if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
2118 + vhost_poll_queue(&vq->poll);
2119 +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
2120 +index 55e5aa662ad5..c66fc8308b5e 100644
2121 +--- a/drivers/vhost/vhost.c
2122 ++++ b/drivers/vhost/vhost.c
2123 +@@ -1733,13 +1733,87 @@ static int log_write(void __user *log_base,
2124 + return r;
2125 + }
2126 +
2127 ++static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
2128 ++{
2129 ++ struct vhost_umem *umem = vq->umem;
2130 ++ struct vhost_umem_node *u;
2131 ++ u64 start, end, l, min;
2132 ++ int r;
2133 ++ bool hit = false;
2134 ++
2135 ++ while (len) {
2136 ++ min = len;
2144 ++	/* More than one GPA can be mapped into a single HVA, so
2145 ++	 * iterate over all possible umems here to be safe.
2139 ++ */
2140 ++ list_for_each_entry(u, &umem->umem_list, link) {
2141 ++ if (u->userspace_addr > hva - 1 + len ||
2142 ++ u->userspace_addr - 1 + u->size < hva)
2143 ++ continue;
2144 ++ start = max(u->userspace_addr, hva);
2145 ++ end = min(u->userspace_addr - 1 + u->size,
2146 ++ hva - 1 + len);
2147 ++ l = end - start + 1;
2148 ++ r = log_write(vq->log_base,
2149 ++ u->start + start - u->userspace_addr,
2150 ++ l);
2151 ++ if (r < 0)
2152 ++ return r;
2153 ++ hit = true;
2154 ++ min = min(l, min);
2155 ++ }
2156 ++
2157 ++ if (!hit)
2158 ++ return -EFAULT;
2159 ++
2160 ++ len -= min;
2161 ++ hva += min;
2162 ++ }
2163 ++
2164 ++ return 0;
2165 ++}
2166 ++
2167 ++static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
2168 ++{
2169 ++ struct iovec iov[64];
2170 ++ int i, ret;
2171 ++
2172 ++ if (!vq->iotlb)
2173 ++ return log_write(vq->log_base, vq->log_addr + used_offset, len);
2174 ++
2175 ++ ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
2176 ++ len, iov, 64, VHOST_ACCESS_WO);
2177 ++ if (ret)
2178 ++ return ret;
2179 ++
2180 ++ for (i = 0; i < ret; i++) {
2181 ++ ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
2182 ++ iov[i].iov_len);
2183 ++ if (ret)
2184 ++ return ret;
2185 ++ }
2186 ++
2187 ++ return 0;
2188 ++}
2189 ++
2190 + int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
2191 +- unsigned int log_num, u64 len)
2192 ++ unsigned int log_num, u64 len, struct iovec *iov, int count)
2193 + {
2194 + int i, r;
2195 +
2196 + /* Make sure data written is seen before log. */
2197 + smp_wmb();
2198 ++
2199 ++ if (vq->iotlb) {
2200 ++ for (i = 0; i < count; i++) {
2201 ++ r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
2202 ++ iov[i].iov_len);
2203 ++ if (r < 0)
2204 ++ return r;
2205 ++ }
2206 ++ return 0;
2207 ++ }
2208 ++
2209 + for (i = 0; i < log_num; ++i) {
2210 + u64 l = min(log[i].len, len);
2211 + r = log_write(vq->log_base, log[i].addr, l);
2212 +@@ -1769,9 +1843,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
2213 + smp_wmb();
2214 + /* Log used flag write. */
2215 + used = &vq->used->flags;
2216 +- log_write(vq->log_base, vq->log_addr +
2217 +- (used - (void __user *)vq->used),
2218 +- sizeof vq->used->flags);
2219 ++ log_used(vq, (used - (void __user *)vq->used),
2220 ++ sizeof vq->used->flags);
2221 + if (vq->log_ctx)
2222 + eventfd_signal(vq->log_ctx, 1);
2223 + }
2224 +@@ -1789,9 +1862,8 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
2225 + smp_wmb();
2226 + /* Log avail event write */
2227 + used = vhost_avail_event(vq);
2228 +- log_write(vq->log_base, vq->log_addr +
2229 +- (used - (void __user *)vq->used),
2230 +- sizeof *vhost_avail_event(vq));
2231 ++ log_used(vq, (used - (void __user *)vq->used),
2232 ++ sizeof *vhost_avail_event(vq));
2233 + if (vq->log_ctx)
2234 + eventfd_signal(vq->log_ctx, 1);
2235 + }
2236 +@@ -2191,10 +2263,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
2237 + /* Make sure data is seen before log. */
2238 + smp_wmb();
2239 + /* Log used ring entry write. */
2240 +- log_write(vq->log_base,
2241 +- vq->log_addr +
2242 +- ((void __user *)used - (void __user *)vq->used),
2243 +- count * sizeof *used);
2244 ++ log_used(vq, ((void __user *)used - (void __user *)vq->used),
2245 ++ count * sizeof *used);
2246 + }
2247 + old = vq->last_used_idx;
2248 + new = (vq->last_used_idx += count);
2249 +@@ -2236,9 +2306,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
2250 + /* Make sure used idx is seen before log. */
2251 + smp_wmb();
2252 + /* Log used index update. */
2253 +- log_write(vq->log_base,
2254 +- vq->log_addr + offsetof(struct vring_used, idx),
2255 +- sizeof vq->used->idx);
2256 ++ log_used(vq, offsetof(struct vring_used, idx),
2257 ++ sizeof vq->used->idx);
2258 + if (vq->log_ctx)
2259 + eventfd_signal(vq->log_ctx, 1);
2260 + }
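
The new log_used()/log_write_hva() path handles the IOTLB case by translating the used-ring offset to user-space addresses and then logging every memory region that overlaps each address range. The clipping arithmetic uses inclusive bounds, as in the patch; a self-contained sketch (struct region and clip() are invented names):

  #include <stdint.h>
  #include <stdbool.h>

  struct region {
          uint64_t uaddr;  /* user-space start */
          uint64_t size;
          uint64_t gpa;    /* guest-physical start */
  };

  /* if r overlaps [hva, hva + len), return the clipped piece */
  static bool clip(const struct region *r, uint64_t hva, uint64_t len,
                   uint64_t *log_off, uint64_t *n)
  {
          uint64_t start, end;  /* inclusive, as in the patch */

          if (r->uaddr > hva - 1 + len || r->uaddr - 1 + r->size < hva)
                  return false;  /* no overlap */

          start = hva > r->uaddr ? hva : r->uaddr;
          end = r->uaddr - 1 + r->size < hva - 1 + len ?
                r->uaddr - 1 + r->size : hva - 1 + len;
          *log_off = r->gpa + (start - r->uaddr);
          *n = end - start + 1;
          return true;
  }
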
2261 +diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
2262 +index 466ef7542291..1b675dad5e05 100644
2263 +--- a/drivers/vhost/vhost.h
2264 ++++ b/drivers/vhost/vhost.h
2265 +@@ -205,7 +205,8 @@ bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
2266 + bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
2267 +
2268 + int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
2269 +- unsigned int log_num, u64 len);
2270 ++ unsigned int log_num, u64 len,
2271 ++ struct iovec *iov, int count);
2272 + int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
2273 +
2274 + struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
2275 +diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
2276 +index 09731b2f6815..c6b3bdbbdbc9 100644
2277 +--- a/drivers/video/console/vgacon.c
2278 ++++ b/drivers/video/console/vgacon.c
2279 +@@ -271,6 +271,7 @@ static void vgacon_scrollback_update(struct vc_data *c, int t, int count)
2280 +
2281 + static void vgacon_restore_screen(struct vc_data *c)
2282 + {
2283 ++ c->vc_origin = c->vc_visible_origin;
2284 + vgacon_scrollback_cur->save = 0;
2285 +
2286 + if (!vga_is_gfx && !vgacon_scrollback_cur->restore) {
2287 +@@ -287,8 +288,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
2288 + int start, end, count, soff;
2289 +
2290 + if (!lines) {
2291 +- c->vc_visible_origin = c->vc_origin;
2292 +- vga_set_mem_top(c);
2293 ++ vgacon_restore_screen(c);
2294 + return;
2295 + }
2296 +
2297 +@@ -298,6 +298,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
2298 + if (!vgacon_scrollback_cur->save) {
2299 + vgacon_cursor(c, CM_ERASE);
2300 + vgacon_save_screen(c);
2301 ++ c->vc_origin = (unsigned long)c->vc_screenbuf;
2302 + vgacon_scrollback_cur->save = 1;
2303 + }
2304 +
2305 +@@ -335,7 +336,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
2306 + int copysize;
2307 +
2308 + int diff = c->vc_rows - count;
2309 +- void *d = (void *) c->vc_origin;
2310 ++ void *d = (void *) c->vc_visible_origin;
2311 + void *s = (void *) c->vc_screenbuf;
2312 +
2313 + count *= c->vc_size_row;
2314 +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
2315 +index eadffaa39f4e..c7542e8dd096 100644
2316 +--- a/fs/ceph/caps.c
2317 ++++ b/fs/ceph/caps.c
2318 +@@ -1030,6 +1030,8 @@ static void drop_inode_snap_realm(struct ceph_inode_info *ci)
2319 + list_del_init(&ci->i_snap_realm_item);
2320 + ci->i_snap_realm_counter++;
2321 + ci->i_snap_realm = NULL;
2322 ++ if (realm->ino == ci->i_vino.ino)
2323 ++ realm->inode = NULL;
2324 + spin_unlock(&realm->inodes_with_caps_lock);
2325 + ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc,
2326 + realm);
2327 +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
2328 +index 5657b79dbc99..269471c8f42b 100644
2329 +--- a/fs/cifs/cifssmb.c
2330 ++++ b/fs/cifs/cifssmb.c
2331 +@@ -1458,18 +1458,26 @@ cifs_discard_remaining_data(struct TCP_Server_Info *server)
2332 + }
2333 +
2334 + static int
2335 +-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2336 ++__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
2337 ++ bool malformed)
2338 + {
2339 + int length;
2340 +- struct cifs_readdata *rdata = mid->callback_data;
2341 +
2342 + length = cifs_discard_remaining_data(server);
2343 +- dequeue_mid(mid, rdata->result);
2344 ++ dequeue_mid(mid, malformed);
2345 + mid->resp_buf = server->smallbuf;
2346 + server->smallbuf = NULL;
2347 + return length;
2348 + }
2349 +
2350 ++static int
2351 ++cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2352 ++{
2353 ++ struct cifs_readdata *rdata = mid->callback_data;
2354 ++
2355 ++ return __cifs_readv_discard(server, mid, rdata->result);
2356 ++}
2357 ++
2358 + int
2359 + cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2360 + {
2361 +@@ -1511,12 +1519,23 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2362 + return -1;
2363 + }
2364 +
2365 ++ /* set up first two iov for signature check and to get credits */
2366 ++ rdata->iov[0].iov_base = buf;
2367 ++ rdata->iov[0].iov_len = 4;
2368 ++ rdata->iov[1].iov_base = buf + 4;
2369 ++ rdata->iov[1].iov_len = server->total_read - 4;
2370 ++ cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
2371 ++ rdata->iov[0].iov_base, rdata->iov[0].iov_len);
2372 ++ cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
2373 ++ rdata->iov[1].iov_base, rdata->iov[1].iov_len);
2374 ++
2375 + /* Was the SMB read successful? */
2376 + rdata->result = server->ops->map_error(buf, false);
2377 + if (rdata->result != 0) {
2378 + cifs_dbg(FYI, "%s: server returned error %d\n",
2379 + __func__, rdata->result);
2380 +- return cifs_readv_discard(server, mid);
2381 ++ /* normal error on read response */
2382 ++ return __cifs_readv_discard(server, mid, false);
2383 + }
2384 +
2385 + /* Is there enough to get to the rest of the READ_RSP header? */
2386 +@@ -1560,14 +1579,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2387 + server->total_read += length;
2388 + }
2389 +
2390 +- /* set up first iov for signature check */
2391 +- rdata->iov[0].iov_base = buf;
2392 +- rdata->iov[0].iov_len = 4;
2393 +- rdata->iov[1].iov_base = buf + 4;
2394 +- rdata->iov[1].iov_len = server->total_read - 4;
2395 +- cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n",
2396 +- rdata->iov[0].iov_base, server->total_read);
2397 +-
2398 + /* how much data is in the response? */
2399 + #ifdef CONFIG_CIFS_SMB_DIRECT
2400 + use_rdma_mr = rdata->mr;
2401 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
2402 +index 52d71b64c0c6..d0bba175117c 100644
2403 +--- a/fs/cifs/connect.c
2404 ++++ b/fs/cifs/connect.c
2405 +@@ -533,6 +533,21 @@ server_unresponsive(struct TCP_Server_Info *server)
2406 + return false;
2407 + }
2408 +
2409 ++static inline bool
2410 ++zero_credits(struct TCP_Server_Info *server)
2411 ++{
2412 ++ int val;
2413 ++
2414 ++ spin_lock(&server->req_lock);
2415 ++ val = server->credits + server->echo_credits + server->oplock_credits;
2416 ++ if (server->in_flight == 0 && val == 0) {
2417 ++ spin_unlock(&server->req_lock);
2418 ++ return true;
2419 ++ }
2420 ++ spin_unlock(&server->req_lock);
2421 ++ return false;
2422 ++}
2423 ++
2424 + static int
2425 + cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
2426 + {
2427 +@@ -545,6 +560,12 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
2428 + for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
2429 + try_to_freeze();
2430 +
2431 ++ /* reconnect if no credits and no requests in flight */
2432 ++ if (zero_credits(server)) {
2433 ++ cifs_reconnect(server);
2434 ++ return -ECONNABORTED;
2435 ++ }
2436 ++
2437 + if (server_unresponsive(server))
2438 + return -ECONNABORTED;
2439 + if (cifs_rdma_enabled(server) && server->smbd_conn)
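
zero_credits() evaluates a compound condition (no credits of any kind and nothing in flight) atomically under the request lock; reading the counters unlocked could combine values from two different moments and miss, or falsely trigger, the reconnect. A pthread sketch of the same idea:

  #include <pthread.h>
  #include <stdbool.h>

  struct srv {
          pthread_mutex_t req_lock;
          int credits, echo_credits, oplock_credits, in_flight;
  };

  static bool zero_credits(struct srv *s)
  {
          bool stuck;

          pthread_mutex_lock(&s->req_lock);
          stuck = s->in_flight == 0 &&
                  s->credits + s->echo_credits + s->oplock_credits == 0;
          pthread_mutex_unlock(&s->req_lock);
          return stuck;  /* deadlocked: reconnect to recover credits */
  }
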
2440 +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
2441 +index 6a9c47541c53..7b8b58fb4d3f 100644
2442 +--- a/fs/cifs/smb2misc.c
2443 ++++ b/fs/cifs/smb2misc.c
2444 +@@ -648,6 +648,13 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
2445 + if (rsp->sync_hdr.Command != SMB2_OPLOCK_BREAK)
2446 + return false;
2447 +
2448 ++ if (rsp->sync_hdr.CreditRequest) {
2449 ++ spin_lock(&server->req_lock);
2450 ++ server->credits += le16_to_cpu(rsp->sync_hdr.CreditRequest);
2451 ++ spin_unlock(&server->req_lock);
2452 ++ wake_up(&server->request_q);
2453 ++ }
2454 ++
2455 + if (rsp->StructureSize !=
2456 + smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
2457 + if (le16_to_cpu(rsp->StructureSize) == 44)
2458 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2459 +index f44bb4a304e9..237d7281ada3 100644
2460 +--- a/fs/cifs/smb2ops.c
2461 ++++ b/fs/cifs/smb2ops.c
2462 +@@ -34,6 +34,7 @@
2463 + #include "cifs_ioctl.h"
2464 + #include "smbdirect.h"
2465 +
2466 ++/* Change credits for different ops and return the total number of credits */
2467 + static int
2468 + change_conf(struct TCP_Server_Info *server)
2469 + {
2470 +@@ -41,17 +42,15 @@ change_conf(struct TCP_Server_Info *server)
2471 + server->oplock_credits = server->echo_credits = 0;
2472 + switch (server->credits) {
2473 + case 0:
2474 +- return -1;
2475 ++ return 0;
2476 + case 1:
2477 + server->echoes = false;
2478 + server->oplocks = false;
2479 +- cifs_dbg(VFS, "disabling echoes and oplocks\n");
2480 + break;
2481 + case 2:
2482 + server->echoes = true;
2483 + server->oplocks = false;
2484 + server->echo_credits = 1;
2485 +- cifs_dbg(FYI, "disabling oplocks\n");
2486 + break;
2487 + default:
2488 + server->echoes = true;
2489 +@@ -64,14 +63,15 @@ change_conf(struct TCP_Server_Info *server)
2490 + server->echo_credits = 1;
2491 + }
2492 + server->credits -= server->echo_credits + server->oplock_credits;
2493 +- return 0;
2494 ++ return server->credits + server->echo_credits + server->oplock_credits;
2495 + }
2496 +
2497 + static void
2498 + smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
2499 + const int optype)
2500 + {
2501 +- int *val, rc = 0;
2502 ++ int *val, rc = -1;
2503 ++
2504 + spin_lock(&server->req_lock);
2505 + val = server->ops->get_credits_field(server, optype);
2506 + *val += add;
2507 +@@ -95,8 +95,26 @@ smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
2508 + }
2509 + spin_unlock(&server->req_lock);
2510 + wake_up(&server->request_q);
2511 +- if (rc)
2512 +- cifs_reconnect(server);
2513 ++
2514 ++ if (server->tcpStatus == CifsNeedReconnect)
2515 ++ return;
2516 ++
2517 ++ switch (rc) {
2518 ++ case -1:
2519 ++ /* change_conf hasn't been executed */
2520 ++ break;
2521 ++ case 0:
2522 ++ cifs_dbg(VFS, "Possible client or server bug - zero credits\n");
2523 ++ break;
2524 ++ case 1:
2525 ++ cifs_dbg(VFS, "disabling echoes and oplocks\n");
2526 ++ break;
2527 ++ case 2:
2528 ++ cifs_dbg(FYI, "disabling oplocks\n");
2529 ++ break;
2530 ++ default:
2531 ++ cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
2532 ++ }
2533 + }
2534 +
2535 + static void
2536 +@@ -154,14 +172,14 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
2537 +
2538 + scredits = server->credits;
2539 + /* can deadlock with reopen */
2540 +- if (scredits == 1) {
2541 ++ if (scredits <= 8) {
2542 + *num = SMB2_MAX_BUFFER_SIZE;
2543 + *credits = 0;
2544 + break;
2545 + }
2546 +
2547 +- /* leave one credit for a possible reopen */
2548 +- scredits--;
2549 ++ /* leave some credits for reopen and other ops */
2550 ++ scredits -= 8;
2551 + *num = min_t(unsigned int, size,
2552 + scredits * SMB2_MAX_BUFFER_SIZE);
2553 +
2554 +@@ -2901,11 +2919,23 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
2555 + server->ops->is_status_pending(buf, server, 0))
2556 + return -1;
2557 +
2558 +- rdata->result = server->ops->map_error(buf, false);
2559 ++ /* set up first two iov to get credits */
2560 ++ rdata->iov[0].iov_base = buf;
2561 ++ rdata->iov[0].iov_len = 4;
2562 ++ rdata->iov[1].iov_base = buf + 4;
2563 ++ rdata->iov[1].iov_len =
2564 ++ min_t(unsigned int, buf_len, server->vals->read_rsp_size) - 4;
2565 ++ cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
2566 ++ rdata->iov[0].iov_base, rdata->iov[0].iov_len);
2567 ++ cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
2568 ++ rdata->iov[1].iov_base, rdata->iov[1].iov_len);
2569 ++
2570 ++ rdata->result = server->ops->map_error(buf, true);
2571 + if (rdata->result != 0) {
2572 + cifs_dbg(FYI, "%s: server returned error %d\n",
2573 + __func__, rdata->result);
2574 +- dequeue_mid(mid, rdata->result);
2575 ++ /* normal error on read response */
2576 ++ dequeue_mid(mid, false);
2577 + return 0;
2578 + }
2579 +
2580 +@@ -2978,14 +3008,6 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
2581 + return 0;
2582 + }
2583 +
2584 +- /* set up first iov for signature check */
2585 +- rdata->iov[0].iov_base = buf;
2586 +- rdata->iov[0].iov_len = 4;
2587 +- rdata->iov[1].iov_base = buf + 4;
2588 +- rdata->iov[1].iov_len = server->vals->read_rsp_size - 4;
2589 +- cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
2590 +- rdata->iov[0].iov_base, server->vals->read_rsp_size);
2591 +-
2592 + length = rdata->copy_into_pages(server, rdata, &iter);
2593 +
2594 + kfree(bvec);
2595 +diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
2596 +index ac6978d3208c..780bba695453 100644
2597 +--- a/fs/notify/inotify/inotify_user.c
2598 ++++ b/fs/notify/inotify/inotify_user.c
2599 +@@ -724,8 +724,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
2600 + return -EBADF;
2601 +
2602 + /* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
2603 +- if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE)))
2604 +- return -EINVAL;
2605 ++ if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
2606 ++ ret = -EINVAL;
2607 ++ goto fput_and_out;
2608 ++ }
2609 +
2610 + /* verify that this is indeed an inotify instance */
2611 + if (unlikely(f.file->f_op != &inotify_fops)) {
2612 +diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
2613 +index 1fd6fa822d2c..91393724e933 100644
2614 +--- a/include/linux/bpf_verifier.h
2615 ++++ b/include/linux/bpf_verifier.h
2616 +@@ -134,6 +134,7 @@ struct bpf_verifier_state {
2617 + struct bpf_func_state *frame[MAX_CALL_FRAMES];
2618 + struct bpf_verifier_state *parent;
2619 + u32 curframe;
2620 ++ bool speculative;
2621 + };
2622 +
2623 + /* linked list of verifier states used to prune search */
2624 +@@ -142,15 +143,25 @@ struct bpf_verifier_state_list {
2625 + struct bpf_verifier_state_list *next;
2626 + };
2627 +
2628 ++/* Possible states for alu_state member. */
2629 ++#define BPF_ALU_SANITIZE_SRC 1U
2630 ++#define BPF_ALU_SANITIZE_DST 2U
2631 ++#define BPF_ALU_NEG_VALUE (1U << 2)
2632 ++#define BPF_ALU_NON_POINTER (1U << 3)
2633 ++#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
2634 ++ BPF_ALU_SANITIZE_DST)
2635 ++
2636 + struct bpf_insn_aux_data {
2637 + union {
2638 + enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
2639 + unsigned long map_state; /* pointer/poison value for maps */
2640 + s32 call_imm; /* saved imm field of call insn */
2641 ++ u32 alu_limit; /* limit for add/sub register with pointer */
2642 + };
2643 + int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
2644 + int sanitize_stack_off; /* stack slot to be cleared */
2645 + bool seen; /* this insn was processed by the verifier */
2646 ++ u8 alu_state; /* used in combination with alu_limit */
2647 + };
2648 +
2649 + #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
2650 +@@ -186,6 +197,8 @@ struct bpf_subprog_info {
2651 + * one verifier_env per bpf_check() call
2652 + */
2653 + struct bpf_verifier_env {
2654 ++ u32 insn_idx;
2655 ++ u32 prev_insn_idx;
2656 + struct bpf_prog *prog; /* eBPF program being verified */
2657 + const struct bpf_verifier_ops *ops;
2658 + struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
2659 +diff --git a/include/linux/filter.h b/include/linux/filter.h
2660 +index ec90d5255cf7..1a39d57eb88f 100644
2661 +--- a/include/linux/filter.h
2662 ++++ b/include/linux/filter.h
2663 +@@ -53,14 +53,10 @@ struct sock_reuseport;
2664 + #define BPF_REG_D BPF_REG_8 /* data, callee-saved */
2665 + #define BPF_REG_H BPF_REG_9 /* hlen, callee-saved */
2666 +
2667 +-/* Kernel hidden auxiliary/helper register for hardening step.
2668 +- * Only used by eBPF JITs. It's nothing more than a temporary
2669 +- * register that JITs use internally, only that here it's part
2670 +- * of eBPF instructions that have been rewritten for blinding
2671 +- * constants. See JIT pre-step in bpf_jit_blind_constants().
2672 +- */
2673 ++/* Kernel hidden auxiliary/helper register. */
2674 + #define BPF_REG_AX MAX_BPF_REG
2675 +-#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
2676 ++#define MAX_BPF_EXT_REG (MAX_BPF_REG + 1)
2677 ++#define MAX_BPF_JIT_REG MAX_BPF_EXT_REG
2678 +
2679 + /* unused opcode to mark special call to bpf_tail_call() helper */
2680 + #define BPF_TAIL_CALL 0xf0
2681 +diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
2682 +index 5185a16b19ba..bbde887ed393 100644
2683 +--- a/include/linux/hyperv.h
2684 ++++ b/include/linux/hyperv.h
2685 +@@ -1166,8 +1166,9 @@ struct hv_ring_buffer_debug_info {
2686 + u32 bytes_avail_towrite;
2687 + };
2688 +
2689 +-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
2690 +- struct hv_ring_buffer_debug_info *debug_info);
2691 ++
2692 ++int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
2693 ++ struct hv_ring_buffer_debug_info *debug_info);
2694 +
2695 + /* Vmbus interface */
2696 + #define vmbus_driver_register(driver) \
2697 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
2698 +index 60a2e7646985..5d69e208e8d9 100644
2699 +--- a/include/linux/skbuff.h
2700 ++++ b/include/linux/skbuff.h
2701 +@@ -3178,6 +3178,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
2702 + *
2703 + * This is exactly the same as pskb_trim except that it ensures the
2711 + * checksums of received packets are still valid after the operation.
2705 ++ * It can change skb pointers.
2706 + */
2707 +
2708 + static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2709 +diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
2710 +index c9b7b136939d..95eed32d8c6b 100644
2711 +--- a/include/net/ip_fib.h
2712 ++++ b/include/net/ip_fib.h
2713 +@@ -230,7 +230,7 @@ int fib_table_delete(struct net *, struct fib_table *, struct fib_config *,
2714 + struct netlink_ext_ack *extack);
2715 + int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
2716 + struct netlink_callback *cb);
2717 +-int fib_table_flush(struct net *net, struct fib_table *table);
2718 ++int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all);
2719 + struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
2720 + void fib_table_flush_external(struct fib_table *table);
2721 + void fib_free_table(struct fib_table *tb);
2722 +diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
2723 +index fb78f6f500f3..f056b2a00d5c 100644
2724 +--- a/include/uapi/linux/input.h
2725 ++++ b/include/uapi/linux/input.h
2726 +@@ -26,13 +26,17 @@
2727 + */
2728 +
2729 + struct input_event {
2730 +-#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL)
2731 ++#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL__)
2732 + struct timeval time;
2733 + #define input_event_sec time.tv_sec
2734 + #define input_event_usec time.tv_usec
2735 + #else
2736 + __kernel_ulong_t __sec;
2737 ++#if defined(__sparc__) && defined(__arch64__)
2738 ++ unsigned int __usec;
2739 ++#else
2740 + __kernel_ulong_t __usec;
2741 ++#endif
2742 + #define input_event_sec __sec
2743 + #define input_event_usec __usec
2744 + #endif
2745 +diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
2746 +index 3f5bf1af0826..474525e3a9db 100644
2747 +--- a/kernel/bpf/core.c
2748 ++++ b/kernel/bpf/core.c
2749 +@@ -52,6 +52,7 @@
2750 + #define DST regs[insn->dst_reg]
2751 + #define SRC regs[insn->src_reg]
2752 + #define FP regs[BPF_REG_FP]
2753 ++#define AX regs[BPF_REG_AX]
2754 + #define ARG1 regs[BPF_REG_ARG1]
2755 + #define CTX regs[BPF_REG_CTX]
2756 + #define IMM insn->imm
2757 +@@ -642,6 +643,26 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
2758 + BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
2759 + BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
2760 +
2761 ++ /* Constraints on AX register:
2762 ++ *
2763 ++ * AX register is inaccessible from user space. It is mapped in
2764 ++ * all JITs, and used here for constant blinding rewrites. It is
2772 ++ * The interpreter uses AX on some occasions as a local temporary
2766 ++ * the executed instruction, but not across several instructions.
2767 ++ * There are a few exceptions however which are further detailed
2768 ++ * below.
2769 ++ *
2770 ++ * Constant blinding is only used by JITs, not in the interpreter.
2771 ++ * The interpreter uses AX in some occasions as a local temporary
2772 ++ * register e.g. in DIV or MOD instructions.
2773 ++ *
2774 ++ * In restricted circumstances, the verifier can also use the AX
2775 ++ * register for rewrites as long as they do not interfere with
2776 ++ * the above cases!
2777 ++ */
2778 ++ if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
2779 ++ goto out;
2780 ++
2781 + if (from->imm == 0 &&
2782 + (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
2783 + from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
2784 +@@ -971,7 +992,6 @@ bool bpf_opcode_in_insntable(u8 code)
2785 + */
2786 + static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
2787 + {
2788 +- u64 tmp;
2789 + #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
2790 + #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
2791 + static const void *jumptable[256] = {
2792 +@@ -1045,36 +1065,36 @@ select_insn:
2793 + (*(s64 *) &DST) >>= IMM;
2794 + CONT;
2795 + ALU64_MOD_X:
2796 +- div64_u64_rem(DST, SRC, &tmp);
2797 +- DST = tmp;
2798 ++ div64_u64_rem(DST, SRC, &AX);
2799 ++ DST = AX;
2800 + CONT;
2801 + ALU_MOD_X:
2802 +- tmp = (u32) DST;
2803 +- DST = do_div(tmp, (u32) SRC);
2804 ++ AX = (u32) DST;
2805 ++ DST = do_div(AX, (u32) SRC);
2806 + CONT;
2807 + ALU64_MOD_K:
2808 +- div64_u64_rem(DST, IMM, &tmp);
2809 +- DST = tmp;
2810 ++ div64_u64_rem(DST, IMM, &AX);
2811 ++ DST = AX;
2812 + CONT;
2813 + ALU_MOD_K:
2814 +- tmp = (u32) DST;
2815 +- DST = do_div(tmp, (u32) IMM);
2816 ++ AX = (u32) DST;
2817 ++ DST = do_div(AX, (u32) IMM);
2818 + CONT;
2819 + ALU64_DIV_X:
2820 + DST = div64_u64(DST, SRC);
2821 + CONT;
2822 + ALU_DIV_X:
2823 +- tmp = (u32) DST;
2824 +- do_div(tmp, (u32) SRC);
2825 +- DST = (u32) tmp;
2826 ++ AX = (u32) DST;
2827 ++ do_div(AX, (u32) SRC);
2828 ++ DST = (u32) AX;
2829 + CONT;
2830 + ALU64_DIV_K:
2831 + DST = div64_u64(DST, IMM);
2832 + CONT;
2833 + ALU_DIV_K:
2834 +- tmp = (u32) DST;
2835 +- do_div(tmp, (u32) IMM);
2836 +- DST = (u32) tmp;
2837 ++ AX = (u32) DST;
2838 ++ do_div(AX, (u32) IMM);
2839 ++ DST = (u32) AX;
2840 + CONT;
2841 + ALU_END_TO_BE:
2842 + switch (IMM) {
2843 +@@ -1330,7 +1350,7 @@ STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
2844 + static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
2845 + { \
2846 + u64 stack[stack_size / sizeof(u64)]; \
2847 +- u64 regs[MAX_BPF_REG]; \
2848 ++ u64 regs[MAX_BPF_EXT_REG]; \
2849 + \
2850 + FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2851 + ARG1 = (u64) (unsigned long) ctx; \
2852 +@@ -1343,7 +1363,7 @@ static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
2853 + const struct bpf_insn *insn) \
2854 + { \
2855 + u64 stack[stack_size / sizeof(u64)]; \
2856 +- u64 regs[MAX_BPF_REG]; \
2857 ++ u64 regs[MAX_BPF_EXT_REG]; \
2858 + \
2859 + FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2860 + BPF_R1 = r1; \
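
With the verifier now patching in rewrites that use the hidden AX register, the interpreter's register file grows to MAX_BPF_EXT_REG and its DIV/MOD scratch value moves from a C local into AX, keeping the two users of AX consistent. The u32 MOD lowering, reduced to plain C (divisor assumed non-zero, as the runtime checks earlier):

  #include <stdint.h>

  /* DST = (u32)DST % (u32)SRC, staged through the AX scratch slot */
  static uint64_t alu_mod32(uint64_t dst, uint64_t src, uint64_t *ax)
  {
          *ax = (uint32_t)dst;  /* scratch lives in AX, not on the stack */
          return (uint32_t)(*ax % (uint32_t)src);
  }
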
2861 +diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
2862 +index 3bfbf4464416..9670ee5ee74e 100644
2863 +--- a/kernel/bpf/map_in_map.c
2864 ++++ b/kernel/bpf/map_in_map.c
2865 +@@ -12,6 +12,7 @@
2866 + struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
2867 + {
2868 + struct bpf_map *inner_map, *inner_map_meta;
2869 ++ u32 inner_map_meta_size;
2870 + struct fd f;
2871 +
2872 + f = fdget(inner_map_ufd);
2873 +@@ -35,7 +36,12 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
2874 + return ERR_PTR(-EINVAL);
2875 + }
2876 +
2877 +- inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER);
2878 ++ inner_map_meta_size = sizeof(*inner_map_meta);
2886 ++	/* In some cases the verifier needs to access beyond just the base map. */
2880 ++ if (inner_map->ops == &array_map_ops)
2881 ++ inner_map_meta_size = sizeof(struct bpf_array);
2882 ++
2883 ++ inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER);
2884 + if (!inner_map_meta) {
2885 + fdput(f);
2886 + return ERR_PTR(-ENOMEM);
2887 +@@ -45,9 +51,16 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
2888 + inner_map_meta->key_size = inner_map->key_size;
2889 + inner_map_meta->value_size = inner_map->value_size;
2890 + inner_map_meta->map_flags = inner_map->map_flags;
2891 +- inner_map_meta->ops = inner_map->ops;
2892 + inner_map_meta->max_entries = inner_map->max_entries;
2893 +
2894 ++ /* Misc members not needed in bpf_map_meta_equal() check. */
2895 ++ inner_map_meta->ops = inner_map->ops;
2896 ++ if (inner_map->ops == &array_map_ops) {
2897 ++ inner_map_meta->unpriv_array = inner_map->unpriv_array;
2898 ++ container_of(inner_map_meta, struct bpf_array, map)->index_mask =
2899 ++ container_of(inner_map, struct bpf_array, map)->index_mask;
2900 ++ }
2901 ++
2902 + fdput(f);
2903 + return inner_map_meta;
2904 + }
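
Copying unpriv_array and index_mask into the inner-map meta lets the verifier apply the array map's Spectre mitigation to map-in-map lookups too: array indices are ANDed with a power-of-two-minus-one mask so that even a mispredicted bounds check cannot read out of range. A sketch of the masking itself (mirroring roundup_pow_of_two(max_entries) - 1):

  #include <stdint.h>
  #include <stddef.h>

  static uint32_t index_mask(uint32_t max_entries)
  {
          /* smear the top bit down: next power of two, minus one */
          uint32_t m = max_entries - 1;

          m |= m >> 1; m |= m >> 2; m |= m >> 4;
          m |= m >> 8; m |= m >> 16;
          return m;
  }

  static const void *lookup(const void *base, size_t elem_size,
                            uint32_t idx, uint32_t mask)
  {
          /* masked even after an architectural bounds check */
          return (const char *)base + (size_t)(idx & mask) * elem_size;
  }
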
2905 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
2906 +index 341806668f03..4d81be2d0739 100644
2907 +--- a/kernel/bpf/verifier.c
2908 ++++ b/kernel/bpf/verifier.c
2909 +@@ -156,6 +156,7 @@ struct bpf_verifier_stack_elem {
2910 +
2911 + #define BPF_COMPLEXITY_LIMIT_INSNS 131072
2912 + #define BPF_COMPLEXITY_LIMIT_STACK 1024
2913 ++#define BPF_COMPLEXITY_LIMIT_STATES 64
2914 +
2915 + #define BPF_MAP_PTR_UNPRIV 1UL
2916 + #define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \
2917 +@@ -465,6 +466,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
2918 + free_func_state(dst_state->frame[i]);
2919 + dst_state->frame[i] = NULL;
2920 + }
2921 ++ dst_state->speculative = src->speculative;
2922 + dst_state->curframe = src->curframe;
2923 + dst_state->parent = src->parent;
2924 + for (i = 0; i <= src->curframe; i++) {
2925 +@@ -510,7 +512,8 @@ static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
2926 + }
2927 +
2928 + static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
2929 +- int insn_idx, int prev_insn_idx)
2930 ++ int insn_idx, int prev_insn_idx,
2931 ++ bool speculative)
2932 + {
2933 + struct bpf_verifier_state *cur = env->cur_state;
2934 + struct bpf_verifier_stack_elem *elem;
2935 +@@ -528,6 +531,7 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
2936 + err = copy_verifier_state(&elem->st, cur);
2937 + if (err)
2938 + goto err;
2939 ++ elem->st.speculative |= speculative;
2940 + if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
2941 + verbose(env, "BPF program is too complex\n");
2942 + goto err;
2943 +@@ -1237,6 +1241,31 @@ static int check_stack_read(struct bpf_verifier_env *env,
2944 + }
2945 + }
2946 +
2947 ++static int check_stack_access(struct bpf_verifier_env *env,
2948 ++ const struct bpf_reg_state *reg,
2949 ++ int off, int size)
2950 ++{
2951 ++ /* Stack accesses must be at a fixed offset, so that we
2952 ++ * can determine what type of data were returned. See
2953 ++ * check_stack_read().
2954 ++ */
2955 ++ if (!tnum_is_const(reg->var_off)) {
2956 ++ char tn_buf[48];
2957 ++
2958 ++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2959 ++ verbose(env, "variable stack access var_off=%s off=%d size=%d",
2960 ++ tn_buf, off, size);
2961 ++ return -EACCES;
2962 ++ }
2963 ++
2964 ++ if (off >= 0 || off < -MAX_BPF_STACK) {
2965 ++ verbose(env, "invalid stack off=%d size=%d\n", off, size);
2966 ++ return -EACCES;
2967 ++ }
2968 ++
2969 ++ return 0;
2970 ++}
2971 ++
2972 + /* check read/write into map element returned by bpf_map_lookup_elem() */
2973 + static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
2974 + int size, bool zero_size_allowed)
2975 +@@ -1268,13 +1297,17 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,
2976 + */
2977 + if (env->log.level)
2978 + print_verifier_state(env, state);
2979 ++
2980 + /* The minimum value is only important with signed
2981 + * comparisons where we can't assume the floor of a
2982 + * value is 0. If we are using signed variables for our
2983 + * index'es we need to make sure that whatever we use
2984 + * will have a set floor within our range.
2985 + */
2986 +- if (reg->smin_value < 0) {
2987 ++ if (reg->smin_value < 0 &&
2988 ++ (reg->smin_value == S64_MIN ||
2989 ++ (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
2990 ++ reg->smin_value + off < 0)) {
2991 + verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
2992 + regno);
2993 + return -EACCES;
2994 +@@ -1735,24 +1768,10 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
2995 + }
2996 +
2997 + } else if (reg->type == PTR_TO_STACK) {
2998 +- /* stack accesses must be at a fixed offset, so that we can
2999 +- * determine what type of data were returned.
3000 +- * See check_stack_read().
3001 +- */
3002 +- if (!tnum_is_const(reg->var_off)) {
3003 +- char tn_buf[48];
3004 +-
3005 +- tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3006 +- verbose(env, "variable stack access var_off=%s off=%d size=%d",
3007 +- tn_buf, off, size);
3008 +- return -EACCES;
3009 +- }
3010 + off += reg->var_off.value;
3011 +- if (off >= 0 || off < -MAX_BPF_STACK) {
3012 +- verbose(env, "invalid stack off=%d size=%d\n", off,
3013 +- size);
3014 +- return -EACCES;
3015 +- }
3016 ++ err = check_stack_access(env, reg, off, size);
3017 ++ if (err)
3018 ++ return err;
3019 +
3020 + state = func(env, reg);
3021 + err = update_stack_depth(env, state, off);
3022 +@@ -2682,6 +2701,125 @@ static bool check_reg_sane_offset(struct bpf_verifier_env *env,
3023 + return true;
3024 + }
3025 +
3026 ++static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
3027 ++{
3028 ++ return &env->insn_aux_data[env->insn_idx];
3029 ++}
3030 ++
3031 ++static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
3032 ++ u32 *ptr_limit, u8 opcode, bool off_is_neg)
3033 ++{
3034 ++ bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
3035 ++ (opcode == BPF_SUB && !off_is_neg);
3036 ++ u32 off;
3037 ++
3038 ++ switch (ptr_reg->type) {
3039 ++ case PTR_TO_STACK:
3040 ++ off = ptr_reg->off + ptr_reg->var_off.value;
3041 ++ if (mask_to_left)
3042 ++ *ptr_limit = MAX_BPF_STACK + off;
3043 ++ else
3044 ++ *ptr_limit = -off;
3045 ++ return 0;
3046 ++ case PTR_TO_MAP_VALUE:
3047 ++ if (mask_to_left) {
3048 ++ *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
3049 ++ } else {
3050 ++ off = ptr_reg->smin_value + ptr_reg->off;
3051 ++ *ptr_limit = ptr_reg->map_ptr->value_size - off;
3052 ++ }
3053 ++ return 0;
3054 ++ default:
3055 ++ return -EINVAL;
3056 ++ }
3057 ++}
3058 ++
3059 ++static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
3060 ++ const struct bpf_insn *insn)
3061 ++{
3062 ++ return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
3063 ++}
3064 ++
3065 ++static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
3066 ++ u32 alu_state, u32 alu_limit)
3067 ++{
3068 ++ /* If we arrived here from different branches with different
3069 ++ * state or limits to sanitize, then this won't work.
3070 ++ */
3071 ++ if (aux->alu_state &&
3072 ++ (aux->alu_state != alu_state ||
3073 ++ aux->alu_limit != alu_limit))
3074 ++ return -EACCES;
3075 ++
3076 ++ /* Corresponding fixup done in fixup_bpf_calls(). */
3077 ++ aux->alu_state = alu_state;
3078 ++ aux->alu_limit = alu_limit;
3079 ++ return 0;
3080 ++}
3081 ++
3082 ++static int sanitize_val_alu(struct bpf_verifier_env *env,
3083 ++ struct bpf_insn *insn)
3084 ++{
3085 ++ struct bpf_insn_aux_data *aux = cur_aux(env);
3086 ++
3087 ++ if (can_skip_alu_sanitation(env, insn))
3088 ++ return 0;
3089 ++
3090 ++ return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
3091 ++}
3092 ++
3093 ++static int sanitize_ptr_alu(struct bpf_verifier_env *env,
3094 ++ struct bpf_insn *insn,
3095 ++ const struct bpf_reg_state *ptr_reg,
3096 ++ struct bpf_reg_state *dst_reg,
3097 ++ bool off_is_neg)
3098 ++{
3099 ++ struct bpf_verifier_state *vstate = env->cur_state;
3100 ++ struct bpf_insn_aux_data *aux = cur_aux(env);
3101 ++ bool ptr_is_dst_reg = ptr_reg == dst_reg;
3102 ++ u8 opcode = BPF_OP(insn->code);
3103 ++ u32 alu_state, alu_limit;
3104 ++ struct bpf_reg_state tmp;
3105 ++ bool ret;
3106 ++
3107 ++ if (can_skip_alu_sanitation(env, insn))
3108 ++ return 0;
3109 ++
3110 ++ /* We already marked aux for masking from non-speculative
3111 ++ * paths, thus we got here in the first place. We only care
3112 ++ * to explore bad access from here.
3113 ++ */
3114 ++ if (vstate->speculative)
3115 ++ goto do_sim;
3116 ++
3117 ++ alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
3118 ++ alu_state |= ptr_is_dst_reg ?
3119 ++ BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
3120 ++
3121 ++ if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
3122 ++ return 0;
3123 ++ if (update_alu_sanitation_state(aux, alu_state, alu_limit))
3124 ++ return -EACCES;
3125 ++do_sim:
3126 ++ /* Simulate and find potential out-of-bounds access under
3127 ++ * speculative execution from truncation as a result of
3128 ++ * masking when off was not within expected range. If off
3129 ++ * sits in dst, then we temporarily need to move ptr there
3130 ++ * to simulate dst (== 0) +/-= ptr. Needed, for example,
3131 ++ * for cases where we use K-based arithmetic in one direction
3132 ++ * and truncated reg-based in the other in order to explore
3133 ++ * bad access.
3134 ++ */
3135 ++ if (!ptr_is_dst_reg) {
3136 ++ tmp = *dst_reg;
3137 ++ *dst_reg = *ptr_reg;
3138 ++ }
3139 ++ ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
3140 ++ if (!ptr_is_dst_reg)
3141 ++ *dst_reg = tmp;
3142 ++ return !ret ? -EFAULT : 0;
3143 ++}
3144 ++
3145 + /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
3146 + * Caller should also handle BPF_MOV case separately.
3147 + * If we return -EACCES, caller may want to try again treating pointer as a
3148 +@@ -2700,8 +2838,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3149 + smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
3150 + u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
3151 + umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
3152 ++ u32 dst = insn->dst_reg, src = insn->src_reg;
3153 + u8 opcode = BPF_OP(insn->code);
3154 +- u32 dst = insn->dst_reg;
3155 ++ int ret;
3156 +
3157 + dst_reg = &regs[dst];
3158 +
3159 +@@ -2737,6 +2876,12 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3160 + dst);
3161 + return -EACCES;
3162 + }
3163 ++ if (ptr_reg->type == PTR_TO_MAP_VALUE &&
3164 ++ !env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
3165 ++ verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
3166 ++ off_reg == dst_reg ? dst : src);
3167 ++ return -EACCES;
3168 ++ }
3169 +
3170 + /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
3171 + * The id may be overwritten later if we create a new variable offset.
3172 +@@ -2750,6 +2895,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3173 +
3174 + switch (opcode) {
3175 + case BPF_ADD:
3176 ++ ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
3177 ++ if (ret < 0) {
3178 ++ verbose(env, "R%d tried to add from different maps or paths\n", dst);
3179 ++ return ret;
3180 ++ }
3181 + /* We can take a fixed offset as long as it doesn't overflow
3182 + * the s32 'off' field
3183 + */
3184 +@@ -2800,6 +2950,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3185 + }
3186 + break;
3187 + case BPF_SUB:
3188 ++ ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
3189 ++ if (ret < 0) {
3190 ++ verbose(env, "R%d tried to sub from different maps or paths\n", dst);
3191 ++ return ret;
3192 ++ }
3193 + if (dst_reg == off_reg) {
3194 + /* scalar -= pointer. Creates an unknown scalar */
3195 + verbose(env, "R%d tried to subtract pointer from scalar\n",
3196 +@@ -2879,6 +3034,25 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3197 + __update_reg_bounds(dst_reg);
3198 + __reg_deduce_bounds(dst_reg);
3199 + __reg_bound_offset(dst_reg);
3200 ++
3201 ++ /* For unprivileged we require that resulting offset must be in bounds
3202 ++ * in order to be able to sanitize access later on.
3203 ++ */
3204 ++ if (!env->allow_ptr_leaks) {
3205 ++ if (dst_reg->type == PTR_TO_MAP_VALUE &&
3206 ++ check_map_access(env, dst, dst_reg->off, 1, false)) {
3207 ++ verbose(env, "R%d pointer arithmetic of map value goes out of range, "
3208 ++ "prohibited for !root\n", dst);
3209 ++ return -EACCES;
3210 ++ } else if (dst_reg->type == PTR_TO_STACK &&
3211 ++ check_stack_access(env, dst_reg, dst_reg->off +
3212 ++ dst_reg->var_off.value, 1)) {
3213 ++ verbose(env, "R%d stack pointer arithmetic goes out of range, "
3214 ++ "prohibited for !root\n", dst);
3215 ++ return -EACCES;
3216 ++ }
3217 ++ }
3218 ++
3219 + return 0;
3220 + }
3221 +
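Note on the alu_limit used above: retrieve_ptr_limit() computes the distance
from the current pointer to the nearest boundary of the object in the
direction the (possibly attacker-controlled) offset moves it. Worked example,
with MAX_BPF_STACK == 512: for a stack pointer at off = -16, an offset moving
the pointer further down may span at most 512 + (-16) = 496 bytes, while one
moving it back up toward the frame base is limited to -(-16) = 16 bytes. That
per-direction distance is what the masking emitted in fixup_bpf_calls() later
enforces at run time.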
3222 +@@ -2897,6 +3071,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3223 + s64 smin_val, smax_val;
3224 + u64 umin_val, umax_val;
3225 + u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
3226 ++ u32 dst = insn->dst_reg;
3227 ++ int ret;
3228 +
3229 + if (insn_bitness == 32) {
3230 + /* Relevant for 32-bit RSH: Information can propagate towards
3231 +@@ -2931,6 +3107,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3232 +
3233 + switch (opcode) {
3234 + case BPF_ADD:
3235 ++ ret = sanitize_val_alu(env, insn);
3236 ++ if (ret < 0) {
3237 ++ verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
3238 ++ return ret;
3239 ++ }
3240 + if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
3241 + signed_add_overflows(dst_reg->smax_value, smax_val)) {
3242 + dst_reg->smin_value = S64_MIN;
3243 +@@ -2950,6 +3131,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3244 + dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
3245 + break;
3246 + case BPF_SUB:
3247 ++ ret = sanitize_val_alu(env, insn);
3248 ++ if (ret < 0) {
3249 ++ verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
3250 ++ return ret;
3251 ++ }
3252 + if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
3253 + signed_sub_overflows(dst_reg->smax_value, smin_val)) {
3254 + /* Overflow possible, we know nothing */
3255 +@@ -3475,6 +3661,79 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
3256 + }
3257 + }
3258 +
3259 ++/* compute branch direction of the expression "if (reg opcode val) goto target;"
3260 ++ * and return:
3261 ++ * 1 - branch will be taken and "goto target" will be executed
3262 ++ * 0 - branch will not be taken and fall-through to next insn
3263 ++ * -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10]
3264 ++ */
3265 ++static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
3266 ++{
3267 ++ if (__is_pointer_value(false, reg))
3268 ++ return -1;
3269 ++
3270 ++ switch (opcode) {
3271 ++ case BPF_JEQ:
3272 ++ if (tnum_is_const(reg->var_off))
3273 ++ return !!tnum_equals_const(reg->var_off, val);
3274 ++ break;
3275 ++ case BPF_JNE:
3276 ++ if (tnum_is_const(reg->var_off))
3277 ++ return !tnum_equals_const(reg->var_off, val);
3278 ++ break;
3279 ++ case BPF_JGT:
3280 ++ if (reg->umin_value > val)
3281 ++ return 1;
3282 ++ else if (reg->umax_value <= val)
3283 ++ return 0;
3284 ++ break;
3285 ++ case BPF_JSGT:
3286 ++ if (reg->smin_value > (s64)val)
3287 ++ return 1;
3288 ++ else if (reg->smax_value < (s64)val)
3289 ++ return 0;
3290 ++ break;
3291 ++ case BPF_JLT:
3292 ++ if (reg->umax_value < val)
3293 ++ return 1;
3294 ++ else if (reg->umin_value >= val)
3295 ++ return 0;
3296 ++ break;
3297 ++ case BPF_JSLT:
3298 ++ if (reg->smax_value < (s64)val)
3299 ++ return 1;
3300 ++ else if (reg->smin_value >= (s64)val)
3301 ++ return 0;
3302 ++ break;
3303 ++ case BPF_JGE:
3304 ++ if (reg->umin_value >= val)
3305 ++ return 1;
3306 ++ else if (reg->umax_value < val)
3307 ++ return 0;
3308 ++ break;
3309 ++ case BPF_JSGE:
3310 ++ if (reg->smin_value >= (s64)val)
3311 ++ return 1;
3312 ++ else if (reg->smax_value < (s64)val)
3313 ++ return 0;
3314 ++ break;
3315 ++ case BPF_JLE:
3316 ++ if (reg->umax_value <= val)
3317 ++ return 1;
3318 ++ else if (reg->umin_value > val)
3319 ++ return 0;
3320 ++ break;
3321 ++ case BPF_JSLE:
3322 ++ if (reg->smax_value <= (s64)val)
3323 ++ return 1;
3324 ++ else if (reg->smin_value > (s64)val)
3325 ++ return 0;
3326 ++ break;
3327 ++ }
3328 ++
3329 ++ return -1;
3330 ++}
3331 ++
3332 + /* Adjusts the register min/max values in the case that the dst_reg is the
3333 + * variable register that we are working on, and src_reg is a constant or we're
3334 + * simply doing a BPF_K check.
3335 +@@ -3868,28 +4127,23 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
3336 +
3337 + dst_reg = &regs[insn->dst_reg];
3338 +
3339 +- /* detect if R == 0 where R was initialized to zero earlier */
3340 +- if (BPF_SRC(insn->code) == BPF_K &&
3341 +- (opcode == BPF_JEQ || opcode == BPF_JNE) &&
3342 +- dst_reg->type == SCALAR_VALUE &&
3343 +- tnum_is_const(dst_reg->var_off)) {
3344 +- if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) ||
3345 +- (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) {
3346 +- /* if (imm == imm) goto pc+off;
3347 +- * only follow the goto, ignore fall-through
3348 +- */
3349 ++ if (BPF_SRC(insn->code) == BPF_K) {
3350 ++ int pred = is_branch_taken(dst_reg, insn->imm, opcode);
3351 ++
3352 ++ if (pred == 1) {
3353 ++ /* only follow the goto, ignore fall-through */
3354 + *insn_idx += insn->off;
3355 + return 0;
3356 +- } else {
3357 +- /* if (imm != imm) goto pc+off;
3358 +- * only follow fall-through branch, since
3359 ++ } else if (pred == 0) {
3360 ++ /* only follow fall-through branch, since
3361 + * that's where the program will go
3362 + */
3363 + return 0;
3364 + }
3365 + }
3366 +
3367 +- other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
3368 ++ other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
3369 ++ false);
3370 + if (!other_branch)
3371 + return -EFAULT;
3372 + other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
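Note on the two hunks above: is_branch_taken() lets the verifier prune
branches from range knowledge for every jump opcode, where the old code could
only do so for constant JEQ/JNE. A short example of the reasoning, assuming a
register known to be in the unsigned range [7, 9]:

	if (r0 > 5)	/* BPF_JGT, umin 7 > 5  -> pred = 1, explore taken path only */
	if (r0 > 9)	/* umax 9 <= 9          -> pred = 0, explore fall-through only */
	if (r0 > 8)	/* [7,9] straddles 8    -> pred = -1, push both paths */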
3373 +@@ -4604,6 +4858,12 @@ static bool states_equal(struct bpf_verifier_env *env,
3374 + if (old->curframe != cur->curframe)
3375 + return false;
3376 +
3377 ++ /* Verification state from speculative execution simulation
3378 ++ * must never prune a non-speculative execution one.
3379 ++ */
3380 ++ if (old->speculative && !cur->speculative)
3381 ++ return false;
3382 ++
3383 + /* for states to be equal callsites have to be the same
3384 + * and all frame states need to be equivalent
3385 + */
3386 +@@ -4668,7 +4928,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
3387 + struct bpf_verifier_state_list *new_sl;
3388 + struct bpf_verifier_state_list *sl;
3389 + struct bpf_verifier_state *cur = env->cur_state;
3390 +- int i, j, err;
3391 ++ int i, j, err, states_cnt = 0;
3392 +
3393 + sl = env->explored_states[insn_idx];
3394 + if (!sl)
3395 +@@ -4695,8 +4955,12 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
3396 + return 1;
3397 + }
3398 + sl = sl->next;
3399 ++ states_cnt++;
3400 + }
3401 +
3402 ++ if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
3403 ++ return 0;
3404 ++
3405 + /* there were no equivalent states, remember current one.
3406 + * technically the current state is not proven to be safe yet,
3407 + * but it will either reach outer most bpf_exit (which means it's safe)
3408 +@@ -4744,7 +5008,6 @@ static int do_check(struct bpf_verifier_env *env)
3409 + struct bpf_insn *insns = env->prog->insnsi;
3410 + struct bpf_reg_state *regs;
3411 + int insn_cnt = env->prog->len, i;
3412 +- int insn_idx, prev_insn_idx = 0;
3413 + int insn_processed = 0;
3414 + bool do_print_state = false;
3415 +
3416 +@@ -4752,7 +5015,7 @@ static int do_check(struct bpf_verifier_env *env)
3417 + if (!state)
3418 + return -ENOMEM;
3419 + state->curframe = 0;
3420 +- state->parent = NULL;
3421 ++ state->speculative = false;
3422 + state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
3423 + if (!state->frame[0]) {
3424 + kfree(state);
3425 +@@ -4763,19 +5026,19 @@ static int do_check(struct bpf_verifier_env *env)
3426 + BPF_MAIN_FUNC /* callsite */,
3427 + 0 /* frameno */,
3428 + 0 /* subprogno, zero == main subprog */);
3429 +- insn_idx = 0;
3430 ++
3431 + for (;;) {
3432 + struct bpf_insn *insn;
3433 + u8 class;
3434 + int err;
3435 +
3436 +- if (insn_idx >= insn_cnt) {
3437 ++ if (env->insn_idx >= insn_cnt) {
3438 + verbose(env, "invalid insn idx %d insn_cnt %d\n",
3439 +- insn_idx, insn_cnt);
3440 ++ env->insn_idx, insn_cnt);
3441 + return -EFAULT;
3442 + }
3443 +
3444 +- insn = &insns[insn_idx];
3445 ++ insn = &insns[env->insn_idx];
3446 + class = BPF_CLASS(insn->code);
3447 +
3448 + if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
3449 +@@ -4785,17 +5048,19 @@ static int do_check(struct bpf_verifier_env *env)
3450 + return -E2BIG;
3451 + }
3452 +
3453 +- err = is_state_visited(env, insn_idx);
3454 ++ err = is_state_visited(env, env->insn_idx);
3455 + if (err < 0)
3456 + return err;
3457 + if (err == 1) {
3458 + /* found equivalent state, can prune the search */
3459 + if (env->log.level) {
3460 + if (do_print_state)
3461 +- verbose(env, "\nfrom %d to %d: safe\n",
3462 +- prev_insn_idx, insn_idx);
3463 ++ verbose(env, "\nfrom %d to %d%s: safe\n",
3464 ++ env->prev_insn_idx, env->insn_idx,
3465 ++ env->cur_state->speculative ?
3466 ++ " (speculative execution)" : "");
3467 + else
3468 +- verbose(env, "%d: safe\n", insn_idx);
3469 ++ verbose(env, "%d: safe\n", env->insn_idx);
3470 + }
3471 + goto process_bpf_exit;
3472 + }
3473 +@@ -4808,10 +5073,12 @@ static int do_check(struct bpf_verifier_env *env)
3474 +
3475 + if (env->log.level > 1 || (env->log.level && do_print_state)) {
3476 + if (env->log.level > 1)
3477 +- verbose(env, "%d:", insn_idx);
3478 ++ verbose(env, "%d:", env->insn_idx);
3479 + else
3480 +- verbose(env, "\nfrom %d to %d:",
3481 +- prev_insn_idx, insn_idx);
3482 ++ verbose(env, "\nfrom %d to %d%s:",
3483 ++ env->prev_insn_idx, env->insn_idx,
3484 ++ env->cur_state->speculative ?
3485 ++ " (speculative execution)" : "");
3486 + print_verifier_state(env, state->frame[state->curframe]);
3487 + do_print_state = false;
3488 + }
3489 +@@ -4822,19 +5089,20 @@ static int do_check(struct bpf_verifier_env *env)
3490 + .private_data = env,
3491 + };
3492 +
3493 +- verbose(env, "%d: ", insn_idx);
3494 ++ verbose(env, "%d: ", env->insn_idx);
3495 + print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
3496 + }
3497 +
3498 + if (bpf_prog_is_dev_bound(env->prog->aux)) {
3499 +- err = bpf_prog_offload_verify_insn(env, insn_idx,
3500 +- prev_insn_idx);
3501 ++ err = bpf_prog_offload_verify_insn(env, env->insn_idx,
3502 ++ env->prev_insn_idx);
3503 + if (err)
3504 + return err;
3505 + }
3506 +
3507 + regs = cur_regs(env);
3508 +- env->insn_aux_data[insn_idx].seen = true;
3509 ++ env->insn_aux_data[env->insn_idx].seen = true;
3510 ++
3511 + if (class == BPF_ALU || class == BPF_ALU64) {
3512 + err = check_alu_op(env, insn);
3513 + if (err)
3514 +@@ -4859,13 +5127,13 @@ static int do_check(struct bpf_verifier_env *env)
3515 + /* check that memory (src_reg + off) is readable,
3516 + * the state of dst_reg will be updated by this func
3517 + */
3518 +- err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
3519 +- BPF_SIZE(insn->code), BPF_READ,
3520 +- insn->dst_reg, false);
3521 ++ err = check_mem_access(env, env->insn_idx, insn->src_reg,
3522 ++ insn->off, BPF_SIZE(insn->code),
3523 ++ BPF_READ, insn->dst_reg, false);
3524 + if (err)
3525 + return err;
3526 +
3527 +- prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
3528 ++ prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
3529 +
3530 + if (*prev_src_type == NOT_INIT) {
3531 + /* saw a valid insn
3532 +@@ -4892,10 +5160,10 @@ static int do_check(struct bpf_verifier_env *env)
3533 + enum bpf_reg_type *prev_dst_type, dst_reg_type;
3534 +
3535 + if (BPF_MODE(insn->code) == BPF_XADD) {
3536 +- err = check_xadd(env, insn_idx, insn);
3537 ++ err = check_xadd(env, env->insn_idx, insn);
3538 + if (err)
3539 + return err;
3540 +- insn_idx++;
3541 ++ env->insn_idx++;
3542 + continue;
3543 + }
3544 +
3545 +@@ -4911,13 +5179,13 @@ static int do_check(struct bpf_verifier_env *env)
3546 + dst_reg_type = regs[insn->dst_reg].type;
3547 +
3548 + /* check that memory (dst_reg + off) is writeable */
3549 +- err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
3550 +- BPF_SIZE(insn->code), BPF_WRITE,
3551 +- insn->src_reg, false);
3552 ++ err = check_mem_access(env, env->insn_idx, insn->dst_reg,
3553 ++ insn->off, BPF_SIZE(insn->code),
3554 ++ BPF_WRITE, insn->src_reg, false);
3555 + if (err)
3556 + return err;
3557 +
3558 +- prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
3559 ++ prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
3560 +
3561 + if (*prev_dst_type == NOT_INIT) {
3562 + *prev_dst_type = dst_reg_type;
3563 +@@ -4946,9 +5214,9 @@ static int do_check(struct bpf_verifier_env *env)
3564 + }
3565 +
3566 + /* check that memory (dst_reg + off) is writeable */
3567 +- err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
3568 +- BPF_SIZE(insn->code), BPF_WRITE,
3569 +- -1, false);
3570 ++ err = check_mem_access(env, env->insn_idx, insn->dst_reg,
3571 ++ insn->off, BPF_SIZE(insn->code),
3572 ++ BPF_WRITE, -1, false);
3573 + if (err)
3574 + return err;
3575 +
3576 +@@ -4966,9 +5234,9 @@ static int do_check(struct bpf_verifier_env *env)
3577 + }
3578 +
3579 + if (insn->src_reg == BPF_PSEUDO_CALL)
3580 +- err = check_func_call(env, insn, &insn_idx);
3581 ++ err = check_func_call(env, insn, &env->insn_idx);
3582 + else
3583 +- err = check_helper_call(env, insn->imm, insn_idx);
3584 ++ err = check_helper_call(env, insn->imm, env->insn_idx);
3585 + if (err)
3586 + return err;
3587 +
3588 +@@ -4981,7 +5249,7 @@ static int do_check(struct bpf_verifier_env *env)
3589 + return -EINVAL;
3590 + }
3591 +
3592 +- insn_idx += insn->off + 1;
3593 ++ env->insn_idx += insn->off + 1;
3594 + continue;
3595 +
3596 + } else if (opcode == BPF_EXIT) {
3597 +@@ -4995,8 +5263,8 @@ static int do_check(struct bpf_verifier_env *env)
3598 +
3599 + if (state->curframe) {
3600 + /* exit from nested function */
3601 +- prev_insn_idx = insn_idx;
3602 +- err = prepare_func_exit(env, &insn_idx);
3603 ++ env->prev_insn_idx = env->insn_idx;
3604 ++ err = prepare_func_exit(env, &env->insn_idx);
3605 + if (err)
3606 + return err;
3607 + do_print_state = true;
3608 +@@ -5022,7 +5290,8 @@ static int do_check(struct bpf_verifier_env *env)
3609 + if (err)
3610 + return err;
3611 + process_bpf_exit:
3612 +- err = pop_stack(env, &prev_insn_idx, &insn_idx);
3613 ++ err = pop_stack(env, &env->prev_insn_idx,
3614 ++ &env->insn_idx);
3615 + if (err < 0) {
3616 + if (err != -ENOENT)
3617 + return err;
3618 +@@ -5032,7 +5301,7 @@ process_bpf_exit:
3619 + continue;
3620 + }
3621 + } else {
3622 +- err = check_cond_jmp_op(env, insn, &insn_idx);
3623 ++ err = check_cond_jmp_op(env, insn, &env->insn_idx);
3624 + if (err)
3625 + return err;
3626 + }
3627 +@@ -5049,8 +5318,8 @@ process_bpf_exit:
3628 + if (err)
3629 + return err;
3630 +
3631 +- insn_idx++;
3632 +- env->insn_aux_data[insn_idx].seen = true;
3633 ++ env->insn_idx++;
3634 ++ env->insn_aux_data[env->insn_idx].seen = true;
3635 + } else {
3636 + verbose(env, "invalid BPF_LD mode\n");
3637 + return -EINVAL;
3638 +@@ -5060,7 +5329,7 @@ process_bpf_exit:
3639 + return -EINVAL;
3640 + }
3641 +
3642 +- insn_idx++;
3643 ++ env->insn_idx++;
3644 + }
3645 +
3646 + verbose(env, "processed %d insns (limit %d), stack depth ",
3647 +@@ -5756,6 +6025,57 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
3648 + continue;
3649 + }
3650 +
3651 ++ if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
3652 ++ insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
3653 ++ const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
3654 ++ const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
3655 ++ struct bpf_insn insn_buf[16];
3656 ++ struct bpf_insn *patch = &insn_buf[0];
3657 ++ bool issrc, isneg;
3658 ++ u32 off_reg;
3659 ++
3660 ++ aux = &env->insn_aux_data[i + delta];
3661 ++ if (!aux->alu_state)
3662 ++ continue;
3663 ++
3664 ++ isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
3665 ++ issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
3666 ++ BPF_ALU_SANITIZE_SRC;
3667 ++
3668 ++ off_reg = issrc ? insn->src_reg : insn->dst_reg;
3669 ++ if (isneg)
3670 ++ *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
3671 ++ *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
3672 ++ *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
3673 ++ *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
3674 ++ *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
3675 ++ *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
3676 ++ if (issrc) {
3677 ++ *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
3678 ++ off_reg);
3679 ++ insn->src_reg = BPF_REG_AX;
3680 ++ } else {
3681 ++ *patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
3682 ++ BPF_REG_AX);
3683 ++ }
3684 ++ if (isneg)
3685 ++ insn->code = insn->code == code_add ?
3686 ++ code_sub : code_add;
3687 ++ *patch++ = *insn;
3688 ++ if (issrc && isneg)
3689 ++ *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
3690 ++ cnt = patch - insn_buf;
3691 ++
3692 ++ new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
3693 ++ if (!new_prog)
3694 ++ return -ENOMEM;
3695 ++
3696 ++ delta += cnt - 1;
3697 ++ env->prog = prog = new_prog;
3698 ++ insn = new_prog->insnsi + i + delta;
3699 ++ continue;
3700 ++ }
3701 ++
3702 + if (insn->code != (BPF_JMP | BPF_CALL))
3703 + continue;
3704 + if (insn->src_reg == BPF_PSEUDO_CALL)
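Note on the fixup_bpf_calls() hunk above: each sanitized pointer ALU
instruction is rewritten into a branch-free mask, so even a mispredicted
bounds check cannot move the pointer further than the verifier proved safe.
In C-like pseudocode the emitted sequence computes (hedged sketch; the shift
is the arithmetic right shift that BPF_ARSH guarantees):

	long ax = (long)(alu_limit - 1) - off;	/* < 0 iff off > limit - 1 */
	ax |= off;				/* < 0 iff off outside [0, limit-1] */
	ax = -ax;
	ax >>= 63;				/* all-ones if off in range, else 0 */
	off &= ax;				/* out-of-range offsets collapse to 0 */

Negative offsets are handled by first negating off (the BPF_MUL by -1) and
flipping the ADD/SUB opcode, so the same non-negative mask logic applies.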
3705 +diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
3706 +index ce32cf741b25..76801b9b481e 100644
3707 +--- a/kernel/time/posix-cpu-timers.c
3708 ++++ b/kernel/time/posix-cpu-timers.c
3709 +@@ -685,6 +685,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
3710 + * set up the signal and overrun bookkeeping.
3711 + */
3712 + timer->it.cpu.incr = timespec64_to_ns(&new->it_interval);
3713 ++ timer->it_interval = ns_to_ktime(timer->it.cpu.incr);
3714 +
3715 + /*
3716 + * This acts as a modification timestamp for the timer,
3717 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
3718 +index 93e73ccb4dec..9e45553cabd6 100644
3719 +--- a/mm/page_alloc.c
3720 ++++ b/mm/page_alloc.c
3721 +@@ -5538,18 +5538,6 @@ not_early:
3722 + cond_resched();
3723 + }
3724 + }
3725 +-#ifdef CONFIG_SPARSEMEM
3726 +- /*
3727 +- * If the zone does not span the rest of the section then
3728 +- * we should at least initialize those pages. Otherwise we
3729 +- * could blow up on a poisoned page in some paths which depend
3730 +- * on full sections being initialized (e.g. memory hotplug).
3731 +- */
3732 +- while (end_pfn % PAGES_PER_SECTION) {
3733 +- __init_single_page(pfn_to_page(end_pfn), end_pfn, zone, nid);
3734 +- end_pfn++;
3735 +- }
3736 +-#endif
3737 + }
3738 +
3739 + static void __meminit zone_init_free_lists(struct zone *zone)
3740 +diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
3741 +index 2cb8da465b98..48ddc60b4fbd 100644
3742 +--- a/net/bridge/br_forward.c
3743 ++++ b/net/bridge/br_forward.c
3744 +@@ -36,10 +36,10 @@ static inline int should_deliver(const struct net_bridge_port *p,
3745 +
3746 + int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3747 + {
3748 ++ skb_push(skb, ETH_HLEN);
3749 + if (!is_skb_forwardable(skb->dev, skb))
3750 + goto drop;
3751 +
3752 +- skb_push(skb, ETH_HLEN);
3753 + br_drop_fake_rtable(skb);
3754 +
3755 + if (skb->ip_summed == CHECKSUM_PARTIAL &&
3756 +@@ -98,12 +98,11 @@ static void __br_forward(const struct net_bridge_port *to,
3757 + net = dev_net(indev);
3758 + } else {
3759 + if (unlikely(netpoll_tx_running(to->br->dev))) {
3760 +- if (!is_skb_forwardable(skb->dev, skb)) {
3761 ++ skb_push(skb, ETH_HLEN);
3762 ++ if (!is_skb_forwardable(skb->dev, skb))
3763 + kfree_skb(skb);
3764 +- } else {
3765 +- skb_push(skb, ETH_HLEN);
3766 ++ else
3767 + br_netpoll_send_skb(to, skb);
3768 +- }
3769 + return;
3770 + }
3771 + br_hook = NF_BR_LOCAL_OUT;
3772 +diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
3773 +index 96c072e71ea2..5811208863b7 100644
3774 +--- a/net/bridge/br_netfilter_ipv6.c
3775 ++++ b/net/bridge/br_netfilter_ipv6.c
3776 +@@ -131,6 +131,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb)
3777 + IPSTATS_MIB_INDISCARDS);
3778 + goto drop;
3779 + }
3780 ++ hdr = ipv6_hdr(skb);
3781 + }
3782 + if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb))
3783 + goto drop;
3784 +diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
3785 +index 08cbed7d940e..419e8edf23ba 100644
3786 +--- a/net/bridge/netfilter/nft_reject_bridge.c
3787 ++++ b/net/bridge/netfilter/nft_reject_bridge.c
3788 +@@ -229,6 +229,7 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
3789 + pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
3790 + return false;
3791 +
3792 ++ ip6h = ipv6_hdr(skb);
3793 + thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
3794 + if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
3795 + return false;
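Note: the two bridge hunks above, like the ip_input.c hunk further down, fix
the same class of bug. Helpers such as pskb_may_pull() and pskb_trim_rcsum()
may reallocate the skb head, so any header pointer fetched before the call is
stale afterwards. The safe pattern, sketched:

	if (!pskb_may_pull(skb, sizeof(*ip6h)))
		goto drop;
	ip6h = ipv6_hdr(skb);	/* re-read: skb->head may have moved */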
3796 +diff --git a/net/can/bcm.c b/net/can/bcm.c
3797 +index 0af8f0db892a..79bb8afa9c0c 100644
3798 +--- a/net/can/bcm.c
3799 ++++ b/net/can/bcm.c
3800 +@@ -67,6 +67,9 @@
3801 + */
3802 + #define MAX_NFRAMES 256
3803 +
3804 ++/* limit timers to 400 days for sending/timeouts */
3805 ++#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
3806 ++
3807 + /* use of last_frames[index].flags */
3808 + #define RX_RECV 0x40 /* received data for this element */
3809 + #define RX_THR 0x80 /* element not been sent due to throttle feature */
3810 +@@ -140,6 +143,22 @@ static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
3811 + return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
3812 + }
3813 +
3814 ++/* check limitations for timeval provided by user */
3815 ++static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
3816 ++{
3817 ++ if ((msg_head->ival1.tv_sec < 0) ||
3818 ++ (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
3819 ++ (msg_head->ival1.tv_usec < 0) ||
3820 ++ (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
3821 ++ (msg_head->ival2.tv_sec < 0) ||
3822 ++ (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
3823 ++ (msg_head->ival2.tv_usec < 0) ||
3824 ++ (msg_head->ival2.tv_usec >= USEC_PER_SEC))
3825 ++ return true;
3826 ++
3827 ++ return false;
3828 ++}
3829 ++
3830 + #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
3831 + #define OPSIZ sizeof(struct bcm_op)
3832 + #define MHSIZ sizeof(struct bcm_msg_head)
3833 +@@ -873,6 +892,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
3834 + if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
3835 + return -EINVAL;
3836 +
3837 ++ /* check timeval limitations */
3838 ++ if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
3839 ++ return -EINVAL;
3840 ++
3841 + /* check the given can_id */
3842 + op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
3843 + if (op) {
3844 +@@ -1053,6 +1076,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
3845 + (!(msg_head->can_id & CAN_RTR_FLAG))))
3846 + return -EINVAL;
3847 +
3848 ++ /* check timeval limitations */
3849 ++ if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
3850 ++ return -EINVAL;
3851 ++
3852 + /* check the given can_id */
3853 + op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
3854 + if (op) {
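Note on BCM_TIMER_SEC_MAX above: the 400-day cap keeps the subsequent ktime
conversion comfortably inside signed 64-bit nanoseconds. 400 * 24 * 60 * 60 =
34,560,000 s, i.e. about 3.46e16 ns, far below the ~9.22e18 ns (roughly 292
years) a ktime_t can represent, and bounding tv_usec to [0, USEC_PER_SEC)
keeps the tv_usec * NSEC_PER_USEC term from overflowing as well.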
3855 +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
3856 +index 0113993e9b2c..958e185a8e8d 100644
3857 +--- a/net/ipv4/fib_frontend.c
3858 ++++ b/net/ipv4/fib_frontend.c
3859 +@@ -203,7 +203,7 @@ static void fib_flush(struct net *net)
3860 + struct fib_table *tb;
3861 +
3862 + hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
3863 +- flushed += fib_table_flush(net, tb);
3864 ++ flushed += fib_table_flush(net, tb, false);
3865 + }
3866 +
3867 + if (flushed)
3868 +@@ -1357,7 +1357,7 @@ static void ip_fib_net_exit(struct net *net)
3869 +
3870 + hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
3871 + hlist_del(&tb->tb_hlist);
3872 +- fib_table_flush(net, tb);
3873 ++ fib_table_flush(net, tb, true);
3874 + fib_free_table(tb);
3875 + }
3876 + }
3877 +diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
3878 +index 5bc0c89e81e4..3955a6d7ea66 100644
3879 +--- a/net/ipv4/fib_trie.c
3880 ++++ b/net/ipv4/fib_trie.c
3881 +@@ -1856,7 +1856,7 @@ void fib_table_flush_external(struct fib_table *tb)
3882 + }
3883 +
3884 + /* Caller must hold RTNL. */
3885 +-int fib_table_flush(struct net *net, struct fib_table *tb)
3886 ++int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
3887 + {
3888 + struct trie *t = (struct trie *)tb->tb_data;
3889 + struct key_vector *pn = t->kv;
3890 +@@ -1904,8 +1904,17 @@ int fib_table_flush(struct net *net, struct fib_table *tb)
3891 + hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
3892 + struct fib_info *fi = fa->fa_info;
3893 +
3894 +- if (!fi || !(fi->fib_flags & RTNH_F_DEAD) ||
3895 +- tb->tb_id != fa->tb_id) {
3896 ++ if (!fi || tb->tb_id != fa->tb_id ||
3897 ++ (!(fi->fib_flags & RTNH_F_DEAD) &&
3898 ++ !fib_props[fa->fa_type].error)) {
3899 ++ slen = fa->fa_slen;
3900 ++ continue;
3901 ++ }
3902 ++
3903 ++ /* Do not flush error routes if network namespace is
3904 ++ * not being dismantled
3905 ++ */
3906 ++ if (!flush_all && fib_props[fa->fa_type].error) {
3907 + slen = fa->fa_slen;
3908 + continue;
3909 + }
3910 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
3911 +index 5ef5df3a06f1..0bfad3e72509 100644
3912 +--- a/net/ipv4/ip_gre.c
3913 ++++ b/net/ipv4/ip_gre.c
3914 +@@ -570,8 +570,7 @@ err_free_skb:
3915 + dev->stats.tx_dropped++;
3916 + }
3917 +
3918 +-static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
3919 +- __be16 proto)
3920 ++static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
3921 + {
3922 + struct ip_tunnel *tunnel = netdev_priv(dev);
3923 + struct ip_tunnel_info *tun_info;
3924 +@@ -579,10 +578,10 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
3925 + struct erspan_metadata *md;
3926 + struct rtable *rt = NULL;
3927 + bool truncate = false;
3928 ++ __be16 df, proto;
3929 + struct flowi4 fl;
3930 + int tunnel_hlen;
3931 + int version;
3932 +- __be16 df;
3933 + int nhoff;
3934 + int thoff;
3935 +
3936 +@@ -627,18 +626,20 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
3937 + if (version == 1) {
3938 + erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
3939 + ntohl(md->u.index), truncate, true);
3940 ++ proto = htons(ETH_P_ERSPAN);
3941 + } else if (version == 2) {
3942 + erspan_build_header_v2(skb,
3943 + ntohl(tunnel_id_to_key32(key->tun_id)),
3944 + md->u.md2.dir,
3945 + get_hwid(&md->u.md2),
3946 + truncate, true);
3947 ++ proto = htons(ETH_P_ERSPAN2);
3948 + } else {
3949 + goto err_free_rt;
3950 + }
3951 +
3952 + gre_build_header(skb, 8, TUNNEL_SEQ,
3953 +- htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));
3954 ++ proto, 0, htonl(tunnel->o_seqno++));
3955 +
3956 + df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
3957 +
3958 +@@ -722,12 +723,13 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
3959 + {
3960 + struct ip_tunnel *tunnel = netdev_priv(dev);
3961 + bool truncate = false;
3962 ++ __be16 proto;
3963 +
3964 + if (!pskb_inet_may_pull(skb))
3965 + goto free_skb;
3966 +
3967 + if (tunnel->collect_md) {
3968 +- erspan_fb_xmit(skb, dev, skb->protocol);
3969 ++ erspan_fb_xmit(skb, dev);
3970 + return NETDEV_TX_OK;
3971 + }
3972 +
3973 +@@ -743,19 +745,22 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
3974 + }
3975 +
3976 + /* Push ERSPAN header */
3977 +- if (tunnel->erspan_ver == 1)
3978 ++ if (tunnel->erspan_ver == 1) {
3979 + erspan_build_header(skb, ntohl(tunnel->parms.o_key),
3980 + tunnel->index,
3981 + truncate, true);
3982 +- else if (tunnel->erspan_ver == 2)
3983 ++ proto = htons(ETH_P_ERSPAN);
3984 ++ } else if (tunnel->erspan_ver == 2) {
3985 + erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
3986 + tunnel->dir, tunnel->hwid,
3987 + truncate, true);
3988 +- else
3989 ++ proto = htons(ETH_P_ERSPAN2);
3990 ++ } else {
3991 + goto free_skb;
3992 ++ }
3993 +
3994 + tunnel->parms.o_flags &= ~TUNNEL_KEY;
3995 +- __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
3996 ++ __gre_xmit(skb, dev, &tunnel->parms.iph, proto);
3997 + return NETDEV_TX_OK;
3998 +
3999 + free_skb:
4000 +diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
4001 +index 27c863f6dd83..6f977b0fef54 100644
4002 +--- a/net/ipv4/ip_input.c
4003 ++++ b/net/ipv4/ip_input.c
4004 +@@ -489,6 +489,7 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
4005 + goto drop;
4006 + }
4007 +
4008 ++ iph = ip_hdr(skb);
4009 + skb->transport_header = skb->network_header + iph->ihl*4;
4010 +
4011 + /* Remove any debris in the socket control block */
4012 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
4013 +index a32a0f4cc138..87fe44197aa1 100644
4014 +--- a/net/ipv4/tcp.c
4015 ++++ b/net/ipv4/tcp.c
4016 +@@ -1186,7 +1186,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
4017 + flags = msg->msg_flags;
4018 +
4019 + if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
4020 +- if (sk->sk_state != TCP_ESTABLISHED) {
4021 ++ if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
4022 + err = -EINVAL;
4023 + goto out_err;
4024 + }
4025 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
4026 +index f8183fdce5b2..e45a5e19e509 100644
4027 +--- a/net/ipv4/udp.c
4028 ++++ b/net/ipv4/udp.c
4029 +@@ -785,15 +785,23 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
4030 + const int hlen = skb_network_header_len(skb) +
4031 + sizeof(struct udphdr);
4032 +
4033 +- if (hlen + cork->gso_size > cork->fragsize)
4034 ++ if (hlen + cork->gso_size > cork->fragsize) {
4035 ++ kfree_skb(skb);
4036 + return -EINVAL;
4037 +- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
4038 ++ }
4039 ++ if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
4040 ++ kfree_skb(skb);
4041 + return -EINVAL;
4042 +- if (sk->sk_no_check_tx)
4043 ++ }
4044 ++ if (sk->sk_no_check_tx) {
4045 ++ kfree_skb(skb);
4046 + return -EINVAL;
4047 ++ }
4048 + if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
4049 +- dst_xfrm(skb_dst(skb)))
4050 ++ dst_xfrm(skb_dst(skb))) {
4051 ++ kfree_skb(skb);
4052 + return -EIO;
4053 ++ }
4054 +
4055 + skb_shinfo(skb)->gso_size = cork->gso_size;
4056 + skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
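Note on the udp_send_skb() hunk above (mirrored for IPv6 below): once called,
the function owns the skb, so the old bare "return -EINVAL" paths leaked the
buffer. The ownership rule, sketched with hypothetical names:

	/* every exit must either hand the skb off or free it */
	static int xmit_one(struct sk_buff *skb)
	{
		if (!checks_pass(skb)) {
			kfree_skb(skb);		/* error paths own the skb too */
			return -EINVAL;
		}
		return do_transmit(skb);	/* success path consumes it */
	}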
4057 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
4058 +index c270726b01b0..345e6839f031 100644
4059 +--- a/net/ipv6/ip6_gre.c
4060 ++++ b/net/ipv6/ip6_gre.c
4061 +@@ -938,6 +938,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
4062 + __u8 dsfield = false;
4063 + struct flowi6 fl6;
4064 + int err = -EINVAL;
4065 ++ __be16 proto;
4066 + __u32 mtu;
4067 + int nhoff;
4068 + int thoff;
4069 +@@ -1051,8 +1052,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
4070 + }
4071 +
4072 + /* Push GRE header. */
4073 +- gre_build_header(skb, 8, TUNNEL_SEQ,
4074 +- htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++));
4075 ++ proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
4076 ++ : htons(ETH_P_ERSPAN2);
4077 ++ gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
4078 +
4079 + /* TooBig packet may have updated dst->dev's mtu */
4080 + if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
4081 +@@ -1185,6 +1187,10 @@ static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
4082 + t->parms.i_flags = p->i_flags;
4083 + t->parms.o_flags = p->o_flags;
4084 + t->parms.fwmark = p->fwmark;
4085 ++ t->parms.erspan_ver = p->erspan_ver;
4086 ++ t->parms.index = p->index;
4087 ++ t->parms.dir = p->dir;
4088 ++ t->parms.hwid = p->hwid;
4089 + dst_cache_reset(&t->dst_cache);
4090 + }
4091 +
4092 +@@ -2047,9 +2053,9 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
4093 + struct nlattr *data[],
4094 + struct netlink_ext_ack *extack)
4095 + {
4096 +- struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
4097 ++ struct ip6_tnl *t = netdev_priv(dev);
4098 ++ struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
4099 + struct __ip6_tnl_parm p;
4100 +- struct ip6_tnl *t;
4101 +
4102 + t = ip6gre_changelink_common(dev, tb, data, &p, extack);
4103 + if (IS_ERR(t))
4104 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
4105 +index b36694b6716e..76ba2f34ef6b 100644
4106 +--- a/net/ipv6/udp.c
4107 ++++ b/net/ipv6/udp.c
4108 +@@ -1056,15 +1056,23 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
4109 + const int hlen = skb_network_header_len(skb) +
4110 + sizeof(struct udphdr);
4111 +
4112 +- if (hlen + cork->gso_size > cork->fragsize)
4113 ++ if (hlen + cork->gso_size > cork->fragsize) {
4114 ++ kfree_skb(skb);
4115 + return -EINVAL;
4116 +- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
4117 ++ }
4118 ++ if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
4119 ++ kfree_skb(skb);
4120 + return -EINVAL;
4121 +- if (udp_sk(sk)->no_check6_tx)
4122 ++ }
4123 ++ if (udp_sk(sk)->no_check6_tx) {
4124 ++ kfree_skb(skb);
4125 + return -EINVAL;
4126 ++ }
4127 + if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
4128 +- dst_xfrm(skb_dst(skb)))
4129 ++ dst_xfrm(skb_dst(skb))) {
4130 ++ kfree_skb(skb);
4131 + return -EIO;
4132 ++ }
4133 +
4134 + skb_shinfo(skb)->gso_size = cork->gso_size;
4135 + skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
4136 +diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
4137 +index 865ecef68196..c7b6010b2c09 100644
4138 +--- a/net/openvswitch/flow_netlink.c
4139 ++++ b/net/openvswitch/flow_netlink.c
4140 +@@ -500,7 +500,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
4141 + return -EINVAL;
4142 + }
4143 +
4144 +- if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
4145 ++ if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) {
4146 + attrs |= 1 << type;
4147 + a[type] = nla;
4148 + }
4149 +diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
4150 +index 681f6f04e7da..0f6601fdf889 100644
4151 +--- a/net/sched/act_tunnel_key.c
4152 ++++ b/net/sched/act_tunnel_key.c
4153 +@@ -197,6 +197,15 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
4154 + [TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 },
4155 + };
4156 +
4157 ++static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
4158 ++{
4159 ++ if (!p)
4160 ++ return;
4161 ++ if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
4162 ++ dst_release(&p->tcft_enc_metadata->dst);
4163 ++ kfree_rcu(p, rcu);
4164 ++}
4165 ++
4166 + static int tunnel_key_init(struct net *net, struct nlattr *nla,
4167 + struct nlattr *est, struct tc_action **a,
4168 + int ovr, int bind, bool rtnl_held,
4169 +@@ -360,8 +369,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
4170 + rcu_swap_protected(t->params, params_new,
4171 + lockdep_is_held(&t->tcf_lock));
4172 + spin_unlock_bh(&t->tcf_lock);
4173 +- if (params_new)
4174 +- kfree_rcu(params_new, rcu);
4175 ++ tunnel_key_release_params(params_new);
4176 +
4177 + if (ret == ACT_P_CREATED)
4178 + tcf_idr_insert(tn, *a);
4179 +@@ -385,12 +393,7 @@ static void tunnel_key_release(struct tc_action *a)
4180 + struct tcf_tunnel_key_params *params;
4181 +
4182 + params = rcu_dereference_protected(t->params, 1);
4183 +- if (params) {
4184 +- if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
4185 +- dst_release(&params->tcft_enc_metadata->dst);
4186 +-
4187 +- kfree_rcu(params, rcu);
4188 +- }
4189 ++ tunnel_key_release_params(params);
4190 + }
4191 +
4192 + static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
4193 +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
4194 +index 70f144ac5e1d..2167c6ca55e3 100644
4195 +--- a/net/sched/cls_api.c
4196 ++++ b/net/sched/cls_api.c
4197 +@@ -960,7 +960,6 @@ static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type,
4198 + int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
4199 + struct tcf_result *res, bool compat_mode)
4200 + {
4201 +- __be16 protocol = tc_skb_protocol(skb);
4202 + #ifdef CONFIG_NET_CLS_ACT
4203 + const int max_reclassify_loop = 4;
4204 + const struct tcf_proto *orig_tp = tp;
4205 +@@ -970,6 +969,7 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
4206 + reclassify:
4207 + #endif
4208 + for (; tp; tp = rcu_dereference_bh(tp->next)) {
4209 ++ __be16 protocol = tc_skb_protocol(skb);
4210 + int err;
4211 +
4212 + if (tp->protocol != protocol &&
4213 +@@ -1002,7 +1002,6 @@ reset:
4214 + }
4215 +
4216 + tp = first_tp;
4217 +- protocol = tc_skb_protocol(skb);
4218 + goto reclassify;
4219 + #endif
4220 + }
4221 +diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
4222 +index 7fade7107f95..84893bc67531 100644
4223 +--- a/net/sched/cls_flower.c
4224 ++++ b/net/sched/cls_flower.c
4225 +@@ -1176,17 +1176,23 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
4226 + struct cls_fl_head *head = rtnl_dereference(tp->root);
4227 + struct cls_fl_filter *fold = *arg;
4228 + struct cls_fl_filter *fnew;
4229 ++ struct fl_flow_mask *mask;
4230 + struct nlattr **tb;
4231 +- struct fl_flow_mask mask = {};
4232 + int err;
4233 +
4234 + if (!tca[TCA_OPTIONS])
4235 + return -EINVAL;
4236 +
4237 +- tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
4238 +- if (!tb)
4239 ++ mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
4240 ++ if (!mask)
4241 + return -ENOBUFS;
4242 +
4243 ++ tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
4244 ++ if (!tb) {
4245 ++ err = -ENOBUFS;
4246 ++ goto errout_mask_alloc;
4247 ++ }
4248 ++
4249 + err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
4250 + fl_policy, NULL);
4251 + if (err < 0)
4252 +@@ -1229,12 +1235,12 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
4253 + }
4254 + }
4255 +
4256 +- err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr,
4257 ++ err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
4258 + tp->chain->tmplt_priv, extack);
4259 + if (err)
4260 + goto errout_idr;
4261 +
4262 +- err = fl_check_assign_mask(head, fnew, fold, &mask);
4263 ++ err = fl_check_assign_mask(head, fnew, fold, mask);
4264 + if (err)
4265 + goto errout_idr;
4266 +
4267 +@@ -1281,6 +1287,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
4268 + }
4269 +
4270 + kfree(tb);
4271 ++ kfree(mask);
4272 + return 0;
4273 +
4274 + errout_mask:
4275 +@@ -1294,6 +1301,8 @@ errout:
4276 + kfree(fnew);
4277 + errout_tb:
4278 + kfree(tb);
4279 ++errout_mask_alloc:
4280 ++ kfree(mask);
4281 + return err;
4282 + }
4283 +
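Note on the cls_flower hunk above: struct fl_flow_mask embeds a full flow-key
mask and had grown to several hundred bytes (reportedly around 760 bytes on
x86_64), too large to keep on the small fixed-size kernel stack, so
fl_change() now kzalloc()s it and frees it on every exit path, including the
new errout_mask_alloc label covering the early-return cases.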
4284 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
4285 +index f9176e3b4d37..31a84a5a1338 100644
4286 +--- a/sound/pci/hda/patch_conexant.c
4287 ++++ b/sound/pci/hda/patch_conexant.c
4288 +@@ -931,6 +931,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
4289 + SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
4290 + SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
4291 + SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
4292 ++ SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
4293 + SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
4294 + SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
4295 + SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
4296 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4297 +index 8b9f2487969b..f39f34e12fb6 100644
4298 +--- a/sound/pci/hda/patch_realtek.c
4299 ++++ b/sound/pci/hda/patch_realtek.c
4300 +@@ -6842,7 +6842,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
4301 + {.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"},
4302 + {.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"},
4303 + {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"},
4304 +- {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc255-dell1"},
4305 ++ {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc225-dell1"},
4306 + {.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"},
4307 + {.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"},
4308 + {.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"},
4309 +diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
4310 +index 6478d10c4f4a..cdb1f40009ab 100644
4311 +--- a/sound/soc/codecs/rt5514-spi.c
4312 ++++ b/sound/soc/codecs/rt5514-spi.c
4313 +@@ -278,6 +278,8 @@ static int rt5514_spi_pcm_probe(struct snd_soc_component *component)
4314 +
4315 + rt5514_dsp = devm_kzalloc(component->dev, sizeof(*rt5514_dsp),
4316 + GFP_KERNEL);
4317 ++ if (!rt5514_dsp)
4318 ++ return -ENOMEM;
4319 +
4320 + rt5514_dsp->dev = &rt5514_spi->dev;
4321 + mutex_init(&rt5514_dsp->dma_lock);
4322 +diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
4323 +index e2b5a11b16d1..f03195d2ab2e 100644
4324 +--- a/sound/soc/codecs/tlv320aic32x4.c
4325 ++++ b/sound/soc/codecs/tlv320aic32x4.c
4326 +@@ -822,6 +822,10 @@ static int aic32x4_set_bias_level(struct snd_soc_component *component,
4327 + case SND_SOC_BIAS_PREPARE:
4328 + break;
4329 + case SND_SOC_BIAS_STANDBY:
4330 ++ /* Initial cold start */
4331 ++ if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF)
4332 ++ break;
4333 ++
4334 + /* Switch off BCLK_N Divider */
4335 + snd_soc_component_update_bits(component, AIC32X4_BCLKN,
4336 + AIC32X4_BCLKEN, 0);
4337 +diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
4338 +index 6c36da560877..e662400873ec 100644
4339 +--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
4340 ++++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
4341 +@@ -399,7 +399,13 @@ static int sst_media_hw_params(struct snd_pcm_substream *substream,
4342 + struct snd_pcm_hw_params *params,
4343 + struct snd_soc_dai *dai)
4344 + {
4345 +- snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
4346 ++ int ret;
4347 ++
4348 ++ ret =
4349 ++ snd_pcm_lib_malloc_pages(substream,
4350 ++ params_buffer_bytes(params));
4351 ++ if (ret)
4352 ++ return ret;
4353 + memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
4354 + return 0;
4355 + }
4356 +diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
4357 +index 460b4bdf4c1e..5d546dcdbc80 100644
4358 +--- a/tools/testing/selftests/x86/protection_keys.c
4359 ++++ b/tools/testing/selftests/x86/protection_keys.c
4360 +@@ -1133,6 +1133,21 @@ void test_pkey_syscalls_bad_args(int *ptr, u16 pkey)
4361 + pkey_assert(err);
4362 + }
4363 +
4364 ++void become_child(void)
4365 ++{
4366 ++ pid_t forkret;
4367 ++
4368 ++ forkret = fork();
4369 ++ pkey_assert(forkret >= 0);
4370 ++ dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
4371 ++
4372 ++ if (!forkret) {
4373 ++ /* in the child */
4374 ++ return;
4375 ++ }
4376 ++ exit(0);
4377 ++}
4378 ++
4379 + /* Assumes that all pkeys other than 'pkey' are unallocated */
4380 + void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
4381 + {
4382 +@@ -1141,7 +1156,7 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
4383 + int nr_allocated_pkeys = 0;
4384 + int i;
4385 +
4386 +- for (i = 0; i < NR_PKEYS*2; i++) {
4387 ++ for (i = 0; i < NR_PKEYS*3; i++) {
4388 + int new_pkey;
4389 + dprintf1("%s() alloc loop: %d\n", __func__, i);
4390 + new_pkey = alloc_pkey();
4391 +@@ -1152,20 +1167,26 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
4392 + if ((new_pkey == -1) && (errno == ENOSPC)) {
4393 + dprintf2("%s() failed to allocate pkey after %d tries\n",
4394 + __func__, nr_allocated_pkeys);
4395 +- break;
4396 ++ } else {
4397 ++ /*
4398 ++ * Ensure the number of successes never
4399 ++ * exceeds the number of keys supported
4400 ++ * in the hardware.
4401 ++ */
4402 ++ pkey_assert(nr_allocated_pkeys < NR_PKEYS);
4403 ++ allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
4404 + }
4405 +- pkey_assert(nr_allocated_pkeys < NR_PKEYS);
4406 +- allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
4407 ++
4408 ++ /*
4409 ++ * Make sure that allocation state is properly
4410 ++ * preserved across fork().
4411 ++ */
4412 ++ if (i == NR_PKEYS*2)
4413 ++ become_child();
4414 + }
4415 +
4416 + dprintf3("%s()::%d\n", __func__, __LINE__);
4417 +
4418 +- /*
4419 +- * ensure it did not reach the end of the loop without
4420 +- * failure:
4421 +- */
4422 +- pkey_assert(i < NR_PKEYS*2);
4423 +-
4424 + /*
4425 + * There are 16 pkeys supported in hardware. Three are
4426 + * allocated by the time we get here: