From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.20 commit in: /
Date: Thu, 31 Jan 2019 11:29:03
Message-Id: 1548934120.10480842ab2ac5741568de118fa75c300f53c7f5.mpagano@gentoo
commit: 10480842ab2ac5741568de118fa75c300f53c7f5
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jan 31 11:28:40 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jan 31 11:28:40 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=10480842ab2ac5741568de118fa75c300f53c7f5
7
8 proj/linux-patches: Linux patch 4.20.6
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1005_linux-4.20.6.patch | 4954 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 4958 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 32a3dd6..8bd4163 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -63,6 +63,10 @@ Patch: 1004_linux-4.20.5.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.20.5
23
24 +Patch: 1005_linux-4.20.6.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.20.6
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1005_linux-4.20.6.patch b/1005_linux-4.20.6.patch
33 new file mode 100644
34 index 0000000..231d7f3
35 --- /dev/null
36 +++ b/1005_linux-4.20.6.patch
37 @@ -0,0 +1,4954 @@
38 +diff --git a/Makefile b/Makefile
39 +index 690f6a9d9f1b..523922ea9c97 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 20
46 +-SUBLEVEL = 5
47 ++SUBLEVEL = 6
48 + EXTRAVERSION =
49 + NAME = Shy Crocodile
50 +
51 +diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
52 +index 9185541035cc..6958545390f0 100644
53 +--- a/arch/arc/include/asm/perf_event.h
54 ++++ b/arch/arc/include/asm/perf_event.h
55 +@@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = {
56 +
57 + /* counts condition */
58 + [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
59 +- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
60 ++ /* All jump instructions that are taken */
61 ++ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
62 + [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
63 + #ifdef CONFIG_ISA_ARCV2
64 + [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
65 +diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
66 +index 62ad4bcb841a..f230bb7092fd 100644
67 +--- a/arch/arc/lib/memset-archs.S
68 ++++ b/arch/arc/lib/memset-archs.S
69 +@@ -7,11 +7,39 @@
70 + */
71 +
72 + #include <linux/linkage.h>
73 ++#include <asm/cache.h>
74 +
75 +-#undef PREALLOC_NOT_AVAIL
76 ++/*
77 ++ * The memset implementation below is optimized to use prefetchw and prealloc
78 ++ * instruction in case of CPU with 64B L1 data cache line (L1_CACHE_SHIFT == 6)
79 ++ * If you want to implement optimized memset for other possible L1 data cache
80 ++ * line lengths (32B and 128B) you should rewrite code carefully checking
81 ++ * we don't call any prefetchw/prealloc instruction for L1 cache lines which
82 ++ * don't belongs to memset area.
83 ++ */
84 ++
85 ++#if L1_CACHE_SHIFT == 6
86 ++
87 ++.macro PREALLOC_INSTR reg, off
88 ++ prealloc [\reg, \off]
89 ++.endm
90 ++
91 ++.macro PREFETCHW_INSTR reg, off
92 ++ prefetchw [\reg, \off]
93 ++.endm
94 ++
95 ++#else
96 ++
97 ++.macro PREALLOC_INSTR
98 ++.endm
99 ++
100 ++.macro PREFETCHW_INSTR
101 ++.endm
102 ++
103 ++#endif
104 +
105 + ENTRY_CFI(memset)
106 +- prefetchw [r0] ; Prefetch the write location
107 ++ PREFETCHW_INSTR r0, 0 ; Prefetch the first write location
108 + mov.f 0, r2
109 + ;;; if size is zero
110 + jz.d [blink]
111 +@@ -48,11 +76,8 @@ ENTRY_CFI(memset)
112 +
113 + lpnz @.Lset64bytes
114 + ;; LOOP START
115 +-#ifdef PREALLOC_NOT_AVAIL
116 +- prefetchw [r3, 64] ;Prefetch the next write location
117 +-#else
118 +- prealloc [r3, 64]
119 +-#endif
120 ++ PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching
121 ++
122 + #ifdef CONFIG_ARC_HAS_LL64
123 + std.ab r4, [r3, 8]
124 + std.ab r4, [r3, 8]
125 +@@ -85,7 +110,6 @@ ENTRY_CFI(memset)
126 + lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
127 + lpnz .Lset32bytes
128 + ;; LOOP START
129 +- prefetchw [r3, 32] ;Prefetch the next write location
130 + #ifdef CONFIG_ARC_HAS_LL64
131 + std.ab r4, [r3, 8]
132 + std.ab r4, [r3, 8]
133 +diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
134 +index f8fe5668b30f..a56e6a8ed259 100644
135 +--- a/arch/arc/mm/init.c
136 ++++ b/arch/arc/mm/init.c
137 +@@ -137,7 +137,8 @@ void __init setup_arch_memory(void)
138 + */
139 +
140 + memblock_add_node(low_mem_start, low_mem_sz, 0);
141 +- memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
142 ++ memblock_reserve(CONFIG_LINUX_LINK_BASE,
143 ++ __pa(_end) - CONFIG_LINUX_LINK_BASE);
144 +
145 + #ifdef CONFIG_BLK_DEV_INITRD
146 + if (initrd_start)
147 +diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
148 +index 19516fbc2c55..5461d589a1e2 100644
149 +--- a/arch/arm/mm/proc-macros.S
150 ++++ b/arch/arm/mm/proc-macros.S
151 +@@ -278,7 +278,7 @@
152 + * If we are building for big.Little with branch predictor hardening,
153 + * we need the processor function tables to remain available after boot.
154 + */
155 +-#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
156 ++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
157 + .section ".rodata"
158 + #endif
159 + .type \name\()_processor_functions, #object
160 +@@ -316,7 +316,7 @@ ENTRY(\name\()_processor_functions)
161 + .endif
162 +
163 + .size \name\()_processor_functions, . - \name\()_processor_functions
164 +-#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
165 ++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
166 + .previous
167 + #endif
168 + .endm
169 +diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
170 +index ccbb53e22024..8d04e6f3f796 100644
171 +--- a/arch/s390/include/asm/mmu_context.h
172 ++++ b/arch/s390/include/asm/mmu_context.h
173 +@@ -25,7 +25,7 @@ static inline int init_new_context(struct task_struct *tsk,
174 + atomic_set(&mm->context.flush_count, 0);
175 + mm->context.gmap_asce = 0;
176 + mm->context.flush_mm = 0;
177 +- mm->context.compat_mm = 0;
178 ++ mm->context.compat_mm = test_thread_flag(TIF_31BIT);
179 + #ifdef CONFIG_PGSTE
180 + mm->context.alloc_pgste = page_table_allocate_pgste ||
181 + test_thread_flag(TIF_PGSTE) ||
182 +@@ -90,8 +90,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
183 + {
184 + int cpu = smp_processor_id();
185 +
186 +- if (prev == next)
187 +- return;
188 + S390_lowcore.user_asce = next->context.asce;
189 + cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
190 + /* Clear previous user-ASCE from CR1 and CR7 */
191 +@@ -103,7 +101,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
192 + __ctl_load(S390_lowcore.vdso_asce, 7, 7);
193 + clear_cpu_flag(CIF_ASCE_SECONDARY);
194 + }
195 +- cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
196 ++ if (prev != next)
197 ++ cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
198 + }
199 +
200 + #define finish_arch_post_lock_switch finish_arch_post_lock_switch
201 +diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
202 +index af5c2b3f7065..a8c7789b246b 100644
203 +--- a/arch/s390/kernel/early.c
204 ++++ b/arch/s390/kernel/early.c
205 +@@ -63,10 +63,10 @@ static noinline __init void detect_machine_type(void)
206 + if (stsi(vmms, 3, 2, 2) || !vmms->count)
207 + return;
208 +
209 +- /* Running under KVM? If not we assume z/VM */
210 ++ /* Detect known hypervisors */
211 + if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
212 + S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
213 +- else
214 ++ else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
215 + S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
216 + }
217 +
218 +diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
219 +index 72dd23ef771b..7ed90a759135 100644
220 +--- a/arch/s390/kernel/setup.c
221 ++++ b/arch/s390/kernel/setup.c
222 +@@ -1006,6 +1006,8 @@ void __init setup_arch(char **cmdline_p)
223 + pr_info("Linux is running under KVM in 64-bit mode\n");
224 + else if (MACHINE_IS_LPAR)
225 + pr_info("Linux is running natively in 64-bit mode\n");
226 ++ else
227 ++ pr_info("Linux is running as a guest in 64-bit mode\n");
228 +
229 + /* Have one command line that is parsed and saved in /proc/cmdline */
230 + /* boot_command_line has been already set up in early.c */
231 +diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
232 +index f82b3d3c36e2..b198ece2aad6 100644
233 +--- a/arch/s390/kernel/smp.c
234 ++++ b/arch/s390/kernel/smp.c
235 +@@ -381,8 +381,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
236 + */
237 + void smp_call_ipl_cpu(void (*func)(void *), void *data)
238 + {
239 ++ struct lowcore *lc = pcpu_devices->lowcore;
240 ++
241 ++ if (pcpu_devices[0].address == stap())
242 ++ lc = &S390_lowcore;
243 ++
244 + pcpu_delegate(&pcpu_devices[0], func, data,
245 +- pcpu_devices->lowcore->nodat_stack);
246 ++ lc->nodat_stack);
247 + }
248 +
249 + int smp_find_processor_id(u16 address)
250 +@@ -1166,7 +1171,11 @@ static ssize_t __ref rescan_store(struct device *dev,
251 + {
252 + int rc;
253 +
254 ++ rc = lock_device_hotplug_sysfs();
255 ++ if (rc)
256 ++ return rc;
257 + rc = smp_rescan_cpus();
258 ++ unlock_device_hotplug();
259 + return rc ? rc : count;
260 + }
261 + static DEVICE_ATTR_WO(rescan);
262 +diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
263 +index ebe748a9f472..4ff354887db4 100644
264 +--- a/arch/s390/kernel/vdso.c
265 ++++ b/arch/s390/kernel/vdso.c
266 +@@ -224,10 +224,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
267 +
268 + vdso_pages = vdso64_pages;
269 + #ifdef CONFIG_COMPAT
270 +- if (is_compat_task()) {
271 ++ mm->context.compat_mm = is_compat_task();
272 ++ if (mm->context.compat_mm)
273 + vdso_pages = vdso32_pages;
274 +- mm->context.compat_mm = 1;
275 +- }
276 + #endif
277 + /*
278 + * vDSO has a problem and was disabled, just don't "enable" it for
279 +diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
280 +index 8eaf8952c408..39913770a44d 100644
281 +--- a/arch/x86/entry/entry_64_compat.S
282 ++++ b/arch/x86/entry/entry_64_compat.S
283 +@@ -361,7 +361,8 @@ ENTRY(entry_INT80_compat)
284 +
285 + /* Need to switch before accessing the thread stack. */
286 + SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
287 +- movq %rsp, %rdi
288 ++ /* In the Xen PV case we already run on the thread stack. */
289 ++ ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
290 + movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
291 +
292 + pushq 6*8(%rdi) /* regs->ss */
293 +@@ -370,8 +371,9 @@ ENTRY(entry_INT80_compat)
294 + pushq 3*8(%rdi) /* regs->cs */
295 + pushq 2*8(%rdi) /* regs->ip */
296 + pushq 1*8(%rdi) /* regs->orig_ax */
297 +-
298 + pushq (%rdi) /* pt_regs->di */
299 ++.Lint80_keep_stack:
300 ++
301 + pushq %rsi /* pt_regs->si */
302 + xorl %esi, %esi /* nospec si */
303 + pushq %rdx /* pt_regs->dx */
304 +diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
305 +index 0ca50611e8ce..19d18fae6ec6 100644
306 +--- a/arch/x86/include/asm/mmu_context.h
307 ++++ b/arch/x86/include/asm/mmu_context.h
308 +@@ -178,6 +178,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
309 +
310 + void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
311 +
312 ++/*
313 ++ * Init a new mm. Used on mm copies, like at fork()
314 ++ * and on mm's that are brand-new, like at execve().
315 ++ */
316 + static inline int init_new_context(struct task_struct *tsk,
317 + struct mm_struct *mm)
318 + {
319 +@@ -228,8 +232,22 @@ do { \
320 + } while (0)
321 + #endif
322 +
323 ++static inline void arch_dup_pkeys(struct mm_struct *oldmm,
324 ++ struct mm_struct *mm)
325 ++{
326 ++#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
327 ++ if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
328 ++ return;
329 ++
330 ++ /* Duplicate the oldmm pkey state in mm: */
331 ++ mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
332 ++ mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
333 ++#endif
334 ++}
335 ++
336 + static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
337 + {
338 ++ arch_dup_pkeys(oldmm, mm);
339 + paravirt_arch_dup_mmap(oldmm, mm);
340 + return ldt_dup_context(oldmm, mm);
341 + }
342 +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
343 +index ba4bfb7f6a36..5c93a65ee1e5 100644
344 +--- a/arch/x86/kernel/kvm.c
345 ++++ b/arch/x86/kernel/kvm.c
346 +@@ -457,6 +457,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
347 + #else
348 + u64 ipi_bitmap = 0;
349 + #endif
350 ++ long ret;
351 +
352 + if (cpumask_empty(mask))
353 + return;
354 +@@ -482,8 +483,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
355 + } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
356 + max = apic_id < max ? max : apic_id;
357 + } else {
358 +- kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
359 ++ ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
360 + (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
361 ++ WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
362 + min = max = apic_id;
363 + ipi_bitmap = 0;
364 + }
365 +@@ -491,8 +493,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
366 + }
367 +
368 + if (ipi_bitmap) {
369 +- kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
370 ++ ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
371 + (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
372 ++ WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
373 + }
374 +
375 + local_irq_restore(flags);
376 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
377 +index 95784bc4a53c..5a2c87552122 100644
378 +--- a/arch/x86/kvm/vmx.c
379 ++++ b/arch/x86/kvm/vmx.c
380 +@@ -8315,11 +8315,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
381 + if (r < 0)
382 + goto out_vmcs02;
383 +
384 +- vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
385 ++ vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
386 + if (!vmx->nested.cached_vmcs12)
387 + goto out_cached_vmcs12;
388 +
389 +- vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
390 ++ vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
391 + if (!vmx->nested.cached_shadow_vmcs12)
392 + goto out_cached_shadow_vmcs12;
393 +
394 +@@ -14853,13 +14853,17 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
395 + copy_shadow_to_vmcs12(vmx);
396 + }
397 +
398 +- if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
399 ++ /*
400 ++ * Copy over the full allocated size of vmcs12 rather than just the size
401 ++ * of the struct.
402 ++ */
403 ++ if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
404 + return -EFAULT;
405 +
406 + if (nested_cpu_has_shadow_vmcs(vmcs12) &&
407 + vmcs12->vmcs_link_pointer != -1ull) {
408 + if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
409 +- get_shadow_vmcs12(vcpu), sizeof(*vmcs12)))
410 ++ get_shadow_vmcs12(vcpu), VMCS12_SIZE))
411 + return -EFAULT;
412 + }
413 +
414 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
415 +index f049ecfac7bb..4247cb230bd3 100644
416 +--- a/arch/x86/kvm/x86.c
417 ++++ b/arch/x86/kvm/x86.c
418 +@@ -6407,8 +6407,7 @@ restart:
419 + toggle_interruptibility(vcpu, ctxt->interruptibility);
420 + vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
421 + kvm_rip_write(vcpu, ctxt->eip);
422 +- if (r == EMULATE_DONE &&
423 +- (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
424 ++ if (r == EMULATE_DONE && ctxt->tf)
425 + kvm_vcpu_do_singlestep(vcpu, &r);
426 + if (!ctxt->have_exception ||
427 + exception_type(ctxt->exception.vector) == EXCPT_TRAP)
428 +@@ -6998,10 +6997,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
429 + case KVM_HC_CLOCK_PAIRING:
430 + ret = kvm_pv_clock_pairing(vcpu, a0, a1);
431 + break;
432 ++#endif
433 + case KVM_HC_SEND_IPI:
434 + ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
435 + break;
436 +-#endif
437 + default:
438 + ret = -KVM_ENOSYS;
439 + break;
440 +diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
441 +index 79778ab200e4..a53665116458 100644
442 +--- a/arch/x86/lib/kaslr.c
443 ++++ b/arch/x86/lib/kaslr.c
444 +@@ -36,8 +36,8 @@ static inline u16 i8254(void)
445 + u16 status, timer;
446 +
447 + do {
448 +- outb(I8254_PORT_CONTROL,
449 +- I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
450 ++ outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
451 ++ I8254_PORT_CONTROL);
452 + status = inb(I8254_PORT_COUNTER0);
453 + timer = inb(I8254_PORT_COUNTER0);
454 + timer |= inb(I8254_PORT_COUNTER0) << 8;
455 +diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
456 +index 5912d30020c7..8535e7999769 100644
457 +--- a/drivers/acpi/nfit/core.c
458 ++++ b/drivers/acpi/nfit/core.c
459 +@@ -394,6 +394,32 @@ static u8 nfit_dsm_revid(unsigned family, unsigned func)
460 + return id;
461 + }
462 +
463 ++static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
464 ++ struct nd_cmd_pkg *call_pkg)
465 ++{
466 ++ if (call_pkg) {
467 ++ int i;
468 ++
469 ++ if (nfit_mem->family != call_pkg->nd_family)
470 ++ return -ENOTTY;
471 ++
472 ++ for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
473 ++ if (call_pkg->nd_reserved2[i])
474 ++ return -EINVAL;
475 ++ return call_pkg->nd_command;
476 ++ }
477 ++
478 ++ /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
479 ++ if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
480 ++ return cmd;
481 ++
482 ++ /*
483 ++ * Force function number validation to fail since 0 is never
484 ++ * published as a valid function in dsm_mask.
485 ++ */
486 ++ return 0;
487 ++}
488 ++
489 + int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
490 + unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
491 + {
492 +@@ -407,30 +433,23 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
493 + unsigned long cmd_mask, dsm_mask;
494 + u32 offset, fw_status = 0;
495 + acpi_handle handle;
496 +- unsigned int func;
497 + const guid_t *guid;
498 +- int rc, i;
499 ++ int func, rc, i;
500 +
501 + if (cmd_rc)
502 + *cmd_rc = -EINVAL;
503 +- func = cmd;
504 +- if (cmd == ND_CMD_CALL) {
505 +- call_pkg = buf;
506 +- func = call_pkg->nd_command;
507 +-
508 +- for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
509 +- if (call_pkg->nd_reserved2[i])
510 +- return -EINVAL;
511 +- }
512 +
513 + if (nvdimm) {
514 + struct acpi_device *adev = nfit_mem->adev;
515 +
516 + if (!adev)
517 + return -ENOTTY;
518 +- if (call_pkg && nfit_mem->family != call_pkg->nd_family)
519 +- return -ENOTTY;
520 +
521 ++ if (cmd == ND_CMD_CALL)
522 ++ call_pkg = buf;
523 ++ func = cmd_to_func(nfit_mem, cmd, call_pkg);
524 ++ if (func < 0)
525 ++ return func;
526 + dimm_name = nvdimm_name(nvdimm);
527 + cmd_name = nvdimm_cmd_name(cmd);
528 + cmd_mask = nvdimm_cmd_mask(nvdimm);
529 +@@ -441,6 +460,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
530 + } else {
531 + struct acpi_device *adev = to_acpi_dev(acpi_desc);
532 +
533 ++ func = cmd;
534 + cmd_name = nvdimm_bus_cmd_name(cmd);
535 + cmd_mask = nd_desc->cmd_mask;
536 + dsm_mask = cmd_mask;
537 +@@ -455,7 +475,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
538 + if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
539 + return -ENOTTY;
540 +
541 +- if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
542 ++ /*
543 ++ * Check for a valid command. For ND_CMD_CALL, we also have to
544 ++ * make sure that the DSM function is supported.
545 ++ */
546 ++ if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
547 ++ return -ENOTTY;
548 ++ else if (!test_bit(cmd, &cmd_mask))
549 + return -ENOTTY;
550 +
551 + in_obj.type = ACPI_TYPE_PACKAGE;
552 +@@ -1844,6 +1870,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
553 + return 0;
554 + }
555 +
556 ++ /*
557 ++ * Function 0 is the command interrogation function, don't
558 ++ * export it to potential userspace use, and enable it to be
559 ++ * used as an error value in acpi_nfit_ctl().
560 ++ */
561 ++ dsm_mask &= ~1UL;
562 ++
563 + guid = to_nfit_uuid(nfit_mem->family);
564 + for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
565 + if (acpi_check_dsm(adev_dimm->handle, guid,
566 +diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
567 +index b5e3103c1175..e43c876a9223 100644
568 +--- a/drivers/char/mwave/mwavedd.c
569 ++++ b/drivers/char/mwave/mwavedd.c
570 +@@ -59,6 +59,7 @@
571 + #include <linux/mutex.h>
572 + #include <linux/delay.h>
573 + #include <linux/serial_8250.h>
574 ++#include <linux/nospec.h>
575 + #include "smapi.h"
576 + #include "mwavedd.h"
577 + #include "3780i.h"
578 +@@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
579 + ipcnum);
580 + return -EINVAL;
581 + }
582 ++ ipcnum = array_index_nospec(ipcnum,
583 ++ ARRAY_SIZE(pDrvData->IPCs));
584 + PRINTK_3(TRACE_MWAVE,
585 + "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
586 + " ipcnum %x entry usIntCount %x\n",
587 +@@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
588 + " Invalid ipcnum %x\n", ipcnum);
589 + return -EINVAL;
590 + }
591 ++ ipcnum = array_index_nospec(ipcnum,
592 ++ ARRAY_SIZE(pDrvData->IPCs));
593 + PRINTK_3(TRACE_MWAVE,
594 + "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
595 + " ipcnum %x, usIntCount %x\n",
596 +@@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
597 + ipcnum);
598 + return -EINVAL;
599 + }
600 ++ ipcnum = array_index_nospec(ipcnum,
601 ++ ARRAY_SIZE(pDrvData->IPCs));
602 + mutex_lock(&mwave_mutex);
603 + if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
604 + pDrvData->IPCs[ipcnum].bIsEnabled = false;
605 +diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
606 +index 2d5d8b43727e..c4d0b6f6abf2 100644
607 +--- a/drivers/clk/socfpga/clk-pll-s10.c
608 ++++ b/drivers/clk/socfpga/clk-pll-s10.c
609 +@@ -43,7 +43,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
610 + /* Read mdiv and fdiv from the fdbck register */
611 + reg = readl(socfpgaclk->hw.reg + 0x4);
612 + mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT;
613 +- vco_freq = (unsigned long long)parent_rate * (mdiv + 6);
614 ++ vco_freq = (unsigned long long)vco_freq * (mdiv + 6);
615 +
616 + return (unsigned long)vco_freq;
617 + }
618 +diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
619 +index 5b238fc314ac..8281dfbf38c2 100644
620 +--- a/drivers/clk/socfpga/clk-s10.c
621 ++++ b/drivers/clk/socfpga/clk-s10.c
622 +@@ -12,17 +12,17 @@
623 +
624 + #include "stratix10-clk.h"
625 +
626 +-static const char * const pll_mux[] = { "osc1", "cb_intosc_hs_div2_clk",
627 +- "f2s_free_clk",};
628 ++static const char * const pll_mux[] = { "osc1", "cb-intosc-hs-div2-clk",
629 ++ "f2s-free-clk",};
630 + static const char * const cntr_mux[] = { "main_pll", "periph_pll",
631 +- "osc1", "cb_intosc_hs_div2_clk",
632 +- "f2s_free_clk"};
633 +-static const char * const boot_mux[] = { "osc1", "cb_intosc_hs_div2_clk",};
634 ++ "osc1", "cb-intosc-hs-div2-clk",
635 ++ "f2s-free-clk"};
636 ++static const char * const boot_mux[] = { "osc1", "cb-intosc-hs-div2-clk",};
637 +
638 + static const char * const noc_free_mux[] = {"main_noc_base_clk",
639 + "peri_noc_base_clk",
640 +- "osc1", "cb_intosc_hs_div2_clk",
641 +- "f2s_free_clk"};
642 ++ "osc1", "cb-intosc-hs-div2-clk",
643 ++ "f2s-free-clk"};
644 +
645 + static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"};
646 + static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"};
647 +@@ -33,14 +33,14 @@ static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk"
648 + static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"};
649 + static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",};
650 +
651 +-static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"};
652 ++static const char * const s2f_usr0_mux[] = {"f2s-free-clk", "boot_clk"};
653 + static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"};
654 + static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"};
655 +
656 + static const char * const mpu_free_mux[] = {"main_mpu_base_clk",
657 + "peri_mpu_base_clk",
658 +- "osc1", "cb_intosc_hs_div2_clk",
659 +- "f2s_free_clk"};
660 ++ "osc1", "cb-intosc-hs-div2-clk",
661 ++ "f2s-free-clk"};
662 +
663 + /* clocks in AO (always on) controller */
664 + static const struct stratix10_pll_clock s10_pll_clks[] = {
665 +diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
666 +index f65cc0ff76ab..b0908ec62f73 100644
667 +--- a/drivers/clk/zynqmp/clkc.c
668 ++++ b/drivers/clk/zynqmp/clkc.c
669 +@@ -669,8 +669,8 @@ static int zynqmp_clk_setup(struct device_node *np)
670 + if (ret)
671 + return ret;
672 +
673 +- zynqmp_data = kzalloc(sizeof(*zynqmp_data) + sizeof(*zynqmp_data) *
674 +- clock_max_idx, GFP_KERNEL);
675 ++ zynqmp_data = kzalloc(struct_size(zynqmp_data, hws, clock_max_idx),
676 ++ GFP_KERNEL);
677 + if (!zynqmp_data)
678 + return -ENOMEM;
679 +
680 +diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
681 +index 4213cb0bb2a7..f8664bac9fa8 100644
682 +--- a/drivers/edac/altera_edac.h
683 ++++ b/drivers/edac/altera_edac.h
684 +@@ -295,8 +295,8 @@ struct altr_sdram_mc_data {
685 + #define S10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0
686 +
687 + /* Sticky registers for Uncorrected Errors */
688 +-#define S10_SYSMGR_UE_VAL_OFST 0x120
689 +-#define S10_SYSMGR_UE_ADDR_OFST 0x124
690 ++#define S10_SYSMGR_UE_VAL_OFST 0x220
691 ++#define S10_SYSMGR_UE_ADDR_OFST 0x224
692 +
693 + #define S10_DDR0_IRQ_MASK BIT(16)
694 +
695 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
696 +index a028661d9e20..92b11de19581 100644
697 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
698 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
699 +@@ -576,6 +576,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
700 + { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
701 + { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
702 + { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
703 ++ { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
704 + { 0, 0, 0, 0, 0 },
705 + };
706 +
707 +diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
708 +index 191b314f9e9e..709475d5cc30 100644
709 +--- a/drivers/gpu/drm/meson/meson_crtc.c
710 ++++ b/drivers/gpu/drm/meson/meson_crtc.c
711 +@@ -45,7 +45,6 @@ struct meson_crtc {
712 + struct drm_crtc base;
713 + struct drm_pending_vblank_event *event;
714 + struct meson_drm *priv;
715 +- bool enabled;
716 + };
717 + #define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
718 +
719 +@@ -81,7 +80,8 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
720 +
721 + };
722 +
723 +-static void meson_crtc_enable(struct drm_crtc *crtc)
724 ++static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
725 ++ struct drm_crtc_state *old_state)
726 + {
727 + struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
728 + struct drm_crtc_state *crtc_state = crtc->state;
729 +@@ -103,20 +103,6 @@ static void meson_crtc_enable(struct drm_crtc *crtc)
730 +
731 + drm_crtc_vblank_on(crtc);
732 +
733 +- meson_crtc->enabled = true;
734 +-}
735 +-
736 +-static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
737 +- struct drm_crtc_state *old_state)
738 +-{
739 +- struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
740 +- struct meson_drm *priv = meson_crtc->priv;
741 +-
742 +- DRM_DEBUG_DRIVER("\n");
743 +-
744 +- if (!meson_crtc->enabled)
745 +- meson_crtc_enable(crtc);
746 +-
747 + priv->viu.osd1_enabled = true;
748 + }
749 +
750 +@@ -142,8 +128,6 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
751 +
752 + crtc->state->event = NULL;
753 + }
754 +-
755 +- meson_crtc->enabled = false;
756 + }
757 +
758 + static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
759 +@@ -152,9 +136,6 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
760 + struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
761 + unsigned long flags;
762 +
763 +- if (crtc->state->enable && !meson_crtc->enabled)
764 +- meson_crtc_enable(crtc);
765 +-
766 + if (crtc->state->event) {
767 + WARN_ON(drm_crtc_vblank_get(crtc) != 0);
768 +
769 +diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
770 +index d3443125e661..bf5f294f172f 100644
771 +--- a/drivers/gpu/drm/meson/meson_drv.c
772 ++++ b/drivers/gpu/drm/meson/meson_drv.c
773 +@@ -82,6 +82,10 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = {
774 + .fb_create = drm_gem_fb_create,
775 + };
776 +
777 ++static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = {
778 ++ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
779 ++};
780 ++
781 + static irqreturn_t meson_irq(int irq, void *arg)
782 + {
783 + struct drm_device *dev = arg;
784 +@@ -246,6 +250,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
785 + drm->mode_config.max_width = 3840;
786 + drm->mode_config.max_height = 2160;
787 + drm->mode_config.funcs = &meson_mode_config_funcs;
788 ++ drm->mode_config.helper_private = &meson_mode_config_helpers;
789 +
790 + /* Hardware Initialization */
791 +
792 +diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
793 +index fe00b12e4417..bea4c9850247 100644
794 +--- a/drivers/hv/channel.c
795 ++++ b/drivers/hv/channel.c
796 +@@ -701,20 +701,12 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
797 + int vmbus_disconnect_ring(struct vmbus_channel *channel)
798 + {
799 + struct vmbus_channel *cur_channel, *tmp;
800 +- unsigned long flags;
801 +- LIST_HEAD(list);
802 + int ret;
803 +
804 + if (channel->primary_channel != NULL)
805 + return -EINVAL;
806 +
807 +- /* Snapshot the list of subchannels */
808 +- spin_lock_irqsave(&channel->lock, flags);
809 +- list_splice_init(&channel->sc_list, &list);
810 +- channel->num_sc = 0;
811 +- spin_unlock_irqrestore(&channel->lock, flags);
812 +-
813 +- list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) {
814 ++ list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) {
815 + if (cur_channel->rescind)
816 + wait_for_completion(&cur_channel->rescind_event);
817 +
818 +diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
819 +index edd34c167a9b..d01689079e9b 100644
820 +--- a/drivers/hv/channel_mgmt.c
821 ++++ b/drivers/hv/channel_mgmt.c
822 +@@ -405,7 +405,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel)
823 + primary_channel = channel->primary_channel;
824 + spin_lock_irqsave(&primary_channel->lock, flags);
825 + list_del(&channel->sc_list);
826 +- primary_channel->num_sc--;
827 + spin_unlock_irqrestore(&primary_channel->lock, flags);
828 + }
829 +
830 +@@ -1302,49 +1301,6 @@ cleanup:
831 + return ret;
832 + }
833 +
834 +-/*
835 +- * Retrieve the (sub) channel on which to send an outgoing request.
836 +- * When a primary channel has multiple sub-channels, we try to
837 +- * distribute the load equally amongst all available channels.
838 +- */
839 +-struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
840 +-{
841 +- struct list_head *cur, *tmp;
842 +- int cur_cpu;
843 +- struct vmbus_channel *cur_channel;
844 +- struct vmbus_channel *outgoing_channel = primary;
845 +- int next_channel;
846 +- int i = 1;
847 +-
848 +- if (list_empty(&primary->sc_list))
849 +- return outgoing_channel;
850 +-
851 +- next_channel = primary->next_oc++;
852 +-
853 +- if (next_channel > (primary->num_sc)) {
854 +- primary->next_oc = 0;
855 +- return outgoing_channel;
856 +- }
857 +-
858 +- cur_cpu = hv_cpu_number_to_vp_number(smp_processor_id());
859 +- list_for_each_safe(cur, tmp, &primary->sc_list) {
860 +- cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
861 +- if (cur_channel->state != CHANNEL_OPENED_STATE)
862 +- continue;
863 +-
864 +- if (cur_channel->target_vp == cur_cpu)
865 +- return cur_channel;
866 +-
867 +- if (i == next_channel)
868 +- return cur_channel;
869 +-
870 +- i++;
871 +- }
872 +-
873 +- return outgoing_channel;
874 +-}
875 +-EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);
876 +-
877 + static void invoke_sc_cb(struct vmbus_channel *primary_channel)
878 + {
879 + struct list_head *cur, *tmp;
880 +diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
881 +index 41631512ae97..7b9fbd84d6df 100644
882 +--- a/drivers/hv/hv_balloon.c
883 ++++ b/drivers/hv/hv_balloon.c
884 +@@ -888,12 +888,14 @@ static unsigned long handle_pg_range(unsigned long pg_start,
885 + pfn_cnt -= pgs_ol;
886 + /*
887 + * Check if the corresponding memory block is already
888 +- * online by checking its last previously backed page.
889 +- * In case it is we need to bring rest (which was not
890 +- * backed previously) online too.
891 ++ * online. It is possible to observe struct pages still
892 ++ * being uninitialized here so check section instead.
893 ++ * In case the section is online we need to bring the
894 ++ * rest of pfns (which were not backed previously)
895 ++ * online too.
896 + */
897 + if (start_pfn > has->start_pfn &&
898 +- !PageReserved(pfn_to_page(start_pfn - 1)))
899 ++ online_section_nr(pfn_to_section_nr(start_pfn)))
900 + hv_bring_pgs_online(has, start_pfn, pgs_ol);
901 +
902 + }
903 +diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
904 +index 64d0c85d5161..1f1a55e07733 100644
905 +--- a/drivers/hv/ring_buffer.c
906 ++++ b/drivers/hv/ring_buffer.c
907 +@@ -164,26 +164,25 @@ hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
908 + }
909 +
910 + /* Get various debug metrics for the specified ring buffer. */
911 +-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
912 +- struct hv_ring_buffer_debug_info *debug_info)
913 ++int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
914 ++ struct hv_ring_buffer_debug_info *debug_info)
915 + {
916 + u32 bytes_avail_towrite;
917 + u32 bytes_avail_toread;
918 +
919 +- if (ring_info->ring_buffer) {
920 +- hv_get_ringbuffer_availbytes(ring_info,
921 +- &bytes_avail_toread,
922 +- &bytes_avail_towrite);
923 +-
924 +- debug_info->bytes_avail_toread = bytes_avail_toread;
925 +- debug_info->bytes_avail_towrite = bytes_avail_towrite;
926 +- debug_info->current_read_index =
927 +- ring_info->ring_buffer->read_index;
928 +- debug_info->current_write_index =
929 +- ring_info->ring_buffer->write_index;
930 +- debug_info->current_interrupt_mask =
931 +- ring_info->ring_buffer->interrupt_mask;
932 +- }
933 ++ if (!ring_info->ring_buffer)
934 ++ return -EINVAL;
935 ++
936 ++ hv_get_ringbuffer_availbytes(ring_info,
937 ++ &bytes_avail_toread,
938 ++ &bytes_avail_towrite);
939 ++ debug_info->bytes_avail_toread = bytes_avail_toread;
940 ++ debug_info->bytes_avail_towrite = bytes_avail_towrite;
941 ++ debug_info->current_read_index = ring_info->ring_buffer->read_index;
942 ++ debug_info->current_write_index = ring_info->ring_buffer->write_index;
943 ++ debug_info->current_interrupt_mask
944 ++ = ring_info->ring_buffer->interrupt_mask;
945 ++ return 0;
946 + }
947 + EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
948 +
949 +diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
950 +index d0ff65675292..403fee01572c 100644
951 +--- a/drivers/hv/vmbus_drv.c
952 ++++ b/drivers/hv/vmbus_drv.c
953 +@@ -313,12 +313,16 @@ static ssize_t out_intr_mask_show(struct device *dev,
954 + {
955 + struct hv_device *hv_dev = device_to_hv_device(dev);
956 + struct hv_ring_buffer_debug_info outbound;
957 ++ int ret;
958 +
959 + if (!hv_dev->channel)
960 + return -ENODEV;
961 +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
962 +- return -EINVAL;
963 +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
964 ++
965 ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
966 ++ &outbound);
967 ++ if (ret < 0)
968 ++ return ret;
969 ++
970 + return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
971 + }
972 + static DEVICE_ATTR_RO(out_intr_mask);
973 +@@ -328,12 +332,15 @@ static ssize_t out_read_index_show(struct device *dev,
974 + {
975 + struct hv_device *hv_dev = device_to_hv_device(dev);
976 + struct hv_ring_buffer_debug_info outbound;
977 ++ int ret;
978 +
979 + if (!hv_dev->channel)
980 + return -ENODEV;
981 +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
982 +- return -EINVAL;
983 +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
984 ++
985 ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
986 ++ &outbound);
987 ++ if (ret < 0)
988 ++ return ret;
989 + return sprintf(buf, "%d\n", outbound.current_read_index);
990 + }
991 + static DEVICE_ATTR_RO(out_read_index);
992 +@@ -344,12 +351,15 @@ static ssize_t out_write_index_show(struct device *dev,
993 + {
994 + struct hv_device *hv_dev = device_to_hv_device(dev);
995 + struct hv_ring_buffer_debug_info outbound;
996 ++ int ret;
997 +
998 + if (!hv_dev->channel)
999 + return -ENODEV;
1000 +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
1001 +- return -EINVAL;
1002 +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
1003 ++
1004 ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
1005 ++ &outbound);
1006 ++ if (ret < 0)
1007 ++ return ret;
1008 + return sprintf(buf, "%d\n", outbound.current_write_index);
1009 + }
1010 + static DEVICE_ATTR_RO(out_write_index);
1011 +@@ -360,12 +370,15 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
1012 + {
1013 + struct hv_device *hv_dev = device_to_hv_device(dev);
1014 + struct hv_ring_buffer_debug_info outbound;
1015 ++ int ret;
1016 +
1017 + if (!hv_dev->channel)
1018 + return -ENODEV;
1019 +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
1020 +- return -EINVAL;
1021 +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
1022 ++
1023 ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
1024 ++ &outbound);
1025 ++ if (ret < 0)
1026 ++ return ret;
1027 + return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
1028 + }
1029 + static DEVICE_ATTR_RO(out_read_bytes_avail);
1030 +@@ -376,12 +389,15 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
1031 + {
1032 + struct hv_device *hv_dev = device_to_hv_device(dev);
1033 + struct hv_ring_buffer_debug_info outbound;
1034 ++ int ret;
1035 +
1036 + if (!hv_dev->channel)
1037 + return -ENODEV;
1038 +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
1039 +- return -EINVAL;
1040 +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
1041 ++
1042 ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
1043 ++ &outbound);
1044 ++ if (ret < 0)
1045 ++ return ret;
1046 + return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
1047 + }
1048 + static DEVICE_ATTR_RO(out_write_bytes_avail);
1049 +@@ -391,12 +407,15 @@ static ssize_t in_intr_mask_show(struct device *dev,
1050 + {
1051 + struct hv_device *hv_dev = device_to_hv_device(dev);
1052 + struct hv_ring_buffer_debug_info inbound;
1053 ++ int ret;
1054 +
1055 + if (!hv_dev->channel)
1056 + return -ENODEV;
1057 +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
1058 +- return -EINVAL;
1059 +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
1060 ++
1061 ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
1062 ++ if (ret < 0)
1063 ++ return ret;
1064 ++
1065 + return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
1066 + }
1067 + static DEVICE_ATTR_RO(in_intr_mask);
1068 +@@ -406,12 +425,15 @@ static ssize_t in_read_index_show(struct device *dev,
1069 + {
1070 + struct hv_device *hv_dev = device_to_hv_device(dev);
1071 + struct hv_ring_buffer_debug_info inbound;
1072 ++ int ret;
1073 +
1074 + if (!hv_dev->channel)
1075 + return -ENODEV;
1076 +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
1077 +- return -EINVAL;
1078 +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
1079 ++
1080 ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
1081 ++ if (ret < 0)
1082 ++ return ret;
1083 ++
1084 + return sprintf(buf, "%d\n", inbound.current_read_index);
1085 + }
1086 + static DEVICE_ATTR_RO(in_read_index);
1087 +@@ -421,12 +443,15 @@ static ssize_t in_write_index_show(struct device *dev,
1088 + {
1089 + struct hv_device *hv_dev = device_to_hv_device(dev);
1090 + struct hv_ring_buffer_debug_info inbound;
1091 ++ int ret;
1092 +
1093 + if (!hv_dev->channel)
1094 + return -ENODEV;
1095 +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
1096 +- return -EINVAL;
1097 +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
1098 ++
1099 ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
1100 ++ if (ret < 0)
1101 ++ return ret;
1102 ++
1103 + return sprintf(buf, "%d\n", inbound.current_write_index);
1104 + }
1105 + static DEVICE_ATTR_RO(in_write_index);
1106 +@@ -437,12 +462,15 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
1107 + {
1108 + struct hv_device *hv_dev = device_to_hv_device(dev);
1109 + struct hv_ring_buffer_debug_info inbound;
1110 ++ int ret;
1111 +
1112 + if (!hv_dev->channel)
1113 + return -ENODEV;
1114 +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
1115 +- return -EINVAL;
1116 +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
1117 ++
1118 ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
1119 ++ if (ret < 0)
1120 ++ return ret;
1121 ++
1122 + return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
1123 + }
1124 + static DEVICE_ATTR_RO(in_read_bytes_avail);
1125 +@@ -453,12 +481,15 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
1126 + {
1127 + struct hv_device *hv_dev = device_to_hv_device(dev);
1128 + struct hv_ring_buffer_debug_info inbound;
1129 ++ int ret;
1130 +
1131 + if (!hv_dev->channel)
1132 + return -ENODEV;
1133 +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
1134 +- return -EINVAL;
1135 +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
1136 ++
1137 ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
1138 ++ if (ret < 0)
1139 ++ return ret;
1140 ++
1141 + return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
1142 + }
1143 + static DEVICE_ATTR_RO(in_write_bytes_avail);
1144 +diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
1145 +index 4c8c7a620d08..a5dc13576394 100644
1146 +--- a/drivers/ide/ide-proc.c
1147 ++++ b/drivers/ide/ide-proc.c
1148 +@@ -544,7 +544,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif)
1149 + drive->proc = proc_mkdir(drive->name, parent);
1150 + if (drive->proc) {
1151 + ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
1152 +- proc_create_data("setting", S_IFREG|S_IRUSR|S_IWUSR,
1153 ++ proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR,
1154 + drive->proc, &ide_settings_proc_fops,
1155 + drive);
1156 + }
1157 +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
1158 +index cfc8b94527b9..aa4e431cbcd3 100644
1159 +--- a/drivers/input/joystick/xpad.c
1160 ++++ b/drivers/input/joystick/xpad.c
1161 +@@ -252,6 +252,8 @@ static const struct xpad_device {
1162 + { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
1163 + { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
1164 + { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
1165 ++ { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
1166 ++ { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
1167 + { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
1168 + { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
1169 + { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
1170 +@@ -428,6 +430,7 @@ static const struct usb_device_id xpad_table[] = {
1171 + XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */
1172 + XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
1173 + XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */
1174 ++ XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */
1175 + XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */
1176 + XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
1177 + XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
1178 +diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
1179 +index 8ec483e8688b..26ec603fe220 100644
1180 +--- a/drivers/input/misc/uinput.c
1181 ++++ b/drivers/input/misc/uinput.c
1182 +@@ -39,6 +39,7 @@
1183 + #include <linux/init.h>
1184 + #include <linux/fs.h>
1185 + #include <linux/miscdevice.h>
1186 ++#include <linux/overflow.h>
1187 + #include <linux/input/mt.h>
1188 + #include "../input-compat.h"
1189 +
1190 +@@ -405,7 +406,7 @@ static int uinput_open(struct inode *inode, struct file *file)
1191 + static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
1192 + const struct input_absinfo *abs)
1193 + {
1194 +- int min, max;
1195 ++ int min, max, range;
1196 +
1197 + min = abs->minimum;
1198 + max = abs->maximum;
1199 +@@ -417,7 +418,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
1200 + return -EINVAL;
1201 + }
1202 +
1203 +- if (abs->flat > max - min) {
1204 ++ if (!check_sub_overflow(max, min, &range) && abs->flat > range) {
1205 + printk(KERN_DEBUG
1206 + "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n",
1207 + UINPUT_NAME, code, abs->flat, min, max);
1208 +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
1209 +index db20e992a40f..7f2a45445b00 100644
1210 +--- a/drivers/irqchip/irq-gic-v3-its.c
1211 ++++ b/drivers/irqchip/irq-gic-v3-its.c
1212 +@@ -2399,13 +2399,14 @@ static void its_free_device(struct its_device *its_dev)
1213 + kfree(its_dev);
1214 + }
1215 +
1216 +-static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
1217 ++static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
1218 + {
1219 + int idx;
1220 +
1221 +- idx = find_first_zero_bit(dev->event_map.lpi_map,
1222 +- dev->event_map.nr_lpis);
1223 +- if (idx == dev->event_map.nr_lpis)
1224 ++ idx = bitmap_find_free_region(dev->event_map.lpi_map,
1225 ++ dev->event_map.nr_lpis,
1226 ++ get_count_order(nvecs));
1227 ++ if (idx < 0)
1228 + return -ENOSPC;
1229 +
1230 + *hwirq = dev->event_map.lpi_base + idx;
1231 +@@ -2501,21 +2502,21 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1232 + int err;
1233 + int i;
1234 +
1235 +- for (i = 0; i < nr_irqs; i++) {
1236 +- err = its_alloc_device_irq(its_dev, &hwirq);
1237 +- if (err)
1238 +- return err;
1239 ++ err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
1240 ++ if (err)
1241 ++ return err;
1242 +
1243 +- err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
1244 ++ for (i = 0; i < nr_irqs; i++) {
1245 ++ err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
1246 + if (err)
1247 + return err;
1248 +
1249 + irq_domain_set_hwirq_and_chip(domain, virq + i,
1250 +- hwirq, &its_irq_chip, its_dev);
1251 ++ hwirq + i, &its_irq_chip, its_dev);
1252 + irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
1253 + pr_debug("ID:%d pID:%d vID:%d\n",
1254 +- (int)(hwirq - its_dev->event_map.lpi_base),
1255 +- (int) hwirq, virq + i);
1256 ++ (int)(hwirq + i - its_dev->event_map.lpi_base),
1257 ++ (int)(hwirq + i), virq + i);
1258 + }
1259 +
1260 + return 0;
1261 +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
1262 +index fc7d8b8a654f..1ef828575fae 100644
1263 +--- a/drivers/md/dm-crypt.c
1264 ++++ b/drivers/md/dm-crypt.c
1265 +@@ -2405,9 +2405,21 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key
1266 + * capi:cipher_api_spec-iv:ivopts
1267 + */
1268 + tmp = &cipher_in[strlen("capi:")];
1269 +- cipher_api = strsep(&tmp, "-");
1270 +- *ivmode = strsep(&tmp, ":");
1271 +- *ivopts = tmp;
1272 ++
1273 ++ /* Separate IV options if present, it can contain another '-' in hash name */
1274 ++ *ivopts = strrchr(tmp, ':');
1275 ++ if (*ivopts) {
1276 ++ **ivopts = '\0';
1277 ++ (*ivopts)++;
1278 ++ }
1279 ++ /* Parse IV mode */
1280 ++ *ivmode = strrchr(tmp, '-');
1281 ++ if (*ivmode) {
1282 ++ **ivmode = '\0';
1283 ++ (*ivmode)++;
1284 ++ }
1285 ++ /* The rest is crypto API spec */
1286 ++ cipher_api = tmp;
1287 +
1288 + if (*ivmode && !strcmp(*ivmode, "lmk"))
1289 + cc->tfms_count = 64;
1290 +@@ -2477,11 +2489,8 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
1291 + goto bad_mem;
1292 +
1293 + chainmode = strsep(&tmp, "-");
1294 +- *ivopts = strsep(&tmp, "-");
1295 +- *ivmode = strsep(&*ivopts, ":");
1296 +-
1297 +- if (tmp)
1298 +- DMWARN("Ignoring unexpected additional cipher options");
1299 ++ *ivmode = strsep(&tmp, ":");
1300 ++ *ivopts = tmp;
1301 +
1302 + /*
1303 + * For compatibility with the original dm-crypt mapping format, if
1304 +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
1305 +index 20b0776e39ef..ed3caceaed07 100644
1306 +--- a/drivers/md/dm-thin-metadata.c
1307 ++++ b/drivers/md/dm-thin-metadata.c
1308 +@@ -1678,7 +1678,7 @@ int dm_thin_remove_range(struct dm_thin_device *td,
1309 + return r;
1310 + }
1311 +
1312 +-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
1313 ++int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
1314 + {
1315 + int r;
1316 + uint32_t ref_count;
1317 +@@ -1686,7 +1686,7 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *resu
1318 + down_read(&pmd->root_lock);
1319 + r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
1320 + if (!r)
1321 +- *result = (ref_count != 0);
1322 ++ *result = (ref_count > 1);
1323 + up_read(&pmd->root_lock);
1324 +
1325 + return r;
1326 +diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
1327 +index 35e954ea20a9..f6be0d733c20 100644
1328 +--- a/drivers/md/dm-thin-metadata.h
1329 ++++ b/drivers/md/dm-thin-metadata.h
1330 +@@ -195,7 +195,7 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
1331 +
1332 + int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
1333 +
1334 +-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
1335 ++int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
1336 +
1337 + int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
1338 + int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
1339 +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
1340 +index dadd9696340c..ca8af21bf644 100644
1341 +--- a/drivers/md/dm-thin.c
1342 ++++ b/drivers/md/dm-thin.c
1343 +@@ -1048,7 +1048,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
1344 + * passdown we have to check that these blocks are now unused.
1345 + */
1346 + int r = 0;
1347 +- bool used = true;
1348 ++ bool shared = true;
1349 + struct thin_c *tc = m->tc;
1350 + struct pool *pool = tc->pool;
1351 + dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
1352 +@@ -1058,11 +1058,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
1353 + while (b != end) {
1354 + /* find start of unmapped run */
1355 + for (; b < end; b++) {
1356 +- r = dm_pool_block_is_used(pool->pmd, b, &used);
1357 ++ r = dm_pool_block_is_shared(pool->pmd, b, &shared);
1358 + if (r)
1359 + goto out;
1360 +
1361 +- if (!used)
1362 ++ if (!shared)
1363 + break;
1364 + }
1365 +
1366 +@@ -1071,11 +1071,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
1367 +
1368 + /* find end of run */
1369 + for (e = b + 1; e != end; e++) {
1370 +- r = dm_pool_block_is_used(pool->pmd, e, &used);
1371 ++ r = dm_pool_block_is_shared(pool->pmd, e, &shared);
1372 + if (r)
1373 + goto out;
1374 +
1375 +- if (used)
1376 ++ if (shared)
1377 + break;
1378 + }
1379 +
1380 +diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
1381 +index b8aaa684c397..2ed23c99f59f 100644
1382 +--- a/drivers/misc/ibmvmc.c
1383 ++++ b/drivers/misc/ibmvmc.c
1384 +@@ -820,21 +820,24 @@ static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
1385 + *
1386 + * Return:
1387 + * 0 - Success
1388 ++ * Non-zero - Failure
1389 + */
1390 + static int ibmvmc_open(struct inode *inode, struct file *file)
1391 + {
1392 + struct ibmvmc_file_session *session;
1393 +- int rc = 0;
1394 +
1395 + pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
1396 + (unsigned long)inode, (unsigned long)file,
1397 + ibmvmc.state);
1398 +
1399 + session = kzalloc(sizeof(*session), GFP_KERNEL);
1400 ++ if (!session)
1401 ++ return -ENOMEM;
1402 ++
1403 + session->file = file;
1404 + file->private_data = session;
1405 +
1406 +- return rc;
1407 ++ return 0;
1408 + }
1409 +
1410 + /**
1411 +diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
1412 +index e4b10b2d1a08..23739a60517f 100644
1413 +--- a/drivers/misc/mei/hw-me-regs.h
1414 ++++ b/drivers/misc/mei/hw-me-regs.h
1415 +@@ -127,6 +127,8 @@
1416 + #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
1417 + #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
1418 +
1419 ++#define MEI_DEV_ID_DNV_IE 0x19E5 /* Denverton IE */
1420 ++
1421 + #define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */
1422 +
1423 + #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
1424 +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
1425 +index ea4e152270a3..c8e21c894a5f 100644
1426 +--- a/drivers/misc/mei/pci-me.c
1427 ++++ b/drivers/misc/mei/pci-me.c
1428 +@@ -88,11 +88,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
1429 + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
1430 + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
1431 + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
1432 +- {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH8_CFG)},
1433 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)},
1434 +
1435 + {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
1436 + {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
1437 +
1438 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},
1439 ++
1440 + {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
1441 +
1442 + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
1443 +diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c
1444 +index 54c3fbb4a391..db56d4f58aaa 100644
1445 +--- a/drivers/mmc/host/dw_mmc-bluefield.c
1446 ++++ b/drivers/mmc/host/dw_mmc-bluefield.c
1447 +@@ -1,11 +1,6 @@
1448 + // SPDX-License-Identifier: GPL-2.0
1449 + /*
1450 + * Copyright (C) 2018 Mellanox Technologies.
1451 +- *
1452 +- * This program is free software; you can redistribute it and/or modify
1453 +- * it under the terms of the GNU General Public License as published by
1454 +- * the Free Software Foundation; either version 2 of the License, or
1455 +- * (at your option) any later version.
1456 + */
1457 +
1458 + #include <linux/bitfield.h>
1459 +diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
1460 +index c201c378537e..ef9deaa361c7 100644
1461 +--- a/drivers/mmc/host/meson-gx-mmc.c
1462 ++++ b/drivers/mmc/host/meson-gx-mmc.c
1463 +@@ -174,6 +174,8 @@ struct meson_host {
1464 + struct sd_emmc_desc *descs;
1465 + dma_addr_t descs_dma_addr;
1466 +
1467 ++ int irq;
1468 ++
1469 + bool vqmmc_enabled;
1470 + };
1471 +
1472 +@@ -1181,7 +1183,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
1473 + struct resource *res;
1474 + struct meson_host *host;
1475 + struct mmc_host *mmc;
1476 +- int ret, irq;
1477 ++ int ret;
1478 +
1479 + mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
1480 + if (!mmc)
1481 +@@ -1228,8 +1230,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
1482 + goto free_host;
1483 + }
1484 +
1485 +- irq = platform_get_irq(pdev, 0);
1486 +- if (irq <= 0) {
1487 ++ host->irq = platform_get_irq(pdev, 0);
1488 ++ if (host->irq <= 0) {
1489 + dev_err(&pdev->dev, "failed to get interrupt resource.\n");
1490 + ret = -EINVAL;
1491 + goto free_host;
1492 +@@ -1283,9 +1285,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
1493 + writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
1494 + host->regs + SD_EMMC_IRQ_EN);
1495 +
1496 +- ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq,
1497 +- meson_mmc_irq_thread, IRQF_SHARED,
1498 +- NULL, host);
1499 ++ ret = request_threaded_irq(host->irq, meson_mmc_irq,
1500 ++ meson_mmc_irq_thread, IRQF_SHARED, NULL, host);
1501 + if (ret)
1502 + goto err_init_clk;
1503 +
1504 +@@ -1303,7 +1304,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
1505 + if (host->bounce_buf == NULL) {
1506 + dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n");
1507 + ret = -ENOMEM;
1508 +- goto err_init_clk;
1509 ++ goto err_free_irq;
1510 + }
1511 +
1512 + host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
1513 +@@ -1322,6 +1323,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
1514 + err_bounce_buf:
1515 + dma_free_coherent(host->dev, host->bounce_buf_size,
1516 + host->bounce_buf, host->bounce_dma_addr);
1517 ++err_free_irq:
1518 ++ free_irq(host->irq, host);
1519 + err_init_clk:
1520 + clk_disable_unprepare(host->mmc_clk);
1521 + err_core_clk:
1522 +@@ -1339,6 +1342,7 @@ static int meson_mmc_remove(struct platform_device *pdev)
1523 +
1524 + /* disable interrupts */
1525 + writel(0, host->regs + SD_EMMC_IRQ_EN);
1526 ++ free_irq(host->irq, host);
1527 +
1528 + dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
1529 + host->descs, host->descs_dma_addr);
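
[Note on the meson-gx-mmc hunks above: the IRQ moves from devm-managed to explicit request_threaded_irq()/free_irq(), with a new err_free_irq unwind label, so the handler is released before the DMA buffers it touches rather than afterwards by devres. The general shape is goto-based unwinding in reverse acquisition order; a compilable sketch with placeholder resources:

    #include <stdlib.h>

    static int probe(void)
    {
        void *irq_ctx, *bounce_buf;

        irq_ctx = malloc(16);          /* stands in for request_threaded_irq() */
        if (!irq_ctx)
            return -1;

        bounce_buf = malloc(64);       /* acquired after the IRQ */
        if (!bounce_buf)
            goto err_free_irq;         /* the label the fix introduces */

        return 0;                      /* success: keep both resources */

    err_free_irq:
        free(irq_ctx);                 /* free_irq() stand-in: release the
                                        * handler before its data vanishes */
        return -1;
    }
]
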
1530 +diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
1531 +index 0db99057c44f..9d12c06c7fd6 100644
1532 +--- a/drivers/mmc/host/sdhci-iproc.c
1533 ++++ b/drivers/mmc/host/sdhci-iproc.c
1534 +@@ -296,7 +296,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
1535 +
1536 + iproc_host->data = iproc_data;
1537 +
1538 +- mmc_of_parse(host->mmc);
1539 ++ ret = mmc_of_parse(host->mmc);
1540 ++ if (ret)
1541 ++ goto err;
1542 ++
1543 + sdhci_get_property(pdev);
1544 +
1545 + host->mmc->caps |= iproc_host->data->mmc_caps;
1546 +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
1547 +index 3b3f88ffab53..c05e4d50d43d 100644
1548 +--- a/drivers/net/can/dev.c
1549 ++++ b/drivers/net/can/dev.c
1550 +@@ -480,8 +480,6 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
1551 + struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
1552 + {
1553 + struct can_priv *priv = netdev_priv(dev);
1554 +- struct sk_buff *skb = priv->echo_skb[idx];
1555 +- struct canfd_frame *cf;
1556 +
1557 + if (idx >= priv->echo_skb_max) {
1558 + netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
1559 +@@ -489,20 +487,21 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8
1560 + return NULL;
1561 + }
1562 +
1563 +- if (!skb) {
1564 +- netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
1565 +- __func__, idx);
1566 +- return NULL;
1567 +- }
1568 ++ if (priv->echo_skb[idx]) {
1569 ++ /* Using "struct canfd_frame::len" for the frame
1570 ++ * length is supported on both CAN and CANFD frames.
1571 ++ */
1572 ++ struct sk_buff *skb = priv->echo_skb[idx];
1573 ++ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
1574 ++ u8 len = cf->len;
1575 +
1576 +- /* Using "struct canfd_frame::len" for the frame
1577 +- * length is supported on both CAN and CANFD frames.
1578 +- */
1579 +- cf = (struct canfd_frame *)skb->data;
1580 +- *len_ptr = cf->len;
1581 +- priv->echo_skb[idx] = NULL;
1582 ++ *len_ptr = len;
1583 ++ priv->echo_skb[idx] = NULL;
1584 +
1585 +- return skb;
1586 ++ return skb;
1587 ++ }
1588 ++
1589 ++ return NULL;
1590 + }
1591 +
1592 + /*
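
[Note on the __can_get_echo_skb() rework above: the priv->echo_skb[idx] load and the skb->data dereference now sit behind the bounds check on idx, so an out-of-range index can no longer read past the array before being rejected. The pattern in plain C, with illustrative names:

    #include <stddef.h>

    /* Validate the index before touching the array it guards. */
    static void *get_slot(void *const *slots, size_t nslots, size_t idx)
    {
        if (idx >= nslots)     /* check first ...          */
            return NULL;
        return slots[idx];     /* ... dereference second   */
    }
]
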
1593 +diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
1594 +index 75ce11395ee8..ae219b8a7754 100644
1595 +--- a/drivers/net/can/flexcan.c
1596 ++++ b/drivers/net/can/flexcan.c
1597 +@@ -1004,7 +1004,7 @@ static int flexcan_chip_start(struct net_device *dev)
1598 + }
1599 + } else {
1600 + /* clear and invalidate unused mailboxes first */
1601 +- for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= ARRAY_SIZE(regs->mb); i++) {
1602 ++ for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < ARRAY_SIZE(regs->mb); i++) {
1603 + priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
1604 + &regs->mb[i].can_ctrl);
1605 + }
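
[Note on the flexcan hunk above: a classic off-by-one. `i <= ARRAY_SIZE(regs->mb)` writes one element past the mailbox array, so the bound becomes `<`; for an array of N elements the valid indices are 0..N-1. A runnable illustration:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
        int mb[16];

        /* i < ARRAY_SIZE(mb): the last valid index is 15, not 16 */
        for (size_t i = 0; i < ARRAY_SIZE(mb); i++)
            mb[i] = 0;
        printf("cleared %zu mailboxes\n", ARRAY_SIZE(mb));
        return 0;
    }
]
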
1606 +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
1607 +index d272dc6984ac..b40d4377cc71 100644
1608 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
1609 ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
1610 +@@ -431,8 +431,6 @@
1611 + #define MAC_MDIOSCAR_PA_WIDTH 5
1612 + #define MAC_MDIOSCAR_RA_INDEX 0
1613 + #define MAC_MDIOSCAR_RA_WIDTH 16
1614 +-#define MAC_MDIOSCAR_REG_INDEX 0
1615 +-#define MAC_MDIOSCAR_REG_WIDTH 21
1616 + #define MAC_MDIOSCCDR_BUSY_INDEX 22
1617 + #define MAC_MDIOSCCDR_BUSY_WIDTH 1
1618 + #define MAC_MDIOSCCDR_CMD_INDEX 16
1619 +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1620 +index 1e929a1e4ca7..4666084eda16 100644
1621 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1622 ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1623 +@@ -1284,6 +1284,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
1624 + }
1625 + }
1626 +
1627 ++static unsigned int xgbe_create_mdio_sca(int port, int reg)
1628 ++{
1629 ++ unsigned int mdio_sca, da;
1630 ++
1631 ++ da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
1632 ++
1633 ++ mdio_sca = 0;
1634 ++ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
1635 ++ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
1636 ++ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
1637 ++
1638 ++ return mdio_sca;
1639 ++}
1640 ++
1641 + static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1642 + int reg, u16 val)
1643 + {
1644 +@@ -1291,9 +1305,7 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1645 +
1646 + reinit_completion(&pdata->mdio_complete);
1647 +
1648 +- mdio_sca = 0;
1649 +- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
1650 +- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
1651 ++ mdio_sca = xgbe_create_mdio_sca(addr, reg);
1652 + XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
1653 +
1654 + mdio_sccd = 0;
1655 +@@ -1317,9 +1329,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1656 +
1657 + reinit_completion(&pdata->mdio_complete);
1658 +
1659 +- mdio_sca = 0;
1660 +- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
1661 +- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
1662 ++ mdio_sca = xgbe_create_mdio_sca(addr, reg);
1663 + XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
1664 +
1665 + mdio_sccd = 0;
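
[Note on the xgbe hunks above: the duplicated MDIO station-address assembly is factored into xgbe_create_mdio_sca(), and for clause-45 accesses the device address is taken from the upper bits of `reg` (MII_ADDR_C45 marks such register values in the kernel MDIO layer). A standalone sketch of the bit packing; RA and PA positions follow the _INDEX/_WIDTH defines visible in the hunk, while the DA position at bit 21 is an assumption, since that define is not shown here:

    #include <stdint.h>

    #define MII_ADDR_C45   (1u << 30)   /* clause-45 flag on the regnum */

    static uint32_t create_mdio_sca(int port, int reg)
    {
        /* devad lives in bits 20:16 of a clause-45 regnum */
        uint32_t da = (reg & MII_ADDR_C45) ? ((uint32_t)reg >> 16) & 0x1f : 0;

        return ((uint32_t)reg & 0xffff)            /* RA: bits 15..0   */
             | (((uint32_t)port & 0x1f) << 16)     /* PA: bits 20..16  */
             | (da << 21);                         /* DA: assumed 25..21 */
    }
]
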
1666 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
1667 +index 5890fdfd62c3..c7901a3f2a79 100644
1668 +--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
1669 ++++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
1670 +@@ -604,29 +604,31 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
1671 + u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
1672 + u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
1673 + u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
1674 ++ char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
1675 ++
1676 ++ memcpy(ncqe, cqe, q->elem_size);
1677 ++ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
1678 +
1679 + if (sendq) {
1680 + struct mlxsw_pci_queue *sdq;
1681 +
1682 + sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
1683 + mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
1684 +- wqe_counter, cqe);
1685 ++ wqe_counter, ncqe);
1686 + q->u.cq.comp_sdq_count++;
1687 + } else {
1688 + struct mlxsw_pci_queue *rdq;
1689 +
1690 + rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
1691 + mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
1692 +- wqe_counter, q->u.cq.v, cqe);
1693 ++ wqe_counter, q->u.cq.v, ncqe);
1694 + q->u.cq.comp_rdq_count++;
1695 + }
1696 + if (++items == credits)
1697 + break;
1698 + }
1699 +- if (items) {
1700 +- mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
1701 ++ if (items)
1702 + mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
1703 +- }
1704 + }
1705 +
1706 + static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
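
[Note on the mlxsw tasklet change above: each completion entry is now copied into an on-stack buffer sized for the largest CQE format before the consumer doorbell is rung, so the hardware may reuse the ring slot while the driver parses its private copy. Reduced to plain C with placeholder callbacks:

    #include <string.h>

    #define CQE_SIZE_MAX 32

    /* 'release' hands the slot back to the producer (the doorbell write). */
    static void handle_entry(const void *ring_slot, size_t elem_size,
                             void (*release)(void),
                             void (*process)(const void *))
    {
        char copy[CQE_SIZE_MAX];

        memcpy(copy, ring_slot, elem_size);  /* snapshot first ...          */
        release();                           /* ... return the slot early   */
        process(copy);                       /* parse the stable local copy */
    }
]
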
1707 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
1708 +index bb99f6d41fe0..ffee38e36ce8 100644
1709 +--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
1710 ++++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
1711 +@@ -27,7 +27,7 @@
1712 +
1713 + #define MLXSW_PCI_SW_RESET 0xF0010
1714 + #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
1715 +-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
1716 ++#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000
1717 + #define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
1718 + #define MLXSW_PCI_FW_READY 0xA1844
1719 + #define MLXSW_PCI_FW_READY_MASK 0xFFFF
1720 +@@ -53,6 +53,7 @@
1721 + #define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
1722 + #define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */
1723 + #define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */
1724 ++#define MLXSW_PCI_CQE_SIZE_MAX MLXSW_PCI_CQE2_SIZE
1725 + #define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */
1726 + #define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
1727 + #define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE)
1728 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
1729 +index a3db033d7399..b490589ef25c 100644
1730 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
1731 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
1732 +@@ -882,8 +882,8 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = {
1733 + static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
1734 + .type = MLXSW_SP_FID_TYPE_DUMMY,
1735 + .fid_size = sizeof(struct mlxsw_sp_fid),
1736 +- .start_index = MLXSW_SP_RFID_BASE - 1,
1737 +- .end_index = MLXSW_SP_RFID_BASE - 1,
1738 ++ .start_index = VLAN_N_VID - 1,
1739 ++ .end_index = VLAN_N_VID - 1,
1740 + .ops = &mlxsw_sp_fid_dummy_ops,
1741 + };
1742 +
1743 +diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
1744 +index 9020b084b953..7ec4eb74fe21 100644
1745 +--- a/drivers/net/ethernet/sun/cassini.c
1746 ++++ b/drivers/net/ethernet/sun/cassini.c
1747 +@@ -1,22 +1,9 @@
1748 +-// SPDX-License-Identifier: GPL-2.0
1749 ++// SPDX-License-Identifier: GPL-2.0+
1750 + /* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
1751 + *
1752 + * Copyright (C) 2004 Sun Microsystems Inc.
1753 + * Copyright (C) 2003 Adrian Sun (asun@×××××××××××××.com)
1754 + *
1755 +- * This program is free software; you can redistribute it and/or
1756 +- * modify it under the terms of the GNU General Public License as
1757 +- * published by the Free Software Foundation; either version 2 of the
1758 +- * License, or (at your option) any later version.
1759 +- *
1760 +- * This program is distributed in the hope that it will be useful,
1761 +- * but WITHOUT ANY WARRANTY; without even the implied warranty of
1762 +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1763 +- * GNU General Public License for more details.
1764 +- *
1765 +- * You should have received a copy of the GNU General Public License
1766 +- * along with this program; if not, see <http://www.gnu.org/licenses/>.
1767 +- *
1768 + * This driver uses the sungem driver (c) David Miller
1769 + * (davem@××××××.com) as its basis.
1770 + *
1771 +diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
1772 +index 13f3860496a8..ae5f05f03f88 100644
1773 +--- a/drivers/net/ethernet/sun/cassini.h
1774 ++++ b/drivers/net/ethernet/sun/cassini.h
1775 +@@ -1,23 +1,10 @@
1776 +-/* SPDX-License-Identifier: GPL-2.0 */
1777 ++/* SPDX-License-Identifier: GPL-2.0+ */
1778 + /* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $
1779 + * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver.
1780 + *
1781 + * Copyright (C) 2004 Sun Microsystems Inc.
1782 + * Copyright (c) 2003 Adrian Sun (asun@×××××××××××××.com)
1783 + *
1784 +- * This program is free software; you can redistribute it and/or
1785 +- * modify it under the terms of the GNU General Public License as
1786 +- * published by the Free Software Foundation; either version 2 of the
1787 +- * License, or (at your option) any later version.
1788 +- *
1789 +- * This program is distributed in the hope that it will be useful,
1790 +- * but WITHOUT ANY WARRANTY; without even the implied warranty of
1791 +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1792 +- * GNU General Public License for more details.
1793 +- *
1794 +- * You should have received a copy of the GNU General Public License
1795 +- * along with this program; if not, see <http://www.gnu.org/licenses/>.
1796 +- *
1797 + * vendor id: 0x108E (Sun Microsystems, Inc.)
1798 + * device id: 0xabba (Cassini)
1799 + * revision ids: 0x01 = Cassini
1800 +diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
1801 +index cbec296107bd..f46da6262abe 100644
1802 +--- a/drivers/net/phy/marvell.c
1803 ++++ b/drivers/net/phy/marvell.c
1804 +@@ -1042,6 +1042,39 @@ static int m88e1145_config_init(struct phy_device *phydev)
1805 + return 0;
1806 + }
1807 +
1808 ++/* The VOD can be out of specification on link up. Poke an
1809 ++ * undocumented register, in an undocumented page, with a magic value
1810 ++ * to fix this.
1811 ++ */
1812 ++static int m88e6390_errata(struct phy_device *phydev)
1813 ++{
1814 ++ int err;
1815 ++
1816 ++ err = phy_write(phydev, MII_BMCR,
1817 ++ BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX);
1818 ++ if (err)
1819 ++ return err;
1820 ++
1821 ++ usleep_range(300, 400);
1822 ++
1823 ++ err = phy_write_paged(phydev, 0xf8, 0x08, 0x36);
1824 ++ if (err)
1825 ++ return err;
1826 ++
1827 ++ return genphy_soft_reset(phydev);
1828 ++}
1829 ++
1830 ++static int m88e6390_config_aneg(struct phy_device *phydev)
1831 ++{
1832 ++ int err;
1833 ++
1834 ++ err = m88e6390_errata(phydev);
1835 ++ if (err)
1836 ++ return err;
1837 ++
1838 ++ return m88e1510_config_aneg(phydev);
1839 ++}
1840 ++
1841 + /**
1842 + * fiber_lpa_to_ethtool_lpa_t
1843 + * @lpa: value of the MII_LPA register for fiber link
1844 +@@ -1397,7 +1430,7 @@ static int m88e1318_set_wol(struct phy_device *phydev,
1845 + * before enabling it if !phy_interrupt_is_valid()
1846 + */
1847 + if (!phy_interrupt_is_valid(phydev))
1848 +- phy_read(phydev, MII_M1011_IEVENT);
1849 ++ __phy_read(phydev, MII_M1011_IEVENT);
1850 +
1851 + /* Enable the WOL interrupt */
1852 + err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
1853 +@@ -2292,7 +2325,7 @@ static struct phy_driver marvell_drivers[] = {
1854 + .flags = PHY_HAS_INTERRUPT,
1855 + .probe = m88e6390_probe,
1856 + .config_init = &marvell_config_init,
1857 +- .config_aneg = &m88e1510_config_aneg,
1858 ++ .config_aneg = &m88e6390_config_aneg,
1859 + .read_status = &marvell_read_status,
1860 + .ack_interrupt = &marvell_ack_interrupt,
1861 + .config_intr = &marvell_config_intr,
1862 +diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
1863 +index 2e59a8419b17..66b9cfe692fc 100644
1864 +--- a/drivers/net/phy/mdio_bus.c
1865 ++++ b/drivers/net/phy/mdio_bus.c
1866 +@@ -390,6 +390,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
1867 + if (IS_ERR(gpiod)) {
1868 + dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n",
1869 + bus->id);
1870 ++ device_del(&bus->dev);
1871 + return PTR_ERR(gpiod);
1872 + } else if (gpiod) {
1873 + bus->reset_gpiod = gpiod;
1874 +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
1875 +index fd051ae787cb..5dd661fb662f 100644
1876 +--- a/drivers/net/phy/phy_device.c
1877 ++++ b/drivers/net/phy/phy_device.c
1878 +@@ -2196,6 +2196,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
1879 + {
1880 + int retval;
1881 +
1882 ++ if (WARN_ON(!new_driver->features)) {
1883 ++ pr_err("%s: Driver features are missing\n", new_driver->name);
1884 ++ return -EINVAL;
1885 ++ }
1886 ++
1887 + new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY;
1888 + new_driver->mdiodrv.driver.name = new_driver->name;
1889 + new_driver->mdiodrv.driver.bus = &mdio_bus_type;
1890 +diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
1891 +index 62dc564b251d..f22639f0116a 100644
1892 +--- a/drivers/net/ppp/pppoe.c
1893 ++++ b/drivers/net/ppp/pppoe.c
1894 +@@ -445,6 +445,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
1895 + if (pskb_trim_rcsum(skb, len))
1896 + goto drop;
1897 +
1898 ++ ph = pppoe_hdr(skb);
1899 + pn = pppoe_pernet(dev_net(dev));
1900 +
1901 + /* Note that get_item does a sock_hold(), so sk_pppox(po)
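
[Note on the pppoe fix above: the header pointer is re-read after pskb_trim_rcsum(), which can reallocate the skb data and leave the old `ph` dangling. Any call that may reallocate a buffer invalidates pointers into it; a runnable realloc() analogue:

    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        char *buf = malloc(8);
        if (!buf)
            return 1;
        char *hdr = buf;                 /* pointer into the buffer */

        char *tmp = realloc(buf, 4096);  /* may move the allocation */
        if (!tmp) {
            free(buf);
            return 1;
        }
        buf = tmp;
        hdr = buf;                       /* re-derive, as the fix does */
        memset(hdr, 0, 8);
        free(buf);
        return 0;
    }
]
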
1902 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
1903 +index 3bfa7f5e3513..2e5bcb3fdff7 100644
1904 +--- a/drivers/net/wireless/mediatek/mt76/mt76.h
1905 ++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
1906 +@@ -1,3 +1,4 @@
1907 ++
1908 + /*
1909 + * Copyright (C) 2016 Felix Fietkau <nbd@×××.name>
1910 + *
1911 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
1912 +index 9273d2d2764a..732f4b87fdcb 100644
1913 +--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
1914 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
1915 +@@ -116,9 +116,6 @@ void mt76x0_bss_info_changed(struct ieee80211_hw *hw,
1916 + MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
1917 + }
1918 +
1919 +- if (changed & BSS_CHANGED_ASSOC)
1920 +- mt76x0_phy_recalibrate_after_assoc(dev);
1921 +-
1922 + mutex_unlock(&dev->mt76.mutex);
1923 + }
1924 + EXPORT_SYMBOL_GPL(mt76x0_bss_info_changed);
1925 +@@ -138,6 +135,12 @@ void mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
1926 + struct mt76x02_dev *dev = hw->priv;
1927 +
1928 + clear_bit(MT76_SCANNING, &dev->mt76.state);
1929 ++
1930 ++ if (dev->cal.gain_init_done) {
1931 ++ /* Restore AGC gain and resume calibration after scanning. */
1932 ++ dev->cal.low_gain = -1;
1933 ++ ieee80211_queue_delayed_work(hw, &dev->cal_work, 0);
1934 ++ }
1935 + }
1936 + EXPORT_SYMBOL_GPL(mt76x0_sw_scan_complete);
1937 +
1938 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
1939 +index 2187bafaf2e9..0057f69d0c36 100644
1940 +--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
1941 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
1942 +@@ -41,6 +41,11 @@ static inline bool is_mt7610e(struct mt76x02_dev *dev)
1943 +
1944 + void mt76x0_init_debugfs(struct mt76x02_dev *dev);
1945 +
1946 ++static inline bool is_mt7630(struct mt76x02_dev *dev)
1947 ++{
1948 ++ return mt76_chip(&dev->mt76) == 0x7630;
1949 ++}
1950 ++
1951 + /* Init */
1952 + struct mt76x02_dev *
1953 + mt76x0_alloc_device(struct device *pdev,
1954 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
1955 +index cf024950e0ed..c34abd1c6030 100644
1956 +--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
1957 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
1958 +@@ -215,62 +215,6 @@ int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev)
1959 + return 0;
1960 + }
1961 +
1962 +-static void mt76x0_vco_cal(struct mt76x02_dev *dev, u8 channel)
1963 +-{
1964 +- u8 val;
1965 +-
1966 +- val = rf_rr(dev, MT_RF(0, 4));
1967 +- if ((val & 0x70) != 0x30)
1968 +- return;
1969 +-
1970 +- /*
1971 +- * Calibration Mode - Open loop, closed loop, and amplitude:
1972 +- * B0.R06.[0]: 1
1973 +- * B0.R06.[3:1] bp_close_code: 100
1974 +- * B0.R05.[7:0] bp_open_code: 0x0
1975 +- * B0.R04.[2:0] cal_bits: 000
1976 +- * B0.R03.[2:0] startup_time: 011
1977 +- * B0.R03.[6:4] settle_time:
1978 +- * 80MHz channel: 110
1979 +- * 40MHz channel: 101
1980 +- * 20MHz channel: 100
1981 +- */
1982 +- val = rf_rr(dev, MT_RF(0, 6));
1983 +- val &= ~0xf;
1984 +- val |= 0x09;
1985 +- rf_wr(dev, MT_RF(0, 6), val);
1986 +-
1987 +- val = rf_rr(dev, MT_RF(0, 5));
1988 +- if (val != 0)
1989 +- rf_wr(dev, MT_RF(0, 5), 0x0);
1990 +-
1991 +- val = rf_rr(dev, MT_RF(0, 4));
1992 +- val &= ~0x07;
1993 +- rf_wr(dev, MT_RF(0, 4), val);
1994 +-
1995 +- val = rf_rr(dev, MT_RF(0, 3));
1996 +- val &= ~0x77;
1997 +- if (channel == 1 || channel == 7 || channel == 9 || channel >= 13) {
1998 +- val |= 0x63;
1999 +- } else if (channel == 3 || channel == 4 || channel == 10) {
2000 +- val |= 0x53;
2001 +- } else if (channel == 2 || channel == 5 || channel == 6 ||
2002 +- channel == 8 || channel == 11 || channel == 12) {
2003 +- val |= 0x43;
2004 +- } else {
2005 +- WARN(1, "Unknown channel %u\n", channel);
2006 +- return;
2007 +- }
2008 +- rf_wr(dev, MT_RF(0, 3), val);
2009 +-
2010 +- /* TODO replace by mt76x0_rf_set(dev, MT_RF(0, 4), BIT(7)); */
2011 +- val = rf_rr(dev, MT_RF(0, 4));
2012 +- val = ((val & ~(0x80)) | 0x80);
2013 +- rf_wr(dev, MT_RF(0, 4), val);
2014 +-
2015 +- msleep(2);
2016 +-}
2017 +-
2018 + static void
2019 + mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
2020 + {
2021 +@@ -518,21 +462,47 @@ mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u16 rf_bw_band)
2022 +
2023 + static void mt76x0_ant_select(struct mt76x02_dev *dev)
2024 + {
2025 +- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
2026 +-
2027 +- /* single antenna mode */
2028 +- if (chan->band == NL80211_BAND_2GHZ) {
2029 +- mt76_rmw(dev, MT_COEXCFG3,
2030 +- BIT(5) | BIT(4) | BIT(3) | BIT(2), BIT(1));
2031 +- mt76_rmw(dev, MT_WLAN_FUN_CTRL, BIT(5), BIT(6));
2032 ++ u16 ee_ant = mt76x02_eeprom_get(dev, MT_EE_ANTENNA);
2033 ++ u16 nic_conf2 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
2034 ++ u32 wlan, coex3, cmb;
2035 ++ bool ant_div;
2036 ++
2037 ++ wlan = mt76_rr(dev, MT_WLAN_FUN_CTRL);
2038 ++ cmb = mt76_rr(dev, MT_CMB_CTRL);
2039 ++ coex3 = mt76_rr(dev, MT_COEXCFG3);
2040 ++
2041 ++ cmb &= ~(BIT(14) | BIT(12));
2042 ++ wlan &= ~(BIT(6) | BIT(5));
2043 ++ coex3 &= ~GENMASK(5, 2);
2044 ++
2045 ++ if (ee_ant & MT_EE_ANTENNA_DUAL) {
2046 ++ /* dual antenna mode */
2047 ++ ant_div = !(nic_conf2 & MT_EE_NIC_CONF_2_ANT_OPT) &&
2048 ++ (nic_conf2 & MT_EE_NIC_CONF_2_ANT_DIV);
2049 ++ if (ant_div)
2050 ++ cmb |= BIT(12);
2051 ++ else
2052 ++ coex3 |= BIT(4);
2053 ++ coex3 |= BIT(3);
2054 ++ if (dev->mt76.cap.has_2ghz)
2055 ++ wlan |= BIT(6);
2056 + } else {
2057 +- mt76_rmw(dev, MT_COEXCFG3, BIT(5) | BIT(2),
2058 +- BIT(4) | BIT(3));
2059 +- mt76_clear(dev, MT_WLAN_FUN_CTRL,
2060 +- BIT(6) | BIT(5));
2061 ++ /* single antenna mode */
Marker replaced below; see replace.
2062 ++ if (dev->mt76.cap.has_5ghz) {
2063 ++ coex3 |= BIT(3) | BIT(4);
2064 ++ } else {
2065 ++ wlan |= BIT(6);
2066 ++ coex3 |= BIT(1);
2067 ++ }
2068 + }
2069 +- mt76_clear(dev, MT_CMB_CTRL, BIT(14) | BIT(12));
2070 ++
2071 ++ if (is_mt7630(dev))
2072 ++ cmb |= BIT(14) | BIT(11);
2073 ++
2074 ++ mt76_wr(dev, MT_WLAN_FUN_CTRL, wlan);
2075 ++ mt76_wr(dev, MT_CMB_CTRL, cmb);
2076 + mt76_clear(dev, MT_COEXCFG0, BIT(2));
2077 ++ mt76_wr(dev, MT_COEXCFG3, coex3);
2078 + }
2079 +
2080 + static void
2081 +@@ -585,8 +555,12 @@ void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
2082 + void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
2083 + {
2084 + struct ieee80211_channel *chan = dev->mt76.chandef.chan;
2085 ++ int is_5ghz = (chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
2086 + u32 val, tx_alc, reg_val;
2087 +
2088 ++ if (is_mt7630(dev))
2089 ++ return;
2090 ++
2091 + if (power_on) {
2092 + mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
2093 + mt76x02_mcu_calibrate(dev, MCU_CAL_VCO, chan->hw_value,
2094 +@@ -602,7 +576,7 @@ void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
2095 + reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
2096 + mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
2097 +
2098 +- if (chan->band == NL80211_BAND_5GHZ) {
2099 ++ if (is_5ghz) {
2100 + if (chan->hw_value < 100)
2101 + val = 0x701;
2102 + else if (chan->hw_value < 140)
2103 +@@ -615,7 +589,7 @@ void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
2104 +
2105 + mt76x02_mcu_calibrate(dev, MCU_CAL_FULL, val, false);
2106 + msleep(350);
2107 +- mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 1, false);
2108 ++ mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz, false);
2109 + usleep_range(15000, 20000);
2110 +
2111 + mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
2112 +@@ -696,7 +670,6 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
2113 + mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
2114 + mt76x02_phy_set_band(dev, chandef->chan->band,
2115 + ch_group_index & 1);
2116 +- mt76x0_ant_select(dev);
2117 +
2118 + mt76_rmw(dev, MT_EXT_CCA_CFG,
2119 + (MT_EXT_CCA_CFG_CCA0 |
2120 +@@ -719,20 +692,16 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
2121 +
2122 + mt76x0_read_rx_gain(dev);
2123 + mt76x0_phy_set_chan_bbp_params(dev, rf_bw_band);
2124 +- mt76x02_init_agc_gain(dev);
2125 +
2126 +- if (mt76_is_usb(dev)) {
2127 +- mt76x0_vco_cal(dev, channel);
2128 +- } else {
2129 +- /* enable vco */
2130 +- rf_set(dev, MT_RF(0, 4), BIT(7));
2131 +- }
2132 ++ /* enable vco */
2133 ++ rf_set(dev, MT_RF(0, 4), BIT(7));
2134 +
2135 + if (scan)
2136 + return 0;
2137 +
2138 +- if (mt76_is_mmio(dev))
2139 +- mt76x0_phy_calibrate(dev, false);
2140 ++ mt76x0_phy_calibrate(dev, false);
2141 ++ mt76x02_init_agc_gain(dev);
2142 ++
2143 + mt76x0_phy_set_txpower(dev);
2144 +
2145 + ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
2146 +@@ -741,39 +710,6 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
2147 + return 0;
2148 + }
2149 +
2150 +-void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev)
2151 +-{
2152 +- u32 tx_alc, reg_val;
2153 +- u8 channel = dev->mt76.chandef.chan->hw_value;
2154 +- int is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
2155 +-
2156 +- mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
2157 +-
2158 +- mt76x0_vco_cal(dev, channel);
2159 +-
2160 +- tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0);
2161 +- mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
2162 +- usleep_range(500, 700);
2163 +-
2164 +- reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
2165 +- mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
2166 +-
2167 +- mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0, false);
2168 +-
2169 +- mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz, false);
2170 +- mt76x02_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz, false);
2171 +- mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false);
2172 +- mt76x02_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz, false);
2173 +- mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz, false);
2174 +- mt76x02_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz, false);
2175 +-
2176 +- mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
2177 +- mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
2178 +- msleep(100);
2179 +-
2180 +- mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false);
2181 +-}
2182 +-
2183 + static void mt76x0_temp_sensor(struct mt76x02_dev *dev)
2184 + {
2185 + u8 rf_b7_73, rf_b0_66, rf_b0_67;
2186 +@@ -817,10 +753,8 @@ done:
2187 + static void mt76x0_phy_set_gain_val(struct mt76x02_dev *dev)
2188 + {
2189 + u8 gain = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
2190 +- u32 val = 0x122c << 16 | 0xf2;
2191 +
2192 +- mt76_wr(dev, MT_BBP(AGC, 8),
2193 +- val | FIELD_PREP(MT_BBP_AGC_GAIN, gain));
2194 ++ mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, gain);
2195 + }
2196 +
2197 + static void
2198 +@@ -835,7 +769,8 @@ mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev)
2199 + low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
2200 + (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
2201 +
2202 +- gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2);
2203 ++ gain_change = dev->cal.low_gain < 0 ||
2204 ++ (dev->cal.low_gain & 2) ^ (low_gain & 2);
2205 + dev->cal.low_gain = low_gain;
2206 +
2207 + if (!gain_change) {
2208 +@@ -924,6 +859,7 @@ void mt76x0_phy_init(struct mt76x02_dev *dev)
2209 + {
2210 + INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibration_work);
2211 +
2212 ++ mt76x0_ant_select(dev);
2213 + mt76x0_rf_init(dev);
2214 + mt76x02_phy_set_rxpath(dev);
2215 + mt76x02_phy_set_txdac(dev);
2216 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
2217 +index a7fd36c2f633..ea517864186b 100644
2218 +--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
2219 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
2220 +@@ -117,6 +117,7 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
2221 + if (ret)
2222 + goto out;
2223 +
2224 ++ mt76x0_phy_calibrate(dev, true);
2225 + ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
2226 + MT_CALIBRATE_INTERVAL);
2227 + ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
2228 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
2229 +index 7806963b1905..9a5ae5c06840 100644
2230 +--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
2231 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
2232 +@@ -63,6 +63,7 @@ struct mt76x02_calibration {
2233 + bool tssi_comp_pending;
2234 + bool dpd_cal_done;
2235 + bool channel_cal_done;
2236 ++ bool gain_init_done;
2237 + };
2238 +
2239 + struct mt76x02_dev {
2240 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
2241 +index b3ec74835d10..1de041590050 100644
2242 +--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
2243 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
2244 +@@ -25,6 +25,7 @@ enum mt76x02_eeprom_field {
2245 + MT_EE_VERSION = 0x002,
2246 + MT_EE_MAC_ADDR = 0x004,
2247 + MT_EE_PCI_ID = 0x00A,
2248 ++ MT_EE_ANTENNA = 0x022,
2249 + MT_EE_NIC_CONF_0 = 0x034,
2250 + MT_EE_NIC_CONF_1 = 0x036,
2251 + MT_EE_COUNTRY_REGION_5GHZ = 0x038,
2252 +@@ -104,6 +105,8 @@ enum mt76x02_eeprom_field {
2253 + __MT_EE_MAX
2254 + };
2255 +
2256 ++#define MT_EE_ANTENNA_DUAL BIT(15)
2257 ++
2258 + #define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0)
2259 + #define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4)
2260 + #define MT_EE_NIC_CONF_0_PA_TYPE GENMASK(9, 8)
2261 +@@ -118,12 +121,9 @@ enum mt76x02_eeprom_field {
2262 + #define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3)
2263 + #define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13)
2264 +
2265 +-#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0)
2266 +-#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4)
2267 +-#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8)
2268 ++#define MT_EE_NIC_CONF_2_ANT_OPT BIT(3)
2269 ++#define MT_EE_NIC_CONF_2_ANT_DIV BIT(4)
2270 + #define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9)
2271 +-#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11)
2272 +-#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13)
2273 +
2274 + #define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \
2275 + MT_EE_USAGE_MAP_START + 1)
2276 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
2277 +index 0f1d7b5c9f68..977a8e7e26df 100644
2278 +--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
2279 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
2280 +@@ -254,5 +254,6 @@ void mt76x02_init_agc_gain(struct mt76x02_dev *dev)
2281 + memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
2282 + sizeof(dev->cal.agc_gain_cur));
2283 + dev->cal.low_gain = -1;
2284 ++ dev->cal.gain_init_done = true;
2285 + }
2286 + EXPORT_SYMBOL_GPL(mt76x02_init_agc_gain);
2287 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
2288 +index 1971a1b00038..9471b44ce558 100644
2289 +--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
2290 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
2291 +@@ -156,6 +156,9 @@ mt76x2u_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
2292 + struct mt76x02_dev *dev = hw->priv;
2293 +
2294 + clear_bit(MT76_SCANNING, &dev->mt76.state);
2295 ++
2296 ++ if (dev->cal.gain_init_done)
2297 ++ ieee80211_queue_delayed_work(hw, &dev->cal_work, 0);
2298 + }
2299 +
2300 + const struct ieee80211_ops mt76x2u_ops = {
2301 +diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
2302 +index 583086dd9cb9..bfc5ef6d85b7 100644
2303 +--- a/drivers/nvme/target/rdma.c
2304 ++++ b/drivers/nvme/target/rdma.c
2305 +@@ -139,6 +139,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
2306 + static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
2307 + static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
2308 + static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
2309 ++static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
2310 ++ struct nvmet_rdma_rsp *r);
2311 ++static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
2312 ++ struct nvmet_rdma_rsp *r);
2313 +
2314 + static const struct nvmet_fabrics_ops nvmet_rdma_ops;
2315 +
2316 +@@ -182,9 +186,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
2317 + spin_unlock_irqrestore(&queue->rsps_lock, flags);
2318 +
2319 + if (unlikely(!rsp)) {
2320 +- rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
2321 ++ int ret;
2322 ++
2323 ++ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
2324 + if (unlikely(!rsp))
2325 + return NULL;
2326 ++ ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
2327 ++ if (unlikely(ret)) {
2328 ++ kfree(rsp);
2329 ++ return NULL;
2330 ++ }
2331 ++
2332 + rsp->allocated = true;
2333 + }
2334 +
2335 +@@ -196,7 +208,8 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
2336 + {
2337 + unsigned long flags;
2338 +
2339 +- if (rsp->allocated) {
2340 ++ if (unlikely(rsp->allocated)) {
2341 ++ nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
2342 + kfree(rsp);
2343 + return;
2344 + }
2345 +diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
2346 +index 194ffd5c8580..039b2074db7e 100644
2347 +--- a/drivers/s390/char/sclp_config.c
2348 ++++ b/drivers/s390/char/sclp_config.c
2349 +@@ -60,7 +60,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
2350 +
2351 + static void __ref sclp_cpu_change_notify(struct work_struct *work)
2352 + {
2353 ++ lock_device_hotplug();
2354 + smp_rescan_cpus();
2355 ++ unlock_device_hotplug();
2356 + }
2357 +
2358 + static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
2359 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
2360 +index f1c57cd33b5b..1cb35ab8a4ec 100644
2361 +--- a/drivers/scsi/ufs/ufshcd.c
2362 ++++ b/drivers/scsi/ufs/ufshcd.c
2363 +@@ -110,13 +110,19 @@
2364 + int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
2365 + const char *prefix)
2366 + {
2367 +- u8 *regs;
2368 ++ u32 *regs;
2369 ++ size_t pos;
2370 ++
2371 ++ if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
2372 ++ return -EINVAL;
2373 +
2374 + regs = kzalloc(len, GFP_KERNEL);
2375 + if (!regs)
2376 + return -ENOMEM;
2377 +
2378 +- memcpy_fromio(regs, hba->mmio_base + offset, len);
2379 ++ for (pos = 0; pos < len; pos += 4)
2380 ++ regs[pos / 4] = ufshcd_readl(hba, offset + pos);
2381 ++
2382 + ufshcd_hex_dump(prefix, regs, len);
2383 + kfree(regs);
2384 +
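
[Note on the ufshcd hunk above: memcpy_fromio() over the register file is replaced by a loop of aligned 32-bit reads, since many MMIO regions only tolerate naturally aligned, fixed-width accesses, and unaligned offset/len are now rejected up front. The same shape in userspace form, with ufshcd_readl() replaced by a plain volatile load for illustration:

    #include <stdint.h>
    #include <stddef.h>

    static int dump_regs(const volatile uint32_t *mmio, size_t offset,
                         size_t len, uint32_t *out)
    {
        size_t pos;

        if (offset % 4 != 0 || len % 4 != 0)  /* keep 32-bit loads aligned */
            return -1;

        for (pos = 0; pos < len; pos += 4)
            out[pos / 4] = mmio[(offset + pos) / 4];  /* one word per read */
        return 0;
    }
]
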
2385 +diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
2386 +index 28cbd6b3d26c..dfee6985efa6 100644
2387 +--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
2388 ++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
2389 +@@ -35,6 +35,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
2390 + {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
2391 + {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
2392 + {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
2393 ++ {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
2394 + {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
2395 + {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
2396 + {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
2397 +diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
2398 +index dabb391909aa..bb63519db7ae 100644
2399 +--- a/drivers/tty/n_hdlc.c
2400 ++++ b/drivers/tty/n_hdlc.c
2401 +@@ -597,6 +597,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
2402 + /* too large for caller's buffer */
2403 + ret = -EOVERFLOW;
2404 + } else {
2405 ++ __set_current_state(TASK_RUNNING);
2406 + if (copy_to_user(buf, rbuf->buf, rbuf->count))
2407 + ret = -EFAULT;
2408 + else
2409 +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
2410 +index d4cca5bdaf1c..5c01bb6d1c24 100644
2411 +--- a/drivers/tty/serial/serial_core.c
2412 ++++ b/drivers/tty/serial/serial_core.c
2413 +@@ -550,10 +550,12 @@ static int uart_put_char(struct tty_struct *tty, unsigned char c)
2414 + int ret = 0;
2415 +
2416 + circ = &state->xmit;
2417 +- if (!circ->buf)
2418 ++ port = uart_port_lock(state, flags);
2419 ++ if (!circ->buf) {
2420 ++ uart_port_unlock(port, flags);
2421 + return 0;
2422 ++ }
2423 +
2424 +- port = uart_port_lock(state, flags);
2425 + if (port && uart_circ_chars_free(circ) != 0) {
2426 + circ->buf[circ->head] = c;
2427 + circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
2428 +@@ -586,11 +588,13 @@ static int uart_write(struct tty_struct *tty,
2429 + return -EL3HLT;
2430 + }
2431 +
2432 ++ port = uart_port_lock(state, flags);
2433 + circ = &state->xmit;
2434 +- if (!circ->buf)
2435 ++ if (!circ->buf) {
2436 ++ uart_port_unlock(port, flags);
2437 + return 0;
2438 ++ }
2439 +
2440 +- port = uart_port_lock(state, flags);
2441 + while (port) {
2442 + c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
2443 + if (count < c)
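
[Note on the serial_core hunks above: the `circ->buf` NULL test moves under the port lock, closing a window where the buffer is freed between an unlocked check and the later use. Check-then-use of shared state must happen under the same lock; a pthread sketch:

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static char *xmit_buf;           /* shared; may be freed by another thread */

    static int put_char(char c)
    {
        int ret = 0;

        pthread_mutex_lock(&lock);   /* take the lock first ...        */
        if (xmit_buf) {              /* ... then test the buffer ...   */
            xmit_buf[0] = c;         /* ... and use it, still locked   */
            ret = 1;
        }
        pthread_mutex_unlock(&lock);
        return ret;
    }
]
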
2444 +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
2445 +index 23c6fd238422..21ffcce16927 100644
2446 +--- a/drivers/tty/tty_io.c
2447 ++++ b/drivers/tty/tty_io.c
2448 +@@ -2189,7 +2189,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
2449 + ld = tty_ldisc_ref_wait(tty);
2450 + if (!ld)
2451 + return -EIO;
2452 +- ld->ops->receive_buf(tty, &ch, &mbz, 1);
2453 ++ if (ld->ops->receive_buf)
2454 ++ ld->ops->receive_buf(tty, &ch, &mbz, 1);
2455 + tty_ldisc_deref(ld);
2456 + return 0;
2457 + }
2458 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
2459 +index 41ec8e5010f3..bba75560d11e 100644
2460 +--- a/drivers/tty/vt/vt.c
2461 ++++ b/drivers/tty/vt/vt.c
2462 +@@ -1272,6 +1272,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
2463 + if (con_is_visible(vc))
2464 + update_screen(vc);
2465 + vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
2466 ++ notify_update(vc);
2467 + return err;
2468 + }
2469 +
2470 +@@ -2764,8 +2765,8 @@ rescan_last_byte:
2471 + con_flush(vc, draw_from, draw_to, &draw_x);
2472 + vc_uniscr_debug_check(vc);
2473 + console_conditional_schedule();
2474 +- console_unlock();
2475 + notify_update(vc);
2476 ++ console_unlock();
2477 + return n;
2478 + }
2479 +
2480 +@@ -2884,8 +2885,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
2481 + unsigned char c;
2482 + static DEFINE_SPINLOCK(printing_lock);
2483 + const ushort *start;
2484 +- ushort cnt = 0;
2485 +- ushort myx;
2486 ++ ushort start_x, cnt;
2487 + int kmsg_console;
2488 +
2489 + /* console busy or not yet initialized */
2490 +@@ -2898,10 +2898,6 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
2491 + if (kmsg_console && vc_cons_allocated(kmsg_console - 1))
2492 + vc = vc_cons[kmsg_console - 1].d;
2493 +
2494 +- /* read `x' only after setting currcons properly (otherwise
2495 +- the `x' macro will read the x of the foreground console). */
2496 +- myx = vc->vc_x;
2497 +-
2498 + if (!vc_cons_allocated(fg_console)) {
2499 + /* impossible */
2500 + /* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */
2501 +@@ -2916,53 +2912,41 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
2502 + hide_cursor(vc);
2503 +
2504 + start = (ushort *)vc->vc_pos;
2505 +-
2506 +- /* Contrived structure to try to emulate original need_wrap behaviour
2507 +- * Problems caused when we have need_wrap set on '\n' character */
2508 ++ start_x = vc->vc_x;
2509 ++ cnt = 0;
2510 + while (count--) {
2511 + c = *b++;
2512 + if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) {
2513 +- if (cnt > 0) {
2514 +- if (con_is_visible(vc))
2515 +- vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
2516 +- vc->vc_x += cnt;
2517 +- if (vc->vc_need_wrap)
2518 +- vc->vc_x--;
2519 +- cnt = 0;
2520 +- }
2521 ++ if (cnt && con_is_visible(vc))
2522 ++ vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
2523 ++ cnt = 0;
2524 + if (c == 8) { /* backspace */
2525 + bs(vc);
2526 + start = (ushort *)vc->vc_pos;
2527 +- myx = vc->vc_x;
2528 ++ start_x = vc->vc_x;
2529 + continue;
2530 + }
2531 + if (c != 13)
2532 + lf(vc);
2533 + cr(vc);
2534 + start = (ushort *)vc->vc_pos;
2535 +- myx = vc->vc_x;
2536 ++ start_x = vc->vc_x;
2537 + if (c == 10 || c == 13)
2538 + continue;
2539 + }
2540 ++ vc_uniscr_putc(vc, c);
2541 + scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos);
2542 + notify_write(vc, c);
2543 + cnt++;
2544 +- if (myx == vc->vc_cols - 1) {
2545 +- vc->vc_need_wrap = 1;
2546 +- continue;
2547 +- }
2548 +- vc->vc_pos += 2;
2549 +- myx++;
2550 +- }
2551 +- if (cnt > 0) {
2552 +- if (con_is_visible(vc))
2553 +- vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
2554 +- vc->vc_x += cnt;
2555 +- if (vc->vc_x == vc->vc_cols) {
2556 +- vc->vc_x--;
2557 ++ if (vc->vc_x == vc->vc_cols - 1) {
2558 + vc->vc_need_wrap = 1;
2559 ++ } else {
2560 ++ vc->vc_pos += 2;
2561 ++ vc->vc_x++;
2562 + }
2563 + }
2564 ++ if (cnt && con_is_visible(vc))
2565 ++ vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
2566 + set_cursor(vc);
2567 + notify_update(vc);
2568 +
2569 +diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
2570 +index dc7f7fd71684..c12ac56606c3 100644
2571 +--- a/drivers/usb/core/ledtrig-usbport.c
2572 ++++ b/drivers/usb/core/ledtrig-usbport.c
2573 +@@ -119,11 +119,6 @@ static const struct attribute_group ports_group = {
2574 + .attrs = ports_attrs,
2575 + };
2576 +
2577 +-static const struct attribute_group *ports_groups[] = {
2578 +- &ports_group,
2579 +- NULL
2580 +-};
2581 +-
2582 + /***************************************
2583 + * Adding & removing ports
2584 + ***************************************/
2585 +@@ -307,6 +302,7 @@ static int usbport_trig_notify(struct notifier_block *nb, unsigned long action,
2586 + static int usbport_trig_activate(struct led_classdev *led_cdev)
2587 + {
2588 + struct usbport_trig_data *usbport_data;
2589 ++ int err;
2590 +
2591 + usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL);
2592 + if (!usbport_data)
2593 +@@ -315,6 +311,9 @@ static int usbport_trig_activate(struct led_classdev *led_cdev)
2594 +
2595 + /* List of ports */
2596 + INIT_LIST_HEAD(&usbport_data->ports);
2597 ++ err = sysfs_create_group(&led_cdev->dev->kobj, &ports_group);
2598 ++ if (err)
2599 ++ goto err_free;
2600 + usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports);
2601 + usbport_trig_update_count(usbport_data);
2602 +
2603 +@@ -322,8 +321,11 @@ static int usbport_trig_activate(struct led_classdev *led_cdev)
2604 + usbport_data->nb.notifier_call = usbport_trig_notify;
2605 + led_set_trigger_data(led_cdev, usbport_data);
2606 + usb_register_notify(&usbport_data->nb);
2607 +-
2608 + return 0;
2609 ++
2610 ++err_free:
2611 ++ kfree(usbport_data);
2612 ++ return err;
2613 + }
2614 +
2615 + static void usbport_trig_deactivate(struct led_classdev *led_cdev)
2616 +@@ -335,6 +337,8 @@ static void usbport_trig_deactivate(struct led_classdev *led_cdev)
2617 + usbport_trig_remove_port(usbport_data, port);
2618 + }
2619 +
2620 ++ sysfs_remove_group(&led_cdev->dev->kobj, &ports_group);
2621 ++
2622 + usb_unregister_notify(&usbport_data->nb);
2623 +
2624 + kfree(usbport_data);
2625 +@@ -344,7 +348,6 @@ static struct led_trigger usbport_led_trigger = {
2626 + .name = "usbport",
2627 + .activate = usbport_trig_activate,
2628 + .deactivate = usbport_trig_deactivate,
2629 +- .groups = ports_groups,
2630 + };
2631 +
2632 + static int __init usbport_trig_init(void)
2633 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
2634 +index 9f92ee03dde7..2a4ea9a1b1e3 100644
2635 +--- a/drivers/usb/dwc3/gadget.c
2636 ++++ b/drivers/usb/dwc3/gadget.c
2637 +@@ -177,6 +177,8 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
2638 + req->started = false;
2639 + list_del(&req->list);
2640 + req->remaining = 0;
2641 ++ req->unaligned = false;
2642 ++ req->zero = false;
2643 +
2644 + if (req->request.status == -EINPROGRESS)
2645 + req->request.status = status;
2646 +diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
2647 +index f26109eafdbf..66ec1fdf9fe7 100644
2648 +--- a/drivers/usb/host/ehci-mv.c
2649 ++++ b/drivers/usb/host/ehci-mv.c
2650 +@@ -302,3 +302,4 @@ MODULE_AUTHOR("Chao Xie <chao.xie@×××××××.com>");
2651 + MODULE_AUTHOR("Neil Zhang <zhangwm@×××××××.com>");
2652 + MODULE_ALIAS("mv-ehci");
2653 + MODULE_LICENSE("GPL");
2654 ++MODULE_DEVICE_TABLE(of, ehci_mv_dt_ids);
2655 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2656 +index 609198d9594c..f459c1a18156 100644
2657 +--- a/drivers/usb/serial/ftdi_sio.c
2658 ++++ b/drivers/usb/serial/ftdi_sio.c
2659 +@@ -1783,6 +1783,10 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
2660 + int result;
2661 + u16 val;
2662 +
2663 ++ result = usb_autopm_get_interface(serial->interface);
2664 ++ if (result)
2665 ++ return result;
2666 ++
2667 + val = (mode << 8) | (priv->gpio_output << 4) | priv->gpio_value;
2668 + result = usb_control_msg(serial->dev,
2669 + usb_sndctrlpipe(serial->dev, 0),
2670 +@@ -1795,6 +1799,8 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
2671 + val, result);
2672 + }
2673 +
2674 ++ usb_autopm_put_interface(serial->interface);
2675 ++
2676 + return result;
2677 + }
2678 +
2679 +@@ -1846,9 +1852,15 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port)
2680 + unsigned char *buf;
2681 + int result;
2682 +
2683 ++ result = usb_autopm_get_interface(serial->interface);
2684 ++ if (result)
2685 ++ return result;
2686 ++
2687 + buf = kmalloc(1, GFP_KERNEL);
2688 +- if (!buf)
2689 ++ if (!buf) {
2690 ++ usb_autopm_put_interface(serial->interface);
2691 + return -ENOMEM;
2692 ++ }
2693 +
2694 + result = usb_control_msg(serial->dev,
2695 + usb_rcvctrlpipe(serial->dev, 0),
2696 +@@ -1863,6 +1875,7 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port)
2697 + }
2698 +
2699 + kfree(buf);
2700 ++ usb_autopm_put_interface(serial->interface);
2701 +
2702 + return result;
2703 + }
2704 +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
2705 +index 98e7a5df0f6d..bb3f9aa4a909 100644
2706 +--- a/drivers/usb/serial/pl2303.c
2707 ++++ b/drivers/usb/serial/pl2303.c
2708 +@@ -46,6 +46,7 @@ static const struct usb_device_id id_table[] = {
2709 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
2710 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
2711 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
2712 ++ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
2713 + { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
2714 + { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
2715 + { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
2716 +diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
2717 +index 4e2554d55362..559941ca884d 100644
2718 +--- a/drivers/usb/serial/pl2303.h
2719 ++++ b/drivers/usb/serial/pl2303.h
2720 +@@ -8,6 +8,7 @@
2721 +
2722 + #define PL2303_VENDOR_ID 0x067b
2723 + #define PL2303_PRODUCT_ID 0x2303
2724 ++#define PL2303_PRODUCT_ID_TB 0x2304
2725 + #define PL2303_PRODUCT_ID_RSAQ2 0x04bb
2726 + #define PL2303_PRODUCT_ID_DCU11 0x1234
2727 + #define PL2303_PRODUCT_ID_PHAROS 0xaaa0
2728 +@@ -20,6 +21,7 @@
2729 + #define PL2303_PRODUCT_ID_MOTOROLA 0x0307
2730 + #define PL2303_PRODUCT_ID_ZTEK 0xe1f1
2731 +
2732 ++
2733 + #define ATEN_VENDOR_ID 0x0557
2734 + #define ATEN_VENDOR_ID2 0x0547
2735 + #define ATEN_PRODUCT_ID 0x2008
2736 +diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
2737 +index 4d0273508043..edbbb13d6de6 100644
2738 +--- a/drivers/usb/serial/usb-serial-simple.c
2739 ++++ b/drivers/usb/serial/usb-serial-simple.c
2740 +@@ -85,7 +85,8 @@ DEVICE(moto_modem, MOTO_IDS);
2741 + /* Motorola Tetra driver */
2742 + #define MOTOROLA_TETRA_IDS() \
2743 + { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
2744 +- { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */
2745 ++ { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
2746 ++ { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */
2747 + DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
2748 +
2749 + /* Novatel Wireless GPS driver */
2750 +diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
2751 +index ad7a6f475a44..784df2b49628 100644
2752 +--- a/drivers/vhost/net.c
2753 ++++ b/drivers/vhost/net.c
2754 +@@ -1192,7 +1192,8 @@ static void handle_rx(struct vhost_net *net)
2755 + if (nvq->done_idx > VHOST_NET_BATCH)
2756 + vhost_net_signal_used(nvq);
2757 + if (unlikely(vq_log))
2758 +- vhost_log_write(vq, vq_log, log, vhost_len);
2759 ++ vhost_log_write(vq, vq_log, log, vhost_len,
2760 ++ vq->iov, in);
2761 + total_len += vhost_len;
2762 + if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
2763 + vhost_poll_queue(&vq->poll);
2764 +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
2765 +index 55e5aa662ad5..c66fc8308b5e 100644
2766 +--- a/drivers/vhost/vhost.c
2767 ++++ b/drivers/vhost/vhost.c
2768 +@@ -1733,13 +1733,87 @@ static int log_write(void __user *log_base,
2769 + return r;
2770 + }
2771 +
2772 ++static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
2773 ++{
2774 ++ struct vhost_umem *umem = vq->umem;
2775 ++ struct vhost_umem_node *u;
2776 ++ u64 start, end, l, min;
2777 ++ int r;
2778 ++ bool hit = false;
2779 ++
2780 ++ while (len) {
2781 ++ min = len;
2782 ++ /* More than one GPA can be mapped into a single HVA. So
2783 ++ * iterate all possible umems here to be safe.
2784 ++ */
2785 ++ list_for_each_entry(u, &umem->umem_list, link) {
2786 ++ if (u->userspace_addr > hva - 1 + len ||
2787 ++ u->userspace_addr - 1 + u->size < hva)
2788 ++ continue;
2789 ++ start = max(u->userspace_addr, hva);
2790 ++ end = min(u->userspace_addr - 1 + u->size,
2791 ++ hva - 1 + len);
2792 ++ l = end - start + 1;
2793 ++ r = log_write(vq->log_base,
2794 ++ u->start + start - u->userspace_addr,
2795 ++ l);
2796 ++ if (r < 0)
2797 ++ return r;
2798 ++ hit = true;
2799 ++ min = min(l, min);
2800 ++ }
2801 ++
2802 ++ if (!hit)
2803 ++ return -EFAULT;
2804 ++
2805 ++ len -= min;
2806 ++ hva += min;
2807 ++ }
2808 ++
2809 ++ return 0;
2810 ++}
2811 ++
2812 ++static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
2813 ++{
2814 ++ struct iovec iov[64];
2815 ++ int i, ret;
2816 ++
2817 ++ if (!vq->iotlb)
2818 ++ return log_write(vq->log_base, vq->log_addr + used_offset, len);
2819 ++
2820 ++ ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
2821 ++ len, iov, 64, VHOST_ACCESS_WO);
2822 ++ if (ret)
2823 ++ return ret;
2824 ++
2825 ++ for (i = 0; i < ret; i++) {
2826 ++ ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
2827 ++ iov[i].iov_len);
2828 ++ if (ret)
2829 ++ return ret;
2830 ++ }
2831 ++
2832 ++ return 0;
2833 ++}
2834 ++
2835 + int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
2836 +- unsigned int log_num, u64 len)
2837 ++ unsigned int log_num, u64 len, struct iovec *iov, int count)
2838 + {
2839 + int i, r;
2840 +
2841 + /* Make sure data written is seen before log. */
2842 + smp_wmb();
2843 ++
2844 ++ if (vq->iotlb) {
2845 ++ for (i = 0; i < count; i++) {
2846 ++ r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
2847 ++ iov[i].iov_len);
2848 ++ if (r < 0)
2849 ++ return r;
2850 ++ }
2851 ++ return 0;
2852 ++ }
2853 ++
2854 + for (i = 0; i < log_num; ++i) {
2855 + u64 l = min(log[i].len, len);
2856 + r = log_write(vq->log_base, log[i].addr, l);
2857 +@@ -1769,9 +1843,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
2858 + smp_wmb();
2859 + /* Log used flag write. */
2860 + used = &vq->used->flags;
2861 +- log_write(vq->log_base, vq->log_addr +
2862 +- (used - (void __user *)vq->used),
2863 +- sizeof vq->used->flags);
2864 ++ log_used(vq, (used - (void __user *)vq->used),
2865 ++ sizeof vq->used->flags);
2866 + if (vq->log_ctx)
2867 + eventfd_signal(vq->log_ctx, 1);
2868 + }
2869 +@@ -1789,9 +1862,8 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
2870 + smp_wmb();
2871 + /* Log avail event write */
2872 + used = vhost_avail_event(vq);
2873 +- log_write(vq->log_base, vq->log_addr +
2874 +- (used - (void __user *)vq->used),
2875 +- sizeof *vhost_avail_event(vq));
2876 ++ log_used(vq, (used - (void __user *)vq->used),
2877 ++ sizeof *vhost_avail_event(vq));
2878 + if (vq->log_ctx)
2879 + eventfd_signal(vq->log_ctx, 1);
2880 + }
2881 +@@ -2191,10 +2263,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
2882 + /* Make sure data is seen before log. */
2883 + smp_wmb();
2884 + /* Log used ring entry write. */
2885 +- log_write(vq->log_base,
2886 +- vq->log_addr +
2887 +- ((void __user *)used - (void __user *)vq->used),
2888 +- count * sizeof *used);
2889 ++ log_used(vq, ((void __user *)used - (void __user *)vq->used),
2890 ++ count * sizeof *used);
2891 + }
2892 + old = vq->last_used_idx;
2893 + new = (vq->last_used_idx += count);
2894 +@@ -2236,9 +2306,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
2895 + /* Make sure used idx is seen before log. */
2896 + smp_wmb();
2897 + /* Log used index update. */
2898 +- log_write(vq->log_base,
2899 +- vq->log_addr + offsetof(struct vring_used, idx),
2900 +- sizeof vq->used->idx);
2901 ++ log_used(vq, offsetof(struct vring_used, idx),
2902 ++ sizeof vq->used->idx);
2903 + if (vq->log_ctx)
2904 + eventfd_signal(vq->log_ctx, 1);
2905 + }
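
[Note on the vhost hunks above: with an IOTLB, used-ring updates land at guest addresses rather than at the offsets recorded in the log array, so the new log_used() first translates the write into host ranges and log_write_hva() then walks every region overlapping each range, since one HVA can back several GPAs. The core of that walk is interval intersection; a compilable sketch with made-up region types (the real code also uses the `- 1 +` comparisons to stay safe against address-arithmetic overflow, which this simplified form does not):

    #include <stdint.h>
    #include <stddef.h>

    struct region { uint64_t hva_start, size, gpa_start; };

    /* Log every byte of [hva, hva+len) through whichever regions cover it.
     * Returns 0 on success, -1 if some byte is unmapped. */
    static int log_range(const struct region *r, size_t n,
                         uint64_t hva, uint64_t len,
                         void (*log)(uint64_t gpa, uint64_t len))
    {
        while (len) {
            uint64_t min = len;
            int hit = 0;

            for (size_t i = 0; i < n; i++) {
                uint64_t r_end = r[i].hva_start + r[i].size;  /* exclusive */

                if (hva >= r_end || r[i].hva_start >= hva + len)
                    continue;                      /* no overlap */
                uint64_t start = hva > r[i].hva_start ? hva : r[i].hva_start;
                uint64_t end = hva + len < r_end ? hva + len : r_end;

                log(r[i].gpa_start + (start - r[i].hva_start), end - start);
                hit = 1;
                if (end - start < min)
                    min = end - start;
            }
            if (!hit)
                return -1;    /* a byte with no backing region */
            len -= min;       /* advance past the shortest covered span */
            hva += min;
        }
        return 0;
    }
]
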
2906 +diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
2907 +index 466ef7542291..1b675dad5e05 100644
2908 +--- a/drivers/vhost/vhost.h
2909 ++++ b/drivers/vhost/vhost.h
2910 +@@ -205,7 +205,8 @@ bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
2911 + bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
2912 +
2913 + int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
2914 +- unsigned int log_num, u64 len);
2915 ++ unsigned int log_num, u64 len,
2916 ++ struct iovec *iov, int count);
2917 + int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
2918 +
2919 + struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
2920 +diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
2921 +index 09731b2f6815..c6b3bdbbdbc9 100644
2922 +--- a/drivers/video/console/vgacon.c
2923 ++++ b/drivers/video/console/vgacon.c
2924 +@@ -271,6 +271,7 @@ static void vgacon_scrollback_update(struct vc_data *c, int t, int count)
2925 +
2926 + static void vgacon_restore_screen(struct vc_data *c)
2927 + {
2928 ++ c->vc_origin = c->vc_visible_origin;
2929 + vgacon_scrollback_cur->save = 0;
2930 +
2931 + if (!vga_is_gfx && !vgacon_scrollback_cur->restore) {
2932 +@@ -287,8 +288,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
2933 + int start, end, count, soff;
2934 +
2935 + if (!lines) {
2936 +- c->vc_visible_origin = c->vc_origin;
2937 +- vga_set_mem_top(c);
2938 ++ vgacon_restore_screen(c);
2939 + return;
2940 + }
2941 +
2942 +@@ -298,6 +298,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
2943 + if (!vgacon_scrollback_cur->save) {
2944 + vgacon_cursor(c, CM_ERASE);
2945 + vgacon_save_screen(c);
2946 ++ c->vc_origin = (unsigned long)c->vc_screenbuf;
2947 + vgacon_scrollback_cur->save = 1;
2948 + }
2949 +
2950 +@@ -335,7 +336,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
2951 + int copysize;
2952 +
2953 + int diff = c->vc_rows - count;
2954 +- void *d = (void *) c->vc_origin;
2955 ++ void *d = (void *) c->vc_visible_origin;
2956 + void *s = (void *) c->vc_screenbuf;
2957 +
2958 + count *= c->vc_size_row;
2959 +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
2960 +index a58666a3f8dd..08aaf580fa1c 100644
2961 +--- a/fs/ceph/caps.c
2962 ++++ b/fs/ceph/caps.c
2963 +@@ -1032,6 +1032,8 @@ static void drop_inode_snap_realm(struct ceph_inode_info *ci)
2964 + list_del_init(&ci->i_snap_realm_item);
2965 + ci->i_snap_realm_counter++;
2966 + ci->i_snap_realm = NULL;
2967 ++ if (realm->ino == ci->i_vino.ino)
2968 ++ realm->inode = NULL;
2969 + spin_unlock(&realm->inodes_with_caps_lock);
2970 + ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc,
2971 + realm);
2972 +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
2973 +index f82fd342bca5..fce610f6cd24 100644
2974 +--- a/fs/cifs/cifssmb.c
2975 ++++ b/fs/cifs/cifssmb.c
2976 +@@ -1458,18 +1458,26 @@ cifs_discard_remaining_data(struct TCP_Server_Info *server)
2977 + }
2978 +
2979 + static int
2980 +-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2981 ++__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
2982 ++ bool malformed)
2983 + {
2984 + int length;
2985 +- struct cifs_readdata *rdata = mid->callback_data;
2986 +
2987 + length = cifs_discard_remaining_data(server);
2988 +- dequeue_mid(mid, rdata->result);
2989 ++ dequeue_mid(mid, malformed);
2990 + mid->resp_buf = server->smallbuf;
2991 + server->smallbuf = NULL;
2992 + return length;
2993 + }
2994 +
2995 ++static int
2996 ++cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2997 ++{
2998 ++ struct cifs_readdata *rdata = mid->callback_data;
2999 ++
3000 ++ return __cifs_readv_discard(server, mid, rdata->result);
3001 ++}
3002 ++
3003 + int
3004 + cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
3005 + {
3006 +@@ -1511,12 +1519,23 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
3007 + return -1;
3008 + }
3009 +
3010 ++ /* set up first two iov for signature check and to get credits */
3011 ++ rdata->iov[0].iov_base = buf;
3012 ++ rdata->iov[0].iov_len = 4;
3013 ++ rdata->iov[1].iov_base = buf + 4;
3014 ++ rdata->iov[1].iov_len = server->total_read - 4;
3015 ++ cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
3016 ++ rdata->iov[0].iov_base, rdata->iov[0].iov_len);
3017 ++ cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
3018 ++ rdata->iov[1].iov_base, rdata->iov[1].iov_len);
3019 ++
3020 + /* Was the SMB read successful? */
3021 + rdata->result = server->ops->map_error(buf, false);
3022 + if (rdata->result != 0) {
3023 + cifs_dbg(FYI, "%s: server returned error %d\n",
3024 + __func__, rdata->result);
3025 +- return cifs_readv_discard(server, mid);
3026 ++ /* normal error on read response */
3027 ++ return __cifs_readv_discard(server, mid, false);
3028 + }
3029 +
3030 + /* Is there enough to get to the rest of the READ_RSP header? */
3031 +@@ -1560,14 +1579,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
3032 + server->total_read += length;
3033 + }
3034 +
3035 +- /* set up first iov for signature check */
3036 +- rdata->iov[0].iov_base = buf;
3037 +- rdata->iov[0].iov_len = 4;
3038 +- rdata->iov[1].iov_base = buf + 4;
3039 +- rdata->iov[1].iov_len = server->total_read - 4;
3040 +- cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n",
3041 +- rdata->iov[0].iov_base, server->total_read);
3042 +-
3043 + /* how much data is in the response? */
3044 + #ifdef CONFIG_CIFS_SMB_DIRECT
3045 + use_rdma_mr = rdata->mr;
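[Two things happen in the cifssmb.c hunks: the iov setup moves ahead of the error check so credits can still be parsed out of a failed read response, and cifs_readv_discard() is split so the malformed flag becomes an explicit parameter of a new __cifs_readv_discard() core. The shape of that refactor, reduced to stub types:]

    #include <stdbool.h>

    struct server   { int pending; };
    struct readdata { bool result; };
    struct mid      { void *callback_data; bool malformed; };

    /* Stand-ins for cifs_discard_remaining_data()/dequeue_mid(). */
    static int  discard_remaining(struct server *srv) { srv->pending = 0; return 0; }
    static void dequeue(struct mid *mid, bool malformed) { mid->malformed = malformed; }

    /* New core: the caller states whether the response was malformed. */
    static int __readv_discard(struct server *srv, struct mid *mid, bool malformed)
    {
        int length = discard_remaining(srv);

        dequeue(mid, malformed);
        return length;
    }

    /* Old entry point survives as a thin wrapper with the old behaviour. */
    static int readv_discard(struct server *srv, struct mid *mid)
    {
        struct readdata *rdata = mid->callback_data;

        return __readv_discard(srv, mid, rdata->result);
    }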
3046 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
3047 +index 6f24f129a751..b83ab72cf855 100644
3048 +--- a/fs/cifs/connect.c
3049 ++++ b/fs/cifs/connect.c
3050 +@@ -534,6 +534,21 @@ server_unresponsive(struct TCP_Server_Info *server)
3051 + return false;
3052 + }
3053 +
3054 ++static inline bool
3055 ++zero_credits(struct TCP_Server_Info *server)
3056 ++{
3057 ++ int val;
3058 ++
3059 ++ spin_lock(&server->req_lock);
3060 ++ val = server->credits + server->echo_credits + server->oplock_credits;
3061 ++ if (server->in_flight == 0 && val == 0) {
3062 ++ spin_unlock(&server->req_lock);
3063 ++ return true;
3064 ++ }
3065 ++ spin_unlock(&server->req_lock);
3066 ++ return false;
3067 ++}
3068 ++
3069 + static int
3070 + cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
3071 + {
3072 +@@ -546,6 +561,12 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
3073 + for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
3074 + try_to_freeze();
3075 +
3076 ++ /* reconnect if no credits and no requests in flight */
3077 ++ if (zero_credits(server)) {
3078 ++ cifs_reconnect(server);
3079 ++ return -ECONNABORTED;
3080 ++ }
3081 ++
3082 + if (server_unresponsive(server))
3083 + return -ECONNABORTED;
3084 + if (cifs_rdma_enabled(server) && server->smbd_conn)
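[zero_credits() closes a livelock: once the server has granted no credits of any kind and nothing is in flight, no response can ever arrive to replenish them, so the only way forward is a reconnect. The same test in a userspace sketch, with a pthread mutex standing in for the req_lock spinlock:]

    #include <pthread.h>
    #include <stdbool.h>

    struct srv {
        pthread_mutex_t req_lock;
        int credits, echo_credits, oplock_credits, in_flight;
    };

    static bool zero_credits(struct srv *s)
    {
        bool stuck;

        pthread_mutex_lock(&s->req_lock);
        stuck = s->in_flight == 0 &&
                s->credits + s->echo_credits + s->oplock_credits == 0;
        pthread_mutex_unlock(&s->req_lock);
        return stuck;   /* true: tear down and reconnect */
    }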
3085 +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
3086 +index 6a9c47541c53..7b8b58fb4d3f 100644
3087 +--- a/fs/cifs/smb2misc.c
3088 ++++ b/fs/cifs/smb2misc.c
3089 +@@ -648,6 +648,13 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
3090 + if (rsp->sync_hdr.Command != SMB2_OPLOCK_BREAK)
3091 + return false;
3092 +
3093 ++ if (rsp->sync_hdr.CreditRequest) {
3094 ++ spin_lock(&server->req_lock);
3095 ++ server->credits += le16_to_cpu(rsp->sync_hdr.CreditRequest);
3096 ++ spin_unlock(&server->req_lock);
3097 ++ wake_up(&server->request_q);
3098 ++ }
3099 ++
3100 + if (rsp->StructureSize !=
3101 + smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
3102 + if (le16_to_cpu(rsp->StructureSize) == 44)
3103 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3104 +index 391b40e91910..d7dd7d38fad6 100644
3105 +--- a/fs/cifs/smb2ops.c
3106 ++++ b/fs/cifs/smb2ops.c
3107 +@@ -34,6 +34,7 @@
3108 + #include "cifs_ioctl.h"
3109 + #include "smbdirect.h"
3110 +
3111 ++/* Change credits for different ops and return the total number of credits */
3112 + static int
3113 + change_conf(struct TCP_Server_Info *server)
3114 + {
3115 +@@ -41,17 +42,15 @@ change_conf(struct TCP_Server_Info *server)
3116 + server->oplock_credits = server->echo_credits = 0;
3117 + switch (server->credits) {
3118 + case 0:
3119 +- return -1;
3120 ++ return 0;
3121 + case 1:
3122 + server->echoes = false;
3123 + server->oplocks = false;
3124 +- cifs_dbg(VFS, "disabling echoes and oplocks\n");
3125 + break;
3126 + case 2:
3127 + server->echoes = true;
3128 + server->oplocks = false;
3129 + server->echo_credits = 1;
3130 +- cifs_dbg(FYI, "disabling oplocks\n");
3131 + break;
3132 + default:
3133 + server->echoes = true;
3134 +@@ -64,14 +63,15 @@ change_conf(struct TCP_Server_Info *server)
3135 + server->echo_credits = 1;
3136 + }
3137 + server->credits -= server->echo_credits + server->oplock_credits;
3138 +- return 0;
3139 ++ return server->credits + server->echo_credits + server->oplock_credits;
3140 + }
3141 +
3142 + static void
3143 + smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
3144 + const int optype)
3145 + {
3146 +- int *val, rc = 0;
3147 ++ int *val, rc = -1;
3148 ++
3149 + spin_lock(&server->req_lock);
3150 + val = server->ops->get_credits_field(server, optype);
3151 +
3152 +@@ -101,8 +101,26 @@ smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
3153 + }
3154 + spin_unlock(&server->req_lock);
3155 + wake_up(&server->request_q);
3156 +- if (rc)
3157 +- cifs_reconnect(server);
3158 ++
3159 ++ if (server->tcpStatus == CifsNeedReconnect)
3160 ++ return;
3161 ++
3162 ++ switch (rc) {
3163 ++ case -1:
3164 ++ /* change_conf hasn't been executed */
3165 ++ break;
3166 ++ case 0:
3167 ++ cifs_dbg(VFS, "Possible client or server bug - zero credits\n");
3168 ++ break;
3169 ++ case 1:
3170 ++ cifs_dbg(VFS, "disabling echoes and oplocks\n");
3171 ++ break;
3172 ++ case 2:
3173 ++ cifs_dbg(FYI, "disabling oplocks\n");
3174 ++ break;
3175 ++ default:
3176 ++ cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
3177 ++ }
3178 + }
3179 +
3180 + static void
3181 +@@ -165,14 +183,14 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
3182 +
3183 + scredits = server->credits;
3184 + /* can deadlock with reopen */
3185 +- if (scredits == 1) {
3186 ++ if (scredits <= 8) {
3187 + *num = SMB2_MAX_BUFFER_SIZE;
3188 + *credits = 0;
3189 + break;
3190 + }
3191 +
3192 +- /* leave one credit for a possible reopen */
3193 +- scredits--;
3194 ++ /* leave some credits for reopen and other ops */
3195 ++ scredits -= 8;
3196 + *num = min_t(unsigned int, size,
3197 + scredits * SMB2_MAX_BUFFER_SIZE);
3198 +
3199 +@@ -3101,11 +3119,23 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
3200 + server->ops->is_status_pending(buf, server, 0))
3201 + return -1;
3202 +
3203 +- rdata->result = server->ops->map_error(buf, false);
3204 ++ /* set up first two iov to get credits */
3205 ++ rdata->iov[0].iov_base = buf;
3206 ++ rdata->iov[0].iov_len = 4;
3207 ++ rdata->iov[1].iov_base = buf + 4;
3208 ++ rdata->iov[1].iov_len =
3209 ++ min_t(unsigned int, buf_len, server->vals->read_rsp_size) - 4;
3210 ++ cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
3211 ++ rdata->iov[0].iov_base, rdata->iov[0].iov_len);
3212 ++ cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
3213 ++ rdata->iov[1].iov_base, rdata->iov[1].iov_len);
3214 ++
3215 ++ rdata->result = server->ops->map_error(buf, true);
3216 + if (rdata->result != 0) {
3217 + cifs_dbg(FYI, "%s: server returned error %d\n",
3218 + __func__, rdata->result);
3219 +- dequeue_mid(mid, rdata->result);
3220 ++ /* normal error on read response */
3221 ++ dequeue_mid(mid, false);
3222 + return 0;
3223 + }
3224 +
3225 +@@ -3178,14 +3208,6 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
3226 + return 0;
3227 + }
3228 +
3229 +- /* set up first iov for signature check */
3230 +- rdata->iov[0].iov_base = buf;
3231 +- rdata->iov[0].iov_len = 4;
3232 +- rdata->iov[1].iov_base = buf + 4;
3233 +- rdata->iov[1].iov_len = server->vals->read_rsp_size - 4;
3234 +- cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
3235 +- rdata->iov[0].iov_base, server->vals->read_rsp_size);
3236 +-
3237 + length = rdata->copy_into_pages(server, rdata, &iter);
3238 +
3239 + kfree(bvec);
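[change_conf() now returns the resulting credit total instead of an error code, smb2_add_credits() turns that into log messages rather than an immediate reconnect, and smb2_wait_mtu_credits() keeps eight credits in reserve instead of one. A compact model of the reworked accounting follows; it assumes, as the hunks above show, that echo and oplock each get one reserved credit once enough credits exist:]

    #include <stdbool.h>

    struct creds { int total, echo, oplock; bool echoes, oplocks; };

    static int change_conf(struct creds *c)
    {
        c->echo = c->oplock = 0;
        switch (c->total) {
        case 0:
            return 0;                 /* starved: caller logs a warning */
        case 1:
            c->echoes = false;
            c->oplocks = false;
            break;
        case 2:
            c->echoes = true;
            c->oplocks = false;
            c->echo = 1;
            break;
        default:
            c->echoes = true;
            c->oplocks = true;
            c->echo = 1;
            c->oplock = 1;
        }
        c->total -= c->echo + c->oplock;
        return c->total + c->echo + c->oplock;  /* grand total for logging */
    }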
3240 +diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
3241 +index 105576daca4a..798f1253141a 100644
3242 +--- a/fs/notify/inotify/inotify_user.c
3243 ++++ b/fs/notify/inotify/inotify_user.c
3244 +@@ -724,8 +724,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
3245 + return -EBADF;
3246 +
3247 + /* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
3248 +- if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE)))
3249 +- return -EINVAL;
3250 ++ if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
3251 ++ ret = -EINVAL;
3252 ++ goto fput_and_out;
3253 ++ }
3254 +
3255 + /* verify that this is indeed an inotify instance */
3256 + if (unlikely(f.file->f_op != &inotify_fops)) {
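[The inotify fix is a reference-leak repair: the IN_MASK_ADD/IN_MASK_CREATE check sits after fdget(), so returning directly leaked the file reference, and routing the error through fput_and_out releases it. The pattern in miniature, with stand-in names:]

    #include <errno.h>

    struct res { int refs; };

    static int do_watch(struct res *r, unsigned int mask, unsigned int bad_combo)
    {
        int ret = 0;

        r->refs++;                 /* fdget() stand-in: reference taken */
        if ((mask & bad_combo) == bad_combo) {
            ret = -EINVAL;
            goto put_and_out;      /* a bare `return` here would leak */
        }
        /* ... add or update the watch ... */
    put_and_out:
        r->refs--;                 /* fdput() stand-in: reference dropped */
        return ret;
    }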
3257 +diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
3258 +index d93e89761a8b..a6349a29748c 100644
3259 +--- a/include/linux/bpf_verifier.h
3260 ++++ b/include/linux/bpf_verifier.h
3261 +@@ -147,6 +147,7 @@ struct bpf_verifier_state {
3262 + /* call stack tracking */
3263 + struct bpf_func_state *frame[MAX_CALL_FRAMES];
3264 + u32 curframe;
3265 ++ bool speculative;
3266 + };
3267 +
3268 + #define bpf_get_spilled_reg(slot, frame) \
3269 +@@ -166,15 +167,25 @@ struct bpf_verifier_state_list {
3270 + struct bpf_verifier_state_list *next;
3271 + };
3272 +
3273 ++/* Possible states for alu_state member. */
3274 ++#define BPF_ALU_SANITIZE_SRC 1U
3275 ++#define BPF_ALU_SANITIZE_DST 2U
3276 ++#define BPF_ALU_NEG_VALUE (1U << 2)
3277 ++#define BPF_ALU_NON_POINTER (1U << 3)
3278 ++#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
3279 ++ BPF_ALU_SANITIZE_DST)
3280 ++
3281 + struct bpf_insn_aux_data {
3282 + union {
3283 + enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
3284 + unsigned long map_state; /* pointer/poison value for maps */
3285 + s32 call_imm; /* saved imm field of call insn */
3286 ++ u32 alu_limit; /* limit for add/sub register with pointer */
3287 + };
3288 + int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
3289 + int sanitize_stack_off; /* stack slot to be cleared */
3290 + bool seen; /* this insn was processed by the verifier */
3291 ++ u8 alu_state; /* used in combination with alu_limit */
3292 + };
3293 +
3294 + #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
3295 +@@ -210,6 +221,8 @@ struct bpf_subprog_info {
3296 + * one verifier_env per bpf_check() call
3297 + */
3298 + struct bpf_verifier_env {
3299 ++ u32 insn_idx;
3300 ++ u32 prev_insn_idx;
3301 + struct bpf_prog *prog; /* eBPF program being verified */
3302 + const struct bpf_verifier_ops *ops;
3303 + struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
3304 +diff --git a/include/linux/filter.h b/include/linux/filter.h
3305 +index 25a556589ae8..b776626aeb84 100644
3306 +--- a/include/linux/filter.h
3307 ++++ b/include/linux/filter.h
3308 +@@ -53,14 +53,10 @@ struct sock_reuseport;
3309 + #define BPF_REG_D BPF_REG_8 /* data, callee-saved */
3310 + #define BPF_REG_H BPF_REG_9 /* hlen, callee-saved */
3311 +
3312 +-/* Kernel hidden auxiliary/helper register for hardening step.
3313 +- * Only used by eBPF JITs. It's nothing more than a temporary
3314 +- * register that JITs use internally, only that here it's part
3315 +- * of eBPF instructions that have been rewritten for blinding
3316 +- * constants. See JIT pre-step in bpf_jit_blind_constants().
3317 +- */
3318 ++/* Kernel hidden auxiliary/helper register. */
3319 + #define BPF_REG_AX MAX_BPF_REG
3320 +-#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
3321 ++#define MAX_BPF_EXT_REG (MAX_BPF_REG + 1)
3322 ++#define MAX_BPF_JIT_REG MAX_BPF_EXT_REG
3323 +
3324 + /* unused opcode to mark special call to bpf_tail_call() helper */
3325 + #define BPF_TAIL_CALL 0xf0
3326 +diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
3327 +index 14131b6fae68..dcb6977afce9 100644
3328 +--- a/include/linux/hyperv.h
3329 ++++ b/include/linux/hyperv.h
3330 +@@ -830,15 +830,6 @@ struct vmbus_channel {
3331 + * All Sub-channels of a primary channel are linked here.
3332 + */
3333 + struct list_head sc_list;
3334 +- /*
3335 +- * Current number of sub-channels.
3336 +- */
3337 +- int num_sc;
3338 +- /*
3339 +- * Number of a sub-channel (position within sc_list) which is supposed
3340 +- * to be used as the next outgoing channel.
3341 +- */
3342 +- int next_oc;
3343 + /*
3344 + * The primary channel this sub-channel belongs to.
3345 + * This will be NULL for the primary channel.
3346 +@@ -972,14 +963,6 @@ void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
3347 + void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
3348 + void (*chn_rescind_cb)(struct vmbus_channel *));
3349 +
3350 +-/*
3351 +- * Retrieve the (sub) channel on which to send an outgoing request.
3352 +- * When a primary channel has multiple sub-channels, we choose a
3353 +- * channel whose VCPU binding is closest to the VCPU on which
3354 +- * this call is being made.
3355 +- */
3356 +-struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary);
3357 +-
3358 + /*
3359 + * Check if sub-channels have already been offerred. This API will be useful
3360 + * when the driver is unloaded after establishing sub-channels. In this case,
3361 +@@ -1176,8 +1159,9 @@ struct hv_ring_buffer_debug_info {
3362 + u32 bytes_avail_towrite;
3363 + };
3364 +
3365 +-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
3366 +- struct hv_ring_buffer_debug_info *debug_info);
3367 ++
3368 ++int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
3369 ++ struct hv_ring_buffer_debug_info *debug_info);
3370 +
3371 + /* Vmbus interface */
3372 + #define vmbus_driver_register(driver) \
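[Making hv_ringbuffer_get_debuginfo() return int lets it refuse a ring buffer that was never initialized instead of dereferencing a NULL pointer, and every sysfs caller now has to check the result. A hedged sketch of the new contract, with simplified types:]

    #include <errno.h>

    struct ring_buffer_info { void *ring_buffer; };
    struct ring_debug_info  { unsigned int bytes_avail_toread; };

    static int get_debuginfo(const struct ring_buffer_info *rbi,
                             struct ring_debug_info *dbg)
    {
        if (!rbi || !rbi->ring_buffer)
            return -EINVAL;            /* previously: a NULL dereference */

        dbg->bytes_avail_toread = 0;   /* real code reads ring indices */
        return 0;
    }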
3373 +diff --git a/include/linux/phy.h b/include/linux/phy.h
3374 +index 306630d13523..f5d4235e3844 100644
3375 +--- a/include/linux/phy.h
3376 ++++ b/include/linux/phy.h
3377 +@@ -502,8 +502,8 @@ struct phy_device {
3378 + * only works for PHYs with IDs which match this field
3379 + * name: The friendly name of this PHY type
3380 + * phy_id_mask: Defines the important bits of the phy_id
3381 +- * features: A list of features (speed, duplex, etc) supported
3382 +- * by this PHY
3383 ++ * features: A mandatory list of features (speed, duplex, etc)
3384 ++ * supported by this PHY
3385 + * flags: A bitfield defining certain other features this PHY
3386 + * supports (like interrupts)
3387 + *
3388 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
3389 +index 0d1b2c3f127b..a6d820ad17f0 100644
3390 +--- a/include/linux/skbuff.h
3391 ++++ b/include/linux/skbuff.h
3392 +@@ -3204,6 +3204,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
3393 + *
3394 + * This is exactly the same as pskb_trim except that it ensures the
3395 + * checksum of received packets are still valid after the operation.
3396 ++ * It can change skb pointers.
3397 + */
3398 +
3399 + static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3400 +diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
3401 +index c5969762a8f4..9c8214d2116d 100644
3402 +--- a/include/net/ip_fib.h
3403 ++++ b/include/net/ip_fib.h
3404 +@@ -241,7 +241,7 @@ int fib_table_delete(struct net *, struct fib_table *, struct fib_config *,
3405 + struct netlink_ext_ack *extack);
3406 + int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
3407 + struct netlink_callback *cb, struct fib_dump_filter *filter);
3408 +-int fib_table_flush(struct net *net, struct fib_table *table);
3409 ++int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all);
3410 + struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
3411 + void fib_table_flush_external(struct fib_table *table);
3412 + void fib_free_table(struct fib_table *tb);
3413 +diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
3414 +index fb78f6f500f3..f056b2a00d5c 100644
3415 +--- a/include/uapi/linux/input.h
3416 ++++ b/include/uapi/linux/input.h
3417 +@@ -26,13 +26,17 @@
3418 + */
3419 +
3420 + struct input_event {
3421 +-#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL)
3422 ++#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL__)
3423 + struct timeval time;
3424 + #define input_event_sec time.tv_sec
3425 + #define input_event_usec time.tv_usec
3426 + #else
3427 + __kernel_ulong_t __sec;
3428 ++#if defined(__sparc__) && defined(__arch64__)
3429 ++ unsigned int __usec;
3430 ++#else
3431 + __kernel_ulong_t __usec;
3432 ++#endif
3433 + #define input_event_sec __sec
3434 + #define input_event_usec __usec
3435 + #endif
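[Two separate fixes hide in the input.h hunk: __KERNEL (without the trailing underscores) is never defined, so the check never actually excluded kernel code from the struct timeval branch, and on sparc64 the legacy tv_usec is 32 bits wide, so the replacement __usec field must be unsigned int there for the fallback layout to stay bit-identical to the old one. The layout constraint can be stated as a compile-time check; the field widths below are assumptions for a 64-bit sparc target:]

    #include <assert.h>

    /* Legacy event time prefix on sparc64: 64-bit sec, 32-bit usec. */
    struct ev_legacy   { long sec; int usec; };
    /* Fallback prefix after the fix (the sparc64 branch). */
    struct ev_fallback { unsigned long __sec; unsigned int __usec; };

    static_assert(sizeof(struct ev_legacy) == sizeof(struct ev_fallback),
                  "input_event time prefix must not change size");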
3436 +diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
3437 +index b2890c268cb3..ac44653025ad 100644
3438 +--- a/kernel/bpf/core.c
3439 ++++ b/kernel/bpf/core.c
3440 +@@ -52,6 +52,7 @@
3441 + #define DST regs[insn->dst_reg]
3442 + #define SRC regs[insn->src_reg]
3443 + #define FP regs[BPF_REG_FP]
3444 ++#define AX regs[BPF_REG_AX]
3445 + #define ARG1 regs[BPF_REG_ARG1]
3446 + #define CTX regs[BPF_REG_CTX]
3447 + #define IMM insn->imm
3448 +@@ -726,6 +727,26 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
3449 + BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
3450 + BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
3451 +
3452 ++ /* Constraints on AX register:
3453 ++ *
3454 ++ * AX register is inaccessible from user space. It is mapped in
3455 ++ * all JITs, and used here for constant blinding rewrites. It is
3456 ++ * typically "stateless" meaning its contents are only valid within
3457 ++ * the executed instruction, but not across several instructions.
3458 ++ * There are a few exceptions however which are further detailed
3459 ++ * below.
3460 ++ *
3461 ++ * Constant blinding is only used by JITs, not in the interpreter.
3462 ++ * The interpreter uses AX in some occasions as a local temporary
3463 ++ * register e.g. in DIV or MOD instructions.
3464 ++ *
3465 ++ * In restricted circumstances, the verifier can also use the AX
3466 ++ * register for rewrites as long as they do not interfere with
3467 ++ * the above cases!
3468 ++ */
3469 ++ if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
3470 ++ goto out;
3471 ++
3472 + if (from->imm == 0 &&
3473 + (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
3474 + from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
3475 +@@ -1055,7 +1076,6 @@ bool bpf_opcode_in_insntable(u8 code)
3476 + */
3477 + static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
3478 + {
3479 +- u64 tmp;
3480 + #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
3481 + #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
3482 + static const void *jumptable[256] = {
3483 +@@ -1129,36 +1149,36 @@ select_insn:
3484 + (*(s64 *) &DST) >>= IMM;
3485 + CONT;
3486 + ALU64_MOD_X:
3487 +- div64_u64_rem(DST, SRC, &tmp);
3488 +- DST = tmp;
3489 ++ div64_u64_rem(DST, SRC, &AX);
3490 ++ DST = AX;
3491 + CONT;
3492 + ALU_MOD_X:
3493 +- tmp = (u32) DST;
3494 +- DST = do_div(tmp, (u32) SRC);
3495 ++ AX = (u32) DST;
3496 ++ DST = do_div(AX, (u32) SRC);
3497 + CONT;
3498 + ALU64_MOD_K:
3499 +- div64_u64_rem(DST, IMM, &tmp);
3500 +- DST = tmp;
3501 ++ div64_u64_rem(DST, IMM, &AX);
3502 ++ DST = AX;
3503 + CONT;
3504 + ALU_MOD_K:
3505 +- tmp = (u32) DST;
3506 +- DST = do_div(tmp, (u32) IMM);
3507 ++ AX = (u32) DST;
3508 ++ DST = do_div(AX, (u32) IMM);
3509 + CONT;
3510 + ALU64_DIV_X:
3511 + DST = div64_u64(DST, SRC);
3512 + CONT;
3513 + ALU_DIV_X:
3514 +- tmp = (u32) DST;
3515 +- do_div(tmp, (u32) SRC);
3516 +- DST = (u32) tmp;
3517 ++ AX = (u32) DST;
3518 ++ do_div(AX, (u32) SRC);
3519 ++ DST = (u32) AX;
3520 + CONT;
3521 + ALU64_DIV_K:
3522 + DST = div64_u64(DST, IMM);
3523 + CONT;
3524 + ALU_DIV_K:
3525 +- tmp = (u32) DST;
3526 +- do_div(tmp, (u32) IMM);
3527 +- DST = (u32) tmp;
3528 ++ AX = (u32) DST;
3529 ++ do_div(AX, (u32) IMM);
3530 ++ DST = (u32) AX;
3531 + CONT;
3532 + ALU_END_TO_BE:
3533 + switch (IMM) {
3534 +@@ -1414,7 +1434,7 @@ STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
3535 + static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
3536 + { \
3537 + u64 stack[stack_size / sizeof(u64)]; \
3538 +- u64 regs[MAX_BPF_REG]; \
3539 ++ u64 regs[MAX_BPF_EXT_REG]; \
3540 + \
3541 + FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
3542 + ARG1 = (u64) (unsigned long) ctx; \
3543 +@@ -1427,7 +1447,7 @@ static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
3544 + const struct bpf_insn *insn) \
3545 + { \
3546 + u64 stack[stack_size / sizeof(u64)]; \
3547 +- u64 regs[MAX_BPF_REG]; \
3548 ++ u64 regs[MAX_BPF_EXT_REG]; \
3549 + \
3550 + FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
3551 + BPF_R1 = r1; \
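[Promoting AX to a real slot in the interpreter's register file (MAX_BPF_EXT_REG) lets DIV and MOD keep their scratch value in a register the rest of the machinery can see, instead of a hidden C local, which is what allows the verifier rewrites further below to use AX safely. A toy interpreter step with the scratch state in the register array; the kernel uses do_div()/div64_u64_rem() rather than the plain % shown here:]

    #include <stdint.h>
    #include <stdio.h>

    enum { R0, R1, R_AX, NR_REGS };  /* AX sits past the public registers */

    /* ALU_MOD with the scratch value held in regs[R_AX]. */
    static void alu_mod(uint64_t *regs)
    {
        regs[R_AX] = (uint32_t)regs[R0];
        regs[R0]   = regs[R_AX] % (uint32_t)regs[R1];
    }

    int main(void)
    {
        uint64_t regs[NR_REGS] = { 10, 3, 0 };

        alu_mod(regs);
        printf("10 mod 3 = %llu\n", (unsigned long long)regs[R0]);
        return 0;
    }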
3552 +diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
3553 +index 99d243e1ad6e..52378d3e34b3 100644
3554 +--- a/kernel/bpf/map_in_map.c
3555 ++++ b/kernel/bpf/map_in_map.c
3556 +@@ -12,6 +12,7 @@
3557 + struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
3558 + {
3559 + struct bpf_map *inner_map, *inner_map_meta;
3560 ++ u32 inner_map_meta_size;
3561 + struct fd f;
3562 +
3563 + f = fdget(inner_map_ufd);
3564 +@@ -36,7 +37,12 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
3565 + return ERR_PTR(-EINVAL);
3566 + }
3567 +
3568 +- inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER);
3569 ++ inner_map_meta_size = sizeof(*inner_map_meta);
3570 ++ /* In some cases verifier needs to access beyond just base map. */
3571 ++ if (inner_map->ops == &array_map_ops)
3572 ++ inner_map_meta_size = sizeof(struct bpf_array);
3573 ++
3574 ++ inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER);
3575 + if (!inner_map_meta) {
3576 + fdput(f);
3577 + return ERR_PTR(-ENOMEM);
3578 +@@ -46,9 +52,16 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
3579 + inner_map_meta->key_size = inner_map->key_size;
3580 + inner_map_meta->value_size = inner_map->value_size;
3581 + inner_map_meta->map_flags = inner_map->map_flags;
3582 +- inner_map_meta->ops = inner_map->ops;
3583 + inner_map_meta->max_entries = inner_map->max_entries;
3584 +
3585 ++ /* Misc members not needed in bpf_map_meta_equal() check. */
3586 ++ inner_map_meta->ops = inner_map->ops;
3587 ++ if (inner_map->ops == &array_map_ops) {
3588 ++ inner_map_meta->unpriv_array = inner_map->unpriv_array;
3589 ++ container_of(inner_map_meta, struct bpf_array, map)->index_mask =
3590 ++ container_of(inner_map, struct bpf_array, map)->index_mask;
3591 ++ }
3592 ++
3593 + fdput(f);
3594 + return inner_map_meta;
3595 + }
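[The map-in-map metadata used to be a bare struct bpf_map, so lookups through an inner array map lost the unpriv_array/index_mask state that array-index masking depends on; the fix allocates a full struct bpf_array for array-typed inner maps and copies both fields across. The mask itself is the usual power-of-two clamp; a sketch with illustrative names:]

    #include <stdint.h>

    /* Round max_entries up to a power of two; the backing array is
     * allocated to this rounded size, so a masked index always lands
     * inside it, even on a misspeculated path. */
    static uint32_t index_mask_for(uint32_t max_entries)
    {
        uint32_t n = 1;

        while (n < max_entries)
            n <<= 1;
        return n - 1;
    }

    static uint64_t array_lookup(const uint64_t *vals, uint32_t max_entries,
                                 uint32_t index_mask, uint32_t idx)
    {
        if (idx >= max_entries)
            return 0;                   /* architectural bounds check */
        return vals[idx & index_mask];  /* mask applies speculatively too */
    }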
3596 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
3597 +index eedc7bd4185d..e4c826229152 100644
3598 +--- a/kernel/bpf/verifier.c
3599 ++++ b/kernel/bpf/verifier.c
3600 +@@ -648,6 +648,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
3601 + free_func_state(dst_state->frame[i]);
3602 + dst_state->frame[i] = NULL;
3603 + }
3604 ++ dst_state->speculative = src->speculative;
3605 + dst_state->curframe = src->curframe;
3606 + for (i = 0; i <= src->curframe; i++) {
3607 + dst = dst_state->frame[i];
3608 +@@ -692,7 +693,8 @@ static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
3609 + }
3610 +
3611 + static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
3612 +- int insn_idx, int prev_insn_idx)
3613 ++ int insn_idx, int prev_insn_idx,
3614 ++ bool speculative)
3615 + {
3616 + struct bpf_verifier_state *cur = env->cur_state;
3617 + struct bpf_verifier_stack_elem *elem;
3618 +@@ -710,6 +712,7 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
3619 + err = copy_verifier_state(&elem->st, cur);
3620 + if (err)
3621 + goto err;
3622 ++ elem->st.speculative |= speculative;
3623 + if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
3624 + verbose(env, "BPF program is too complex\n");
3625 + goto err;
3626 +@@ -1314,6 +1317,31 @@ static int check_stack_read(struct bpf_verifier_env *env,
3627 + }
3628 + }
3629 +
3630 ++static int check_stack_access(struct bpf_verifier_env *env,
3631 ++ const struct bpf_reg_state *reg,
3632 ++ int off, int size)
3633 ++{
3634 ++ /* Stack accesses must be at a fixed offset, so that we
3635 ++ * can determine what type of data were returned. See
3636 ++ * check_stack_read().
3637 ++ */
3638 ++ if (!tnum_is_const(reg->var_off)) {
3639 ++ char tn_buf[48];
3640 ++
3641 ++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3642 ++ verbose(env, "variable stack access var_off=%s off=%d size=%d",
3643 ++ tn_buf, off, size);
3644 ++ return -EACCES;
3645 ++ }
3646 ++
3647 ++ if (off >= 0 || off < -MAX_BPF_STACK) {
3648 ++ verbose(env, "invalid stack off=%d size=%d\n", off, size);
3649 ++ return -EACCES;
3650 ++ }
3651 ++
3652 ++ return 0;
3653 ++}
3654 ++
3655 + /* check read/write into map element returned by bpf_map_lookup_elem() */
3656 + static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
3657 + int size, bool zero_size_allowed)
3658 +@@ -1345,13 +1373,17 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,
3659 + */
3660 + if (env->log.level)
3661 + print_verifier_state(env, state);
3662 ++
3663 + /* The minimum value is only important with signed
3664 + * comparisons where we can't assume the floor of a
3665 + * value is 0. If we are using signed variables for our
3666 + * index'es we need to make sure that whatever we use
3667 + * will have a set floor within our range.
3668 + */
3669 +- if (reg->smin_value < 0) {
3670 ++ if (reg->smin_value < 0 &&
3671 ++ (reg->smin_value == S64_MIN ||
3672 ++ (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
3673 ++ reg->smin_value + off < 0)) {
3674 + verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
3675 + regno);
3676 + return -EACCES;
3677 +@@ -1870,24 +1902,10 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
3678 + }
3679 +
3680 + } else if (reg->type == PTR_TO_STACK) {
3681 +- /* stack accesses must be at a fixed offset, so that we can
3682 +- * determine what type of data were returned.
3683 +- * See check_stack_read().
3684 +- */
3685 +- if (!tnum_is_const(reg->var_off)) {
3686 +- char tn_buf[48];
3687 +-
3688 +- tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3689 +- verbose(env, "variable stack access var_off=%s off=%d size=%d",
3690 +- tn_buf, off, size);
3691 +- return -EACCES;
3692 +- }
3693 + off += reg->var_off.value;
3694 +- if (off >= 0 || off < -MAX_BPF_STACK) {
3695 +- verbose(env, "invalid stack off=%d size=%d\n", off,
3696 +- size);
3697 +- return -EACCES;
3698 +- }
3699 ++ err = check_stack_access(env, reg, off, size);
3700 ++ if (err)
3701 ++ return err;
3702 +
3703 + state = func(env, reg);
3704 + err = update_stack_depth(env, state, off);
3705 +@@ -2968,6 +2986,125 @@ static bool check_reg_sane_offset(struct bpf_verifier_env *env,
3706 + return true;
3707 + }
3708 +
3709 ++static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
3710 ++{
3711 ++ return &env->insn_aux_data[env->insn_idx];
3712 ++}
3713 ++
3714 ++static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
3715 ++ u32 *ptr_limit, u8 opcode, bool off_is_neg)
3716 ++{
3717 ++ bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
3718 ++ (opcode == BPF_SUB && !off_is_neg);
3719 ++ u32 off;
3720 ++
3721 ++ switch (ptr_reg->type) {
3722 ++ case PTR_TO_STACK:
3723 ++ off = ptr_reg->off + ptr_reg->var_off.value;
3724 ++ if (mask_to_left)
3725 ++ *ptr_limit = MAX_BPF_STACK + off;
3726 ++ else
3727 ++ *ptr_limit = -off;
3728 ++ return 0;
3729 ++ case PTR_TO_MAP_VALUE:
3730 ++ if (mask_to_left) {
3731 ++ *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
3732 ++ } else {
3733 ++ off = ptr_reg->smin_value + ptr_reg->off;
3734 ++ *ptr_limit = ptr_reg->map_ptr->value_size - off;
3735 ++ }
3736 ++ return 0;
3737 ++ default:
3738 ++ return -EINVAL;
3739 ++ }
3740 ++}
3741 ++
3742 ++static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
3743 ++ const struct bpf_insn *insn)
3744 ++{
3745 ++ return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
3746 ++}
3747 ++
3748 ++static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
3749 ++ u32 alu_state, u32 alu_limit)
3750 ++{
3751 ++ /* If we arrived here from different branches with different
3752 ++ * state or limits to sanitize, then this won't work.
3753 ++ */
3754 ++ if (aux->alu_state &&
3755 ++ (aux->alu_state != alu_state ||
3756 ++ aux->alu_limit != alu_limit))
3757 ++ return -EACCES;
3758 ++
3759 ++ /* Corresponding fixup done in fixup_bpf_calls(). */
3760 ++ aux->alu_state = alu_state;
3761 ++ aux->alu_limit = alu_limit;
3762 ++ return 0;
3763 ++}
3764 ++
3765 ++static int sanitize_val_alu(struct bpf_verifier_env *env,
3766 ++ struct bpf_insn *insn)
3767 ++{
3768 ++ struct bpf_insn_aux_data *aux = cur_aux(env);
3769 ++
3770 ++ if (can_skip_alu_sanitation(env, insn))
3771 ++ return 0;
3772 ++
3773 ++ return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
3774 ++}
3775 ++
3776 ++static int sanitize_ptr_alu(struct bpf_verifier_env *env,
3777 ++ struct bpf_insn *insn,
3778 ++ const struct bpf_reg_state *ptr_reg,
3779 ++ struct bpf_reg_state *dst_reg,
3780 ++ bool off_is_neg)
3781 ++{
3782 ++ struct bpf_verifier_state *vstate = env->cur_state;
3783 ++ struct bpf_insn_aux_data *aux = cur_aux(env);
3784 ++ bool ptr_is_dst_reg = ptr_reg == dst_reg;
3785 ++ u8 opcode = BPF_OP(insn->code);
3786 ++ u32 alu_state, alu_limit;
3787 ++ struct bpf_reg_state tmp;
3788 ++ bool ret;
3789 ++
3790 ++ if (can_skip_alu_sanitation(env, insn))
3791 ++ return 0;
3792 ++
3793 ++ /* We already marked aux for masking from non-speculative
3794 ++ * paths, thus we got here in the first place. We only care
3795 ++ * to explore bad access from here.
3796 ++ */
3797 ++ if (vstate->speculative)
3798 ++ goto do_sim;
3799 ++
3800 ++ alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
3801 ++ alu_state |= ptr_is_dst_reg ?
3802 ++ BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
3803 ++
3804 ++ if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
3805 ++ return 0;
3806 ++ if (update_alu_sanitation_state(aux, alu_state, alu_limit))
3807 ++ return -EACCES;
3808 ++do_sim:
3809 ++ /* Simulate and find potential out-of-bounds access under
3810 ++ * speculative execution from truncation as a result of
3811 ++ * masking when off was not within expected range. If off
3812 ++ * sits in dst, then we temporarily need to move ptr there
3813 ++ * to simulate dst (== 0) +/-= ptr. Needed, for example,
3814 ++ * for cases where we use K-based arithmetic in one direction
3815 ++ * and truncated reg-based in the other in order to explore
3816 ++ * bad access.
3817 ++ */
3818 ++ if (!ptr_is_dst_reg) {
3819 ++ tmp = *dst_reg;
3820 ++ *dst_reg = *ptr_reg;
3821 ++ }
3822 ++ ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
3823 ++ if (!ptr_is_dst_reg)
3824 ++ *dst_reg = tmp;
3825 ++ return !ret ? -EFAULT : 0;
3826 ++}
3827 ++
3828 + /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
3829 + * Caller should also handle BPF_MOV case separately.
3830 + * If we return -EACCES, caller may want to try again treating pointer as a
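[retrieve_ptr_limit() computes how far an offset may legally be masked before the pointer would leave its object: for a stack pointer sitting off bytes below the frame top (off is negative), at most MAX_BPF_STACK + off bytes when masking to the left and -off bytes otherwise. sanitize_ptr_alu() records that limit and also pushes a speculative verifier state, via push_stack(..., true), so the truncated-to-zero outcome gets explored as well. The stack-case arithmetic, restated as a small checkable helper:]

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_BPF_STACK 512

    /* Masking limit for an offset added to a stack pointer that sits
     * `off` bytes below the frame top (off is negative for valid slots). */
    static int stack_ptr_limit(int32_t off, bool mask_to_left, uint32_t *limit)
    {
        if (off >= 0 || off < -MAX_BPF_STACK)
            return -1;                          /* not a valid frame slot */
        *limit = mask_to_left ? (uint32_t)(MAX_BPF_STACK + off)
                              : (uint32_t)(-off);
        return 0;
    }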
3831 +@@ -2986,8 +3123,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3832 + smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
3833 + u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
3834 + umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
3835 ++ u32 dst = insn->dst_reg, src = insn->src_reg;
3836 + u8 opcode = BPF_OP(insn->code);
3837 +- u32 dst = insn->dst_reg;
3838 ++ int ret;
3839 +
3840 + dst_reg = &regs[dst];
3841 +
3842 +@@ -3020,6 +3158,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3843 + verbose(env, "R%d pointer arithmetic on %s prohibited\n",
3844 + dst, reg_type_str[ptr_reg->type]);
3845 + return -EACCES;
3846 ++ case PTR_TO_MAP_VALUE:
3847 ++ if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
3848 ++ verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
3849 ++ off_reg == dst_reg ? dst : src);
3850 ++ return -EACCES;
3851 ++ }
3852 ++ /* fall-through */
3853 + default:
3854 + break;
3855 + }
3856 +@@ -3036,6 +3181,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3857 +
3858 + switch (opcode) {
3859 + case BPF_ADD:
3860 ++ ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
3861 ++ if (ret < 0) {
3862 ++ verbose(env, "R%d tried to add from different maps or paths\n", dst);
3863 ++ return ret;
3864 ++ }
3865 + /* We can take a fixed offset as long as it doesn't overflow
3866 + * the s32 'off' field
3867 + */
3868 +@@ -3086,6 +3236,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3869 + }
3870 + break;
3871 + case BPF_SUB:
3872 ++ ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
3873 ++ if (ret < 0) {
3874 ++ verbose(env, "R%d tried to sub from different maps or paths\n", dst);
3875 ++ return ret;
3876 ++ }
3877 + if (dst_reg == off_reg) {
3878 + /* scalar -= pointer. Creates an unknown scalar */
3879 + verbose(env, "R%d tried to subtract pointer from scalar\n",
3880 +@@ -3165,6 +3320,25 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3881 + __update_reg_bounds(dst_reg);
3882 + __reg_deduce_bounds(dst_reg);
3883 + __reg_bound_offset(dst_reg);
3884 ++
3885 ++ /* For unprivileged we require that resulting offset must be in bounds
3886 ++ * in order to be able to sanitize access later on.
3887 ++ */
3888 ++ if (!env->allow_ptr_leaks) {
3889 ++ if (dst_reg->type == PTR_TO_MAP_VALUE &&
3890 ++ check_map_access(env, dst, dst_reg->off, 1, false)) {
3891 ++ verbose(env, "R%d pointer arithmetic of map value goes out of range, "
3892 ++ "prohibited for !root\n", dst);
3893 ++ return -EACCES;
3894 ++ } else if (dst_reg->type == PTR_TO_STACK &&
3895 ++ check_stack_access(env, dst_reg, dst_reg->off +
3896 ++ dst_reg->var_off.value, 1)) {
3897 ++ verbose(env, "R%d stack pointer arithmetic goes out of range, "
3898 ++ "prohibited for !root\n", dst);
3899 ++ return -EACCES;
3900 ++ }
3901 ++ }
3902 ++
3903 + return 0;
3904 + }
3905 +
3906 +@@ -3183,6 +3357,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3907 + s64 smin_val, smax_val;
3908 + u64 umin_val, umax_val;
3909 + u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
3910 ++ u32 dst = insn->dst_reg;
3911 ++ int ret;
3912 +
3913 + if (insn_bitness == 32) {
3914 + /* Relevant for 32-bit RSH: Information can propagate towards
3915 +@@ -3217,6 +3393,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3916 +
3917 + switch (opcode) {
3918 + case BPF_ADD:
3919 ++ ret = sanitize_val_alu(env, insn);
3920 ++ if (ret < 0) {
3921 ++ verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
3922 ++ return ret;
3923 ++ }
3924 + if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
3925 + signed_add_overflows(dst_reg->smax_value, smax_val)) {
3926 + dst_reg->smin_value = S64_MIN;
3927 +@@ -3236,6 +3417,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3928 + dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
3929 + break;
3930 + case BPF_SUB:
3931 ++ ret = sanitize_val_alu(env, insn);
3932 ++ if (ret < 0) {
3933 ++ verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
3934 ++ return ret;
3935 ++ }
3936 + if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
3937 + signed_sub_overflows(dst_reg->smax_value, smin_val)) {
3938 + /* Overflow possible, we know nothing */
3939 +@@ -4249,7 +4435,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
3940 + }
3941 + }
3942 +
3943 +- other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
3944 ++ other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
3945 ++ false);
3946 + if (!other_branch)
3947 + return -EFAULT;
3948 + other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
3949 +@@ -4990,6 +5177,12 @@ static bool states_equal(struct bpf_verifier_env *env,
3950 + if (old->curframe != cur->curframe)
3951 + return false;
3952 +
3953 ++ /* Verification state from speculative execution simulation
3954 ++ * must never prune a non-speculative execution one.
3955 ++ */
3956 ++ if (old->speculative && !cur->speculative)
3957 ++ return false;
3958 ++
3959 + /* for states to be equal callsites have to be the same
3960 + * and all frame states need to be equivalent
3961 + */
3962 +@@ -5180,7 +5373,6 @@ static int do_check(struct bpf_verifier_env *env)
3963 + struct bpf_insn *insns = env->prog->insnsi;
3964 + struct bpf_reg_state *regs;
3965 + int insn_cnt = env->prog->len, i;
3966 +- int insn_idx, prev_insn_idx = 0;
3967 + int insn_processed = 0;
3968 + bool do_print_state = false;
3969 +
3970 +@@ -5188,6 +5380,7 @@ static int do_check(struct bpf_verifier_env *env)
3971 + if (!state)
3972 + return -ENOMEM;
3973 + state->curframe = 0;
3974 ++ state->speculative = false;
3975 + state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
3976 + if (!state->frame[0]) {
3977 + kfree(state);
3978 +@@ -5198,19 +5391,19 @@ static int do_check(struct bpf_verifier_env *env)
3979 + BPF_MAIN_FUNC /* callsite */,
3980 + 0 /* frameno */,
3981 + 0 /* subprogno, zero == main subprog */);
3982 +- insn_idx = 0;
3983 ++
3984 + for (;;) {
3985 + struct bpf_insn *insn;
3986 + u8 class;
3987 + int err;
3988 +
3989 +- if (insn_idx >= insn_cnt) {
3990 ++ if (env->insn_idx >= insn_cnt) {
3991 + verbose(env, "invalid insn idx %d insn_cnt %d\n",
3992 +- insn_idx, insn_cnt);
3993 ++ env->insn_idx, insn_cnt);
3994 + return -EFAULT;
3995 + }
3996 +
3997 +- insn = &insns[insn_idx];
3998 ++ insn = &insns[env->insn_idx];
3999 + class = BPF_CLASS(insn->code);
4000 +
4001 + if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
4002 +@@ -5220,17 +5413,19 @@ static int do_check(struct bpf_verifier_env *env)
4003 + return -E2BIG;
4004 + }
4005 +
4006 +- err = is_state_visited(env, insn_idx);
4007 ++ err = is_state_visited(env, env->insn_idx);
4008 + if (err < 0)
4009 + return err;
4010 + if (err == 1) {
4011 + /* found equivalent state, can prune the search */
4012 + if (env->log.level) {
4013 + if (do_print_state)
4014 +- verbose(env, "\nfrom %d to %d: safe\n",
4015 +- prev_insn_idx, insn_idx);
4016 ++ verbose(env, "\nfrom %d to %d%s: safe\n",
4017 ++ env->prev_insn_idx, env->insn_idx,
4018 ++ env->cur_state->speculative ?
4019 ++ " (speculative execution)" : "");
4020 + else
4021 +- verbose(env, "%d: safe\n", insn_idx);
4022 ++ verbose(env, "%d: safe\n", env->insn_idx);
4023 + }
4024 + goto process_bpf_exit;
4025 + }
4026 +@@ -5243,10 +5438,12 @@ static int do_check(struct bpf_verifier_env *env)
4027 +
4028 + if (env->log.level > 1 || (env->log.level && do_print_state)) {
4029 + if (env->log.level > 1)
4030 +- verbose(env, "%d:", insn_idx);
4031 ++ verbose(env, "%d:", env->insn_idx);
4032 + else
4033 +- verbose(env, "\nfrom %d to %d:",
4034 +- prev_insn_idx, insn_idx);
4035 ++ verbose(env, "\nfrom %d to %d%s:",
4036 ++ env->prev_insn_idx, env->insn_idx,
4037 ++ env->cur_state->speculative ?
4038 ++ " (speculative execution)" : "");
4039 + print_verifier_state(env, state->frame[state->curframe]);
4040 + do_print_state = false;
4041 + }
4042 +@@ -5257,19 +5454,19 @@ static int do_check(struct bpf_verifier_env *env)
4043 + .private_data = env,
4044 + };
4045 +
4046 +- verbose(env, "%d: ", insn_idx);
4047 ++ verbose(env, "%d: ", env->insn_idx);
4048 + print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
4049 + }
4050 +
4051 + if (bpf_prog_is_dev_bound(env->prog->aux)) {
4052 +- err = bpf_prog_offload_verify_insn(env, insn_idx,
4053 +- prev_insn_idx);
4054 ++ err = bpf_prog_offload_verify_insn(env, env->insn_idx,
4055 ++ env->prev_insn_idx);
4056 + if (err)
4057 + return err;
4058 + }
4059 +
4060 + regs = cur_regs(env);
4061 +- env->insn_aux_data[insn_idx].seen = true;
4062 ++ env->insn_aux_data[env->insn_idx].seen = true;
4063 +
4064 + if (class == BPF_ALU || class == BPF_ALU64) {
4065 + err = check_alu_op(env, insn);
4066 +@@ -5295,13 +5492,13 @@ static int do_check(struct bpf_verifier_env *env)
4067 + /* check that memory (src_reg + off) is readable,
4068 + * the state of dst_reg will be updated by this func
4069 + */
4070 +- err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
4071 +- BPF_SIZE(insn->code), BPF_READ,
4072 +- insn->dst_reg, false);
4073 ++ err = check_mem_access(env, env->insn_idx, insn->src_reg,
4074 ++ insn->off, BPF_SIZE(insn->code),
4075 ++ BPF_READ, insn->dst_reg, false);
4076 + if (err)
4077 + return err;
4078 +
4079 +- prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
4080 ++ prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
4081 +
4082 + if (*prev_src_type == NOT_INIT) {
4083 + /* saw a valid insn
4084 +@@ -5326,10 +5523,10 @@ static int do_check(struct bpf_verifier_env *env)
4085 + enum bpf_reg_type *prev_dst_type, dst_reg_type;
4086 +
4087 + if (BPF_MODE(insn->code) == BPF_XADD) {
4088 +- err = check_xadd(env, insn_idx, insn);
4089 ++ err = check_xadd(env, env->insn_idx, insn);
4090 + if (err)
4091 + return err;
4092 +- insn_idx++;
4093 ++ env->insn_idx++;
4094 + continue;
4095 + }
4096 +
4097 +@@ -5345,13 +5542,13 @@ static int do_check(struct bpf_verifier_env *env)
4098 + dst_reg_type = regs[insn->dst_reg].type;
4099 +
4100 + /* check that memory (dst_reg + off) is writeable */
4101 +- err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
4102 +- BPF_SIZE(insn->code), BPF_WRITE,
4103 +- insn->src_reg, false);
4104 ++ err = check_mem_access(env, env->insn_idx, insn->dst_reg,
4105 ++ insn->off, BPF_SIZE(insn->code),
4106 ++ BPF_WRITE, insn->src_reg, false);
4107 + if (err)
4108 + return err;
4109 +
4110 +- prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
4111 ++ prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
4112 +
4113 + if (*prev_dst_type == NOT_INIT) {
4114 + *prev_dst_type = dst_reg_type;
4115 +@@ -5379,9 +5576,9 @@ static int do_check(struct bpf_verifier_env *env)
4116 + }
4117 +
4118 + /* check that memory (dst_reg + off) is writeable */
4119 +- err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
4120 +- BPF_SIZE(insn->code), BPF_WRITE,
4121 +- -1, false);
4122 ++ err = check_mem_access(env, env->insn_idx, insn->dst_reg,
4123 ++ insn->off, BPF_SIZE(insn->code),
4124 ++ BPF_WRITE, -1, false);
4125 + if (err)
4126 + return err;
4127 +
4128 +@@ -5399,9 +5596,9 @@ static int do_check(struct bpf_verifier_env *env)
4129 + }
4130 +
4131 + if (insn->src_reg == BPF_PSEUDO_CALL)
4132 +- err = check_func_call(env, insn, &insn_idx);
4133 ++ err = check_func_call(env, insn, &env->insn_idx);
4134 + else
4135 +- err = check_helper_call(env, insn->imm, insn_idx);
4136 ++ err = check_helper_call(env, insn->imm, env->insn_idx);
4137 + if (err)
4138 + return err;
4139 +
4140 +@@ -5414,7 +5611,7 @@ static int do_check(struct bpf_verifier_env *env)
4141 + return -EINVAL;
4142 + }
4143 +
4144 +- insn_idx += insn->off + 1;
4145 ++ env->insn_idx += insn->off + 1;
4146 + continue;
4147 +
4148 + } else if (opcode == BPF_EXIT) {
4149 +@@ -5428,8 +5625,8 @@ static int do_check(struct bpf_verifier_env *env)
4150 +
4151 + if (state->curframe) {
4152 + /* exit from nested function */
4153 +- prev_insn_idx = insn_idx;
4154 +- err = prepare_func_exit(env, &insn_idx);
4155 ++ env->prev_insn_idx = env->insn_idx;
4156 ++ err = prepare_func_exit(env, &env->insn_idx);
4157 + if (err)
4158 + return err;
4159 + do_print_state = true;
4160 +@@ -5459,7 +5656,8 @@ static int do_check(struct bpf_verifier_env *env)
4161 + if (err)
4162 + return err;
4163 + process_bpf_exit:
4164 +- err = pop_stack(env, &prev_insn_idx, &insn_idx);
4165 ++ err = pop_stack(env, &env->prev_insn_idx,
4166 ++ &env->insn_idx);
4167 + if (err < 0) {
4168 + if (err != -ENOENT)
4169 + return err;
4170 +@@ -5469,7 +5667,7 @@ process_bpf_exit:
4171 + continue;
4172 + }
4173 + } else {
4174 +- err = check_cond_jmp_op(env, insn, &insn_idx);
4175 ++ err = check_cond_jmp_op(env, insn, &env->insn_idx);
4176 + if (err)
4177 + return err;
4178 + }
4179 +@@ -5486,8 +5684,8 @@ process_bpf_exit:
4180 + if (err)
4181 + return err;
4182 +
4183 +- insn_idx++;
4184 +- env->insn_aux_data[insn_idx].seen = true;
4185 ++ env->insn_idx++;
4186 ++ env->insn_aux_data[env->insn_idx].seen = true;
4187 + } else {
4188 + verbose(env, "invalid BPF_LD mode\n");
4189 + return -EINVAL;
4190 +@@ -5497,7 +5695,7 @@ process_bpf_exit:
4191 + return -EINVAL;
4192 + }
4193 +
4194 +- insn_idx++;
4195 ++ env->insn_idx++;
4196 + }
4197 +
4198 + verbose(env, "processed %d insns (limit %d), stack depth ",
4199 +@@ -6220,6 +6418,57 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
4200 + continue;
4201 + }
4202 +
4203 ++ if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
4204 ++ insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
4205 ++ const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
4206 ++ const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
4207 ++ struct bpf_insn insn_buf[16];
4208 ++ struct bpf_insn *patch = &insn_buf[0];
4209 ++ bool issrc, isneg;
4210 ++ u32 off_reg;
4211 ++
4212 ++ aux = &env->insn_aux_data[i + delta];
4213 ++ if (!aux->alu_state)
4214 ++ continue;
4215 ++
4216 ++ isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
4217 ++ issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
4218 ++ BPF_ALU_SANITIZE_SRC;
4219 ++
4220 ++ off_reg = issrc ? insn->src_reg : insn->dst_reg;
4221 ++ if (isneg)
4222 ++ *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
4223 ++ *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
4224 ++ *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
4225 ++ *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
4226 ++ *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
4227 ++ *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
4228 ++ if (issrc) {
4229 ++ *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
4230 ++ off_reg);
4231 ++ insn->src_reg = BPF_REG_AX;
4232 ++ } else {
4233 ++ *patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
4234 ++ BPF_REG_AX);
4235 ++ }
4236 ++ if (isneg)
4237 ++ insn->code = insn->code == code_add ?
4238 ++ code_sub : code_add;
4239 ++ *patch++ = *insn;
4240 ++ if (issrc && isneg)
4241 ++ *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
4242 ++ cnt = patch - insn_buf;
4243 ++
4244 ++ new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
4245 ++ if (!new_prog)
4246 ++ return -ENOMEM;
4247 ++
4248 ++ delta += cnt - 1;
4249 ++ env->prog = prog = new_prog;
4250 ++ insn = new_prog->insnsi + i + delta;
4251 ++ continue;
4252 ++ }
4253 ++
4254 + if (insn->code != (BPF_JMP | BPF_CALL))
4255 + continue;
4256 + if (insn->src_reg == BPF_PSEUDO_CALL)
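[The fixup_bpf_calls() rewrite above is the core of the Spectre v1 defence: ahead of a sanitized ADD or SUB, the patched-in sequence computes, branch-free, a mask that is all-ones when the offset register is below alu_limit and all-zeros otherwise, then ANDs it into the offset. Folded into ordinary C and ignoring the isneg/issrc bookkeeping, the six ALU instructions behave like this sketch (an arithmetic right shift on signed values is assumed, as the BPF VM guarantees):]

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t sanitize_off(uint64_t off, uint32_t limit)
    {
        uint64_t ax = (uint64_t)(limit - 1);      /* BPF_MOV32_IMM        */

        ax -= off;                   /* BPF_SUB: limit - 1 - off          */
        ax |= off;                   /* BPF_OR : sign bit set iff off is  */
                                     /*          out of range             */
        ax = -ax;                    /* BPF_NEG                           */
        ax = (uint64_t)((int64_t)ax >> 63);  /* BPF_ARSH: 0 or ~0 mask    */
        return off & ax;             /* BPF_AND: clamp to 0 when unsafe   */
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)sanitize_off(3, 8));  /* 3 */
        printf("%llu\n", (unsigned long long)sanitize_off(9, 8));  /* 0 */
        return 0;
    }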
4257 +diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
4258 +index 8f0644af40be..80f955210861 100644
4259 +--- a/kernel/time/posix-cpu-timers.c
4260 ++++ b/kernel/time/posix-cpu-timers.c
4261 +@@ -685,6 +685,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
4262 + * set up the signal and overrun bookkeeping.
4263 + */
4264 + timer->it.cpu.incr = timespec64_to_ns(&new->it_interval);
4265 ++ timer->it_interval = ns_to_ktime(timer->it.cpu.incr);
4266 +
4267 + /*
4268 + * This acts as a modification timestamp for the timer,
4269 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
4270 +index e95b5b7c9c3d..995d1079f958 100644
4271 +--- a/mm/page_alloc.c
4272 ++++ b/mm/page_alloc.c
4273 +@@ -5542,18 +5542,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
4274 + cond_resched();
4275 + }
4276 + }
4277 +-#ifdef CONFIG_SPARSEMEM
4278 +- /*
4279 +- * If the zone does not span the rest of the section then
4280 +- * we should at least initialize those pages. Otherwise we
4281 +- * could blow up on a poisoned page in some paths which depend
4282 +- * on full sections being initialized (e.g. memory hotplug).
4283 +- */
4284 +- while (end_pfn % PAGES_PER_SECTION) {
4285 +- __init_single_page(pfn_to_page(end_pfn), end_pfn, zone, nid);
4286 +- end_pfn++;
4287 +- }
4288 +-#endif
4289 + }
4290 +
4291 + #ifdef CONFIG_ZONE_DEVICE
4292 +diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
4293 +index 2cb8da465b98..48ddc60b4fbd 100644
4294 +--- a/net/bridge/br_forward.c
4295 ++++ b/net/bridge/br_forward.c
4296 +@@ -36,10 +36,10 @@ static inline int should_deliver(const struct net_bridge_port *p,
4297 +
4298 + int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
4299 + {
4300 ++ skb_push(skb, ETH_HLEN);
4301 + if (!is_skb_forwardable(skb->dev, skb))
4302 + goto drop;
4303 +
4304 +- skb_push(skb, ETH_HLEN);
4305 + br_drop_fake_rtable(skb);
4306 +
4307 + if (skb->ip_summed == CHECKSUM_PARTIAL &&
4308 +@@ -98,12 +98,11 @@ static void __br_forward(const struct net_bridge_port *to,
4309 + net = dev_net(indev);
4310 + } else {
4311 + if (unlikely(netpoll_tx_running(to->br->dev))) {
4312 +- if (!is_skb_forwardable(skb->dev, skb)) {
4313 ++ skb_push(skb, ETH_HLEN);
4314 ++ if (!is_skb_forwardable(skb->dev, skb))
4315 + kfree_skb(skb);
4316 +- } else {
4317 +- skb_push(skb, ETH_HLEN);
4318 ++ else
4319 + br_netpoll_send_skb(to, skb);
4320 +- }
4321 + return;
4322 + }
4323 + br_hook = NF_BR_LOCAL_OUT;
4324 +diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
4325 +index 96c072e71ea2..5811208863b7 100644
4326 +--- a/net/bridge/br_netfilter_ipv6.c
4327 ++++ b/net/bridge/br_netfilter_ipv6.c
4328 +@@ -131,6 +131,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb)
4329 + IPSTATS_MIB_INDISCARDS);
4330 + goto drop;
4331 + }
4332 ++ hdr = ipv6_hdr(skb);
4333 + }
4334 + if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb))
4335 + goto drop;
4336 +diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
4337 +index 08cbed7d940e..419e8edf23ba 100644
4338 +--- a/net/bridge/netfilter/nft_reject_bridge.c
4339 ++++ b/net/bridge/netfilter/nft_reject_bridge.c
4340 +@@ -229,6 +229,7 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
4341 + pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
4342 + return false;
4343 +
4344 ++ ip6h = ipv6_hdr(skb);
4345 + thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
4346 + if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
4347 + return false;
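[The br_validate_ipv6() and reject6_br_csum_ok() fixes above, and the ip_rcv_core() fix below, all enforce the rule now spelled out in the pskb_trim_rcsum() kernel-doc: any helper that may reallocate or move skb data invalidates previously cached header pointers, which must be re-derived afterwards. The same bug class in miniature, with realloc() standing in for the skb helpers:]

    #include <stdlib.h>

    struct pkt { unsigned char *data; size_t len; };

    /* May move the buffer, as pskb_may_pull()/pskb_trim_rcsum() can. */
    static int pkt_shrink(struct pkt *p, size_t newlen)
    {
        unsigned char *n = realloc(p->data, newlen);

        if (!n)
            return -1;
        p->data = n;
        p->len  = newlen;
        return 0;
    }

    static int handle(struct pkt *p)
    {
        unsigned char *hdr = p->data;   /* cached header pointer */

        if (pkt_shrink(p, p->len - 4))
            return -1;
        hdr = p->data;                  /* reload: the buffer may have moved */
        return hdr[0];
    }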
4348 +diff --git a/net/can/bcm.c b/net/can/bcm.c
4349 +index 0af8f0db892a..79bb8afa9c0c 100644
4350 +--- a/net/can/bcm.c
4351 ++++ b/net/can/bcm.c
4352 +@@ -67,6 +67,9 @@
4353 + */
4354 + #define MAX_NFRAMES 256
4355 +
4356 ++/* limit timers to 400 days for sending/timeouts */
4357 ++#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
4358 ++
4359 + /* use of last_frames[index].flags */
4360 + #define RX_RECV 0x40 /* received data for this element */
4361 + #define RX_THR 0x80 /* element not been sent due to throttle feature */
4362 +@@ -140,6 +143,22 @@ static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
4363 + return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
4364 + }
4365 +
4366 ++/* check limitations for timeval provided by user */
4367 ++static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
4368 ++{
4369 ++ if ((msg_head->ival1.tv_sec < 0) ||
4370 ++ (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
4371 ++ (msg_head->ival1.tv_usec < 0) ||
4372 ++ (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
4373 ++ (msg_head->ival2.tv_sec < 0) ||
4374 ++ (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
4375 ++ (msg_head->ival2.tv_usec < 0) ||
4376 ++ (msg_head->ival2.tv_usec >= USEC_PER_SEC))
4377 ++ return true;
4378 ++
4379 ++ return false;
4380 ++}
4381 ++
4382 + #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
4383 + #define OPSIZ sizeof(struct bcm_op)
4384 + #define MHSIZ sizeof(struct bcm_msg_head)
4385 +@@ -873,6 +892,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
4386 + if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
4387 + return -EINVAL;
4388 +
4389 ++ /* check timeval limitations */
4390 ++ if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
4391 ++ return -EINVAL;
4392 ++
4393 + /* check the given can_id */
4394 + op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
4395 + if (op) {
4396 +@@ -1053,6 +1076,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
4397 + (!(msg_head->can_id & CAN_RTR_FLAG))))
4398 + return -EINVAL;
4399 +
4400 ++ /* check timeval limitations */
4401 ++ if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
4402 ++ return -EINVAL;
4403 ++
4404 + /* check the given can_id */
4405 + op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
4406 + if (op) {
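[bcm_is_invalid_tv() bounds both intervals before they reach bcm_timeval_to_ktime(): negative fields and an out-of-range tv_usec are rejected, and tv_sec is capped at 400 days so the nanosecond conversion cannot overflow ktime_t. Restated compactly:]

    #include <stdbool.h>

    #define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)   /* 400 days */
    #define USEC_PER_SEC      1000000L

    struct timeval_like { long tv_sec; long tv_usec; };

    static bool tv_invalid(const struct timeval_like *tv)
    {
        return tv->tv_sec  < 0 || tv->tv_sec  > BCM_TIMER_SEC_MAX ||
               tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC;
    }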
4407 +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
4408 +index 6df95be96311..fe4f6a624238 100644
4409 +--- a/net/ipv4/fib_frontend.c
4410 ++++ b/net/ipv4/fib_frontend.c
4411 +@@ -203,7 +203,7 @@ static void fib_flush(struct net *net)
4412 + struct fib_table *tb;
4413 +
4414 + hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
4415 +- flushed += fib_table_flush(net, tb);
4416 ++ flushed += fib_table_flush(net, tb, false);
4417 + }
4418 +
4419 + if (flushed)
4420 +@@ -1463,7 +1463,7 @@ static void ip_fib_net_exit(struct net *net)
4421 +
4422 + hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
4423 + hlist_del(&tb->tb_hlist);
4424 +- fib_table_flush(net, tb);
4425 ++ fib_table_flush(net, tb, true);
4426 + fib_free_table(tb);
4427 + }
4428 + }
4429 +diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
4430 +index 237c9f72b265..a573e37e0615 100644
4431 +--- a/net/ipv4/fib_trie.c
4432 ++++ b/net/ipv4/fib_trie.c
4433 +@@ -1856,7 +1856,7 @@ void fib_table_flush_external(struct fib_table *tb)
4434 + }
4435 +
4436 + /* Caller must hold RTNL. */
4437 +-int fib_table_flush(struct net *net, struct fib_table *tb)
4438 ++int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
4439 + {
4440 + struct trie *t = (struct trie *)tb->tb_data;
4441 + struct key_vector *pn = t->kv;
4442 +@@ -1904,8 +1904,17 @@ int fib_table_flush(struct net *net, struct fib_table *tb)
4443 + hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
4444 + struct fib_info *fi = fa->fa_info;
4445 +
4446 +- if (!fi || !(fi->fib_flags & RTNH_F_DEAD) ||
4447 +- tb->tb_id != fa->tb_id) {
4448 ++ if (!fi || tb->tb_id != fa->tb_id ||
4449 ++ (!(fi->fib_flags & RTNH_F_DEAD) &&
4450 ++ !fib_props[fa->fa_type].error)) {
4451 ++ slen = fa->fa_slen;
4452 ++ continue;
4453 ++ }
4454 ++
4455 ++ /* Do not flush error routes if network namespace is
4456 ++ * not being dismantled
4457 ++ */
4458 ++ if (!flush_all && fib_props[fa->fa_type].error) {
4459 + slen = fa->fa_slen;
4460 + continue;
4461 + }
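[fib_table_flush() now distinguishes why it was called: dead nexthops are always flushed, but error routes (blackhole, prohibit, unreachable; entries whose fib_props[type].error is nonzero) survive an ordinary flush and are only removed when flush_all is set, i.e. when ip_fib_net_exit() dismantles the namespace. As a predicate, with simplified fields:]

    #include <stdbool.h>

    struct alias { bool dead; bool is_error_route; bool owned_by_table; };

    static bool should_flush(const struct alias *fa, bool flush_all)
    {
        if (!fa->owned_by_table)
            return false;            /* belongs to a different table */
        if (fa->dead)
            return true;             /* RTNH_F_DEAD: always goes */
        return fa->is_error_route && flush_all;
    }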
4462 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
4463 +index 0fe9419bd12b..3407a82d4549 100644
4464 +--- a/net/ipv4/ip_gre.c
4465 ++++ b/net/ipv4/ip_gre.c
4466 +@@ -567,8 +567,7 @@ err_free_skb:
4467 + dev->stats.tx_dropped++;
4468 + }
4469 +
4470 +-static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
4471 +- __be16 proto)
4472 ++static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
4473 + {
4474 + struct ip_tunnel *tunnel = netdev_priv(dev);
4475 + struct ip_tunnel_info *tun_info;
4476 +@@ -576,10 +575,10 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
4477 + struct erspan_metadata *md;
4478 + struct rtable *rt = NULL;
4479 + bool truncate = false;
4480 ++ __be16 df, proto;
4481 + struct flowi4 fl;
4482 + int tunnel_hlen;
4483 + int version;
4484 +- __be16 df;
4485 + int nhoff;
4486 + int thoff;
4487 +
4488 +@@ -624,18 +623,20 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
4489 + if (version == 1) {
4490 + erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
4491 + ntohl(md->u.index), truncate, true);
4492 ++ proto = htons(ETH_P_ERSPAN);
4493 + } else if (version == 2) {
4494 + erspan_build_header_v2(skb,
4495 + ntohl(tunnel_id_to_key32(key->tun_id)),
4496 + md->u.md2.dir,
4497 + get_hwid(&md->u.md2),
4498 + truncate, true);
4499 ++ proto = htons(ETH_P_ERSPAN2);
4500 + } else {
4501 + goto err_free_rt;
4502 + }
4503 +
4504 + gre_build_header(skb, 8, TUNNEL_SEQ,
4505 +- htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));
4506 ++ proto, 0, htonl(tunnel->o_seqno++));
4507 +
4508 + df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
4509 +
4510 +@@ -719,12 +720,13 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
4511 + {
4512 + struct ip_tunnel *tunnel = netdev_priv(dev);
4513 + bool truncate = false;
4514 ++ __be16 proto;
4515 +
4516 + if (!pskb_inet_may_pull(skb))
4517 + goto free_skb;
4518 +
4519 + if (tunnel->collect_md) {
4520 +- erspan_fb_xmit(skb, dev, skb->protocol);
4521 ++ erspan_fb_xmit(skb, dev);
4522 + return NETDEV_TX_OK;
4523 + }
4524 +
4525 +@@ -740,19 +742,22 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
4526 + }
4527 +
4528 + /* Push ERSPAN header */
4529 +- if (tunnel->erspan_ver == 1)
4530 ++ if (tunnel->erspan_ver == 1) {
4531 + erspan_build_header(skb, ntohl(tunnel->parms.o_key),
4532 + tunnel->index,
4533 + truncate, true);
4534 +- else if (tunnel->erspan_ver == 2)
4535 ++ proto = htons(ETH_P_ERSPAN);
4536 ++ } else if (tunnel->erspan_ver == 2) {
4537 + erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
4538 + tunnel->dir, tunnel->hwid,
4539 + truncate, true);
4540 +- else
4541 ++ proto = htons(ETH_P_ERSPAN2);
4542 ++ } else {
4543 + goto free_skb;
4544 ++ }
4545 +
4546 + tunnel->parms.o_flags &= ~TUNNEL_KEY;
4547 +- __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
4548 ++ __gre_xmit(skb, dev, &tunnel->parms.iph, proto);
4549 + return NETDEV_TX_OK;
4550 +
4551 + free_skb:
4552 +diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
4553 +index e609b08c9df4..3163428219cd 100644
4554 +--- a/net/ipv4/ip_input.c
4555 ++++ b/net/ipv4/ip_input.c
4556 +@@ -489,6 +489,7 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
4557 + goto drop;
4558 + }
4559 +
4560 ++ iph = ip_hdr(skb);
4561 + skb->transport_header = skb->network_header + iph->ihl*4;
4562 +
4563 + /* Remove any debris in the socket control block */
4564 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
4565 +index 9e6bc4d6daa7..40cbe5609663 100644
4566 +--- a/net/ipv4/tcp.c
4567 ++++ b/net/ipv4/tcp.c
4568 +@@ -1186,7 +1186,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
4569 + flags = msg->msg_flags;
4570 +
4571 + if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
4572 +- if (sk->sk_state != TCP_ESTABLISHED) {
4573 ++ if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
4574 + err = -EINVAL;
4575 + goto out_err;
4576 + }
4577 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
4578 +index 1976fddb9e00..ce125f4dc810 100644
4579 +--- a/net/ipv4/udp.c
4580 ++++ b/net/ipv4/udp.c
4581 +@@ -785,15 +785,23 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
4582 + const int hlen = skb_network_header_len(skb) +
4583 + sizeof(struct udphdr);
4584 +
4585 +- if (hlen + cork->gso_size > cork->fragsize)
4586 ++ if (hlen + cork->gso_size > cork->fragsize) {
4587 ++ kfree_skb(skb);
4588 + return -EINVAL;
4589 +- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
4590 ++ }
4591 ++ if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
4592 ++ kfree_skb(skb);
4593 + return -EINVAL;
4594 +- if (sk->sk_no_check_tx)
4595 ++ }
4596 ++ if (sk->sk_no_check_tx) {
4597 ++ kfree_skb(skb);
4598 + return -EINVAL;
4599 ++ }
4600 + if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
4601 +- dst_xfrm(skb_dst(skb)))
4602 ++ dst_xfrm(skb_dst(skb))) {
4603 ++ kfree_skb(skb);
4604 + return -EIO;
4605 ++ }
4606 +
4607 + skb_shinfo(skb)->gso_size = cork->gso_size;
4608 + skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
4609 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
4610 +index 0f7d434c1eed..b529a79ac222 100644
4611 +--- a/net/ipv6/ip6_gre.c
4612 ++++ b/net/ipv6/ip6_gre.c
4613 +@@ -920,6 +920,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
4614 + __u8 dsfield = false;
4615 + struct flowi6 fl6;
4616 + int err = -EINVAL;
4617 ++ __be16 proto;
4618 + __u32 mtu;
4619 + int nhoff;
4620 + int thoff;
4621 +@@ -1033,8 +1034,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
4622 + }
4623 +
4624 + /* Push GRE header. */
4625 +- gre_build_header(skb, 8, TUNNEL_SEQ,
4626 +- htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++));
4627 ++ proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
4628 ++ : htons(ETH_P_ERSPAN2);
4629 ++ gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
4630 +
4631 + /* TooBig packet may have updated dst->dev's mtu */
4632 + if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
4633 +@@ -1167,6 +1169,10 @@ static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
4634 + t->parms.i_flags = p->i_flags;
4635 + t->parms.o_flags = p->o_flags;
4636 + t->parms.fwmark = p->fwmark;
4637 ++ t->parms.erspan_ver = p->erspan_ver;
4638 ++ t->parms.index = p->index;
4639 ++ t->parms.dir = p->dir;
4640 ++ t->parms.hwid = p->hwid;
4641 + dst_cache_reset(&t->dst_cache);
4642 + }
4643 +
4644 +@@ -2029,9 +2035,9 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
4645 + struct nlattr *data[],
4646 + struct netlink_ext_ack *extack)
4647 + {
4648 +- struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
4649 ++ struct ip6_tnl *t = netdev_priv(dev);
4650 ++ struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
4651 + struct __ip6_tnl_parm p;
4652 +- struct ip6_tnl *t;
4653 +
4654 + t = ip6gre_changelink_common(dev, tb, data, &p, extack);
4655 + if (IS_ERR(t))
4656 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
4657 +index d2d97d07ef27..d01ec252cb81 100644
4658 +--- a/net/ipv6/udp.c
4659 ++++ b/net/ipv6/udp.c
4660 +@@ -1056,15 +1056,23 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
4661 + const int hlen = skb_network_header_len(skb) +
4662 + sizeof(struct udphdr);
4663 +
4664 +- if (hlen + cork->gso_size > cork->fragsize)
4665 ++ if (hlen + cork->gso_size > cork->fragsize) {
4666 ++ kfree_skb(skb);
4667 + return -EINVAL;
4668 +- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
4669 ++ }
4670 ++ if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
4671 ++ kfree_skb(skb);
4672 + return -EINVAL;
4673 +- if (udp_sk(sk)->no_check6_tx)
4674 ++ }
4675 ++ if (udp_sk(sk)->no_check6_tx) {
4676 ++ kfree_skb(skb);
4677 + return -EINVAL;
4678 ++ }
4679 + if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
4680 +- dst_xfrm(skb_dst(skb)))
4681 ++ dst_xfrm(skb_dst(skb))) {
4682 ++ kfree_skb(skb);
4683 + return -EIO;
4684 ++ }
4685 +
4686 + skb_shinfo(skb)->gso_size = cork->gso_size;
4687 + skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
4688 +diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
4689 +index 865ecef68196..c7b6010b2c09 100644
4690 +--- a/net/openvswitch/flow_netlink.c
4691 ++++ b/net/openvswitch/flow_netlink.c
4692 +@@ -500,7 +500,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
4693 + return -EINVAL;
4694 + }
4695 +
4696 +- if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
4697 ++ if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) {
4698 + attrs |= 1 << type;
4699 + a[type] = nla;
4700 + }
4701 +diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
4702 +index 4cca8f274662..904730b8ce8f 100644
4703 +--- a/net/sched/act_tunnel_key.c
4704 ++++ b/net/sched/act_tunnel_key.c
4705 +@@ -197,6 +197,15 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
4706 + [TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 },
4707 + };
4708 +
4709 ++static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
4710 ++{
4711 ++ if (!p)
4712 ++ return;
4713 ++ if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
4714 ++ dst_release(&p->tcft_enc_metadata->dst);
4715 ++ kfree_rcu(p, rcu);
4716 ++}
4717 ++
4718 + static int tunnel_key_init(struct net *net, struct nlattr *nla,
4719 + struct nlattr *est, struct tc_action **a,
4720 + int ovr, int bind, bool rtnl_held,
4721 +@@ -360,8 +369,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
4722 + rcu_swap_protected(t->params, params_new,
4723 + lockdep_is_held(&t->tcf_lock));
4724 + spin_unlock_bh(&t->tcf_lock);
4725 +- if (params_new)
4726 +- kfree_rcu(params_new, rcu);
4727 ++ tunnel_key_release_params(params_new);
4728 +
4729 + if (ret == ACT_P_CREATED)
4730 + tcf_idr_insert(tn, *a);
4731 +@@ -385,12 +393,7 @@ static void tunnel_key_release(struct tc_action *a)
4732 + struct tcf_tunnel_key_params *params;
4733 +
4734 + params = rcu_dereference_protected(t->params, 1);
4735 +- if (params) {
4736 +- if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
4737 +- dst_release(&params->tcft_enc_metadata->dst);
4738 +-
4739 +- kfree_rcu(params, rcu);
4740 +- }
4741 ++ tunnel_key_release_params(params);
4742 + }
4743 +
4744 + static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
4745 +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
4746 +index f427a1e00e7e..1c4436523aa5 100644
4747 +--- a/net/sched/cls_api.c
4748 ++++ b/net/sched/cls_api.c
4749 +@@ -1053,7 +1053,6 @@ static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type,
4750 + int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
4751 + struct tcf_result *res, bool compat_mode)
4752 + {
4753 +- __be16 protocol = tc_skb_protocol(skb);
4754 + #ifdef CONFIG_NET_CLS_ACT
4755 + const int max_reclassify_loop = 4;
4756 + const struct tcf_proto *orig_tp = tp;
4757 +@@ -1063,6 +1062,7 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
4758 + reclassify:
4759 + #endif
4760 + for (; tp; tp = rcu_dereference_bh(tp->next)) {
4761 ++ __be16 protocol = tc_skb_protocol(skb);
4762 + int err;
4763 +
4764 + if (tp->protocol != protocol &&
4765 +@@ -1095,7 +1095,6 @@ reset:
4766 + }
4767 +
4768 + tp = first_tp;
4769 +- protocol = tc_skb_protocol(skb);
4770 + goto reclassify;
4771 + #endif
4772 + }
4773 +diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
4774 +index 208d940464d7..45bc2b72dc1c 100644
4775 +--- a/net/sched/cls_flower.c
4776 ++++ b/net/sched/cls_flower.c
4777 +@@ -1176,17 +1176,23 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
4778 + struct cls_fl_head *head = rtnl_dereference(tp->root);
4779 + struct cls_fl_filter *fold = *arg;
4780 + struct cls_fl_filter *fnew;
4781 ++ struct fl_flow_mask *mask;
4782 + struct nlattr **tb;
4783 +- struct fl_flow_mask mask = {};
4784 + int err;
4785 +
4786 + if (!tca[TCA_OPTIONS])
4787 + return -EINVAL;
4788 +
4789 +- tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
4790 +- if (!tb)
4791 ++ mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
4792 ++ if (!mask)
4793 + return -ENOBUFS;
4794 +
4795 ++ tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
4796 ++ if (!tb) {
4797 ++ err = -ENOBUFS;
4798 ++ goto errout_mask_alloc;
4799 ++ }
4800 ++
4801 + err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
4802 + fl_policy, NULL);
4803 + if (err < 0)
4804 +@@ -1229,12 +1235,12 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
4805 + }
4806 + }
4807 +
4808 +- err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr,
4809 ++ err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
4810 + tp->chain->tmplt_priv, extack);
4811 + if (err)
4812 + goto errout_idr;
4813 +
4814 +- err = fl_check_assign_mask(head, fnew, fold, &mask);
4815 ++ err = fl_check_assign_mask(head, fnew, fold, mask);
4816 + if (err)
4817 + goto errout_idr;
4818 +
4819 +@@ -1278,6 +1284,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
4820 + }
4821 +
4822 + kfree(tb);
4823 ++ kfree(mask);
4824 + return 0;
4825 +
4826 + errout_mask:
4827 +@@ -1291,6 +1298,8 @@ errout:
4828 + kfree(fnew);
4829 + errout_tb:
4830 + kfree(tb);
4831 ++errout_mask_alloc:
4832 ++ kfree(mask);
4833 + return err;
4834 + }
4835 +
4836 +diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
4837 +index 73547d17d3c6..943f08be7c38 100644
4838 +--- a/net/sunrpc/xprt.c
4839 ++++ b/net/sunrpc/xprt.c
4840 +@@ -1177,7 +1177,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
4841 + INIT_LIST_HEAD(&req->rq_xmit2);
4842 + goto out;
4843 + }
4844 +- } else {
4845 ++ } else if (!req->rq_seqno) {
4846 + list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
4847 + if (pos->rq_task->tk_owner != task->tk_owner)
4848 + continue;
4849 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
4850 +index 51cc6589443f..152f54137082 100644
4851 +--- a/sound/pci/hda/patch_conexant.c
4852 ++++ b/sound/pci/hda/patch_conexant.c
4853 +@@ -931,6 +931,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
4854 + SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
4855 + SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
4856 + SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
4857 ++ SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
4858 + SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
4859 + SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
4860 + SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
4861 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4862 +index 0d95316d6dbd..8ddd016c04d0 100644
4863 +--- a/sound/pci/hda/patch_realtek.c
4864 ++++ b/sound/pci/hda/patch_realtek.c
4865 +@@ -6842,7 +6842,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
4866 + {.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"},
4867 + {.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"},
4868 + {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"},
4869 +- {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc255-dell1"},
4870 ++ {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc225-dell1"},
4871 + {.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"},
4872 + {.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"},
4873 + {.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"},
4874 +diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
4875 +index 4d46f4567c3a..bec2eefa8b0f 100644
4876 +--- a/sound/soc/codecs/rt5514-spi.c
4877 ++++ b/sound/soc/codecs/rt5514-spi.c
4878 +@@ -280,6 +280,8 @@ static int rt5514_spi_pcm_probe(struct snd_soc_component *component)
4879 +
4880 + rt5514_dsp = devm_kzalloc(component->dev, sizeof(*rt5514_dsp),
4881 + GFP_KERNEL);
4882 ++ if (!rt5514_dsp)
4883 ++ return -ENOMEM;
4884 +
4885 + rt5514_dsp->dev = &rt5514_spi->dev;
4886 + mutex_init(&rt5514_dsp->dma_lock);
4887 +diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
4888 +index e2b5a11b16d1..f03195d2ab2e 100644
4889 +--- a/sound/soc/codecs/tlv320aic32x4.c
4890 ++++ b/sound/soc/codecs/tlv320aic32x4.c
4891 +@@ -822,6 +822,10 @@ static int aic32x4_set_bias_level(struct snd_soc_component *component,
4892 + case SND_SOC_BIAS_PREPARE:
4893 + break;
4894 + case SND_SOC_BIAS_STANDBY:
4895 ++ /* Initial cold start */
4896 ++ if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF)
4897 ++ break;
4898 ++
4899 + /* Switch off BCLK_N Divider */
4900 + snd_soc_component_update_bits(component, AIC32X4_BCLKN,
4901 + AIC32X4_BCLKEN, 0);
4902 +diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
4903 +index afc559866095..91a2436ce952 100644
4904 +--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
4905 ++++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
4906 +@@ -399,7 +399,13 @@ static int sst_media_hw_params(struct snd_pcm_substream *substream,
4907 + struct snd_pcm_hw_params *params,
4908 + struct snd_soc_dai *dai)
4909 + {
4910 +- snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
4911 ++ int ret;
4912 ++
4913 ++ ret =
4914 ++ snd_pcm_lib_malloc_pages(substream,
4915 ++ params_buffer_bytes(params));
4916 ++ if (ret)
4917 ++ return ret;
4918 + memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
4919 + return 0;
4920 + }
4921 +diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
4922 +index 460b4bdf4c1e..5d546dcdbc80 100644
4923 +--- a/tools/testing/selftests/x86/protection_keys.c
4924 ++++ b/tools/testing/selftests/x86/protection_keys.c
4925 +@@ -1133,6 +1133,21 @@ void test_pkey_syscalls_bad_args(int *ptr, u16 pkey)
4926 + pkey_assert(err);
4927 + }
4928 +
4929 ++void become_child(void)
4930 ++{
4931 ++ pid_t forkret;
4932 ++
4933 ++ forkret = fork();
4934 ++ pkey_assert(forkret >= 0);
4935 ++ dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
4936 ++
4937 ++ if (!forkret) {
4938 ++ /* in the child */
4939 ++ return;
4940 ++ }
4941 ++ exit(0);
4942 ++}
4943 ++
4944 + /* Assumes that all pkeys other than 'pkey' are unallocated */
4945 + void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
4946 + {
4947 +@@ -1141,7 +1156,7 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
4948 + int nr_allocated_pkeys = 0;
4949 + int i;
4950 +
4951 +- for (i = 0; i < NR_PKEYS*2; i++) {
4952 ++ for (i = 0; i < NR_PKEYS*3; i++) {
4953 + int new_pkey;
4954 + dprintf1("%s() alloc loop: %d\n", __func__, i);
4955 + new_pkey = alloc_pkey();
4956 +@@ -1152,20 +1167,26 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
4957 + if ((new_pkey == -1) && (errno == ENOSPC)) {
4958 + dprintf2("%s() failed to allocate pkey after %d tries\n",
4959 + __func__, nr_allocated_pkeys);
4960 +- break;
4961 ++ } else {
4962 ++ /*
4963 ++ * Ensure the number of successes never
4964 ++ * exceeds the number of keys supported
4965 ++ * in the hardware.
4966 ++ */
4967 ++ pkey_assert(nr_allocated_pkeys < NR_PKEYS);
4968 ++ allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
4969 + }
4970 +- pkey_assert(nr_allocated_pkeys < NR_PKEYS);
4971 +- allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
4972 ++
4973 ++ /*
4974 ++ * Make sure that allocation state is properly
4975 ++ * preserved across fork().
4976 ++ */
4977 ++ if (i == NR_PKEYS*2)
4978 ++ become_child();
4979 + }
4980 +
4981 + dprintf3("%s()::%d\n", __func__, __LINE__);
4982 +
4983 +- /*
4984 +- * ensure it did not reach the end of the loop without
4985 +- * failure:
4986 +- */
4987 +- pkey_assert(i < NR_PKEYS*2);
4988 +-
4989 + /*
4990 + * There are 16 pkeys supported in hardware. Three are
4991 + * allocated by the time we get here: