Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Thu, 31 Jan 2019 11:24:08
Message-Id: 1548933813.613bef45571bde8dff42f7f55d611b54193460ea.mpagano@gentoo
commit: 613bef45571bde8dff42f7f55d611b54193460ea
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jan 31 11:23:33 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jan 31 11:23:33 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=613bef45

proj/linux-patches: Linux patch 4.14.97

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README | 4 +
 1096_linux-4.14.97.patch | 3121 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3125 insertions(+)

diff --git a/0000_README b/0000_README
index 628069f..c08af0d 100644
--- a/0000_README
+++ b/0000_README
@@ -427,6 +427,10 @@ Patch: 1095_4.14.96.patch
 From: http://www.kernel.org
 Desc: Linux 4.14.96
 
+Patch: 1096_4.14.97.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.97
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
 
diff --git a/1096_linux-4.14.97.patch b/1096_linux-4.14.97.patch
new file mode 100644
index 0000000..d0a307b
--- /dev/null
+++ b/1096_linux-4.14.97.patch
@@ -0,0 +1,3121 @@
+diff --git a/Makefile b/Makefile
+index 57b45169ed85..485afde0f1f1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 96
++SUBLEVEL = 97
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
+index 9185541035cc..6958545390f0 100644
+--- a/arch/arc/include/asm/perf_event.h
++++ b/arch/arc/include/asm/perf_event.h
+@@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = {
+
+ /* counts condition */
+ [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
+- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
++ /* All jump instructions that are taken */
++ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
+ [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
+ #ifdef CONFIG_ISA_ARCV2
+ [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
+diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
+index 62ad4bcb841a..f230bb7092fd 100644
+--- a/arch/arc/lib/memset-archs.S
++++ b/arch/arc/lib/memset-archs.S
+@@ -7,11 +7,39 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/cache.h>
+
+-#undef PREALLOC_NOT_AVAIL
++/*
++ * The memset implementation below is optimized to use prefetchw and prealloc
++ * instruction in case of CPU with 64B L1 data cache line (L1_CACHE_SHIFT == 6)
++ * If you want to implement optimized memset for other possible L1 data cache
++ * line lengths (32B and 128B) you should rewrite code carefully checking
++ * we don't call any prefetchw/prealloc instruction for L1 cache lines which
++ * don't belongs to memset area.
++ */
++
++#if L1_CACHE_SHIFT == 6
++
++.macro PREALLOC_INSTR reg, off
++ prealloc [\reg, \off]
++.endm
++
++.macro PREFETCHW_INSTR reg, off
++ prefetchw [\reg, \off]
++.endm
++
++#else
++
++.macro PREALLOC_INSTR
++.endm
++
++.macro PREFETCHW_INSTR
++.endm
++
++#endif
+
+ ENTRY_CFI(memset)
+- prefetchw [r0] ; Prefetch the write location
++ PREFETCHW_INSTR r0, 0 ; Prefetch the first write location
+ mov.f 0, r2
+ ;;; if size is zero
+ jz.d [blink]
+@@ -48,11 +76,8 @@ ENTRY_CFI(memset)
+
+ lpnz @.Lset64bytes
+ ;; LOOP START
+-#ifdef PREALLOC_NOT_AVAIL
+- prefetchw [r3, 64] ;Prefetch the next write location
+-#else
+- prealloc [r3, 64]
+-#endif
++ PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching
++
+ #ifdef CONFIG_ARC_HAS_LL64
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
+@@ -85,7 +110,6 @@ ENTRY_CFI(memset)
+ lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
+ lpnz .Lset32bytes
+ ;; LOOP START
+- prefetchw [r3, 32] ;Prefetch the next write location
+ #ifdef CONFIG_ARC_HAS_LL64
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
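
The memset rework above emits the ARCv2 prealloc/prefetchw instructions only when L1_CACHE_SHIFT == 6; for any other line size the macros expand to nothing, so the routine can never touch a cache line outside the area being set. A minimal C sketch of the same compile-time guard (the names and the __builtin_prefetch fallback are illustrative, not the kernel's code):

    #define L1_CACHE_SHIFT 6                 /* assumed: 64-byte lines */

    #if L1_CACHE_SHIFT == 6
    /* 64B lines: prefetching line i+64 inside the buffer is safe. */
    #define PREFETCH_WRITE(p) __builtin_prefetch((p), 1)
    #else
    /* Unknown line size: emit nothing rather than risk touching
     * a line that does not belong to the memset area. */
    #define PREFETCH_WRITE(p) ((void)0)
    #endif

    static void fill(unsigned char *dst, unsigned char v, unsigned long n)
    {
        unsigned long i;

        for (i = 0; i < n; i++) {
            if ((i & 63) == 0 && i + 64 < n)   /* stay inside the area */
                PREFETCH_WRITE(dst + i + 64);
            dst[i] = v;
        }
    }
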
+diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
+index ba145065c579..f890b2f9f82f 100644
+--- a/arch/arc/mm/init.c
++++ b/arch/arc/mm/init.c
+@@ -138,7 +138,8 @@ void __init setup_arch_memory(void)
+ */
+
+ memblock_add_node(low_mem_start, low_mem_sz, 0);
+- memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
++ memblock_reserve(CONFIG_LINUX_LINK_BASE,
++ __pa(_end) - CONFIG_LINUX_LINK_BASE);
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start)
+diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
+index a3219837fa70..4ba5ad44a21a 100644
+--- a/arch/s390/kernel/early.c
++++ b/arch/s390/kernel/early.c
+@@ -226,10 +226,10 @@ static noinline __init void detect_machine_type(void)
+ if (stsi(vmms, 3, 2, 2) || !vmms->count)
+ return;
+
+- /* Running under KVM? If not we assume z/VM */
++ /* Detect known hypervisors */
+ if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
+ S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
+- else
++ else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
+ S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
+ }
+
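
The hex strings above are EBCDIC: 0xd2 0xe5 0xd4 spells "KVM" and 0xa9 0x61 0xe5 0xd4 spells "z/VM", so the hunk replaces "anything that is not KVM must be z/VM" with an explicit match on both known hypervisors. Spelled out (EBCDIC values as commonly tabulated):

    #include <string.h>

    /* EBCDIC: 'K'=0xd2 'V'=0xe5 'M'=0xd4, 'z'=0xa9 '/'=0x61 */
    static int cpi_is_kvm(const unsigned char *cpi)
    {
        return !memcmp(cpi, "\xd2\xe5\xd4", 3);     /* "KVM" */
    }

    static int cpi_is_zvm(const unsigned char *cpi)
    {
        return !memcmp(cpi, "\xa9\x61\xe5\xd4", 4); /* "z/VM" */
    }
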
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 98c1f7941142..3cb71fc94995 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -884,6 +884,8 @@ void __init setup_arch(char **cmdline_p)
+ pr_info("Linux is running under KVM in 64-bit mode\n");
+ else if (MACHINE_IS_LPAR)
+ pr_info("Linux is running natively in 64-bit mode\n");
++ else
++ pr_info("Linux is running as a guest in 64-bit mode\n");
+
+ /* Have one command line that is parsed and saved in /proc/cmdline */
+ /* boot_command_line has been already set up in early.c */
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index ae5df4177803..27258db640d7 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -387,9 +387,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
+ */
+ void smp_call_ipl_cpu(void (*func)(void *), void *data)
+ {
++ struct lowcore *lc = pcpu_devices->lowcore;
++
++ if (pcpu_devices[0].address == stap())
++ lc = &S390_lowcore;
++
+ pcpu_delegate(&pcpu_devices[0], func, data,
+- pcpu_devices->lowcore->panic_stack -
+- PANIC_FRAME_OFFSET + PAGE_SIZE);
++ lc->panic_stack - PANIC_FRAME_OFFSET + PAGE_SIZE);
+ }
+
+ int smp_find_processor_id(u16 address)
+@@ -1168,7 +1172,11 @@ static ssize_t __ref rescan_store(struct device *dev,
+ {
+ int rc;
+
++ rc = lock_device_hotplug_sysfs();
++ if (rc)
++ return rc;
+ rc = smp_rescan_cpus();
++ unlock_device_hotplug();
+ return rc ? rc : count;
+ }
+ static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
+diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
+index 1911310959f8..a77fd3c8d824 100644
+--- a/arch/x86/entry/vdso/vma.c
++++ b/arch/x86/entry/vdso/vma.c
+@@ -112,7 +112,7 @@ static int vvar_fault(const struct vm_special_mapping *sm,
+ __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
+ } else if (sym_offset == image->sym_pvclock_page) {
+ struct pvclock_vsyscall_time_info *pvti =
+- pvclock_pvti_cpu0_va();
++ pvclock_get_pvti_cpu0_va();
+ if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
+ ret = vm_insert_pfn(
+ vma,
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index ed97ef3b48a7..7ebcbd1d881d 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -182,6 +182,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
+
+ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
+
++/*
++ * Init a new mm. Used on mm copies, like at fork()
++ * and on mm's that are brand-new, like at execve().
++ */
+ static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+ {
+@@ -232,8 +236,22 @@ do { \
+ } while (0)
+ #endif
+
++static inline void arch_dup_pkeys(struct mm_struct *oldmm,
++ struct mm_struct *mm)
++{
++#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
++ if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
++ return;
++
++ /* Duplicate the oldmm pkey state in mm: */
++ mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
++ mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
++#endif
++}
++
+ static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+ {
++ arch_dup_pkeys(oldmm, mm);
+ paravirt_arch_dup_mmap(oldmm, mm);
+ return ldt_dup_context(oldmm, mm);
+ }
+diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
+index 3e4ed8fb5f91..a7471dcd2205 100644
+--- a/arch/x86/include/asm/pvclock.h
++++ b/arch/x86/include/asm/pvclock.h
+@@ -5,15 +5,6 @@
+ #include <linux/clocksource.h>
+ #include <asm/pvclock-abi.h>
+
+-#ifdef CONFIG_KVM_GUEST
+-extern struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void);
+-#else
+-static inline struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
+-{
+- return NULL;
+-}
+-#endif
+-
+ /* some helper functions for xen and kvm pv clock sources */
+ u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
+ u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
+@@ -102,4 +93,14 @@ struct pvclock_vsyscall_time_info {
+
+ #define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
+
++#ifdef CONFIG_PARAVIRT_CLOCK
++void pvclock_set_pvti_cpu0_va(struct pvclock_vsyscall_time_info *pvti);
++struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void);
++#else
++static inline struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void)
++{
++ return NULL;
++}
++#endif
++
+ #endif /* _ASM_X86_PVCLOCK_H */
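
The header change above turns the pvti pointer from something only a CONFIG_KVM_GUEST kernel could provide into a set/get pair that any CONFIG_PARAVIRT_CLOCK user can drive, which is what lets the Xen code later in this patch publish its own vsyscall time info. The registration shape, reduced to a sketch with hypothetical names:

    struct time_info;                       /* opaque provider data */

    static struct time_info *cpu0_ti;       /* set by the active clock provider */

    /* called once by KVM or Xen setup code */
    void ti_set(struct time_info *ti)
    {
        cpu0_ti = ti;
    }

    /* called by the consumer (e.g. the vDSO mapping code) */
    struct time_info *ti_get(void)
    {
        return cpu0_ti;                     /* NULL if nothing registered */
    }
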
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index 48703d430a2f..08806d64eacd 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -47,12 +47,6 @@ early_param("no-kvmclock", parse_no_kvmclock);
+ static struct pvclock_vsyscall_time_info *hv_clock;
+ static struct pvclock_wall_clock wall_clock;
+
+-struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
+-{
+- return hv_clock;
+-}
+-EXPORT_SYMBOL_GPL(pvclock_pvti_cpu0_va);
+-
+ /*
+ * The wallclock is the time of day when we booted. Since then, some time may
+ * have elapsed since the hypervisor wrote the data. So we try to account for
+@@ -335,6 +329,7 @@ int __init kvm_setup_vsyscall_timeinfo(void)
+ return 1;
+ }
+
++ pvclock_set_pvti_cpu0_va(hv_clock);
+ put_cpu();
+
+ kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
+diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
+index 5c3f6d6a5078..761f6af6efa5 100644
+--- a/arch/x86/kernel/pvclock.c
++++ b/arch/x86/kernel/pvclock.c
+@@ -25,8 +25,10 @@
+
+ #include <asm/fixmap.h>
+ #include <asm/pvclock.h>
++#include <asm/vgtod.h>
+
+ static u8 valid_flags __read_mostly = 0;
++static struct pvclock_vsyscall_time_info *pvti_cpu0_va __read_mostly;
+
+ void pvclock_set_flags(u8 flags)
+ {
+@@ -144,3 +146,15 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
+
+ set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
+ }
++
++void pvclock_set_pvti_cpu0_va(struct pvclock_vsyscall_time_info *pvti)
++{
++ WARN_ON(vclock_was_used(VCLOCK_PVCLOCK));
++ pvti_cpu0_va = pvti;
++}
++
++struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void)
++{
++ return pvti_cpu0_va;
++}
++EXPORT_SYMBOL_GPL(pvclock_get_pvti_cpu0_va);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 130be2efafbe..867c22f8d59b 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5923,8 +5923,7 @@ restart:
+ toggle_interruptibility(vcpu, ctxt->interruptibility);
+ vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
+ kvm_rip_write(vcpu, ctxt->eip);
+- if (r == EMULATE_DONE &&
+- (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
++ if (r == EMULATE_DONE && ctxt->tf)
+ kvm_vcpu_do_singlestep(vcpu, &r);
+ if (!ctxt->have_exception ||
+ exception_type(ctxt->exception.vector) == EXCPT_TRAP)
+@@ -7423,14 +7422,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ }
+ }
+
+- kvm_load_guest_fpu(vcpu);
+-
+ if (unlikely(vcpu->arch.complete_userspace_io)) {
+ int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
+ vcpu->arch.complete_userspace_io = NULL;
+ r = cui(vcpu);
+ if (r <= 0)
+- goto out_fpu;
++ goto out;
+ } else
+ WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
+
+@@ -7439,8 +7436,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ else
+ r = vcpu_run(vcpu);
+
+-out_fpu:
+- kvm_put_guest_fpu(vcpu);
+ out:
+ kvm_put_guest_fpu(vcpu);
+ post_kvm_run_save(vcpu);
+diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
+index 79778ab200e4..a53665116458 100644
+--- a/arch/x86/lib/kaslr.c
++++ b/arch/x86/lib/kaslr.c
+@@ -36,8 +36,8 @@ static inline u16 i8254(void)
+ u16 status, timer;
+
+ do {
+- outb(I8254_PORT_CONTROL,
+- I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
++ outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
++ I8254_PORT_CONTROL);
+ status = inb(I8254_PORT_COUNTER0);
+ timer = inb(I8254_PORT_COUNTER0);
+ timer |= inb(I8254_PORT_COUNTER0) << 8;
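
The kaslr.c hunk fixes swapped arguments: the kernel's x86 outb() takes (value, port), so the readback command byte was being written to the wrong port. A userspace sketch of the corrected call order (glibc's <sys/io.h> uses the same (value, port) convention; PIT constants as commonly documented; needs ioperm() and root):

    #include <sys/io.h>

    #define I8254_PORT_CONTROL    0x43
    #define I8254_PORT_COUNTER0   0x40
    #define I8254_CMD_READBACK    0xc0
    #define I8254_SELECT_COUNTER0 0x02

    static unsigned short i8254_read(void)
    {
        unsigned short lo, hi;

        /* value first, port second */
        outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0, I8254_PORT_CONTROL);
        (void)inb(I8254_PORT_COUNTER0);         /* status byte */
        lo = inb(I8254_PORT_COUNTER0);
        hi = inb(I8254_PORT_COUNTER0);
        return lo | (hi << 8);
    }
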
+diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
+index 3e3a58ea669e..1d83152c761b 100644
+--- a/arch/x86/xen/suspend.c
++++ b/arch/x86/xen/suspend.c
+@@ -22,6 +22,8 @@ static DEFINE_PER_CPU(u64, spec_ctrl);
+
+ void xen_arch_pre_suspend(void)
+ {
++ xen_save_time_memory_area();
++
+ if (xen_pv_domain())
+ xen_pv_pre_suspend();
+ }
+@@ -32,6 +34,8 @@ void xen_arch_post_suspend(int cancelled)
+ xen_pv_post_suspend(cancelled);
+ else
+ xen_hvm_post_suspend(cancelled);
++
++ xen_restore_time_memory_area();
+ }
+
+ static void xen_vcpu_notify_restore(void *data)
+diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
+index 80c2a4bdf230..03706331f567 100644
+--- a/arch/x86/xen/time.c
++++ b/arch/x86/xen/time.c
+@@ -31,6 +31,8 @@
+ /* Xen may fire a timer up to this many ns early */
+ #define TIMER_SLOP 100000
+
++static u64 xen_sched_clock_offset __read_mostly;
++
+ /* Get the TSC speed from Xen */
+ static unsigned long xen_tsc_khz(void)
+ {
+@@ -57,6 +59,11 @@ static u64 xen_clocksource_get_cycles(struct clocksource *cs)
+ return xen_clocksource_read();
+ }
+
++static u64 xen_sched_clock(void)
++{
++ return xen_clocksource_read() - xen_sched_clock_offset;
++}
++
+ static void xen_read_wallclock(struct timespec *ts)
+ {
+ struct shared_info *s = HYPERVISOR_shared_info;
+@@ -354,8 +361,6 @@ void xen_timer_resume(void)
+ {
+ int cpu;
+
+- pvclock_resume();
+-
+ if (xen_clockevent != &xen_vcpuop_clockevent)
+ return;
+
+@@ -367,12 +372,107 @@ void xen_timer_resume(void)
+ }
+
+ static const struct pv_time_ops xen_time_ops __initconst = {
+- .sched_clock = xen_clocksource_read,
++ .sched_clock = xen_sched_clock,
+ .steal_clock = xen_steal_clock,
+ };
+
++static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
++static u64 xen_clock_value_saved;
++
++void xen_save_time_memory_area(void)
++{
++ struct vcpu_register_time_memory_area t;
++ int ret;
++
++ xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset;
++
++ if (!xen_clock)
++ return;
++
++ t.addr.v = NULL;
++
++ ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
++ if (ret != 0)
++ pr_notice("Cannot save secondary vcpu_time_info (err %d)",
++ ret);
++ else
++ clear_page(xen_clock);
++}
++
++void xen_restore_time_memory_area(void)
++{
++ struct vcpu_register_time_memory_area t;
++ int ret;
++
++ if (!xen_clock)
++ goto out;
++
++ t.addr.v = &xen_clock->pvti;
++
++ ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
++
++ /*
++ * We don't disable VCLOCK_PVCLOCK entirely if it fails to register the
++ * secondary time info with Xen or if we migrated to a host without the
++ * necessary flags. On both of these cases what happens is either
++ * process seeing a zeroed out pvti or seeing no PVCLOCK_TSC_STABLE_BIT
++ * bit set. Userspace checks the latter and if 0, it discards the data
++ * in pvti and fallbacks to a system call for a reliable timestamp.
++ */
++ if (ret != 0)
++ pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
++ ret);
++
++out:
++ /* Need pvclock_resume() before using xen_clocksource_read(). */
++ pvclock_resume();
++ xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved;
++}
++
++static void xen_setup_vsyscall_time_info(void)
++{
++ struct vcpu_register_time_memory_area t;
++ struct pvclock_vsyscall_time_info *ti;
++ int ret;
++
++ ti = (struct pvclock_vsyscall_time_info *)get_zeroed_page(GFP_KERNEL);
++ if (!ti)
++ return;
++
++ t.addr.v = &ti->pvti;
++
++ ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
++ if (ret) {
++ pr_notice("xen: VCLOCK_PVCLOCK not supported (err %d)\n", ret);
++ free_page((unsigned long)ti);
++ return;
++ }
++
++ /*
++ * If primary time info had this bit set, secondary should too since
++ * it's the same data on both just different memory regions. But we
++ * still check it in case hypervisor is buggy.
++ */
++ if (!(ti->pvti.flags & PVCLOCK_TSC_STABLE_BIT)) {
++ t.addr.v = NULL;
++ ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area,
++ 0, &t);
++ if (!ret)
++ free_page((unsigned long)ti);
++
++ pr_notice("xen: VCLOCK_PVCLOCK not supported (tsc unstable)\n");
++ return;
++ }
++
++ xen_clock = ti;
++ pvclock_set_pvti_cpu0_va(xen_clock);
++
++ xen_clocksource.archdata.vclock_mode = VCLOCK_PVCLOCK;
++}
++
+ static void __init xen_time_init(void)
+ {
++ struct pvclock_vcpu_time_info *pvti;
+ int cpu = smp_processor_id();
+ struct timespec tp;
+
+@@ -396,6 +496,16 @@ static void __init xen_time_init(void)
+
+ setup_force_cpu_cap(X86_FEATURE_TSC);
+
++ /*
++ * We check ahead on the primary time info if this
++ * bit is supported hence speeding up Xen clocksource.
++ */
++ pvti = &__this_cpu_read(xen_vcpu)->time;
++ if (pvti->flags & PVCLOCK_TSC_STABLE_BIT) {
++ pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
++ xen_setup_vsyscall_time_info();
++ }
++
+ xen_setup_runstate_info(cpu);
+ xen_setup_timer(cpu);
+ xen_setup_cpu_clockevents();
+@@ -408,6 +518,7 @@ static void __init xen_time_init(void)
+
+ void __ref xen_init_time_ops(void)
+ {
++ xen_sched_clock_offset = xen_clocksource_read();
+ pv_time_ops = xen_time_ops;
+
+ x86_init.timers.timer_init = xen_time_init;
+@@ -450,6 +561,7 @@ void __init xen_hvm_init_time_ops(void)
+ return;
+ }
+
++ xen_sched_clock_offset = xen_clocksource_read();
+ pv_time_ops = xen_time_ops;
+ x86_init.timers.setup_percpu_clockev = xen_time_init;
+ x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;
+diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
+index f377e1820c6c..75011b80660f 100644
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -70,6 +70,8 @@ void xen_setup_runstate_info(int cpu);
+ void xen_teardown_timer(int cpu);
+ u64 xen_clocksource_read(void);
+ void xen_setup_cpu_clockevents(void);
++void xen_save_time_memory_area(void);
++void xen_restore_time_memory_area(void);
+ void __init xen_init_time_ops(void);
+ void __init xen_hvm_init_time_ops(void);
+
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
+index 8260b90eb64b..4a6c5e7b6835 100644
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -208,6 +208,32 @@ static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
+ return xlat_nvdimm_status(buf, cmd, status);
+ }
+
++static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
++ struct nd_cmd_pkg *call_pkg)
++{
++ if (call_pkg) {
++ int i;
++
++ if (nfit_mem->family != call_pkg->nd_family)
++ return -ENOTTY;
++
++ for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
++ if (call_pkg->nd_reserved2[i])
++ return -EINVAL;
++ return call_pkg->nd_command;
++ }
++
++ /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
++ if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
++ return cmd;
++
++ /*
++ * Force function number validation to fail since 0 is never
++ * published as a valid function in dsm_mask.
++ */
++ return 0;
++}
++
+ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
+ unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
+ {
+@@ -220,21 +246,11 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
+ unsigned long cmd_mask, dsm_mask;
+ u32 offset, fw_status = 0;
+ acpi_handle handle;
+- unsigned int func;
+ const guid_t *guid;
+- int rc, i;
++ int func, rc, i;
+
+ if (cmd_rc)
+ *cmd_rc = -EINVAL;
+- func = cmd;
+- if (cmd == ND_CMD_CALL) {
+- call_pkg = buf;
+- func = call_pkg->nd_command;
+-
+- for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
+- if (call_pkg->nd_reserved2[i])
+- return -EINVAL;
+- }
+
+ if (nvdimm) {
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+@@ -242,9 +258,12 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
+
+ if (!adev)
+ return -ENOTTY;
+- if (call_pkg && nfit_mem->family != call_pkg->nd_family)
+- return -ENOTTY;
+
++ if (cmd == ND_CMD_CALL)
++ call_pkg = buf;
++ func = cmd_to_func(nfit_mem, cmd, call_pkg);
++ if (func < 0)
++ return func;
+ dimm_name = nvdimm_name(nvdimm);
+ cmd_name = nvdimm_cmd_name(cmd);
+ cmd_mask = nvdimm_cmd_mask(nvdimm);
+@@ -255,6 +274,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
+ } else {
+ struct acpi_device *adev = to_acpi_dev(acpi_desc);
+
++ func = cmd;
+ cmd_name = nvdimm_bus_cmd_name(cmd);
+ cmd_mask = nd_desc->cmd_mask;
+ dsm_mask = cmd_mask;
+@@ -269,7 +289,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
+ if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
+ return -ENOTTY;
+
+- if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
++ /*
++ * Check for a valid command. For ND_CMD_CALL, we also have to
++ * make sure that the DSM function is supported.
++ */
++ if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
++ return -ENOTTY;
++ else if (!test_bit(cmd, &cmd_mask))
+ return -ENOTTY;
+
+ in_obj.type = ACPI_TYPE_PACKAGE;
+@@ -1503,6 +1529,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
+ return 0;
+ }
+
++ /*
++ * Function 0 is the command interrogation function, don't
++ * export it to potential userspace use, and enable it to be
++ * used as an error value in acpi_nfit_ctl().
++ */
++ dsm_mask &= ~1UL;
++
+ guid = to_nfit_uuid(nfit_mem->family);
+ for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
+ if (acpi_check_dsm(adev_dimm->handle, guid, 1, 1ULL << i))
+diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
+index b5e3103c1175..e43c876a9223 100644
+--- a/drivers/char/mwave/mwavedd.c
++++ b/drivers/char/mwave/mwavedd.c
+@@ -59,6 +59,7 @@
+ #include <linux/mutex.h>
+ #include <linux/delay.h>
+ #include <linux/serial_8250.h>
++#include <linux/nospec.h>
+ #include "smapi.h"
+ #include "mwavedd.h"
+ #include "3780i.h"
+@@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
+ ipcnum);
+ return -EINVAL;
+ }
++ ipcnum = array_index_nospec(ipcnum,
++ ARRAY_SIZE(pDrvData->IPCs));
+ PRINTK_3(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
+ " ipcnum %x entry usIntCount %x\n",
+@@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
+ " Invalid ipcnum %x\n", ipcnum);
+ return -EINVAL;
+ }
++ ipcnum = array_index_nospec(ipcnum,
++ ARRAY_SIZE(pDrvData->IPCs));
+ PRINTK_3(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
+ " ipcnum %x, usIntCount %x\n",
+@@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
+ ipcnum);
+ return -EINVAL;
+ }
++ ipcnum = array_index_nospec(ipcnum,
++ ARRAY_SIZE(pDrvData->IPCs));
+ mutex_lock(&mwave_mutex);
+ if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
+ pDrvData->IPCs[ipcnum].bIsEnabled = false;
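
Each ioctl path above clamps the user-controlled ipcnum with array_index_nospec() after the bounds check, so a mispredicted branch cannot speculatively index past pDrvData->IPCs (Spectre variant 1). The underlying idea is a branchless mask; roughly (a conceptual sketch of the technique, not the kernel helper, and it assumes the values fit in a signed long):

    /* Return idx when idx < size, 0 otherwise, without a data-dependent
     * branch that speculation could run past. */
    static unsigned long clamp_index(unsigned long idx, unsigned long size)
    {
        /* idx - size is negative exactly when idx < size; the arithmetic
         * shift turns that sign bit into all-ones or all-zeros. */
        unsigned long mask = (long)(idx - size) >> (sizeof(long) * 8 - 1);
        return idx & mask;
    }
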
+diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
+index db0e6652d7ef..0824405f93fb 100644
+--- a/drivers/hv/hv_balloon.c
++++ b/drivers/hv/hv_balloon.c
+@@ -846,12 +846,14 @@ static unsigned long handle_pg_range(unsigned long pg_start,
+ pfn_cnt -= pgs_ol;
+ /*
+ * Check if the corresponding memory block is already
+- * online by checking its last previously backed page.
+- * In case it is we need to bring rest (which was not
+- * backed previously) online too.
++ * online. It is possible to observe struct pages still
++ * being uninitialized here so check section instead.
++ * In case the section is online we need to bring the
++ * rest of pfns (which were not backed previously)
++ * online too.
+ */
+ if (start_pfn > has->start_pfn &&
+- !PageReserved(pfn_to_page(start_pfn - 1)))
++ online_section_nr(pfn_to_section_nr(start_pfn)))
+ hv_bring_pgs_online(has, start_pfn, pgs_ol);
+
+ }
+diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
+index 3f8dde8d59ba..74c1dfb8183b 100644
+--- a/drivers/hv/ring_buffer.c
++++ b/drivers/hv/ring_buffer.c
+@@ -141,26 +141,25 @@ static u32 hv_copyto_ringbuffer(
+ }
+
+ /* Get various debug metrics for the specified ring buffer. */
+-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+- struct hv_ring_buffer_debug_info *debug_info)
++int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
++ struct hv_ring_buffer_debug_info *debug_info)
+ {
+ u32 bytes_avail_towrite;
+ u32 bytes_avail_toread;
+
+- if (ring_info->ring_buffer) {
+- hv_get_ringbuffer_availbytes(ring_info,
+- &bytes_avail_toread,
+- &bytes_avail_towrite);
+-
+- debug_info->bytes_avail_toread = bytes_avail_toread;
+- debug_info->bytes_avail_towrite = bytes_avail_towrite;
+- debug_info->current_read_index =
+- ring_info->ring_buffer->read_index;
+- debug_info->current_write_index =
+- ring_info->ring_buffer->write_index;
+- debug_info->current_interrupt_mask =
+- ring_info->ring_buffer->interrupt_mask;
+- }
++ if (!ring_info->ring_buffer)
++ return -EINVAL;
++
++ hv_get_ringbuffer_availbytes(ring_info,
++ &bytes_avail_toread,
++ &bytes_avail_towrite);
++ debug_info->bytes_avail_toread = bytes_avail_toread;
++ debug_info->bytes_avail_towrite = bytes_avail_towrite;
++ debug_info->current_read_index = ring_info->ring_buffer->read_index;
++ debug_info->current_write_index = ring_info->ring_buffer->write_index;
++ debug_info->current_interrupt_mask
++ = ring_info->ring_buffer->interrupt_mask;
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
+
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 4218a616f1d3..1fd812ed679b 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -297,12 +297,16 @@ static ssize_t out_intr_mask_show(struct device *dev,
+ {
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info outbound;
++ int ret;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+- return -EINVAL;
+- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
++
++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
++ &outbound);
++ if (ret < 0)
++ return ret;
++
+ return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
+ }
+ static DEVICE_ATTR_RO(out_intr_mask);
+@@ -312,12 +316,15 @@ static ssize_t out_read_index_show(struct device *dev,
+ {
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info outbound;
++ int ret;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+- return -EINVAL;
+- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
++
++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
++ &outbound);
++ if (ret < 0)
++ return ret;
+ return sprintf(buf, "%d\n", outbound.current_read_index);
+ }
+ static DEVICE_ATTR_RO(out_read_index);
+@@ -328,12 +335,15 @@ static ssize_t out_write_index_show(struct device *dev,
+ {
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info outbound;
++ int ret;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+- return -EINVAL;
+- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
++
++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
++ &outbound);
++ if (ret < 0)
++ return ret;
+ return sprintf(buf, "%d\n", outbound.current_write_index);
+ }
+ static DEVICE_ATTR_RO(out_write_index);
+@@ -344,12 +354,15 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
+ {
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info outbound;
++ int ret;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+- return -EINVAL;
+- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
++
++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
++ &outbound);
++ if (ret < 0)
++ return ret;
+ return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
+ }
+ static DEVICE_ATTR_RO(out_read_bytes_avail);
+@@ -360,12 +373,15 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
+ {
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info outbound;
++ int ret;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+- return -EINVAL;
+- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
++
++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
++ &outbound);
++ if (ret < 0)
++ return ret;
+ return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
+ }
+ static DEVICE_ATTR_RO(out_write_bytes_avail);
+@@ -375,12 +391,15 @@ static ssize_t in_intr_mask_show(struct device *dev,
+ {
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info inbound;
++ int ret;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+- return -EINVAL;
+- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
++
++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
++ if (ret < 0)
++ return ret;
++
+ return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
+ }
+ static DEVICE_ATTR_RO(in_intr_mask);
+@@ -390,12 +409,15 @@ static ssize_t in_read_index_show(struct device *dev,
+ {
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info inbound;
++ int ret;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+- return -EINVAL;
+- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
++
++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
++ if (ret < 0)
++ return ret;
++
+ return sprintf(buf, "%d\n", inbound.current_read_index);
+ }
+ static DEVICE_ATTR_RO(in_read_index);
+@@ -405,12 +427,15 @@ static ssize_t in_write_index_show(struct device *dev,
+ {
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info inbound;
++ int ret;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+- return -EINVAL;
+- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
++
++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
++ if (ret < 0)
++ return ret;
++
+ return sprintf(buf, "%d\n", inbound.current_write_index);
+ }
+ static DEVICE_ATTR_RO(in_write_index);
+@@ -421,12 +446,15 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
+ {
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info inbound;
++ int ret;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+- return -EINVAL;
+- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
++
++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
++ if (ret < 0)
++ return ret;
++
+ return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
+ }
+ static DEVICE_ATTR_RO(in_read_bytes_avail);
+@@ -437,12 +465,15 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
+ {
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info inbound;
++ int ret;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+- return -EINVAL;
+- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
++
++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
++ if (ret < 0)
++ return ret;
++
+ return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
+ }
+ static DEVICE_ATTR_RO(in_write_bytes_avail);
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index f55dcdf99bc5..26476a64e663 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -255,6 +255,8 @@ static const struct xpad_device {
+ { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
+ { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
+ { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
++ { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
++ { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+ { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
+ { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
+@@ -431,6 +433,7 @@ static const struct usb_device_id xpad_table[] = {
+ XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */
+ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
+ XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */
++ XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */
+ XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */
+ XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
+ XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
+diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
+index 443151de90c6..8c95d3f78072 100644
+--- a/drivers/input/misc/uinput.c
++++ b/drivers/input/misc/uinput.c
+@@ -39,6 +39,7 @@
+ #include <linux/fs.h>
+ #include <linux/miscdevice.h>
+ #include <linux/uinput.h>
++#include <linux/overflow.h>
+ #include <linux/input/mt.h>
+ #include "../input-compat.h"
+
+@@ -356,7 +357,7 @@ static int uinput_open(struct inode *inode, struct file *file)
+ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
+ const struct input_absinfo *abs)
+ {
+- int min, max;
++ int min, max, range;
+
+ min = abs->minimum;
+ max = abs->maximum;
+@@ -368,7 +369,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
+ return -EINVAL;
+ }
+
+- if (abs->flat > max - min) {
++ if (!check_sub_overflow(max, min, &range) && abs->flat > range) {
+ printk(KERN_DEBUG
+ "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n",
+ UINPUT_NAME, code, abs->flat, min, max);
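
The uinput hunk replaces "abs->flat > max - min" with an overflow-checked subtraction: both limits come from userspace, so max - min can wrap (INT_MAX - INT_MIN, for instance) and let absurd ranges through. check_sub_overflow() wraps the GCC/Clang builtin used directly in this sketch:

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool flat_in_range(int min, int max, int flat)
    {
        int range;

        /* the builtin returns true when the subtraction overflowed */
        if (__builtin_sub_overflow(max, min, &range))
            return false;                   /* reject absurd limits */
        return flat <= range;
    }

    int main(void)
    {
        printf("%d\n", flat_in_range(INT_MIN, INT_MAX, 0)); /* 0: rejected */
        printf("%d\n", flat_in_range(0, 255, 15));          /* 1: accepted */
        return 0;
    }
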
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 2ea39a83737f..7638ca03fb1f 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -2086,13 +2086,14 @@ static void its_free_device(struct its_device *its_dev)
+ kfree(its_dev);
+ }
+
+-static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
++static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
+ {
+ int idx;
+
+- idx = find_first_zero_bit(dev->event_map.lpi_map,
+- dev->event_map.nr_lpis);
+- if (idx == dev->event_map.nr_lpis)
++ idx = bitmap_find_free_region(dev->event_map.lpi_map,
++ dev->event_map.nr_lpis,
++ get_count_order(nvecs));
++ if (idx < 0)
+ return -ENOSPC;
+
+ *hwirq = dev->event_map.lpi_base + idx;
+@@ -2188,21 +2189,21 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ int err;
+ int i;
+
+- for (i = 0; i < nr_irqs; i++) {
+- err = its_alloc_device_irq(its_dev, &hwirq);
+- if (err)
+- return err;
++ err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
++ if (err)
++ return err;
+
+- err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
++ for (i = 0; i < nr_irqs; i++) {
++ err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
+ if (err)
+ return err;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i,
+- hwirq, &its_irq_chip, its_dev);
++ hwirq + i, &its_irq_chip, its_dev);
+ irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
+ pr_debug("ID:%d pID:%d vID:%d\n",
+- (int)(hwirq - its_dev->event_map.lpi_base),
+- (int) hwirq, virq + i);
++ (int)(hwirq + i - its_dev->event_map.lpi_base),
++ (int)(hwirq + i), virq + i);
+ }
+
+ return 0;
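
For multi-MSI the hardware interrupt numbers must be contiguous, so the hunk above grabs all nr_irqs events as one naturally aligned power-of-two region (get_count_order() rounds nr_irqs up to the next power of two) instead of allocating bits one at a time. A simplified byte-per-bit sketch of region allocation (bitmap_find_free_region() is the real kernel helper):

    /* Find a free, naturally aligned run of (1 << order) slots; mark it
     * used and return its start index, or -1 if no region is free. */
    static int find_free_region(unsigned char *map, int slots, int order)
    {
        int run = 1 << order;
        int start, i;

        for (start = 0; start + run <= slots; start += run) {
            for (i = 0; i < run; i++)
                if (map[start + i])
                    break;
            if (i == run) {
                for (i = 0; i < run; i++)
                    map[start + i] = 1;
                return start;               /* contiguous, aligned block */
            }
        }
        return -1;                          /* -ENOSPC in the kernel */
    }
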
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 2652ef68d58d..1f6d8b6be5c7 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -2413,9 +2413,21 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key
+ * capi:cipher_api_spec-iv:ivopts
+ */
+ tmp = &cipher_in[strlen("capi:")];
+- cipher_api = strsep(&tmp, "-");
+- *ivmode = strsep(&tmp, ":");
+- *ivopts = tmp;
++
++ /* Separate IV options if present, it can contain another '-' in hash name */
++ *ivopts = strrchr(tmp, ':');
++ if (*ivopts) {
++ **ivopts = '\0';
++ (*ivopts)++;
++ }
++ /* Parse IV mode */
++ *ivmode = strrchr(tmp, '-');
++ if (*ivmode) {
++ **ivmode = '\0';
++ (*ivmode)++;
++ }
++ /* The rest is crypto API spec */
++ cipher_api = tmp;
+
+ if (*ivmode && !strcmp(*ivmode, "lmk"))
+ cc->tfms_count = 64;
+@@ -2485,11 +2497,8 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
+ goto bad_mem;
+
+ chainmode = strsep(&tmp, "-");
+- *ivopts = strsep(&tmp, "-");
+- *ivmode = strsep(&*ivopts, ":");
+-
+- if (tmp)
+- DMWARN("Ignoring unexpected additional cipher options");
++ *ivmode = strsep(&tmp, ":");
++ *ivopts = tmp;
+
+ /*
+ * For compatibility with the original dm-crypt mapping format, if
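
The old strsep()-based parser cut the capi: string at the first '-', which breaks when the cipher API spec or the IV options themselves contain a '-' (an ESSIV hash name, say). Scanning from the right with strrchr(), as the new code does, keeps everything up to the last '-' as the cipher spec. A standalone demonstration (hypothetical table line, not the kernel function):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char buf[] = "xts(aes)-essiv:sha256";   /* cipher_api_spec-iv:ivopts */
        char *ivopts, *ivmode;

        ivopts = strrchr(buf, ':');             /* split IV options off the right */
        if (ivopts)
            *ivopts++ = '\0';
        ivmode = strrchr(buf, '-');             /* last '-' starts the IV mode */
        if (ivmode)
            *ivmode++ = '\0';

        /* prints: api=xts(aes) ivmode=essiv ivopts=sha256 */
        printf("api=%s ivmode=%s ivopts=%s\n", buf,
               ivmode ? ivmode : "", ivopts ? ivopts : "");
        return 0;
    }
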
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 45ff8fd00248..b85a66f42814 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -1687,7 +1687,7 @@ int dm_thin_remove_range(struct dm_thin_device *td,
+ return r;
+ }
+
+-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
++int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
+ {
+ int r;
+ uint32_t ref_count;
+@@ -1695,7 +1695,7 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *resu
+ down_read(&pmd->root_lock);
+ r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
+ if (!r)
+- *result = (ref_count != 0);
++ *result = (ref_count > 1);
+ up_read(&pmd->root_lock);
+
+ return r;
+diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
+index 35e954ea20a9..f6be0d733c20 100644
+--- a/drivers/md/dm-thin-metadata.h
++++ b/drivers/md/dm-thin-metadata.h
+@@ -195,7 +195,7 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
+
+ int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
+
+-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
++int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
+
+ int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
+ int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index da98fc7b995c..40b624d8255d 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -1042,7 +1042,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
+ * passdown we have to check that these blocks are now unused.
+ */
+ int r = 0;
+- bool used = true;
++ bool shared = true;
+ struct thin_c *tc = m->tc;
+ struct pool *pool = tc->pool;
+ dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
+@@ -1052,11 +1052,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
+ while (b != end) {
+ /* find start of unmapped run */
+ for (; b < end; b++) {
+- r = dm_pool_block_is_used(pool->pmd, b, &used);
++ r = dm_pool_block_is_shared(pool->pmd, b, &shared);
+ if (r)
+ goto out;
+
+- if (!used)
++ if (!shared)
+ break;
+ }
+
+@@ -1065,11 +1065,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
+
+ /* find end of run */
+ for (e = b + 1; e != end; e++) {
+- r = dm_pool_block_is_used(pool->pmd, e, &used);
++ r = dm_pool_block_is_shared(pool->pmd, e, &shared);
+ if (r)
+ goto out;
+
+- if (used)
++ if (shared)
+ break;
+ }
+
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
+index e4b10b2d1a08..23739a60517f 100644
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -127,6 +127,8 @@
+ #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
+ #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
+
++#define MEI_DEV_ID_DNV_IE 0x19E5 /* Denverton IE */
++
+ #define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */
+
+ #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index c77e08cbbfd1..04bf2dd134d0 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -93,6 +93,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+ {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
+
++ {MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},
++
+ {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
+diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
+index 8c15637178ff..9ed786935a30 100644
+--- a/drivers/mmc/host/Kconfig
++++ b/drivers/mmc/host/Kconfig
+@@ -429,6 +429,7 @@ config MMC_SDHCI_MSM
+ tristate "Qualcomm SDHCI Controller Support"
+ depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+ depends on MMC_SDHCI_PLTFM
++ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ support present in Qualcomm SOCs. The controller supports
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 035daca63168..7d61d8801220 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -479,8 +479,6 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
+ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
+ {
+ struct can_priv *priv = netdev_priv(dev);
+- struct sk_buff *skb = priv->echo_skb[idx];
+- struct canfd_frame *cf;
+
+ if (idx >= priv->echo_skb_max) {
+ netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
+@@ -488,20 +486,21 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8
+ return NULL;
+ }
+
+- if (!skb) {
+- netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
+- __func__, idx);
+- return NULL;
+- }
++ if (priv->echo_skb[idx]) {
++ /* Using "struct canfd_frame::len" for the frame
++ * length is supported on both CAN and CANFD frames.
++ */
++ struct sk_buff *skb = priv->echo_skb[idx];
++ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
++ u8 len = cf->len;
+
+- /* Using "struct canfd_frame::len" for the frame
+- * length is supported on both CAN and CANFD frames.
+- */
+- cf = (struct canfd_frame *)skb->data;
+- *len_ptr = cf->len;
+- priv->echo_skb[idx] = NULL;
++ *len_ptr = len;
++ priv->echo_skb[idx] = NULL;
+
+- return skb;
++ return skb;
++ }
++
++ return NULL;
+ }
+
+ /*
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index d272dc6984ac..b40d4377cc71 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -431,8 +431,6 @@
+ #define MAC_MDIOSCAR_PA_WIDTH 5
+ #define MAC_MDIOSCAR_RA_INDEX 0
+ #define MAC_MDIOSCAR_RA_WIDTH 16
+-#define MAC_MDIOSCAR_REG_INDEX 0
+-#define MAC_MDIOSCAR_REG_WIDTH 21
+ #define MAC_MDIOSCCDR_BUSY_INDEX 22
+ #define MAC_MDIOSCCDR_BUSY_WIDTH 1
+ #define MAC_MDIOSCCDR_CMD_INDEX 16
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index e107e180e2c8..1e4bb33925e6 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -1284,6 +1284,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+ }
+ }
+
++static unsigned int xgbe_create_mdio_sca(int port, int reg)
++{
++ unsigned int mdio_sca, da;
++
++ da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
++
++ mdio_sca = 0;
++ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
++ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
++ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
++
++ return mdio_sca;
++}
++
+ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
+ int reg, u16 val)
+ {
+@@ -1291,9 +1305,7 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
+
+ reinit_completion(&pdata->mdio_complete);
+
+- mdio_sca = 0;
+- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
+- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
++ mdio_sca = xgbe_create_mdio_sca(addr, reg);
+ XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+ mdio_sccd = 0;
+@@ -1317,9 +1329,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
+
+ reinit_completion(&pdata->mdio_complete);
+
+- mdio_sca = 0;
+- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
+- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
++ mdio_sca = xgbe_create_mdio_sca(addr, reg);
+ XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+ mdio_sccd = 0;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
+index 8e2a19616bc9..c87bc0a5efa3 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/common.h
++++ b/drivers/net/ethernet/stmicro/stmmac/common.h
+@@ -444,7 +444,8 @@ struct stmmac_dma_ops {
+ int rxfifosz);
+ void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
+ int fifosz);
+- void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel);
++ void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel,
++ int fifosz);
+ /* To track extra statistic (if supported) */
+ void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
+ void __iomem *ioaddr);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+index e84831e1b63b..898849bbc7d4 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+@@ -271,9 +271,10 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
+ }
+
+ static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
+- u32 channel)
++ u32 channel, int fifosz)
+ {
+ u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
++ unsigned int tqs = fifosz / 256 - 1;
+
+ if (mode == SF_DMA_MODE) {
+ pr_debug("GMAC: enable TX store and forward mode\n");
+@@ -306,12 +307,14 @@ static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
+ * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
+ * with reset values: TXQEN off, TQS 256 bytes.
+ *
+- * Write the bits in both cases, since it will have no effect when RO.
+- * For DWC_EQOS_NUM_TXQ > 1, the top bits in MTL_OP_MODE_TQS_MASK might
+- * be RO, however, writing the whole TQS field will result in a value
+- * equal to DWC_EQOS_TXFIFO_SIZE, just like for DWC_EQOS_NUM_TXQ == 1.
++ * TXQEN must be written for multi-channel operation and TQS must
++ * reflect the available fifo size per queue (total fifo size / number
++ * of enabled queues).
+ */
+- mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK;
++ mtl_tx_op |= MTL_OP_MODE_TXQEN;
++ mtl_tx_op &= ~MTL_OP_MODE_TQS_MASK;
++ mtl_tx_op |= tqs << MTL_OP_MODE_TQS_SHIFT;
++
+ writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+ }
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index bafbebeb0e00..a901feaad4e1 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1765,12 +1765,19 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ int rxfifosz = priv->plat->rx_fifo_size;
++ int txfifosz = priv->plat->tx_fifo_size;
+ u32 txmode = 0;
+ u32 rxmode = 0;
+ u32 chan = 0;
+
+ if (rxfifosz == 0)
+ rxfifosz = priv->dma_cap.rx_fifo_size;
++ if (txfifosz == 0)
++ txfifosz = priv->dma_cap.tx_fifo_size;
++
++ /* Adjust for real per queue fifo size */
++ rxfifosz /= rx_channels_count;
++ txfifosz /= tx_channels_count;
+
+ if (priv->plat->force_thresh_dma_mode) {
+ txmode = tc;
+@@ -1798,7 +1805,8 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
+ rxfifosz);
+
+ for (chan = 0; chan < tx_channels_count; chan++)
+- priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
++ priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
++ txfifosz);
+ } else {
+ priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
+ rxfifosz);
+@@ -1967,15 +1975,25 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
+ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
+ u32 rxmode, u32 chan)
+ {
++ u32 rx_channels_count = priv->plat->rx_queues_to_use;
++ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ int rxfifosz = priv->plat->rx_fifo_size;
++ int txfifosz = priv->plat->tx_fifo_size;
+
+ if (rxfifosz == 0)
+ rxfifosz = priv->dma_cap.rx_fifo_size;
++ if (txfifosz == 0)
++ txfifosz = priv->dma_cap.tx_fifo_size;
++
++ /* Adjust for real per queue fifo size */
++ rxfifosz /= rx_channels_count;
++ txfifosz /= tx_channels_count;
+
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
+ rxfifosz);
+- priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
++ priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
++ txfifosz);
+ } else {
+ priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
+ rxfifosz);
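
With several queues enabled each queue owns only fifo_size / n_queues bytes, and the TQS field encodes that share in 256-byte units minus one, so writing the full-FIFO mask (the old code) over-programs every queue. For example, a 16384-byte TX FIFO split across 4 queues gives 4096 bytes per queue and TQS = 4096/256 - 1 = 15. The arithmetic, isolated:

    static unsigned int per_queue_tqs(unsigned int fifosz, unsigned int nqueues)
    {
        unsigned int per_queue = fifosz / nqueues;  /* e.g. 16384/4 = 4096 */
        return per_queue / 256 - 1;                 /* e.g. 15 */
    }
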
+diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
+index 2df7b62c1a36..1ece41277993 100644
+--- a/drivers/net/phy/mdio_bus.c
++++ b/drivers/net/phy/mdio_bus.c
+@@ -358,6 +358,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
+ if (IS_ERR(gpiod)) {
+ dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n",
+ bus->id);
++ device_del(&bus->dev);
+ return PTR_ERR(gpiod);
+ } else if (gpiod) {
+ bus->reset_gpiod = gpiod;
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index 951892da3352..c37ef5287caa 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -445,6 +445,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
+ if (pskb_trim_rcsum(skb, len))
+ goto drop;
+
++ ph = pppoe_hdr(skb);
+ pn = pppoe_pernet(dev_net(dev));
+
+ /* Note that get_item does a sock_hold(), so sk_pppox(po)
1534 +diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
1535 +index 5d8140e58f6f..da56cc277b71 100644
1536 +--- a/drivers/nvme/target/rdma.c
1537 ++++ b/drivers/nvme/target/rdma.c
1538 +@@ -137,6 +137,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
1539 + static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
1540 + static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
1541 + static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
1542 ++static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
1543 ++ struct nvmet_rdma_rsp *r);
1544 ++static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
1545 ++ struct nvmet_rdma_rsp *r);
1546 +
1547 + static struct nvmet_fabrics_ops nvmet_rdma_ops;
1548 +
1549 +@@ -175,9 +179,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
1550 + spin_unlock_irqrestore(&queue->rsps_lock, flags);
1551 +
1552 + if (unlikely(!rsp)) {
1553 +- rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
1554 ++ int ret;
1555 ++
1556 ++ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
1557 + if (unlikely(!rsp))
1558 + return NULL;
1559 ++ ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
1560 ++ if (unlikely(ret)) {
1561 ++ kfree(rsp);
1562 ++ return NULL;
1563 ++ }
1564 ++
1565 + rsp->allocated = true;
1566 + }
1567 +
1568 +@@ -189,7 +201,8 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
1569 + {
1570 + unsigned long flags;
1571 +
1572 +- if (rsp->allocated) {
1573 ++ if (unlikely(rsp->allocated)) {
1574 ++ nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
1575 + kfree(rsp);
1576 + return;
1577 + }
1578 +diff --git a/drivers/ptp/ptp_kvm.c b/drivers/ptp/ptp_kvm.c
1579 +index 2b1b212c219e..c67dd11e08b1 100644
1580 +--- a/drivers/ptp/ptp_kvm.c
1581 ++++ b/drivers/ptp/ptp_kvm.c
1582 +@@ -178,8 +178,11 @@ static int __init ptp_kvm_init(void)
1583 + {
1584 + long ret;
1585 +
1586 ++ if (!kvm_para_available())
1587 ++ return -ENODEV;
1588 ++
1589 + clock_pair_gpa = slow_virt_to_phys(&clock_pair);
1590 +- hv_clock = pvclock_pvti_cpu0_va();
1591 ++ hv_clock = pvclock_get_pvti_cpu0_va();
1592 +
1593 + if (!hv_clock)
1594 + return -ENODEV;
1595 +diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
1596 +index 194ffd5c8580..039b2074db7e 100644
1597 +--- a/drivers/s390/char/sclp_config.c
1598 ++++ b/drivers/s390/char/sclp_config.c
1599 +@@ -60,7 +60,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
1600 +
1601 + static void __ref sclp_cpu_change_notify(struct work_struct *work)
1602 + {
1603 ++ lock_device_hotplug();
1604 + smp_rescan_cpus();
1605 ++ unlock_device_hotplug();
1606 + }
1607 +
1608 + static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
1609 +diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
1610 +index 32c7225a831e..2fc7056cbff7 100644
1611 +--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
1612 ++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
1613 +@@ -43,6 +43,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
1614 + {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
1615 + {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
1616 + {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
1617 ++ {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
1618 + {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
1619 + {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
1620 + {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
1621 +diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
1622 +index 7b2a466616d6..08bd6b965847 100644
1623 +--- a/drivers/tty/n_hdlc.c
1624 ++++ b/drivers/tty/n_hdlc.c
1625 +@@ -598,6 +598,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
1626 + /* too large for caller's buffer */
1627 + ret = -EOVERFLOW;
1628 + } else {
1629 ++ __set_current_state(TASK_RUNNING);
1630 + if (copy_to_user(buf, rbuf->buf, rbuf->count))
1631 + ret = -EFAULT;
1632 + else
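[Note: the one-line fix above applies the standard wait-loop rule: a task left in TASK_INTERRUPTIBLE by a wait loop must be reset to TASK_RUNNING before calling anything that can sleep, and copy_to_user() may fault and sleep. A simplified sketch of the surrounding read path (ordering and error handling abbreviated):

	for (;;) {
		rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
		if (rbuf)
			break;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	/* reset state before a call that may sleep */
	__set_current_state(TASK_RUNNING);
	if (copy_to_user(buf, rbuf->buf, rbuf->count))
		ret = -EFAULT;
]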
1633 +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
1634 +index 543d0f95f094..94ac6c6e8fb8 100644
1635 +--- a/drivers/tty/serial/serial_core.c
1636 ++++ b/drivers/tty/serial/serial_core.c
1637 +@@ -563,10 +563,12 @@ static int uart_put_char(struct tty_struct *tty, unsigned char c)
1638 + int ret = 0;
1639 +
1640 + circ = &state->xmit;
1641 +- if (!circ->buf)
1642 ++ port = uart_port_lock(state, flags);
1643 ++ if (!circ->buf) {
1644 ++ uart_port_unlock(port, flags);
1645 + return 0;
1646 ++ }
1647 +
1648 +- port = uart_port_lock(state, flags);
1649 + if (port && uart_circ_chars_free(circ) != 0) {
1650 + circ->buf[circ->head] = c;
1651 + circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
1652 +@@ -599,11 +601,13 @@ static int uart_write(struct tty_struct *tty,
1653 + return -EL3HLT;
1654 + }
1655 +
1656 ++ port = uart_port_lock(state, flags);
1657 + circ = &state->xmit;
1658 +- if (!circ->buf)
1659 ++ if (!circ->buf) {
1660 ++ uart_port_unlock(port, flags);
1661 + return 0;
1662 ++ }
1663 +
1664 +- port = uart_port_lock(state, flags);
1665 + while (port) {
1666 + c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
1667 + if (count < c)
1668 +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
1669 +index 417b81c67fe9..7e351d205393 100644
1670 +--- a/drivers/tty/tty_io.c
1671 ++++ b/drivers/tty/tty_io.c
1672 +@@ -2180,7 +2180,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
1673 + ld = tty_ldisc_ref_wait(tty);
1674 + if (!ld)
1675 + return -EIO;
1676 +- ld->ops->receive_buf(tty, &ch, &mbz, 1);
1677 ++ if (ld->ops->receive_buf)
1678 ++ ld->ops->receive_buf(tty, &ch, &mbz, 1);
1679 + tty_ldisc_deref(ld);
1680 + return 0;
1681 + }
1682 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
1683 +index e77421e7bf46..1fb5e7f409c4 100644
1684 +--- a/drivers/tty/vt/vt.c
1685 ++++ b/drivers/tty/vt/vt.c
1686 +@@ -953,6 +953,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
1687 + if (con_is_visible(vc))
1688 + update_screen(vc);
1689 + vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
1690 ++ notify_update(vc);
1691 + return err;
1692 + }
1693 +
1694 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
1695 +index b8704c0678f9..727bf3c9f53b 100644
1696 +--- a/drivers/usb/dwc3/gadget.c
1697 ++++ b/drivers/usb/dwc3/gadget.c
1698 +@@ -182,6 +182,8 @@ void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
1699 + req->started = false;
1700 + list_del(&req->list);
1701 + req->remaining = 0;
1702 ++ req->unaligned = false;
1703 ++ req->zero = false;
1704 +
1705 + if (req->request.status == -EINPROGRESS)
1706 + req->request.status = status;
1707 +diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
1708 +index 510d28a9d190..35aecbcac6f7 100644
1709 +--- a/drivers/usb/host/xhci-mtk.c
1710 ++++ b/drivers/usb/host/xhci-mtk.c
1711 +@@ -724,14 +724,16 @@ static int xhci_mtk_remove(struct platform_device *dev)
1712 + struct xhci_hcd_mtk *mtk = platform_get_drvdata(dev);
1713 + struct usb_hcd *hcd = mtk->hcd;
1714 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1715 ++ struct usb_hcd *shared_hcd = xhci->shared_hcd;
1716 +
1717 +- usb_remove_hcd(xhci->shared_hcd);
1718 ++ usb_remove_hcd(shared_hcd);
1719 ++ xhci->shared_hcd = NULL;
1720 + xhci_mtk_phy_power_off(mtk);
1721 + xhci_mtk_phy_exit(mtk);
1722 + device_init_wakeup(&dev->dev, false);
1723 +
1724 + usb_remove_hcd(hcd);
1725 +- usb_put_hcd(xhci->shared_hcd);
1726 ++ usb_put_hcd(shared_hcd);
1727 + usb_put_hcd(hcd);
1728 + xhci_mtk_sch_exit(mtk);
1729 + xhci_mtk_clks_disable(mtk);
1730 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
1731 +index 0fbc549cc55c..1de006aebec5 100644
1732 +--- a/drivers/usb/host/xhci-pci.c
1733 ++++ b/drivers/usb/host/xhci-pci.c
1734 +@@ -370,6 +370,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
1735 + if (xhci->shared_hcd) {
1736 + usb_remove_hcd(xhci->shared_hcd);
1737 + usb_put_hcd(xhci->shared_hcd);
1738 ++ xhci->shared_hcd = NULL;
1739 + }
1740 +
1741 + /* Workaround for spurious wakeups at shutdown with HSW */
1742 +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
1743 +index 830dd0dbbce0..108a212294bf 100644
1744 +--- a/drivers/usb/host/xhci-plat.c
1745 ++++ b/drivers/usb/host/xhci-plat.c
1746 +@@ -332,14 +332,16 @@ static int xhci_plat_remove(struct platform_device *dev)
1747 + struct usb_hcd *hcd = platform_get_drvdata(dev);
1748 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1749 + struct clk *clk = xhci->clk;
1750 ++ struct usb_hcd *shared_hcd = xhci->shared_hcd;
1751 +
1752 + xhci->xhc_state |= XHCI_STATE_REMOVING;
1753 +
1754 +- usb_remove_hcd(xhci->shared_hcd);
1755 ++ usb_remove_hcd(shared_hcd);
1756 ++ xhci->shared_hcd = NULL;
1757 + usb_phy_shutdown(hcd->usb_phy);
1758 +
1759 + usb_remove_hcd(hcd);
1760 +- usb_put_hcd(xhci->shared_hcd);
1761 ++ usb_put_hcd(shared_hcd);
1762 +
1763 + if (!IS_ERR(clk))
1764 + clk_disable_unprepare(clk);
1765 +diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
1766 +index 32ddafe7af87..28df32d85671 100644
1767 +--- a/drivers/usb/host/xhci-tegra.c
1768 ++++ b/drivers/usb/host/xhci-tegra.c
1769 +@@ -1178,6 +1178,7 @@ static int tegra_xusb_remove(struct platform_device *pdev)
1770 +
1771 + usb_remove_hcd(xhci->shared_hcd);
1772 + usb_put_hcd(xhci->shared_hcd);
1773 ++ xhci->shared_hcd = NULL;
1774 + usb_remove_hcd(tegra->hcd);
1775 + usb_put_hcd(tegra->hcd);
1776 +
1777 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
1778 +index 930eecd86429..c78de07c4d00 100644
1779 +--- a/drivers/usb/host/xhci.c
1780 ++++ b/drivers/usb/host/xhci.c
1781 +@@ -669,8 +669,6 @@ static void xhci_stop(struct usb_hcd *hcd)
1782 +
1783 + /* Only halt host and free memory after both hcds are removed */
1784 + if (!usb_hcd_is_primary_hcd(hcd)) {
1785 +- /* usb core will free this hcd shortly, unset pointer */
1786 +- xhci->shared_hcd = NULL;
1787 + mutex_unlock(&xhci->mutex);
1788 + return;
1789 + }
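[Note: the xhci-mtk, xhci-pci, xhci-plat and xhci-tegra hunks above all converge on the same teardown ordering, now that xhci_stop() no longer clears the pointer itself. The common sequence, sketched (phy/clock steps omitted):

	struct usb_hcd *shared_hcd = xhci->shared_hcd;

	usb_remove_hcd(shared_hcd);
	xhci->shared_hcd = NULL;	/* cleared by the caller, not xhci_stop() */
	usb_remove_hcd(hcd);
	usb_put_hcd(shared_hcd);
	usb_put_hcd(hcd);
]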
1790 +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
1791 +index 5fa1e6fb49a6..5e86be81f4c9 100644
1792 +--- a/drivers/usb/serial/pl2303.c
1793 ++++ b/drivers/usb/serial/pl2303.c
1794 +@@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = {
1795 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
1796 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
1797 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
1798 ++ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
1799 + { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
1800 + { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
1801 + { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
1802 +diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
1803 +index b46e74a90af2..f21445acc486 100644
1804 +--- a/drivers/usb/serial/pl2303.h
1805 ++++ b/drivers/usb/serial/pl2303.h
1806 +@@ -13,6 +13,7 @@
1807 +
1808 + #define PL2303_VENDOR_ID 0x067b
1809 + #define PL2303_PRODUCT_ID 0x2303
1810 ++#define PL2303_PRODUCT_ID_TB 0x2304
1811 + #define PL2303_PRODUCT_ID_RSAQ2 0x04bb
1812 + #define PL2303_PRODUCT_ID_DCU11 0x1234
1813 + #define PL2303_PRODUCT_ID_PHAROS 0xaaa0
1814 +@@ -25,6 +26,7 @@
1815 + #define PL2303_PRODUCT_ID_MOTOROLA 0x0307
1816 + #define PL2303_PRODUCT_ID_ZTEK 0xe1f1
1817 +
1818 ++
1819 + #define ATEN_VENDOR_ID 0x0557
1820 + #define ATEN_VENDOR_ID2 0x0547
1821 + #define ATEN_PRODUCT_ID 0x2008
1822 +diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
1823 +index 6d6acf2c07c3..511242111403 100644
1824 +--- a/drivers/usb/serial/usb-serial-simple.c
1825 ++++ b/drivers/usb/serial/usb-serial-simple.c
1826 +@@ -88,7 +88,8 @@ DEVICE(moto_modem, MOTO_IDS);
1827 + /* Motorola Tetra driver */
1828 + #define MOTOROLA_TETRA_IDS() \
1829 + { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
1830 +- { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */
1831 ++ { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
1832 ++ { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */
1833 + DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
1834 +
1835 + /* Novatel Wireless GPS driver */
1836 +diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
1837 +index 6123b4dd8638..4eba9ee179e3 100644
1838 +--- a/drivers/vhost/net.c
1839 ++++ b/drivers/vhost/net.c
1840 +@@ -851,7 +851,8 @@ static void handle_rx(struct vhost_net *net)
1841 + vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
1842 + headcount);
1843 + if (unlikely(vq_log))
1844 +- vhost_log_write(vq, vq_log, log, vhost_len);
1845 ++ vhost_log_write(vq, vq_log, log, vhost_len,
1846 ++ vq->iov, in);
1847 + total_len += vhost_len;
1848 + if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
1849 + vhost_poll_queue(&vq->poll);
1850 +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
1851 +index 97518685ab58..37fcb3ca89f1 100644
1852 +--- a/drivers/vhost/vhost.c
1853 ++++ b/drivers/vhost/vhost.c
1854 +@@ -1726,13 +1726,87 @@ static int log_write(void __user *log_base,
1855 + return r;
1856 + }
1857 +
1858 ++static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
1859 ++{
1860 ++ struct vhost_umem *umem = vq->umem;
1861 ++ struct vhost_umem_node *u;
1862 ++ u64 start, end, l, min;
1863 ++ int r;
1864 ++ bool hit = false;
1865 ++
1866 ++ while (len) {
1867 ++ min = len;
1868 ++ /* More than one GPA can be mapped into a single HVA. So
1869 ++ * iterate all possible umems here to be safe.
1870 ++ */
1871 ++ list_for_each_entry(u, &umem->umem_list, link) {
1872 ++ if (u->userspace_addr > hva - 1 + len ||
1873 ++ u->userspace_addr - 1 + u->size < hva)
1874 ++ continue;
1875 ++ start = max(u->userspace_addr, hva);
1876 ++ end = min(u->userspace_addr - 1 + u->size,
1877 ++ hva - 1 + len);
1878 ++ l = end - start + 1;
1879 ++ r = log_write(vq->log_base,
1880 ++ u->start + start - u->userspace_addr,
1881 ++ l);
1882 ++ if (r < 0)
1883 ++ return r;
1884 ++ hit = true;
1885 ++ min = min(l, min);
1886 ++ }
1887 ++
1888 ++ if (!hit)
1889 ++ return -EFAULT;
1890 ++
1891 ++ len -= min;
1892 ++ hva += min;
1893 ++ }
1894 ++
1895 ++ return 0;
1896 ++}
1897 ++
1898 ++static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
1899 ++{
1900 ++ struct iovec iov[64];
1901 ++ int i, ret;
1902 ++
1903 ++ if (!vq->iotlb)
1904 ++ return log_write(vq->log_base, vq->log_addr + used_offset, len);
1905 ++
1906 ++ ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
1907 ++ len, iov, 64, VHOST_ACCESS_WO);
1908 ++ if (ret)
1909 ++ return ret;
1910 ++
1911 ++ for (i = 0; i < ret; i++) {
1912 ++ ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
1913 ++ iov[i].iov_len);
1914 ++ if (ret)
1915 ++ return ret;
1916 ++ }
1917 ++
1918 ++ return 0;
1919 ++}
1920 ++
1921 + int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
1922 +- unsigned int log_num, u64 len)
1923 ++ unsigned int log_num, u64 len, struct iovec *iov, int count)
1924 + {
1925 + int i, r;
1926 +
1927 + /* Make sure data written is seen before log. */
1928 + smp_wmb();
1929 ++
1930 ++ if (vq->iotlb) {
1931 ++ for (i = 0; i < count; i++) {
1932 ++ r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
1933 ++ iov[i].iov_len);
1934 ++ if (r < 0)
1935 ++ return r;
1936 ++ }
1937 ++ return 0;
1938 ++ }
1939 ++
1940 + for (i = 0; i < log_num; ++i) {
1941 + u64 l = min(log[i].len, len);
1942 + r = log_write(vq->log_base, log[i].addr, l);
1943 +@@ -1762,9 +1836,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
1944 + smp_wmb();
1945 + /* Log used flag write. */
1946 + used = &vq->used->flags;
1947 +- log_write(vq->log_base, vq->log_addr +
1948 +- (used - (void __user *)vq->used),
1949 +- sizeof vq->used->flags);
1950 ++ log_used(vq, (used - (void __user *)vq->used),
1951 ++ sizeof vq->used->flags);
1952 + if (vq->log_ctx)
1953 + eventfd_signal(vq->log_ctx, 1);
1954 + }
1955 +@@ -1782,9 +1855,8 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1956 + smp_wmb();
1957 + /* Log avail event write */
1958 + used = vhost_avail_event(vq);
1959 +- log_write(vq->log_base, vq->log_addr +
1960 +- (used - (void __user *)vq->used),
1961 +- sizeof *vhost_avail_event(vq));
1962 ++ log_used(vq, (used - (void __user *)vq->used),
1963 ++ sizeof *vhost_avail_event(vq));
1964 + if (vq->log_ctx)
1965 + eventfd_signal(vq->log_ctx, 1);
1966 + }
1967 +@@ -2189,10 +2261,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
1968 + /* Make sure data is seen before log. */
1969 + smp_wmb();
1970 + /* Log used ring entry write. */
1971 +- log_write(vq->log_base,
1972 +- vq->log_addr +
1973 +- ((void __user *)used - (void __user *)vq->used),
1974 +- count * sizeof *used);
1975 ++ log_used(vq, ((void __user *)used - (void __user *)vq->used),
1976 ++ count * sizeof *used);
1977 + }
1978 + old = vq->last_used_idx;
1979 + new = (vq->last_used_idx += count);
1980 +@@ -2234,9 +2304,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
1981 + /* Make sure used idx is seen before log. */
1982 + smp_wmb();
1983 + /* Log used index update. */
1984 +- log_write(vq->log_base,
1985 +- vq->log_addr + offsetof(struct vring_used, idx),
1986 +- sizeof vq->used->idx);
1987 ++ log_used(vq, offsetof(struct vring_used, idx),
1988 ++ sizeof vq->used->idx);
1989 + if (vq->log_ctx)
1990 + eventfd_signal(vq->log_ctx, 1);
1991 + }
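[Note: because one host-virtual range can be backed by several guest regions, the new log_write_hva() intersects the request with every umem node; the "- 1" forms avoid wrapping when a range ends at the top of the address space. The per-region intersection, restated as a sketch:

	/* region [u->userspace_addr, +u->size), request [hva, +len) */
	start = max(u->userspace_addr, hva);
	end   = min(u->userspace_addr - 1 + u->size, hva - 1 + len);
	l     = end - start + 1;	/* overlap length in bytes */
	/* log at the guest-physical offset of the overlap */
	log_write(vq->log_base, u->start + start - u->userspace_addr, l);
]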
1992 +diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
1993 +index 79c6e7a60a5e..75d21d4a8354 100644
1994 +--- a/drivers/vhost/vhost.h
1995 ++++ b/drivers/vhost/vhost.h
1996 +@@ -208,7 +208,8 @@ bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
1997 + bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
1998 +
1999 + int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
2000 +- unsigned int log_num, u64 len);
2001 ++ unsigned int log_num, u64 len,
2002 ++ struct iovec *iov, int count);
2003 + int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
2004 +
2005 + struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
2006 +diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
2007 +index e6c1934734b7..fe1f16351f94 100644
2008 +--- a/drivers/xen/events/events_base.c
2009 ++++ b/drivers/xen/events/events_base.c
2010 +@@ -1650,7 +1650,7 @@ void xen_callback_vector(void)
2011 + xen_have_vector_callback = 0;
2012 + return;
2013 + }
2014 +- pr_info("Xen HVM callback vector for event delivery is enabled\n");
2015 ++ pr_info_once("Xen HVM callback vector for event delivery is enabled\n");
2016 + alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
2017 + xen_hvm_callback_vector);
2018 + }
2019 +diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
2020 +index dd80a1bdf9e2..f86457713e60 100644
2021 +--- a/fs/btrfs/dev-replace.c
2022 ++++ b/fs/btrfs/dev-replace.c
2023 +@@ -351,6 +351,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
2024 + break;
2025 + case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
2026 + case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
2027 ++ ASSERT(0);
2028 + ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED;
2029 + goto leave;
2030 + }
2031 +@@ -395,6 +396,10 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
2032 + if (IS_ERR(trans)) {
2033 + ret = PTR_ERR(trans);
2034 + btrfs_dev_replace_lock(dev_replace, 1);
2035 ++ dev_replace->replace_state =
2036 ++ BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
2037 ++ dev_replace->srcdev = NULL;
2038 ++ dev_replace->tgtdev = NULL;
2039 + goto leave;
2040 + }
2041 +
2042 +@@ -416,8 +421,6 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
2043 + return ret;
2044 +
2045 + leave:
2046 +- dev_replace->srcdev = NULL;
2047 +- dev_replace->tgtdev = NULL;
2048 + btrfs_dev_replace_unlock(dev_replace, 1);
2049 + btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
2050 + return ret;
2051 +@@ -801,6 +804,8 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
2052 + "cannot continue dev_replace, tgtdev is missing");
2053 + btrfs_info(fs_info,
2054 + "you may cancel the operation after 'mount -o degraded'");
2055 ++ dev_replace->replace_state =
2056 ++ BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
2057 + btrfs_dev_replace_unlock(dev_replace, 1);
2058 + return 0;
2059 + }
2060 +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
2061 +index 2e936f94f102..905d0fa1a1cc 100644
2062 +--- a/fs/cifs/cifssmb.c
2063 ++++ b/fs/cifs/cifssmb.c
2064 +@@ -1445,18 +1445,26 @@ cifs_discard_remaining_data(struct TCP_Server_Info *server)
2065 + }
2066 +
2067 + static int
2068 +-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2069 ++__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
2070 ++ bool malformed)
2071 + {
2072 + int length;
2073 +- struct cifs_readdata *rdata = mid->callback_data;
2074 +
2075 + length = cifs_discard_remaining_data(server);
2076 +- dequeue_mid(mid, rdata->result);
2077 ++ dequeue_mid(mid, malformed);
2078 + mid->resp_buf = server->smallbuf;
2079 + server->smallbuf = NULL;
2080 + return length;
2081 + }
2082 +
2083 ++static int
2084 ++cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2085 ++{
2086 ++ struct cifs_readdata *rdata = mid->callback_data;
2087 ++
2088 ++ return __cifs_readv_discard(server, mid, rdata->result);
2089 ++}
2090 ++
2091 + int
2092 + cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2093 + {
2094 +@@ -1496,12 +1504,23 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2095 + return -1;
2096 + }
2097 +
2098 ++ /* set up first two iov for signature check and to get credits */
2099 ++ rdata->iov[0].iov_base = buf;
2100 ++ rdata->iov[0].iov_len = 4;
2101 ++ rdata->iov[1].iov_base = buf + 4;
2102 ++ rdata->iov[1].iov_len = server->total_read - 4;
2103 ++ cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
2104 ++ rdata->iov[0].iov_base, rdata->iov[0].iov_len);
2105 ++ cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
2106 ++ rdata->iov[1].iov_base, rdata->iov[1].iov_len);
2107 ++
2108 + /* Was the SMB read successful? */
2109 + rdata->result = server->ops->map_error(buf, false);
2110 + if (rdata->result != 0) {
2111 + cifs_dbg(FYI, "%s: server returned error %d\n",
2112 + __func__, rdata->result);
2113 +- return cifs_readv_discard(server, mid);
2114 ++ /* normal error on read response */
2115 ++ return __cifs_readv_discard(server, mid, false);
2116 + }
2117 +
2118 + /* Is there enough to get to the rest of the READ_RSP header? */
2119 +@@ -1544,14 +1563,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2120 + server->total_read += length;
2121 + }
2122 +
2123 +- /* set up first iov for signature check */
2124 +- rdata->iov[0].iov_base = buf;
2125 +- rdata->iov[0].iov_len = 4;
2126 +- rdata->iov[1].iov_base = buf + 4;
2127 +- rdata->iov[1].iov_len = server->total_read - 4;
2128 +- cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n",
2129 +- rdata->iov[0].iov_base, server->total_read);
2130 +-
2131 + /* how much data is in the response? */
2132 + data_len = server->ops->read_data_length(buf);
2133 + if (data_offset + data_len > buflen) {
2134 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
2135 +index d6248137c219..000b7bfa8cf0 100644
2136 +--- a/fs/cifs/connect.c
2137 ++++ b/fs/cifs/connect.c
2138 +@@ -524,6 +524,21 @@ server_unresponsive(struct TCP_Server_Info *server)
2139 + return false;
2140 + }
2141 +
2142 ++static inline bool
2143 ++zero_credits(struct TCP_Server_Info *server)
2144 ++{
2145 ++ int val;
2146 ++
2147 ++ spin_lock(&server->req_lock);
2148 ++ val = server->credits + server->echo_credits + server->oplock_credits;
2149 ++ if (server->in_flight == 0 && val == 0) {
2150 ++ spin_unlock(&server->req_lock);
2151 ++ return true;
2152 ++ }
2153 ++ spin_unlock(&server->req_lock);
2154 ++ return false;
2155 ++}
2156 ++
2157 + static int
2158 + cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
2159 + {
2160 +@@ -536,6 +551,12 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
2161 + for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
2162 + try_to_freeze();
2163 +
2164 ++ /* reconnect if no credits and no requests in flight */
2165 ++ if (zero_credits(server)) {
2166 ++ cifs_reconnect(server);
2167 ++ return -ECONNABORTED;
2168 ++ }
2169 ++
2170 + if (server_unresponsive(server))
2171 + return -ECONNABORTED;
2172 +
2173 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2174 +index 3372eedaa94d..fb1c65f93114 100644
2175 +--- a/fs/cifs/smb2ops.c
2176 ++++ b/fs/cifs/smb2ops.c
2177 +@@ -33,6 +33,7 @@
2178 + #include "smb2glob.h"
2179 + #include "cifs_ioctl.h"
2180 +
2181 ++/* Change credits for different ops and return the total number of credits */
2182 + static int
2183 + change_conf(struct TCP_Server_Info *server)
2184 + {
2185 +@@ -40,17 +41,15 @@ change_conf(struct TCP_Server_Info *server)
2186 + server->oplock_credits = server->echo_credits = 0;
2187 + switch (server->credits) {
2188 + case 0:
2189 +- return -1;
2190 ++ return 0;
2191 + case 1:
2192 + server->echoes = false;
2193 + server->oplocks = false;
2194 +- cifs_dbg(VFS, "disabling echoes and oplocks\n");
2195 + break;
2196 + case 2:
2197 + server->echoes = true;
2198 + server->oplocks = false;
2199 + server->echo_credits = 1;
2200 +- cifs_dbg(FYI, "disabling oplocks\n");
2201 + break;
2202 + default:
2203 + server->echoes = true;
2204 +@@ -63,14 +62,15 @@ change_conf(struct TCP_Server_Info *server)
2205 + server->echo_credits = 1;
2206 + }
2207 + server->credits -= server->echo_credits + server->oplock_credits;
2208 +- return 0;
2209 ++ return server->credits + server->echo_credits + server->oplock_credits;
2210 + }
2211 +
2212 + static void
2213 + smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
2214 + const int optype)
2215 + {
2216 +- int *val, rc = 0;
2217 ++ int *val, rc = -1;
2218 ++
2219 + spin_lock(&server->req_lock);
2220 + val = server->ops->get_credits_field(server, optype);
2221 + *val += add;
2222 +@@ -94,8 +94,26 @@ smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
2223 + }
2224 + spin_unlock(&server->req_lock);
2225 + wake_up(&server->request_q);
2226 +- if (rc)
2227 +- cifs_reconnect(server);
2228 ++
2229 ++ if (server->tcpStatus == CifsNeedReconnect)
2230 ++ return;
2231 ++
2232 ++ switch (rc) {
2233 ++ case -1:
2234 ++ /* change_conf hasn't been executed */
2235 ++ break;
2236 ++ case 0:
2237 ++ cifs_dbg(VFS, "Possible client or server bug - zero credits\n");
2238 ++ break;
2239 ++ case 1:
2240 ++ cifs_dbg(VFS, "disabling echoes and oplocks\n");
2241 ++ break;
2242 ++ case 2:
2243 ++ cifs_dbg(FYI, "disabling oplocks\n");
2244 ++ break;
2245 ++ default:
2246 ++ cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
2247 ++ }
2248 + }
2249 +
2250 + static void
2251 +@@ -153,14 +171,14 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
2252 +
2253 + scredits = server->credits;
2254 + /* can deadlock with reopen */
2255 +- if (scredits == 1) {
2256 ++ if (scredits <= 8) {
2257 + *num = SMB2_MAX_BUFFER_SIZE;
2258 + *credits = 0;
2259 + break;
2260 + }
2261 +
2262 +- /* leave one credit for a possible reopen */
2263 +- scredits--;
2264 ++ /* leave some credits for reopen and other ops */
2265 ++ scredits -= 8;
2266 + *num = min_t(unsigned int, size,
2267 + scredits * SMB2_MAX_BUFFER_SIZE);
2268 +
2269 +@@ -2531,11 +2549,23 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
2270 + server->ops->is_status_pending(buf, server, 0))
2271 + return -1;
2272 +
2273 +- rdata->result = server->ops->map_error(buf, false);
2274 ++ /* set up first two iov to get credits */
2275 ++ rdata->iov[0].iov_base = buf;
2276 ++ rdata->iov[0].iov_len = 4;
2277 ++ rdata->iov[1].iov_base = buf + 4;
2278 ++ rdata->iov[1].iov_len =
2279 ++ min_t(unsigned int, buf_len, server->vals->read_rsp_size) - 4;
2280 ++ cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
2281 ++ rdata->iov[0].iov_base, rdata->iov[0].iov_len);
2282 ++ cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
2283 ++ rdata->iov[1].iov_base, rdata->iov[1].iov_len);
2284 ++
2285 ++ rdata->result = server->ops->map_error(buf, true);
2286 + if (rdata->result != 0) {
2287 + cifs_dbg(FYI, "%s: server returned error %d\n",
2288 + __func__, rdata->result);
2289 +- dequeue_mid(mid, rdata->result);
2290 ++ /* normal error on read response */
2291 ++ dequeue_mid(mid, false);
2292 + return 0;
2293 + }
2294 +
2295 +@@ -2605,14 +2635,6 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
2296 + return 0;
2297 + }
2298 +
2299 +- /* set up first iov for signature check */
2300 +- rdata->iov[0].iov_base = buf;
2301 +- rdata->iov[0].iov_len = 4;
2302 +- rdata->iov[1].iov_base = buf + 4;
2303 +- rdata->iov[1].iov_len = server->vals->read_rsp_size - 4;
2304 +- cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
2305 +- rdata->iov[0].iov_base, server->vals->read_rsp_size);
2306 +-
2307 + length = rdata->copy_into_pages(server, rdata, &iter);
2308 +
2309 + kfree(bvec);
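[Note: a worked example of the new credit reservation in smb2_wait_mtu_credits(), assuming SMB2_MAX_BUFFER_SIZE of 64 KiB: with server->credits == 20, the old code reserved one credit and allowed up to 19 * 64 KiB = 1216 KiB per request; the new code reserves eight, capping a large read at 12 * 64 KiB = 768 KiB and leaving headroom for reopen, echo and oplock traffic.]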
2310 +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
2311 +index 65de72d65562..12060fbfbb05 100644
2312 +--- a/fs/f2fs/node.c
2313 ++++ b/fs/f2fs/node.c
2314 +@@ -694,6 +694,7 @@ static void truncate_node(struct dnode_of_data *dn)
2315 + {
2316 + struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
2317 + struct node_info ni;
2318 ++ pgoff_t index;
2319 +
2320 + get_node_info(sbi, dn->nid, &ni);
2321 + f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
2322 +@@ -712,10 +713,11 @@ static void truncate_node(struct dnode_of_data *dn)
2323 + clear_node_page_dirty(dn->node_page);
2324 + set_sbi_flag(sbi, SBI_IS_DIRTY);
2325 +
2326 ++ index = dn->node_page->index;
2327 + f2fs_put_page(dn->node_page, 1);
2328 +
2329 + invalidate_mapping_pages(NODE_MAPPING(sbi),
2330 +- dn->node_page->index, dn->node_page->index);
2331 ++ index, index);
2332 +
2333 + dn->node_page = NULL;
2334 + trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
2335 +diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
2336 +index 28b76f0894d4..673fa522a7ab 100644
2337 +--- a/include/linux/compiler-clang.h
2338 ++++ b/include/linux/compiler-clang.h
2339 +@@ -24,3 +24,17 @@
2340 + #ifdef __noretpoline
2341 + #undef __noretpoline
2342 + #endif
2343 ++
2344 ++/*
2345 ++ * Not all versions of clang implement the type-generic versions
2346 ++ * of the builtin overflow checkers. Fortunately, clang implements
2347 ++ * __has_builtin allowing us to avoid awkward version
2348 ++ * checks. Unfortunately, we don't know which version of gcc clang
2349 ++ * pretends to be, so the macro may or may not be defined.
2350 ++ */
2351 ++#undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
2352 ++#if __has_builtin(__builtin_mul_overflow) && \
2353 ++ __has_builtin(__builtin_add_overflow) && \
2354 ++ __has_builtin(__builtin_sub_overflow)
2355 ++#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
2356 ++#endif
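[Note: a minimal standalone illustration of the __has_builtin guard used above (hypothetical, not part of the patch). In a header shared with compilers that never define __has_builtin, a fallback is needed before it can appear in an #if:

#ifndef __has_builtin
#define __has_builtin(x) 0	/* e.g. gcc, which does not provide it */
#endif

#if __has_builtin(__builtin_add_overflow)
	/* safe to use __builtin_add_overflow() here */
#endif
]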
2357 +diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
2358 +index 00b06d7efb83..4816355b9875 100644
2359 +--- a/include/linux/compiler-gcc.h
2360 ++++ b/include/linux/compiler-gcc.h
2361 +@@ -358,3 +358,7 @@
2362 + * code
2363 + */
2364 + #define uninitialized_var(x) x = x
2365 ++
2366 ++#if GCC_VERSION >= 50100
2367 ++#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
2368 ++#endif
2369 +diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
2370 +index bfa08160db3a..547cdc920a3c 100644
2371 +--- a/include/linux/compiler-intel.h
2372 ++++ b/include/linux/compiler-intel.h
2373 +@@ -44,3 +44,7 @@
2374 + #define __builtin_bswap16 _bswap16
2375 + #endif
2376 +
2377 ++/*
2378 ++ * icc defines __GNUC__, but does not implement the builtin overflow checkers.
2379 ++ */
2380 ++#undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
2381 +diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
2382 +index d1324d3c72b0..8d3ca6da3342 100644
2383 +--- a/include/linux/hyperv.h
2384 ++++ b/include/linux/hyperv.h
2385 +@@ -1130,8 +1130,9 @@ struct hv_ring_buffer_debug_info {
2386 + u32 bytes_avail_towrite;
2387 + };
2388 +
2389 +-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
2390 +- struct hv_ring_buffer_debug_info *debug_info);
2391 ++
2392 ++int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
2393 ++ struct hv_ring_buffer_debug_info *debug_info);
2394 +
2395 + /* Vmbus interface */
2396 + #define vmbus_driver_register(driver) \
2397 +diff --git a/include/linux/overflow.h b/include/linux/overflow.h
2398 +new file mode 100644
2399 +index 000000000000..c8890ec358a7
2400 +--- /dev/null
2401 ++++ b/include/linux/overflow.h
2402 +@@ -0,0 +1,205 @@
2403 ++/* SPDX-License-Identifier: GPL-2.0 OR MIT */
2404 ++#ifndef __LINUX_OVERFLOW_H
2405 ++#define __LINUX_OVERFLOW_H
2406 ++
2407 ++#include <linux/compiler.h>
2408 ++
2409 ++/*
2410 ++ * In the fallback code below, we need to compute the minimum and
2411 ++ * maximum values representable in a given type. These macros may also
2412 ++ * be useful elsewhere, so we provide them outside the
2413 ++ * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
2414 ++ *
2415 ++ * It would seem more obvious to do something like
2416 ++ *
2417 ++ * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
2418 ++ * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
2419 ++ *
2420 ++ * Unfortunately, the middle expressions, strictly speaking, have
2421 ++ * undefined behaviour, and at least some versions of gcc warn about
2422 ++ * the type_max expression (but not if -fsanitize=undefined is in
2423 ++ * effect; in that case, the warning is deferred to runtime...).
2424 ++ *
2425 ++ * The slightly excessive casting in type_min is to make sure the
2426 ++ * macros also produce sensible values for the exotic type _Bool. [The
2427 ++ * overflow checkers only almost work for _Bool, but that's
2428 ++ * a-feature-not-a-bug, since people shouldn't be doing arithmetic on
2429 ++ * _Bools. Besides, the gcc builtins don't allow _Bool* as third
2430 ++ * argument.]
2431 ++ *
2432 ++ * Idea stolen from
2433 ++ * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
2434 ++ * credit to Christian Biere.
2435 ++ */
2436 ++#define is_signed_type(type) (((type)(-1)) < (type)1)
2437 ++#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
2438 ++#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
2439 ++#define type_min(T) ((T)((T)-type_max(T)-(T)1))
2440 ++
2441 ++
2442 ++#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
2443 ++/*
2444 ++ * For simplicity and code hygiene, the fallback code below insists on
2445 ++ * a, b and *d having the same type (similar to the min() and max()
2446 ++ * macros), whereas gcc's type-generic overflow checkers accept
2447 ++ * different types. Hence we don't just make check_add_overflow an
2448 ++ * alias for __builtin_add_overflow, but add type checks similar to
2449 ++ * below.
2450 ++ */
2451 ++#define check_add_overflow(a, b, d) ({ \
2452 ++ typeof(a) __a = (a); \
2453 ++ typeof(b) __b = (b); \
2454 ++ typeof(d) __d = (d); \
2455 ++ (void) (&__a == &__b); \
2456 ++ (void) (&__a == __d); \
2457 ++ __builtin_add_overflow(__a, __b, __d); \
2458 ++})
2459 ++
2460 ++#define check_sub_overflow(a, b, d) ({ \
2461 ++ typeof(a) __a = (a); \
2462 ++ typeof(b) __b = (b); \
2463 ++ typeof(d) __d = (d); \
2464 ++ (void) (&__a == &__b); \
2465 ++ (void) (&__a == __d); \
2466 ++ __builtin_sub_overflow(__a, __b, __d); \
2467 ++})
2468 ++
2469 ++#define check_mul_overflow(a, b, d) ({ \
2470 ++ typeof(a) __a = (a); \
2471 ++ typeof(b) __b = (b); \
2472 ++ typeof(d) __d = (d); \
2473 ++ (void) (&__a == &__b); \
2474 ++ (void) (&__a == __d); \
2475 ++ __builtin_mul_overflow(__a, __b, __d); \
2476 ++})
2477 ++
2478 ++#else
2479 ++
2480 ++
2481 ++/* Checking for unsigned overflow is relatively easy without causing UB. */
2482 ++#define __unsigned_add_overflow(a, b, d) ({ \
2483 ++ typeof(a) __a = (a); \
2484 ++ typeof(b) __b = (b); \
2485 ++ typeof(d) __d = (d); \
2486 ++ (void) (&__a == &__b); \
2487 ++ (void) (&__a == __d); \
2488 ++ *__d = __a + __b; \
2489 ++ *__d < __a; \
2490 ++})
2491 ++#define __unsigned_sub_overflow(a, b, d) ({ \
2492 ++ typeof(a) __a = (a); \
2493 ++ typeof(b) __b = (b); \
2494 ++ typeof(d) __d = (d); \
2495 ++ (void) (&__a == &__b); \
2496 ++ (void) (&__a == __d); \
2497 ++ *__d = __a - __b; \
2498 ++ __a < __b; \
2499 ++})
2500 ++/*
2501 ++ * If one of a or b is a compile-time constant, this avoids a division.
2502 ++ */
2503 ++#define __unsigned_mul_overflow(a, b, d) ({ \
2504 ++ typeof(a) __a = (a); \
2505 ++ typeof(b) __b = (b); \
2506 ++ typeof(d) __d = (d); \
2507 ++ (void) (&__a == &__b); \
2508 ++ (void) (&__a == __d); \
2509 ++ *__d = __a * __b; \
2510 ++ __builtin_constant_p(__b) ? \
2511 ++ __b > 0 && __a > type_max(typeof(__a)) / __b : \
2512 ++ __a > 0 && __b > type_max(typeof(__b)) / __a; \
2513 ++})
2514 ++
2515 ++/*
2516 ++ * For signed types, detecting overflow is much harder, especially if
2517 ++ * we want to avoid UB. But the interface of these macros is such that
2518 ++ * we must provide a result in *d, and in fact we must produce the
2519 ++ * result promised by gcc's builtins, which is simply the possibly
2520 ++ * wrapped-around value. Fortunately, we can just formally do the
2521 ++ * operations in the widest relevant unsigned type (u64) and then
2522 ++ * truncate the result - gcc is smart enough to generate the same code
2523 ++ * with and without the (u64) casts.
2524 ++ */
2525 ++
2526 ++/*
2527 ++ * Adding two signed integers can overflow only if they have the same
2528 ++ * sign, and overflow has happened iff the result has the opposite
2529 ++ * sign.
2530 ++ */
2531 ++#define __signed_add_overflow(a, b, d) ({ \
2532 ++ typeof(a) __a = (a); \
2533 ++ typeof(b) __b = (b); \
2534 ++ typeof(d) __d = (d); \
2535 ++ (void) (&__a == &__b); \
2536 ++ (void) (&__a == __d); \
2537 ++ *__d = (u64)__a + (u64)__b; \
2538 ++ (((~(__a ^ __b)) & (*__d ^ __a)) \
2539 ++ & type_min(typeof(__a))) != 0; \
2540 ++})
2541 ++
2542 ++/*
2543 ++ * Subtraction is similar, except that overflow can now happen only
2544 ++ * when the signs are opposite. In this case, overflow has happened if
2545 ++ * the result has the opposite sign of a.
2546 ++ */
2547 ++#define __signed_sub_overflow(a, b, d) ({ \
2548 ++ typeof(a) __a = (a); \
2549 ++ typeof(b) __b = (b); \
2550 ++ typeof(d) __d = (d); \
2551 ++ (void) (&__a == &__b); \
2552 ++ (void) (&__a == __d); \
2553 ++ *__d = (u64)__a - (u64)__b; \
2554 ++ ((((__a ^ __b)) & (*__d ^ __a)) \
2555 ++ & type_min(typeof(__a))) != 0; \
2556 ++})
2557 ++
2558 ++/*
2559 ++ * Signed multiplication is rather hard. gcc always follows C99, so
2560 ++ * division is truncated towards 0. This means that we can write the
2561 ++ * overflow check like this:
2562 ++ *
2563 ++ * (a > 0 && (b > MAX/a || b < MIN/a)) ||
2564 ++ * (a < -1 && (b > MIN/a || b < MAX/a)) ||
2565 ++ * (a == -1 && b == MIN)
2566 ++ *
2567 ++ * The redundant casts of -1 are to silence an annoying -Wtype-limits
2568 ++ * (included in -Wextra) warning: When the type is u8 or u16, the
2569 ++ * __b_c_e in check_mul_overflow obviously selects
2570 ++ * __unsigned_mul_overflow, but unfortunately gcc still parses this
2571 ++ * code and warns about the limited range of __b.
2572 ++ */
2573 ++
2574 ++#define __signed_mul_overflow(a, b, d) ({ \
2575 ++ typeof(a) __a = (a); \
2576 ++ typeof(b) __b = (b); \
2577 ++ typeof(d) __d = (d); \
2578 ++ typeof(a) __tmax = type_max(typeof(a)); \
2579 ++ typeof(a) __tmin = type_min(typeof(a)); \
2580 ++ (void) (&__a == &__b); \
2581 ++ (void) (&__a == __d); \
2582 ++ *__d = (u64)__a * (u64)__b; \
2583 ++ (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \
2584 ++ (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \
2585 ++ (__b == (typeof(__b))-1 && __a == __tmin); \
2586 ++})
2587 ++
2588 ++
2589 ++#define check_add_overflow(a, b, d) \
2590 ++ __builtin_choose_expr(is_signed_type(typeof(a)), \
2591 ++ __signed_add_overflow(a, b, d), \
2592 ++ __unsigned_add_overflow(a, b, d))
2593 ++
2594 ++#define check_sub_overflow(a, b, d) \
2595 ++ __builtin_choose_expr(is_signed_type(typeof(a)), \
2596 ++ __signed_sub_overflow(a, b, d), \
2597 ++ __unsigned_sub_overflow(a, b, d))
2598 ++
2599 ++#define check_mul_overflow(a, b, d) \
2600 ++ __builtin_choose_expr(is_signed_type(typeof(a)), \
2601 ++ __signed_mul_overflow(a, b, d), \
2602 ++ __unsigned_mul_overflow(a, b, d))
2603 ++
2604 ++
2605 ++#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
2606 ++
2607 ++#endif /* __LINUX_OVERFLOW_H */
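[Note: a brief usage sketch for the new helpers (illustrative only; the patch adds no callers, and "struct elem"/"n" are hypothetical). Each checker stores the possibly wrapped result in the third argument and returns true on overflow:

	size_t bytes;

	/* n * sizeof(struct elem) with overflow detection */
	if (check_mul_overflow(n, sizeof(struct elem), &bytes))
		return -EOVERFLOW;
	elems = kmalloc(bytes, GFP_KERNEL);
]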
2608 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
2609 +index f6250555ce7d..39c2570ddcf6 100644
2610 +--- a/include/linux/skbuff.h
2611 ++++ b/include/linux/skbuff.h
2612 +@@ -3163,6 +3163,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
2613 + *
2614 + * This is exactly the same as pskb_trim except that it ensures the
2615 + * checksums of received packets are still valid after the operation.
2616 ++ * It can change skb pointers.
2617 + */
2618 +
2619 + static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
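[Note: several hunks in this patch (pppoe_rcv, br_validate_ipv6, reject6_br_csum_ok, ip_rcv) exist precisely because of the warning added above: pskb_trim_rcsum() may reallocate skb data, so any cached header pointer must be reloaded afterwards. The pattern:

	if (pskb_trim_rcsum(skb, len))
		goto drop;
	iph = ip_hdr(skb);	/* reload: trim may have moved skb data */
]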
2620 +diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
2621 +index 32df52869a14..b711317a796c 100644
2622 +--- a/include/net/ip_fib.h
2623 ++++ b/include/net/ip_fib.h
2624 +@@ -233,7 +233,7 @@ int fib_table_delete(struct net *, struct fib_table *, struct fib_config *,
2625 + struct netlink_ext_ack *extack);
2626 + int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
2627 + struct netlink_callback *cb);
2628 +-int fib_table_flush(struct net *net, struct fib_table *table);
2629 ++int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all);
2630 + struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
2631 + void fib_table_flush_external(struct fib_table *table);
2632 + void fib_free_table(struct fib_table *tb);
2633 +diff --git a/include/xen/interface/vcpu.h b/include/xen/interface/vcpu.h
2634 +index 98188c87f5c1..504c71601511 100644
2635 +--- a/include/xen/interface/vcpu.h
2636 ++++ b/include/xen/interface/vcpu.h
2637 +@@ -178,4 +178,46 @@ DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_vcpu_info);
2638 +
2639 + /* Send an NMI to the specified VCPU. @extra_arg == NULL. */
2640 + #define VCPUOP_send_nmi 11
2641 ++
2642 ++/*
2643 ++ * Get the physical ID information for a pinned vcpu's underlying physical
2644 ++ * processor. The physical ID information is architecture-specific.
2645 ++ * On x86: id[31:0]=apic_id, id[63:32]=acpi_id.
2646 ++ * This command returns -EINVAL if it is not a valid operation for this VCPU.
2647 ++ */
2648 ++#define VCPUOP_get_physid 12 /* arg == vcpu_get_physid_t */
2649 ++struct vcpu_get_physid {
2650 ++ uint64_t phys_id;
2651 ++};
2652 ++DEFINE_GUEST_HANDLE_STRUCT(vcpu_get_physid);
2653 ++#define xen_vcpu_physid_to_x86_apicid(physid) ((uint32_t)(physid))
2654 ++#define xen_vcpu_physid_to_x86_acpiid(physid) ((uint32_t)((physid) >> 32))
2655 ++
2656 ++/*
2657 ++ * Register a memory location to get a secondary copy of the vcpu time
2658 ++ * parameters. The master copy still exists as part of the vcpu shared
2659 ++ * memory area, and this secondary copy is updated whenever the master copy
2660 ++ * is updated (and using the same versioning scheme for synchronisation).
2661 ++ *
2662 ++ * The intent is that this copy may be mapped (RO) into userspace so
2663 ++ * that usermode can compute system time using the time info and the
2664 ++ * tsc. Usermode will see an array of vcpu_time_info structures, one
2665 ++ * for each vcpu, and choose the right one by an existing mechanism
2666 ++ * which allows it to get the current vcpu number (such as via a
2667 ++ * segment limit). It can then apply the normal algorithm to compute
2668 ++ * system time from the tsc.
2669 ++ *
2670 ++ * @extra_arg == pointer to vcpu_register_time_info_memory_area structure.
2671 ++ */
2672 ++#define VCPUOP_register_vcpu_time_memory_area 13
2673 ++DEFINE_GUEST_HANDLE_STRUCT(vcpu_time_info);
2674 ++struct vcpu_register_time_memory_area {
2675 ++ union {
2676 ++ GUEST_HANDLE(vcpu_time_info) h;
2677 ++ struct pvclock_vcpu_time_info *v;
2678 ++ uint64_t p;
2679 ++ } addr;
2680 ++};
2681 ++DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_time_memory_area);
2682 ++
2683 + #endif /* __XEN_PUBLIC_VCPU_H__ */
2684 +diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
2685 +index 2da660d53a4b..6e8c230ca877 100644
2686 +--- a/kernel/time/posix-cpu-timers.c
2687 ++++ b/kernel/time/posix-cpu-timers.c
2688 +@@ -685,6 +685,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
2689 + * set up the signal and overrun bookkeeping.
2690 + */
2691 + timer->it.cpu.incr = timespec64_to_ns(&new->it_interval);
2692 ++ timer->it_interval = ns_to_ktime(timer->it.cpu.incr);
2693 +
2694 + /*
2695 + * This acts as a modification timestamp for the timer,
2696 +diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
2697 +index 48fb17417fac..57f69f31a2a2 100644
2698 +--- a/net/bridge/br_forward.c
2699 ++++ b/net/bridge/br_forward.c
2700 +@@ -35,10 +35,10 @@ static inline int should_deliver(const struct net_bridge_port *p,
2701 +
2702 + int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
2703 + {
2704 ++ skb_push(skb, ETH_HLEN);
2705 + if (!is_skb_forwardable(skb->dev, skb))
2706 + goto drop;
2707 +
2708 +- skb_push(skb, ETH_HLEN);
2709 + br_drop_fake_rtable(skb);
2710 +
2711 + if (skb->ip_summed == CHECKSUM_PARTIAL &&
2712 +@@ -96,12 +96,11 @@ static void __br_forward(const struct net_bridge_port *to,
2713 + net = dev_net(indev);
2714 + } else {
2715 + if (unlikely(netpoll_tx_running(to->br->dev))) {
2716 +- if (!is_skb_forwardable(skb->dev, skb)) {
2717 ++ skb_push(skb, ETH_HLEN);
2718 ++ if (!is_skb_forwardable(skb->dev, skb))
2719 + kfree_skb(skb);
2720 +- } else {
2721 +- skb_push(skb, ETH_HLEN);
2722 ++ else
2723 + br_netpoll_send_skb(to, skb);
2724 +- }
2725 + return;
2726 + }
2727 + br_hook = NF_BR_LOCAL_OUT;
2728 +diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
2729 +index 96c072e71ea2..5811208863b7 100644
2730 +--- a/net/bridge/br_netfilter_ipv6.c
2731 ++++ b/net/bridge/br_netfilter_ipv6.c
2732 +@@ -131,6 +131,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb)
2733 + IPSTATS_MIB_INDISCARDS);
2734 + goto drop;
2735 + }
2736 ++ hdr = ipv6_hdr(skb);
2737 + }
2738 + if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb))
2739 + goto drop;
2740 +diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
2741 +index eaf05de37f75..b09ec869c913 100644
2742 +--- a/net/bridge/netfilter/nft_reject_bridge.c
2743 ++++ b/net/bridge/netfilter/nft_reject_bridge.c
2744 +@@ -230,6 +230,7 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
2745 + pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
2746 + return false;
2747 +
2748 ++ ip6h = ipv6_hdr(skb);
2749 + thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
2750 + if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
2751 + return false;
2752 +diff --git a/net/can/bcm.c b/net/can/bcm.c
2753 +index 13690334efa3..12d851c4604d 100644
2754 +--- a/net/can/bcm.c
2755 ++++ b/net/can/bcm.c
2756 +@@ -67,6 +67,9 @@
2757 + */
2758 + #define MAX_NFRAMES 256
2759 +
2760 ++/* limit timers to 400 days for sending/timeouts */
2761 ++#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
2762 ++
2763 + /* use of last_frames[index].flags */
2764 + #define RX_RECV 0x40 /* received data for this element */
2765 + #define RX_THR 0x80 /* element not been sent due to throttle feature */
2766 +@@ -140,6 +143,22 @@ static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
2767 + return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
2768 + }
2769 +
2770 ++/* check limitations for timeval provided by user */
2771 ++static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
2772 ++{
2773 ++ if ((msg_head->ival1.tv_sec < 0) ||
2774 ++ (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
2775 ++ (msg_head->ival1.tv_usec < 0) ||
2776 ++ (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
2777 ++ (msg_head->ival2.tv_sec < 0) ||
2778 ++ (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
2779 ++ (msg_head->ival2.tv_usec < 0) ||
2780 ++ (msg_head->ival2.tv_usec >= USEC_PER_SEC))
2781 ++ return true;
2782 ++
2783 ++ return false;
2784 ++}
2785 ++
2786 + #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
2787 + #define OPSIZ sizeof(struct bcm_op)
2788 + #define MHSIZ sizeof(struct bcm_msg_head)
2789 +@@ -886,6 +905,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
2790 + if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
2791 + return -EINVAL;
2792 +
2793 ++ /* check timeval limitations */
2794 ++ if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
2795 ++ return -EINVAL;
2796 ++
2797 + /* check the given can_id */
2798 + op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
2799 + if (op) {
2800 +@@ -1065,6 +1088,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
2801 + (!(msg_head->can_id & CAN_RTR_FLAG))))
2802 + return -EINVAL;
2803 +
2804 ++ /* check timeval limitations */
2805 ++ if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
2806 ++ return -EINVAL;
2807 ++
2808 + /* check the given can_id */
2809 + op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
2810 + if (op) {
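[Note: the new bound works out to BCM_TIMER_SEC_MAX = 400 * 24 * 60 * 60 = 34,560,000 seconds, about 3.5e16 ns, comfortably inside the signed 64-bit ktime range. A hypothetical setup that the new checks reject:

	struct bcm_msg_head mh = { .flags = SETTIMER };

	mh.ival1.tv_sec  = 500 * 24 * 60 * 60;	/* 500 days: over the cap */
	mh.ival1.tv_usec = 0;
	/* bcm_tx_setup()/bcm_rx_setup() now fail this with -EINVAL */
	if (bcm_is_invalid_tv(&mh))
		return -EINVAL;
]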
2811 +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
2812 +index 1b3f860f7dcd..b5317b2b191d 100644
2813 +--- a/net/ipv4/fib_frontend.c
2814 ++++ b/net/ipv4/fib_frontend.c
2815 +@@ -193,7 +193,7 @@ static void fib_flush(struct net *net)
2816 + struct fib_table *tb;
2817 +
2818 + hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
2819 +- flushed += fib_table_flush(net, tb);
2820 ++ flushed += fib_table_flush(net, tb, false);
2821 + }
2822 +
2823 + if (flushed)
2824 +@@ -1299,7 +1299,7 @@ static void ip_fib_net_exit(struct net *net)
2825 +
2826 + hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
2827 + hlist_del(&tb->tb_hlist);
2828 +- fib_table_flush(net, tb);
2829 ++ fib_table_flush(net, tb, true);
2830 + fib_free_table(tb);
2831 + }
2832 + }
2833 +diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
2834 +index c636650a6a70..bb847d280778 100644
2835 +--- a/net/ipv4/fib_trie.c
2836 ++++ b/net/ipv4/fib_trie.c
2837 +@@ -1836,7 +1836,7 @@ void fib_table_flush_external(struct fib_table *tb)
2838 + }
2839 +
2840 + /* Caller must hold RTNL. */
2841 +-int fib_table_flush(struct net *net, struct fib_table *tb)
2842 ++int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
2843 + {
2844 + struct trie *t = (struct trie *)tb->tb_data;
2845 + struct key_vector *pn = t->kv;
2846 +@@ -1884,8 +1884,17 @@ int fib_table_flush(struct net *net, struct fib_table *tb)
2847 + hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
2848 + struct fib_info *fi = fa->fa_info;
2849 +
2850 +- if (!fi || !(fi->fib_flags & RTNH_F_DEAD) ||
2851 +- tb->tb_id != fa->tb_id) {
2852 ++ if (!fi || tb->tb_id != fa->tb_id ||
2853 ++ (!(fi->fib_flags & RTNH_F_DEAD) &&
2854 ++ !fib_props[fa->fa_type].error)) {
2855 ++ slen = fa->fa_slen;
2856 ++ continue;
2857 ++ }
2858 ++
2859 ++ /* Do not flush error routes if network namespace is
2860 ++ * not being dismantled
2861 ++ */
2862 ++ if (!flush_all && fib_props[fa->fa_type].error) {
2863 + slen = fa->fa_slen;
2864 + continue;
2865 + }
2866 +diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
2867 +index 653be98fe3fb..6ffee9d2b0e5 100644
2868 +--- a/net/ipv4/inet_fragment.c
2869 ++++ b/net/ipv4/inet_fragment.c
2870 +@@ -90,7 +90,7 @@ static void inet_frags_free_cb(void *ptr, void *arg)
2871 +
2872 + void inet_frags_exit_net(struct netns_frags *nf)
2873 + {
2874 +- nf->low_thresh = 0; /* prevent creation of new frags */
2875 ++ nf->high_thresh = 0; /* prevent creation of new frags */
2876 +
2877 + rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL);
2878 + }
2879 +diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
2880 +index 57fc13c6ab2b..1b160378ea9c 100644
2881 +--- a/net/ipv4/ip_input.c
2882 ++++ b/net/ipv4/ip_input.c
2883 +@@ -481,6 +481,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
2884 + goto drop;
2885 + }
2886 +
2887 ++ iph = ip_hdr(skb);
2888 + skb->transport_header = skb->network_header + iph->ihl*4;
2889 +
2890 + /* Remove any debris in the socket control block */
2891 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
2892 +index 8109985e78a1..fd14501ac3af 100644
2893 +--- a/net/ipv4/tcp.c
2894 ++++ b/net/ipv4/tcp.c
2895 +@@ -1178,7 +1178,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
2896 + flags = msg->msg_flags;
2897 +
2898 + if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
2899 +- if (sk->sk_state != TCP_ESTABLISHED) {
2900 ++ if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
2901 + err = -EINVAL;
2902 + goto out_err;
2903 + }
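[Note: TCPF_ESTABLISHED is 1 << TCP_ESTABLISHED, so the bitmask form generalizes the old equality check: MSG_ZEROCOPY sends are now admitted in both ESTABLISHED and CLOSE_WAIT, the latter being the state where the peer has sent FIN but local writes remain valid.]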
2904 +diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
2905 +index f70e9cbf33d5..e687b89dafe6 100644
2906 +--- a/net/openvswitch/flow_netlink.c
2907 ++++ b/net/openvswitch/flow_netlink.c
2908 +@@ -459,7 +459,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
2909 + return -EINVAL;
2910 + }
2911 +
2912 +- if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
2913 ++ if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) {
2914 + attrs |= 1 << type;
2915 + a[type] = nla;
2916 + }
2917 +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
2918 +index 04a70793c1fe..32819d1e2075 100644
2919 +--- a/net/sched/cls_api.c
2920 ++++ b/net/sched/cls_api.c
2921 +@@ -318,7 +318,6 @@ EXPORT_SYMBOL(tcf_block_put);
2922 + int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
2923 + struct tcf_result *res, bool compat_mode)
2924 + {
2925 +- __be16 protocol = tc_skb_protocol(skb);
2926 + #ifdef CONFIG_NET_CLS_ACT
2927 + const int max_reclassify_loop = 4;
2928 + const struct tcf_proto *orig_tp = tp;
2929 +@@ -328,6 +327,7 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
2930 + reclassify:
2931 + #endif
2932 + for (; tp; tp = rcu_dereference_bh(tp->next)) {
2933 ++ __be16 protocol = tc_skb_protocol(skb);
2934 + int err;
2935 +
2936 + if (tp->protocol != protocol &&
2937 +@@ -359,7 +359,6 @@ reset:
2938 + }
2939 +
2940 + tp = first_tp;
2941 +- protocol = tc_skb_protocol(skb);
2942 + goto reclassify;
2943 + #endif
2944 + }
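[Note: moving the tc_skb_protocol() read inside the loop matters because a classifier's action can rewrite skb->protocol (e.g. a vlan push/pop) before reclassification; the old code cached the value once outside the loop, so later iterations matched against a stale protocol.]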
2945 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
2946 +index 0a225dc85044..fb1cec46380d 100644
2947 +--- a/sound/pci/hda/patch_conexant.c
2948 ++++ b/sound/pci/hda/patch_conexant.c
2949 +@@ -969,6 +969,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
2950 + SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
2951 + SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
2952 + SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
2953 ++ SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
2954 + SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
2955 + SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
2956 + SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
2957 +diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
2958 +index 12f2ecf3a4fe..662afc529060 100644
2959 +--- a/sound/soc/codecs/rt5514-spi.c
2960 ++++ b/sound/soc/codecs/rt5514-spi.c
2961 +@@ -265,6 +265,8 @@ static int rt5514_spi_pcm_probe(struct snd_soc_platform *platform)
2962 +
2963 + rt5514_dsp = devm_kzalloc(platform->dev, sizeof(*rt5514_dsp),
2964 + GFP_KERNEL);
2965 ++ if (!rt5514_dsp)
2966 ++ return -ENOMEM;
2967 +
2968 + rt5514_dsp->dev = &rt5514_spi->dev;
2969 + mutex_init(&rt5514_dsp->dma_lock);
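The rt5514-spi.c hunk adds the missing NULL check after devm_kzalloc(); without it, the very next line would dereference rt5514_dsp and oops under memory pressure. A hedged userspace analogue of the check-before-use pattern (dsp_ctx and dsp_probe are illustrative names):

    #include <stdlib.h>

    struct dsp_ctx { void *dev; };

    static int dsp_probe(void *dev, struct dsp_ctx **out)
    {
            struct dsp_ctx *ctx = calloc(1, sizeof(*ctx));

            if (!ctx)
                    return -1;      /* the kernel driver returns -ENOMEM */
            ctx->dev = dev;         /* safe only after the NULL check */
            *out = ctx;
            return 0;
    }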
2970 +diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
2971 +index 43e7fdd19f29..4558c8b93036 100644
2972 +--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
2973 ++++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
2974 +@@ -399,7 +399,13 @@ static int sst_media_hw_params(struct snd_pcm_substream *substream,
2975 + struct snd_pcm_hw_params *params,
2976 + struct snd_soc_dai *dai)
2977 + {
2978 +- snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
2979 ++ int ret;
2980 ++
2981 ++ ret =
2982 ++ snd_pcm_lib_malloc_pages(substream,
2983 ++ params_buffer_bytes(params));
2984 ++ if (ret)
2985 ++ return ret;
2986 + memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
2987 + return 0;
2988 + }
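The sst-mfld-platform-pcm.c hunk has the same flavor: snd_pcm_lib_malloc_pages() can fail, and its result used to be ignored before the memset() of runtime->dma_area. The fix propagates the error so the buffer is only touched on success. A sketch of the general shape, where alloc_buf() and buf_area() are hypothetical stand-ins for the ALSA calls:

    #include <stddef.h>
    #include <string.h>

    /* alloc_buf() may fail, so its status is checked before the
     * buffer it would have produced is ever written. */
    extern int alloc_buf(void *rt, size_t bytes);
    extern void *buf_area(void *rt);

    static int hw_params(void *rt, size_t bytes)
    {
            int ret = alloc_buf(rt, bytes);

            if (ret)
                    return ret;     /* don't memset an unallocated area */
            memset(buf_area(rt), 0, bytes);
            return 0;
    }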
2989 +diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
2990 +index 1e9c974faf67..f1fe5acdbba4 100644
2991 +--- a/tools/perf/util/unwind-libdw.c
2992 ++++ b/tools/perf/util/unwind-libdw.c
2993 +@@ -44,13 +44,13 @@ static int __report_module(struct addr_location *al, u64 ip,
2994 + Dwarf_Addr s;
2995 +
2996 + dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
2997 +- if (s != al->map->start)
2998 ++ if (s != al->map->start - al->map->pgoff)
2999 + mod = 0;
3000 + }
3001 +
3002 + if (!mod)
3003 + mod = dwfl_report_elf(ui->dwfl, dso->short_name,
3004 +- dso->long_name, -1, al->map->start,
3005 ++ (dso->symsrc_filename ? dso->symsrc_filename : dso->long_name), -1, al->map->start - al->map->pgoff,
3006 + false);
3007 +
3008 + return mod && dwfl_addrmodule(ui->dwfl, ip) == mod ? 0 : -1;
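The unwind-libdw.c hunk corrects where perf tells libdw a module is loaded: for a mapping that does not begin at file offset zero, the object's base is the mapping start minus the mapping's file offset (al->map->pgoff), and the hunk additionally prefers dso->symsrc_filename (e.g. a separate debug-info file) when one is set. The base-address arithmetic in isolation, as a hedged sketch:

    #include <stdint.h>

    /* For a file-backed mapping, the object's load base is the
     * mapping's start address minus the file offset it maps from. */
    static uint64_t elf_load_base(uint64_t map_start, uint64_t map_pgoff)
    {
            return map_start - map_pgoff;
    }

    /* e.g. text mapped at 0x7f0000001000 from file offset 0x1000
     * gives base 0x7f0000000000, the address dwfl_report_elf() needs. */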
3009 +diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
3010 +index e350cf3d4f90..194759ec9e70 100644
3011 +--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
3012 ++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
3013 +@@ -145,15 +145,6 @@ struct seccomp_data {
3014 + #define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
3015 + #endif
3016 +
3017 +-#ifndef PTRACE_SECCOMP_GET_METADATA
3018 +-#define PTRACE_SECCOMP_GET_METADATA 0x420d
3019 +-
3020 +-struct seccomp_metadata {
3021 +- __u64 filter_off; /* Input: which filter */
3022 +- __u64 flags; /* Output: filter's flags */
3023 +-};
3024 +-#endif
3025 +-
3026 + #ifndef seccomp
3027 + int seccomp(unsigned int op, unsigned int flags, void *args)
3028 + {
3029 +@@ -2870,58 +2861,6 @@ TEST(get_action_avail)
3030 + EXPECT_EQ(errno, EOPNOTSUPP);
3031 + }
3032 +
3033 +-TEST(get_metadata)
3034 +-{
3035 +- pid_t pid;
3036 +- int pipefd[2];
3037 +- char buf;
3038 +- struct seccomp_metadata md;
3039 +-
3040 +- ASSERT_EQ(0, pipe(pipefd));
3041 +-
3042 +- pid = fork();
3043 +- ASSERT_GE(pid, 0);
3044 +- if (pid == 0) {
3045 +- struct sock_filter filter[] = {
3046 +- BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
3047 +- };
3048 +- struct sock_fprog prog = {
3049 +- .len = (unsigned short)ARRAY_SIZE(filter),
3050 +- .filter = filter,
3051 +- };
3052 +-
3053 +- /* one with log, one without */
3054 +- ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER,
3055 +- SECCOMP_FILTER_FLAG_LOG, &prog));
3056 +- ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog));
3057 +-
3058 +- ASSERT_EQ(0, close(pipefd[0]));
3059 +- ASSERT_EQ(1, write(pipefd[1], "1", 1));
3060 +- ASSERT_EQ(0, close(pipefd[1]));
3061 +-
3062 +- while (1)
3063 +- sleep(100);
3064 +- }
3065 +-
3066 +- ASSERT_EQ(0, close(pipefd[1]));
3067 +- ASSERT_EQ(1, read(pipefd[0], &buf, 1));
3068 +-
3069 +- ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid));
3070 +- ASSERT_EQ(pid, waitpid(pid, NULL, 0));
3071 +-
3072 +- md.filter_off = 0;
3073 +- ASSERT_EQ(sizeof(md), ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md));
3074 +- EXPECT_EQ(md.flags, SECCOMP_FILTER_FLAG_LOG);
3075 +- EXPECT_EQ(md.filter_off, 0);
3076 +-
3077 +- md.filter_off = 1;
3078 +- ASSERT_EQ(sizeof(md), ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md));
3079 +- EXPECT_EQ(md.flags, 0);
3080 +- EXPECT_EQ(md.filter_off, 1);
3081 +-
3082 +- ASSERT_EQ(0, kill(pid, SIGKILL));
3083 +-}
3084 +-
3085 + /*
3086 + * TODO:
3087 + * - add microbenchmarks
3088 +diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
3089 +index 460b4bdf4c1e..5d546dcdbc80 100644
3090 +--- a/tools/testing/selftests/x86/protection_keys.c
3091 ++++ b/tools/testing/selftests/x86/protection_keys.c
3092 +@@ -1133,6 +1133,21 @@ void test_pkey_syscalls_bad_args(int *ptr, u16 pkey)
3093 + pkey_assert(err);
3094 + }
3095 +
3096 ++void become_child(void)
3097 ++{
3098 ++ pid_t forkret;
3099 ++
3100 ++ forkret = fork();
3101 ++ pkey_assert(forkret >= 0);
3102 ++ dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
3103 ++
3104 ++ if (!forkret) {
3105 ++ /* in the child */
3106 ++ return;
3107 ++ }
3108 ++ exit(0);
3109 ++}
3110 ++
3111 + /* Assumes that all pkeys other than 'pkey' are unallocated */
3112 + void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
3113 + {
3114 +@@ -1141,7 +1156,7 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
3115 + int nr_allocated_pkeys = 0;
3116 + int i;
3117 +
3118 +- for (i = 0; i < NR_PKEYS*2; i++) {
3119 ++ for (i = 0; i < NR_PKEYS*3; i++) {
3120 + int new_pkey;
3121 + dprintf1("%s() alloc loop: %d\n", __func__, i);
3122 + new_pkey = alloc_pkey();
3123 +@@ -1152,20 +1167,26 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
3124 + if ((new_pkey == -1) && (errno == ENOSPC)) {
3125 + dprintf2("%s() failed to allocate pkey after %d tries\n",
3126 + __func__, nr_allocated_pkeys);
3127 +- break;
3128 ++ } else {
3129 ++ /*
3130 ++ * Ensure the number of successes never
3131 ++ * exceeds the number of keys supported
3132 ++ * in the hardware.
3133 ++ */
3134 ++ pkey_assert(nr_allocated_pkeys < NR_PKEYS);
3135 ++ allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
3136 + }
3137 +- pkey_assert(nr_allocated_pkeys < NR_PKEYS);
3138 +- allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
3139 ++
3140 ++ /*
3141 ++ * Make sure that allocation state is properly
3142 ++ * preserved across fork().
3143 ++ */
3144 ++ if (i == NR_PKEYS*2)
3145 ++ become_child();
3146 + }
3147 +
3148 + dprintf3("%s()::%d\n", __func__, __LINE__);
3149 +
3150 +- /*
3151 +- * ensure it did not reach the end of the loop without
3152 +- * failure:
3153 +- */
3154 +- pkey_assert(i < NR_PKEYS*2);
3155 +-
3156 + /*
3157 + * There are 16 pkeys supported in hardware. Three are
3158 + * allocated by the time we get here: