
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Thu, 14 May 2020 11:32:16
Message-Id: 1589455918.6b83990b35d00daf19790e5ed53c976672be9481.mpagano@gentoo
commit:     6b83990b35d00daf19790e5ed53c976672be9481
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu May 14 11:31:58 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu May 14 11:31:58 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6b83990b

Linux patch 5.4.41

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1040_linux-5.4.41.patch | 2828 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2832 insertions(+)

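For readers unfamiliar with the genpatches layout: each kernel patch is declared in 0000_README as a three-line Patch/From/Desc stanza (see the 0000_README hunk below), and the numbered patch files are applied to the vanilla tree in filename order. The following Python sketch is purely illustrative — it is not part of this commit, and the stanza-parsing logic and script name are assumptions based only on the hunk below — but it shows how the declared patches and their descriptions could be listed:

#!/usr/bin/env python3
"""Illustrative sketch (not part of this commit): list the Patch/From/Desc
stanzas declared in a genpatches 0000_README, in the order they appear."""
import re
import sys


def read_patch_stanzas(readme_path):
    """Return one dict per "Patch:" stanza; From/Desc lines attach to the
    most recently seen Patch entry."""
    stanzas = []
    current = None
    with open(readme_path, encoding="utf-8") as fh:
        for line in fh:
            m = re.match(r"^(Patch|From|Desc):\s*(\S.*)$", line.strip())
            if not m:
                continue
            key, value = m.groups()
            if key == "Patch":
                current = {"Patch": value}
                stanzas.append(current)
            elif current is not None:
                current.setdefault(key, value)
    return stanzas


if __name__ == "__main__":
    path = sys.argv[1] if len(sys.argv) > 1 else "0000_README"
    for stanza in read_patch_stanzas(path):
        print(f"{stanza['Patch']:30} {stanza.get('Desc', '')}")

Run from a checkout of proj/linux-patches, e.g. "python3 list_patches.py 0000_README" (the script name is hypothetical); for this commit it would show the new 1040_linux-5.4.41.patch entry alongside the existing ones.
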
16 diff --git a/0000_README b/0000_README
17 index ed1776e..e6a9058 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -203,6 +203,10 @@ Patch: 1039_linux-5.4.40.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.4.40
23
24 +Patch: 1040_linux-5.4.41.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.4.41
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1040_linux-5.4.41.patch b/1040_linux-5.4.41.patch
33 new file mode 100644
34 index 0000000..50cddb8
35 --- /dev/null
36 +++ b/1040_linux-5.4.41.patch
37 @@ -0,0 +1,2828 @@
38 +diff --git a/Makefile b/Makefile
39 +index 6d4fca82529a..a8c772b299aa 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 4
46 +-SUBLEVEL = 40
47 ++SUBLEVEL = 41
48 + EXTRAVERSION =
49 + NAME = Kleptomaniac Octopus
50 +
51 +diff --git a/arch/arm/crypto/nhpoly1305-neon-glue.c b/arch/arm/crypto/nhpoly1305-neon-glue.c
52 +index ae5aefc44a4d..ffa8d73fe722 100644
53 +--- a/arch/arm/crypto/nhpoly1305-neon-glue.c
54 ++++ b/arch/arm/crypto/nhpoly1305-neon-glue.c
55 +@@ -30,7 +30,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc,
56 + return crypto_nhpoly1305_update(desc, src, srclen);
57 +
58 + do {
59 +- unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
60 ++ unsigned int n = min_t(unsigned int, srclen, SZ_4K);
61 +
62 + kernel_neon_begin();
63 + crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon);
64 +diff --git a/arch/arm64/crypto/nhpoly1305-neon-glue.c b/arch/arm64/crypto/nhpoly1305-neon-glue.c
65 +index 895d3727c1fb..c5405e6a6db7 100644
66 +--- a/arch/arm64/crypto/nhpoly1305-neon-glue.c
67 ++++ b/arch/arm64/crypto/nhpoly1305-neon-glue.c
68 +@@ -30,7 +30,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc,
69 + return crypto_nhpoly1305_update(desc, src, srclen);
70 +
71 + do {
72 +- unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
73 ++ unsigned int n = min_t(unsigned int, srclen, SZ_4K);
74 +
75 + kernel_neon_begin();
76 + crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon);
77 +diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
78 +index dfd626447482..5271ab366bee 100644
79 +--- a/arch/arm64/kvm/guest.c
80 ++++ b/arch/arm64/kvm/guest.c
81 +@@ -202,6 +202,13 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
82 + }
83 +
84 + memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
85 ++
86 ++ if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
87 ++ int i;
88 ++
89 ++ for (i = 0; i < 16; i++)
90 ++ *vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i);
91 ++ }
92 + out:
93 + return err;
94 + }
95 +diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
96 +index bbeb6a5a6ba6..0be3355e3499 100644
97 +--- a/arch/arm64/mm/hugetlbpage.c
98 ++++ b/arch/arm64/mm/hugetlbpage.c
99 +@@ -230,6 +230,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
100 + ptep = (pte_t *)pudp;
101 + } else if (sz == (CONT_PTE_SIZE)) {
102 + pmdp = pmd_alloc(mm, pudp, addr);
103 ++ if (!pmdp)
104 ++ return NULL;
105 +
106 + WARN_ON(addr & (sz - 1));
107 + /*
108 +diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
109 +index f5d813c1304d..319812923012 100644
110 +--- a/arch/riscv/mm/init.c
111 ++++ b/arch/riscv/mm/init.c
112 +@@ -116,7 +116,8 @@ void __init setup_bootmem(void)
113 + memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
114 +
115 + set_max_mapnr(PFN_DOWN(mem_size));
116 +- max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
117 ++ max_pfn = PFN_DOWN(memblock_end_of_DRAM());
118 ++ max_low_pfn = max_pfn;
119 +
120 + #ifdef CONFIG_BLK_DEV_INITRD
121 + setup_initrd();
122 +diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
123 +index ed52ffa8d5d4..560310e29e27 100644
124 +--- a/arch/s390/kvm/priv.c
125 ++++ b/arch/s390/kvm/priv.c
126 +@@ -626,10 +626,12 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
127 + * available for the guest are AQIC and TAPQ with the t bit set
128 + * since we do not set IC.3 (FIII) we currently will only intercept
129 + * the AQIC function code.
130 ++ * Note: running nested under z/VM can result in intercepts for other
131 ++ * function codes, e.g. PQAP(QCI). We do not support this and bail out.
132 + */
133 + reg0 = vcpu->run->s.regs.gprs[0];
134 + fc = (reg0 >> 24) & 0xff;
135 +- if (WARN_ON_ONCE(fc != 0x03))
136 ++ if (fc != 0x03)
137 + return -EOPNOTSUPP;
138 +
139 + /* PQAP instruction is allowed for guest kernel only */
140 +diff --git a/arch/x86/crypto/nhpoly1305-avx2-glue.c b/arch/x86/crypto/nhpoly1305-avx2-glue.c
141 +index f7567cbd35b6..80fcb85736e1 100644
142 +--- a/arch/x86/crypto/nhpoly1305-avx2-glue.c
143 ++++ b/arch/x86/crypto/nhpoly1305-avx2-glue.c
144 +@@ -29,7 +29,7 @@ static int nhpoly1305_avx2_update(struct shash_desc *desc,
145 + return crypto_nhpoly1305_update(desc, src, srclen);
146 +
147 + do {
148 +- unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
149 ++ unsigned int n = min_t(unsigned int, srclen, SZ_4K);
150 +
151 + kernel_fpu_begin();
152 + crypto_nhpoly1305_update_helper(desc, src, n, _nh_avx2);
153 +diff --git a/arch/x86/crypto/nhpoly1305-sse2-glue.c b/arch/x86/crypto/nhpoly1305-sse2-glue.c
154 +index a661ede3b5cf..cc6b7c1a2705 100644
155 +--- a/arch/x86/crypto/nhpoly1305-sse2-glue.c
156 ++++ b/arch/x86/crypto/nhpoly1305-sse2-glue.c
157 +@@ -29,7 +29,7 @@ static int nhpoly1305_sse2_update(struct shash_desc *desc,
158 + return crypto_nhpoly1305_update(desc, src, srclen);
159 +
160 + do {
161 +- unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
162 ++ unsigned int n = min_t(unsigned int, srclen, SZ_4K);
163 +
164 + kernel_fpu_begin();
165 + crypto_nhpoly1305_update_helper(desc, src, n, _nh_sse2);
166 +diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
167 +index 515c0ceeb4a3..b3f121478738 100644
168 +--- a/arch/x86/entry/calling.h
169 ++++ b/arch/x86/entry/calling.h
170 +@@ -98,13 +98,6 @@ For 32-bit we have the following conventions - kernel is built with
171 + #define SIZEOF_PTREGS 21*8
172 +
173 + .macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
174 +- /*
175 +- * Push registers and sanitize registers of values that a
176 +- * speculation attack might otherwise want to exploit. The
177 +- * lower registers are likely clobbered well before they
178 +- * could be put to use in a speculative execution gadget.
179 +- * Interleave XOR with PUSH for better uop scheduling:
180 +- */
181 + .if \save_ret
182 + pushq %rsi /* pt_regs->si */
183 + movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */
184 +@@ -114,34 +107,43 @@ For 32-bit we have the following conventions - kernel is built with
185 + pushq %rsi /* pt_regs->si */
186 + .endif
187 + pushq \rdx /* pt_regs->dx */
188 +- xorl %edx, %edx /* nospec dx */
189 + pushq %rcx /* pt_regs->cx */
190 +- xorl %ecx, %ecx /* nospec cx */
191 + pushq \rax /* pt_regs->ax */
192 + pushq %r8 /* pt_regs->r8 */
193 +- xorl %r8d, %r8d /* nospec r8 */
194 + pushq %r9 /* pt_regs->r9 */
195 +- xorl %r9d, %r9d /* nospec r9 */
196 + pushq %r10 /* pt_regs->r10 */
197 +- xorl %r10d, %r10d /* nospec r10 */
198 + pushq %r11 /* pt_regs->r11 */
199 +- xorl %r11d, %r11d /* nospec r11*/
200 + pushq %rbx /* pt_regs->rbx */
201 +- xorl %ebx, %ebx /* nospec rbx*/
202 + pushq %rbp /* pt_regs->rbp */
203 +- xorl %ebp, %ebp /* nospec rbp*/
204 + pushq %r12 /* pt_regs->r12 */
205 +- xorl %r12d, %r12d /* nospec r12*/
206 + pushq %r13 /* pt_regs->r13 */
207 +- xorl %r13d, %r13d /* nospec r13*/
208 + pushq %r14 /* pt_regs->r14 */
209 +- xorl %r14d, %r14d /* nospec r14*/
210 + pushq %r15 /* pt_regs->r15 */
211 +- xorl %r15d, %r15d /* nospec r15*/
212 + UNWIND_HINT_REGS
213 ++
214 + .if \save_ret
215 + pushq %rsi /* return address on top of stack */
216 + .endif
217 ++
218 ++ /*
219 ++ * Sanitize registers of values that a speculation attack might
220 ++ * otherwise want to exploit. The lower registers are likely clobbered
221 ++ * well before they could be put to use in a speculative execution
222 ++ * gadget.
223 ++ */
224 ++ xorl %edx, %edx /* nospec dx */
225 ++ xorl %ecx, %ecx /* nospec cx */
226 ++ xorl %r8d, %r8d /* nospec r8 */
227 ++ xorl %r9d, %r9d /* nospec r9 */
228 ++ xorl %r10d, %r10d /* nospec r10 */
229 ++ xorl %r11d, %r11d /* nospec r11 */
230 ++ xorl %ebx, %ebx /* nospec rbx */
231 ++ xorl %ebp, %ebp /* nospec rbp */
232 ++ xorl %r12d, %r12d /* nospec r12 */
233 ++ xorl %r13d, %r13d /* nospec r13 */
234 ++ xorl %r14d, %r14d /* nospec r14 */
235 ++ xorl %r15d, %r15d /* nospec r15 */
236 ++
237 + .endm
238 +
239 + .macro POP_REGS pop_rdi=1 skip_r11rcx=0
240 +diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
241 +index b7c3ea4cb19d..2ba3d53ac5b1 100644
242 +--- a/arch/x86/entry/entry_64.S
243 ++++ b/arch/x86/entry/entry_64.S
244 +@@ -249,7 +249,6 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
245 + */
246 + syscall_return_via_sysret:
247 + /* rcx and r11 are already restored (see code above) */
248 +- UNWIND_HINT_EMPTY
249 + POP_REGS pop_rdi=0 skip_r11rcx=1
250 +
251 + /*
252 +@@ -258,6 +257,7 @@ syscall_return_via_sysret:
253 + */
254 + movq %rsp, %rdi
255 + movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
256 ++ UNWIND_HINT_EMPTY
257 +
258 + pushq RSP-RDI(%rdi) /* RSP */
259 + pushq (%rdi) /* RDI */
260 +@@ -512,7 +512,7 @@ END(spurious_entries_start)
261 + * +----------------------------------------------------+
262 + */
263 + ENTRY(interrupt_entry)
264 +- UNWIND_HINT_FUNC
265 ++ UNWIND_HINT_IRET_REGS offset=16
266 + ASM_CLAC
267 + cld
268 +
269 +@@ -544,9 +544,9 @@ ENTRY(interrupt_entry)
270 + pushq 5*8(%rdi) /* regs->eflags */
271 + pushq 4*8(%rdi) /* regs->cs */
272 + pushq 3*8(%rdi) /* regs->ip */
273 ++ UNWIND_HINT_IRET_REGS
274 + pushq 2*8(%rdi) /* regs->orig_ax */
275 + pushq 8(%rdi) /* return address */
276 +- UNWIND_HINT_FUNC
277 +
278 + movq (%rdi), %rdi
279 + jmp 2f
280 +@@ -637,6 +637,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
281 + */
282 + movq %rsp, %rdi
283 + movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
284 ++ UNWIND_HINT_EMPTY
285 +
286 + /* Copy the IRET frame to the trampoline stack. */
287 + pushq 6*8(%rdi) /* SS */
288 +@@ -1739,7 +1740,7 @@ ENTRY(rewind_stack_do_exit)
289 +
290 + movq PER_CPU_VAR(cpu_current_top_of_stack), %rax
291 + leaq -PTREGS_SIZE(%rax), %rsp
292 +- UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE
293 ++ UNWIND_HINT_REGS
294 +
295 + call do_exit
296 + END(rewind_stack_do_exit)
297 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
298 +index 380cee9bc175..f5341edbfa16 100644
299 +--- a/arch/x86/include/asm/kvm_host.h
300 ++++ b/arch/x86/include/asm/kvm_host.h
301 +@@ -1608,8 +1608,8 @@ void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
302 + static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
303 + {
304 + /* We can only post Fixed and LowPrio IRQs */
305 +- return (irq->delivery_mode == dest_Fixed ||
306 +- irq->delivery_mode == dest_LowestPrio);
307 ++ return (irq->delivery_mode == APIC_DM_FIXED ||
308 ++ irq->delivery_mode == APIC_DM_LOWEST);
309 + }
310 +
311 + static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
312 +diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
313 +index 499578f7e6d7..70fc159ebe69 100644
314 +--- a/arch/x86/include/asm/unwind.h
315 ++++ b/arch/x86/include/asm/unwind.h
316 +@@ -19,7 +19,7 @@ struct unwind_state {
317 + #if defined(CONFIG_UNWINDER_ORC)
318 + bool signal, full_regs;
319 + unsigned long sp, bp, ip;
320 +- struct pt_regs *regs;
321 ++ struct pt_regs *regs, *prev_regs;
322 + #elif defined(CONFIG_UNWINDER_FRAME_POINTER)
323 + bool got_irq;
324 + unsigned long *bp, *orig_sp, ip;
325 +diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
326 +index 332ae6530fa8..fb37221a1532 100644
327 +--- a/arch/x86/kernel/unwind_orc.c
328 ++++ b/arch/x86/kernel/unwind_orc.c
329 +@@ -142,9 +142,6 @@ static struct orc_entry *orc_find(unsigned long ip)
330 + {
331 + static struct orc_entry *orc;
332 +
333 +- if (!orc_init)
334 +- return NULL;
335 +-
336 + if (ip == 0)
337 + return &null_orc_entry;
338 +
339 +@@ -378,9 +375,38 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr
340 + return true;
341 + }
342 +
343 ++/*
344 ++ * If state->regs is non-NULL, and points to a full pt_regs, just get the reg
345 ++ * value from state->regs.
346 ++ *
347 ++ * Otherwise, if state->regs just points to IRET regs, and the previous frame
348 ++ * had full regs, it's safe to get the value from the previous regs. This can
349 ++ * happen when early/late IRQ entry code gets interrupted by an NMI.
350 ++ */
351 ++static bool get_reg(struct unwind_state *state, unsigned int reg_off,
352 ++ unsigned long *val)
353 ++{
354 ++ unsigned int reg = reg_off/8;
355 ++
356 ++ if (!state->regs)
357 ++ return false;
358 ++
359 ++ if (state->full_regs) {
360 ++ *val = ((unsigned long *)state->regs)[reg];
361 ++ return true;
362 ++ }
363 ++
364 ++ if (state->prev_regs) {
365 ++ *val = ((unsigned long *)state->prev_regs)[reg];
366 ++ return true;
367 ++ }
368 ++
369 ++ return false;
370 ++}
371 ++
372 + bool unwind_next_frame(struct unwind_state *state)
373 + {
374 +- unsigned long ip_p, sp, orig_ip = state->ip, prev_sp = state->sp;
375 ++ unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp;
376 + enum stack_type prev_type = state->stack_info.type;
377 + struct orc_entry *orc;
378 + bool indirect = false;
379 +@@ -442,39 +468,35 @@ bool unwind_next_frame(struct unwind_state *state)
380 + break;
381 +
382 + case ORC_REG_R10:
383 +- if (!state->regs || !state->full_regs) {
384 ++ if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) {
385 + orc_warn("missing regs for base reg R10 at ip %pB\n",
386 + (void *)state->ip);
387 + goto err;
388 + }
389 +- sp = state->regs->r10;
390 + break;
391 +
392 + case ORC_REG_R13:
393 +- if (!state->regs || !state->full_regs) {
394 ++ if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) {
395 + orc_warn("missing regs for base reg R13 at ip %pB\n",
396 + (void *)state->ip);
397 + goto err;
398 + }
399 +- sp = state->regs->r13;
400 + break;
401 +
402 + case ORC_REG_DI:
403 +- if (!state->regs || !state->full_regs) {
404 ++ if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) {
405 + orc_warn("missing regs for base reg DI at ip %pB\n",
406 + (void *)state->ip);
407 + goto err;
408 + }
409 +- sp = state->regs->di;
410 + break;
411 +
412 + case ORC_REG_DX:
413 +- if (!state->regs || !state->full_regs) {
414 ++ if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) {
415 + orc_warn("missing regs for base reg DX at ip %pB\n",
416 + (void *)state->ip);
417 + goto err;
418 + }
419 +- sp = state->regs->dx;
420 + break;
421 +
422 + default:
423 +@@ -501,6 +523,7 @@ bool unwind_next_frame(struct unwind_state *state)
424 +
425 + state->sp = sp;
426 + state->regs = NULL;
427 ++ state->prev_regs = NULL;
428 + state->signal = false;
429 + break;
430 +
431 +@@ -512,6 +535,7 @@ bool unwind_next_frame(struct unwind_state *state)
432 + }
433 +
434 + state->regs = (struct pt_regs *)sp;
435 ++ state->prev_regs = NULL;
436 + state->full_regs = true;
437 + state->signal = true;
438 + break;
439 +@@ -523,6 +547,8 @@ bool unwind_next_frame(struct unwind_state *state)
440 + goto err;
441 + }
442 +
443 ++ if (state->full_regs)
444 ++ state->prev_regs = state->regs;
445 + state->regs = (void *)sp - IRET_FRAME_OFFSET;
446 + state->full_regs = false;
447 + state->signal = true;
448 +@@ -531,14 +557,14 @@ bool unwind_next_frame(struct unwind_state *state)
449 + default:
450 + orc_warn("unknown .orc_unwind entry type %d for ip %pB\n",
451 + orc->type, (void *)orig_ip);
452 +- break;
453 ++ goto err;
454 + }
455 +
456 + /* Find BP: */
457 + switch (orc->bp_reg) {
458 + case ORC_REG_UNDEFINED:
459 +- if (state->regs && state->full_regs)
460 +- state->bp = state->regs->bp;
461 ++ if (get_reg(state, offsetof(struct pt_regs, bp), &tmp))
462 ++ state->bp = tmp;
463 + break;
464 +
465 + case ORC_REG_PREV_SP:
466 +@@ -582,6 +608,9 @@ EXPORT_SYMBOL_GPL(unwind_next_frame);
467 + void __unwind_start(struct unwind_state *state, struct task_struct *task,
468 + struct pt_regs *regs, unsigned long *first_frame)
469 + {
470 ++ if (!orc_init)
471 ++ goto done;
472 ++
473 + memset(state, 0, sizeof(*state));
474 + state->task = task;
475 +
476 +@@ -648,7 +677,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
477 + /* Otherwise, skip ahead to the user-specified starting frame: */
478 + while (!unwind_done(state) &&
479 + (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
480 +- state->sp <= (unsigned long)first_frame))
481 ++ state->sp < (unsigned long)first_frame))
482 + unwind_next_frame(state);
483 +
484 + return;
485 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
486 +index 51ff6b342279..fda2126f9a97 100644
487 +--- a/arch/x86/kvm/svm.c
488 ++++ b/arch/x86/kvm/svm.c
489 +@@ -1861,7 +1861,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
490 + return NULL;
491 +
492 + /* Pin the user virtual address. */
493 +- npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
494 ++ npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
495 + if (npinned != npages) {
496 + pr_err("SEV: Failure locking %lu pages.\n", npages);
497 + goto err;
498 +diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
499 +index c7012f6c971c..ca4252f81bf8 100644
500 +--- a/arch/x86/kvm/vmx/vmenter.S
501 ++++ b/arch/x86/kvm/vmx/vmenter.S
502 +@@ -86,6 +86,9 @@ ENTRY(vmx_vmexit)
503 + /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
504 + FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
505 +
506 ++ /* Clear RFLAGS.CF and RFLAGS.ZF to preserve VM-Exit, i.e. !VM-Fail. */
507 ++ or $1, %_ASM_AX
508 ++
509 + pop %_ASM_AX
510 + .Lvmexit_skip_rsb:
511 + #endif
512 +diff --git a/block/blk-iocost.c b/block/blk-iocost.c
513 +index 2dc5dc54e257..d083f7704082 100644
514 +--- a/block/blk-iocost.c
515 ++++ b/block/blk-iocost.c
516 +@@ -469,7 +469,7 @@ struct ioc_gq {
517 + */
518 + atomic64_t vtime;
519 + atomic64_t done_vtime;
520 +- atomic64_t abs_vdebt;
521 ++ u64 abs_vdebt;
522 + u64 last_vtime;
523 +
524 + /*
525 +@@ -1145,7 +1145,7 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now)
526 + struct iocg_wake_ctx ctx = { .iocg = iocg };
527 + u64 margin_ns = (u64)(ioc->period_us *
528 + WAITQ_TIMER_MARGIN_PCT / 100) * NSEC_PER_USEC;
529 +- u64 abs_vdebt, vdebt, vshortage, expires, oexpires;
530 ++ u64 vdebt, vshortage, expires, oexpires;
531 + s64 vbudget;
532 + u32 hw_inuse;
533 +
534 +@@ -1155,18 +1155,15 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now)
535 + vbudget = now->vnow - atomic64_read(&iocg->vtime);
536 +
537 + /* pay off debt */
538 +- abs_vdebt = atomic64_read(&iocg->abs_vdebt);
539 +- vdebt = abs_cost_to_cost(abs_vdebt, hw_inuse);
540 ++ vdebt = abs_cost_to_cost(iocg->abs_vdebt, hw_inuse);
541 + if (vdebt && vbudget > 0) {
542 + u64 delta = min_t(u64, vbudget, vdebt);
543 + u64 abs_delta = min(cost_to_abs_cost(delta, hw_inuse),
544 +- abs_vdebt);
545 ++ iocg->abs_vdebt);
546 +
547 + atomic64_add(delta, &iocg->vtime);
548 + atomic64_add(delta, &iocg->done_vtime);
549 +- atomic64_sub(abs_delta, &iocg->abs_vdebt);
550 +- if (WARN_ON_ONCE(atomic64_read(&iocg->abs_vdebt) < 0))
551 +- atomic64_set(&iocg->abs_vdebt, 0);
552 ++ iocg->abs_vdebt -= abs_delta;
553 + }
554 +
555 + /*
556 +@@ -1222,12 +1219,18 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
557 + u64 expires, oexpires;
558 + u32 hw_inuse;
559 +
560 ++ lockdep_assert_held(&iocg->waitq.lock);
561 ++
562 + /* debt-adjust vtime */
563 + current_hweight(iocg, NULL, &hw_inuse);
564 +- vtime += abs_cost_to_cost(atomic64_read(&iocg->abs_vdebt), hw_inuse);
565 ++ vtime += abs_cost_to_cost(iocg->abs_vdebt, hw_inuse);
566 +
567 +- /* clear or maintain depending on the overage */
568 +- if (time_before_eq64(vtime, now->vnow)) {
569 ++ /*
570 ++ * Clear or maintain depending on the overage. Non-zero vdebt is what
571 ++ * guarantees that @iocg is online and future iocg_kick_delay() will
572 ++ * clear use_delay. Don't leave it on when there's no vdebt.
573 ++ */
574 ++ if (!iocg->abs_vdebt || time_before_eq64(vtime, now->vnow)) {
575 + blkcg_clear_delay(blkg);
576 + return false;
577 + }
578 +@@ -1261,9 +1264,12 @@ static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer)
579 + {
580 + struct ioc_gq *iocg = container_of(timer, struct ioc_gq, delay_timer);
581 + struct ioc_now now;
582 ++ unsigned long flags;
583 +
584 ++ spin_lock_irqsave(&iocg->waitq.lock, flags);
585 + ioc_now(iocg->ioc, &now);
586 + iocg_kick_delay(iocg, &now, 0);
587 ++ spin_unlock_irqrestore(&iocg->waitq.lock, flags);
588 +
589 + return HRTIMER_NORESTART;
590 + }
591 +@@ -1371,14 +1377,13 @@ static void ioc_timer_fn(struct timer_list *timer)
592 + * should have woken up in the last period and expire idle iocgs.
593 + */
594 + list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
595 +- if (!waitqueue_active(&iocg->waitq) &&
596 +- !atomic64_read(&iocg->abs_vdebt) && !iocg_is_idle(iocg))
597 ++ if (!waitqueue_active(&iocg->waitq) && iocg->abs_vdebt &&
598 ++ !iocg_is_idle(iocg))
599 + continue;
600 +
601 + spin_lock(&iocg->waitq.lock);
602 +
603 +- if (waitqueue_active(&iocg->waitq) ||
604 +- atomic64_read(&iocg->abs_vdebt)) {
605 ++ if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt) {
606 + /* might be oversleeping vtime / hweight changes, kick */
607 + iocg_kick_waitq(iocg, &now);
608 + iocg_kick_delay(iocg, &now, 0);
609 +@@ -1721,28 +1726,49 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
610 + * tests are racy but the races aren't systemic - we only miss once
611 + * in a while which is fine.
612 + */
613 +- if (!waitqueue_active(&iocg->waitq) &&
614 +- !atomic64_read(&iocg->abs_vdebt) &&
615 ++ if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
616 + time_before_eq64(vtime + cost, now.vnow)) {
617 + iocg_commit_bio(iocg, bio, cost);
618 + return;
619 + }
620 +
621 + /*
622 +- * We're over budget. If @bio has to be issued regardless,
623 +- * remember the abs_cost instead of advancing vtime.
624 +- * iocg_kick_waitq() will pay off the debt before waking more IOs.
625 ++ * We activated above but w/o any synchronization. Deactivation is
626 ++ * synchronized with waitq.lock and we won't get deactivated as long
627 ++ * as we're waiting or has debt, so we're good if we're activated
628 ++ * here. In the unlikely case that we aren't, just issue the IO.
629 ++ */
630 ++ spin_lock_irq(&iocg->waitq.lock);
631 ++
632 ++ if (unlikely(list_empty(&iocg->active_list))) {
633 ++ spin_unlock_irq(&iocg->waitq.lock);
634 ++ iocg_commit_bio(iocg, bio, cost);
635 ++ return;
636 ++ }
637 ++
638 ++ /*
639 ++ * We're over budget. If @bio has to be issued regardless, remember
640 ++ * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
641 ++ * off the debt before waking more IOs.
642 ++ *
643 + * This way, the debt is continuously paid off each period with the
644 +- * actual budget available to the cgroup. If we just wound vtime,
645 +- * we would incorrectly use the current hw_inuse for the entire
646 +- * amount which, for example, can lead to the cgroup staying
647 +- * blocked for a long time even with substantially raised hw_inuse.
648 ++ * actual budget available to the cgroup. If we just wound vtime, we
649 ++ * would incorrectly use the current hw_inuse for the entire amount
650 ++ * which, for example, can lead to the cgroup staying blocked for a
651 ++ * long time even with substantially raised hw_inuse.
652 ++ *
653 ++ * An iocg with vdebt should stay online so that the timer can keep
654 ++ * deducting its vdebt and [de]activate use_delay mechanism
655 ++ * accordingly. We don't want to race against the timer trying to
656 ++ * clear them and leave @iocg inactive w/ dangling use_delay heavily
657 ++ * penalizing the cgroup and its descendants.
658 + */
659 + if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
660 +- atomic64_add(abs_cost, &iocg->abs_vdebt);
661 ++ iocg->abs_vdebt += abs_cost;
662 + if (iocg_kick_delay(iocg, &now, cost))
663 + blkcg_schedule_throttle(rqos->q,
664 + (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
665 ++ spin_unlock_irq(&iocg->waitq.lock);
666 + return;
667 + }
668 +
669 +@@ -1759,20 +1785,6 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
670 + * All waiters are on iocg->waitq and the wait states are
671 + * synchronized using waitq.lock.
672 + */
673 +- spin_lock_irq(&iocg->waitq.lock);
674 +-
675 +- /*
676 +- * We activated above but w/o any synchronization. Deactivation is
677 +- * synchronized with waitq.lock and we won't get deactivated as
678 +- * long as we're waiting, so we're good if we're activated here.
679 +- * In the unlikely case that we are deactivated, just issue the IO.
680 +- */
681 +- if (unlikely(list_empty(&iocg->active_list))) {
682 +- spin_unlock_irq(&iocg->waitq.lock);
683 +- iocg_commit_bio(iocg, bio, cost);
684 +- return;
685 +- }
686 +-
687 + init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
688 + wait.wait.private = current;
689 + wait.bio = bio;
690 +@@ -1804,6 +1816,7 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
691 + struct ioc_now now;
692 + u32 hw_inuse;
693 + u64 abs_cost, cost;
694 ++ unsigned long flags;
695 +
696 + /* bypass if disabled or for root cgroup */
697 + if (!ioc->enabled || !iocg->level)
698 +@@ -1823,15 +1836,28 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
699 + iocg->cursor = bio_end;
700 +
701 + /*
702 +- * Charge if there's enough vtime budget and the existing request
703 +- * has cost assigned. Otherwise, account it as debt. See debt
704 +- * handling in ioc_rqos_throttle() for details.
705 ++ * Charge if there's enough vtime budget and the existing request has
706 ++ * cost assigned.
707 + */
708 + if (rq->bio && rq->bio->bi_iocost_cost &&
709 +- time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow))
710 ++ time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
711 + iocg_commit_bio(iocg, bio, cost);
712 +- else
713 +- atomic64_add(abs_cost, &iocg->abs_vdebt);
714 ++ return;
715 ++ }
716 ++
717 ++ /*
718 ++ * Otherwise, account it as debt if @iocg is online, which it should
719 ++ * be for the vast majority of cases. See debt handling in
720 ++ * ioc_rqos_throttle() for details.
721 ++ */
722 ++ spin_lock_irqsave(&iocg->waitq.lock, flags);
723 ++ if (likely(!list_empty(&iocg->active_list))) {
724 ++ iocg->abs_vdebt += abs_cost;
725 ++ iocg_kick_delay(iocg, &now, cost);
726 ++ } else {
727 ++ iocg_commit_bio(iocg, bio, cost);
728 ++ }
729 ++ spin_unlock_irqrestore(&iocg->waitq.lock, flags);
730 + }
731 +
732 + static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
733 +@@ -2001,7 +2027,6 @@ static void ioc_pd_init(struct blkg_policy_data *pd)
734 + iocg->ioc = ioc;
735 + atomic64_set(&iocg->vtime, now.vnow);
736 + atomic64_set(&iocg->done_vtime, now.vnow);
737 +- atomic64_set(&iocg->abs_vdebt, 0);
738 + atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
739 + INIT_LIST_HEAD(&iocg->active_list);
740 + iocg->hweight_active = HWEIGHT_WHOLE;
741 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
742 +index 630e8342d162..5e1dce424154 100644
743 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
744 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
745 +@@ -3070,15 +3070,12 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
746 + }
747 + }
748 +
749 +- amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
750 +- amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
751 +-
752 +- amdgpu_amdkfd_suspend(adev);
753 +-
754 + amdgpu_ras_suspend(adev);
755 +
756 + r = amdgpu_device_ip_suspend_phase1(adev);
757 +
758 ++ amdgpu_amdkfd_suspend(adev);
759 ++
760 + /* evict vram memory */
761 + amdgpu_bo_evict_vram(adev);
762 +
763 +diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
764 +index f156f245fdec..9e95f6fd5406 100644
765 +--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
766 ++++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
767 +@@ -824,6 +824,7 @@ static const struct of_device_id ingenic_drm_of_match[] = {
768 + { .compatible = "ingenic,jz4725b-lcd", .data = &jz4725b_soc_info },
769 + { /* sentinel */ },
770 + };
771 ++MODULE_DEVICE_TABLE(of, ingenic_drm_of_match);
772 +
773 + static struct platform_driver ingenic_drm_driver = {
774 + .driver = {
775 +diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
776 +index c7bc9db5b192..17a638f15082 100644
777 +--- a/drivers/hid/usbhid/hid-core.c
778 ++++ b/drivers/hid/usbhid/hid-core.c
779 +@@ -682,16 +682,21 @@ static int usbhid_open(struct hid_device *hid)
780 + struct usbhid_device *usbhid = hid->driver_data;
781 + int res;
782 +
783 ++ mutex_lock(&usbhid->mutex);
784 ++
785 + set_bit(HID_OPENED, &usbhid->iofl);
786 +
787 +- if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
788 +- return 0;
789 ++ if (hid->quirks & HID_QUIRK_ALWAYS_POLL) {
790 ++ res = 0;
791 ++ goto Done;
792 ++ }
793 +
794 + res = usb_autopm_get_interface(usbhid->intf);
795 + /* the device must be awake to reliably request remote wakeup */
796 + if (res < 0) {
797 + clear_bit(HID_OPENED, &usbhid->iofl);
798 +- return -EIO;
799 ++ res = -EIO;
800 ++ goto Done;
801 + }
802 +
803 + usbhid->intf->needs_remote_wakeup = 1;
804 +@@ -725,6 +730,9 @@ static int usbhid_open(struct hid_device *hid)
805 + msleep(50);
806 +
807 + clear_bit(HID_RESUME_RUNNING, &usbhid->iofl);
808 ++
809 ++ Done:
810 ++ mutex_unlock(&usbhid->mutex);
811 + return res;
812 + }
813 +
814 +@@ -732,6 +740,8 @@ static void usbhid_close(struct hid_device *hid)
815 + {
816 + struct usbhid_device *usbhid = hid->driver_data;
817 +
818 ++ mutex_lock(&usbhid->mutex);
819 ++
820 + /*
821 + * Make sure we don't restart data acquisition due to
822 + * a resumption we no longer care about by avoiding racing
823 +@@ -743,12 +753,13 @@ static void usbhid_close(struct hid_device *hid)
824 + clear_bit(HID_IN_POLLING, &usbhid->iofl);
825 + spin_unlock_irq(&usbhid->lock);
826 +
827 +- if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
828 +- return;
829 ++ if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) {
830 ++ hid_cancel_delayed_stuff(usbhid);
831 ++ usb_kill_urb(usbhid->urbin);
832 ++ usbhid->intf->needs_remote_wakeup = 0;
833 ++ }
834 +
835 +- hid_cancel_delayed_stuff(usbhid);
836 +- usb_kill_urb(usbhid->urbin);
837 +- usbhid->intf->needs_remote_wakeup = 0;
838 ++ mutex_unlock(&usbhid->mutex);
839 + }
840 +
841 + /*
842 +@@ -1057,6 +1068,8 @@ static int usbhid_start(struct hid_device *hid)
843 + unsigned int n, insize = 0;
844 + int ret;
845 +
846 ++ mutex_lock(&usbhid->mutex);
847 ++
848 + clear_bit(HID_DISCONNECTED, &usbhid->iofl);
849 +
850 + usbhid->bufsize = HID_MIN_BUFFER_SIZE;
851 +@@ -1177,6 +1190,8 @@ static int usbhid_start(struct hid_device *hid)
852 + usbhid_set_leds(hid);
853 + device_set_wakeup_enable(&dev->dev, 1);
854 + }
855 ++
856 ++ mutex_unlock(&usbhid->mutex);
857 + return 0;
858 +
859 + fail:
860 +@@ -1187,6 +1202,7 @@ fail:
861 + usbhid->urbout = NULL;
862 + usbhid->urbctrl = NULL;
863 + hid_free_buffers(dev, hid);
864 ++ mutex_unlock(&usbhid->mutex);
865 + return ret;
866 + }
867 +
868 +@@ -1202,6 +1218,8 @@ static void usbhid_stop(struct hid_device *hid)
869 + usbhid->intf->needs_remote_wakeup = 0;
870 + }
871 +
872 ++ mutex_lock(&usbhid->mutex);
873 ++
874 + clear_bit(HID_STARTED, &usbhid->iofl);
875 + spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */
876 + set_bit(HID_DISCONNECTED, &usbhid->iofl);
877 +@@ -1222,6 +1240,8 @@ static void usbhid_stop(struct hid_device *hid)
878 + usbhid->urbout = NULL;
879 +
880 + hid_free_buffers(hid_to_usb_dev(hid), hid);
881 ++
882 ++ mutex_unlock(&usbhid->mutex);
883 + }
884 +
885 + static int usbhid_power(struct hid_device *hid, int lvl)
886 +@@ -1382,6 +1402,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
887 + INIT_WORK(&usbhid->reset_work, hid_reset);
888 + timer_setup(&usbhid->io_retry, hid_retry_timeout, 0);
889 + spin_lock_init(&usbhid->lock);
890 ++ mutex_init(&usbhid->mutex);
891 +
892 + ret = hid_add_device(hid);
893 + if (ret) {
894 +diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
895 +index 8620408bd7af..75fe85d3d27a 100644
896 +--- a/drivers/hid/usbhid/usbhid.h
897 ++++ b/drivers/hid/usbhid/usbhid.h
898 +@@ -80,6 +80,7 @@ struct usbhid_device {
899 + dma_addr_t outbuf_dma; /* Output buffer dma */
900 + unsigned long last_out; /* record of last output for timeouts */
901 +
902 ++ struct mutex mutex; /* start/stop/open/close */
903 + spinlock_t lock; /* fifo spinlock */
904 + unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */
905 + struct timer_list io_retry; /* Retry timer */
906 +diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
907 +index 5ded94b7bf68..cd71e7133944 100644
908 +--- a/drivers/hid/wacom_sys.c
909 ++++ b/drivers/hid/wacom_sys.c
910 +@@ -319,9 +319,11 @@ static void wacom_feature_mapping(struct hid_device *hdev,
911 + data[0] = field->report->id;
912 + ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
913 + data, n, WAC_CMD_RETRIES);
914 +- if (ret == n) {
915 ++ if (ret == n && features->type == HID_GENERIC) {
916 + ret = hid_report_raw_event(hdev,
917 + HID_FEATURE_REPORT, data, n, 0);
918 ++ } else if (ret == 2 && features->type != HID_GENERIC) {
919 ++ features->touch_max = data[1];
920 + } else {
921 + features->touch_max = 16;
922 + hid_warn(hdev, "wacom_feature_mapping: "
923 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
924 +index d99a9d407671..1c96809b51c9 100644
925 +--- a/drivers/hid/wacom_wac.c
926 ++++ b/drivers/hid/wacom_wac.c
927 +@@ -1427,11 +1427,13 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
928 + {
929 + struct input_dev *pad_input = wacom->pad_input;
930 + unsigned char *data = wacom->data;
931 ++ int nbuttons = wacom->features.numbered_buttons;
932 +
933 +- int buttons = data[282] | ((data[281] & 0x40) << 2);
934 ++ int expresskeys = data[282];
935 ++ int center = (data[281] & 0x40) >> 6;
936 + int ring = data[285] & 0x7F;
937 + bool ringstatus = data[285] & 0x80;
938 +- bool prox = buttons || ringstatus;
939 ++ bool prox = expresskeys || center || ringstatus;
940 +
941 + /* Fix touchring data: userspace expects 0 at left and increasing clockwise */
942 + ring = 71 - ring;
943 +@@ -1439,7 +1441,8 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
944 + if (ring > 71)
945 + ring -= 72;
946 +
947 +- wacom_report_numbered_buttons(pad_input, 9, buttons);
948 ++ wacom_report_numbered_buttons(pad_input, nbuttons,
949 ++ expresskeys | (center << (nbuttons - 1)));
950 +
951 + input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0);
952 +
953 +@@ -2637,9 +2640,25 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
954 + case HID_DG_TIPSWITCH:
955 + hid_data->last_slot_field = equivalent_usage;
956 + break;
957 ++ case HID_DG_CONTACTCOUNT:
958 ++ hid_data->cc_report = report->id;
959 ++ hid_data->cc_index = i;
960 ++ hid_data->cc_value_index = j;
961 ++ break;
962 + }
963 + }
964 + }
965 ++
966 ++ if (hid_data->cc_report != 0 &&
967 ++ hid_data->cc_index >= 0) {
968 ++ struct hid_field *field = report->field[hid_data->cc_index];
969 ++ int value = field->value[hid_data->cc_value_index];
970 ++ if (value)
971 ++ hid_data->num_expected = value;
972 ++ }
973 ++ else {
974 ++ hid_data->num_expected = wacom_wac->features.touch_max;
975 ++ }
976 + }
977 +
978 + static void wacom_wac_finger_report(struct hid_device *hdev,
979 +@@ -2649,7 +2668,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev,
980 + struct wacom_wac *wacom_wac = &wacom->wacom_wac;
981 + struct input_dev *input = wacom_wac->touch_input;
982 + unsigned touch_max = wacom_wac->features.touch_max;
983 +- struct hid_data *hid_data = &wacom_wac->hid_data;
984 +
985 + /* If more packets of data are expected, give us a chance to
986 + * process them rather than immediately syncing a partial
987 +@@ -2663,7 +2681,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev,
988 +
989 + input_sync(input);
990 + wacom_wac->hid_data.num_received = 0;
991 +- hid_data->num_expected = 0;
992 +
993 + /* keep touch state for pen event */
994 + wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(wacom_wac);
995 +@@ -2738,73 +2755,12 @@ static void wacom_report_events(struct hid_device *hdev,
996 + }
997 + }
998 +
999 +-static void wacom_set_num_expected(struct hid_device *hdev,
1000 +- struct hid_report *report,
1001 +- int collection_index,
1002 +- struct hid_field *field,
1003 +- int field_index)
1004 +-{
1005 +- struct wacom *wacom = hid_get_drvdata(hdev);
1006 +- struct wacom_wac *wacom_wac = &wacom->wacom_wac;
1007 +- struct hid_data *hid_data = &wacom_wac->hid_data;
1008 +- unsigned int original_collection_level =
1009 +- hdev->collection[collection_index].level;
1010 +- bool end_collection = false;
1011 +- int i;
1012 +-
1013 +- if (hid_data->num_expected)
1014 +- return;
1015 +-
1016 +- // find the contact count value for this segment
1017 +- for (i = field_index; i < report->maxfield && !end_collection; i++) {
1018 +- struct hid_field *field = report->field[i];
1019 +- unsigned int field_level =
1020 +- hdev->collection[field->usage[0].collection_index].level;
1021 +- unsigned int j;
1022 +-
1023 +- if (field_level != original_collection_level)
1024 +- continue;
1025 +-
1026 +- for (j = 0; j < field->maxusage; j++) {
1027 +- struct hid_usage *usage = &field->usage[j];
1028 +-
1029 +- if (usage->collection_index != collection_index) {
1030 +- end_collection = true;
1031 +- break;
1032 +- }
1033 +- if (wacom_equivalent_usage(usage->hid) == HID_DG_CONTACTCOUNT) {
1034 +- hid_data->cc_report = report->id;
1035 +- hid_data->cc_index = i;
1036 +- hid_data->cc_value_index = j;
1037 +-
1038 +- if (hid_data->cc_report != 0 &&
1039 +- hid_data->cc_index >= 0) {
1040 +-
1041 +- struct hid_field *field =
1042 +- report->field[hid_data->cc_index];
1043 +- int value =
1044 +- field->value[hid_data->cc_value_index];
1045 +-
1046 +- if (value)
1047 +- hid_data->num_expected = value;
1048 +- }
1049 +- }
1050 +- }
1051 +- }
1052 +-
1053 +- if (hid_data->cc_report == 0 || hid_data->cc_index < 0)
1054 +- hid_data->num_expected = wacom_wac->features.touch_max;
1055 +-}
1056 +-
1057 + static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *report,
1058 + int collection_index, struct hid_field *field,
1059 + int field_index)
1060 + {
1061 + struct wacom *wacom = hid_get_drvdata(hdev);
1062 +
1063 +- if (WACOM_FINGER_FIELD(field))
1064 +- wacom_set_num_expected(hdev, report, collection_index, field,
1065 +- field_index);
1066 + wacom_report_events(hdev, report, collection_index, field_index);
1067 +
1068 + /*
1069 +diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
1070 +index 6c340a4f4fd2..60e659a24f90 100644
1071 +--- a/drivers/iommu/virtio-iommu.c
1072 ++++ b/drivers/iommu/virtio-iommu.c
1073 +@@ -454,7 +454,7 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
1074 + if (!region)
1075 + return -ENOMEM;
1076 +
1077 +- list_add(&vdev->resv_regions, &region->list);
1078 ++ list_add(&region->list, &vdev->resv_regions);
1079 + return 0;
1080 + }
1081 +
1082 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1083 +index 6862594b49ab..edf8452a2574 100644
1084 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1085 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1086 +@@ -6649,7 +6649,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
1087 + int rc;
1088 +
1089 + if (!mem_size)
1090 +- return 0;
1091 ++ return -EINVAL;
1092 +
1093 + ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
1094 + if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
1095 +@@ -9755,6 +9755,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
1096 + netdev_features_t features)
1097 + {
1098 + struct bnxt *bp = netdev_priv(dev);
1099 ++ netdev_features_t vlan_features;
1100 +
1101 + if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
1102 + features &= ~NETIF_F_NTUPLE;
1103 +@@ -9771,12 +9772,14 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
1104 + /* Both CTAG and STAG VLAN accelaration on the RX side have to be
1105 + * turned on or off together.
1106 + */
1107 +- if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
1108 +- (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
1109 ++ vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX |
1110 ++ NETIF_F_HW_VLAN_STAG_RX);
1111 ++ if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX |
1112 ++ NETIF_F_HW_VLAN_STAG_RX)) {
1113 + if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
1114 + features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
1115 + NETIF_F_HW_VLAN_STAG_RX);
1116 +- else
1117 ++ else if (vlan_features)
1118 + features |= NETIF_F_HW_VLAN_CTAG_RX |
1119 + NETIF_F_HW_VLAN_STAG_RX;
1120 + }
1121 +@@ -12066,12 +12069,15 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
1122 + }
1123 + }
1124 +
1125 +- if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
1126 +- dev_close(netdev);
1127 ++ if (result != PCI_ERS_RESULT_RECOVERED) {
1128 ++ if (netif_running(netdev))
1129 ++ dev_close(netdev);
1130 ++ pci_disable_device(pdev);
1131 ++ }
1132 +
1133 + rtnl_unlock();
1134 +
1135 +- return PCI_ERS_RESULT_RECOVERED;
1136 ++ return result;
1137 + }
1138 +
1139 + /**
1140 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
1141 +index cda7ba31095a..a61a5873ab0a 100644
1142 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
1143 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
1144 +@@ -1058,7 +1058,6 @@ struct bnxt_vf_info {
1145 + #define BNXT_VF_LINK_FORCED 0x4
1146 + #define BNXT_VF_LINK_UP 0x8
1147 + #define BNXT_VF_TRUST 0x10
1148 +- u32 func_flags; /* func cfg flags */
1149 + u32 min_tx_rate;
1150 + u32 max_tx_rate;
1151 + void *hwrm_cmd_req_addr;
1152 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
1153 +index 689c47ab2155..ba94edec9fb8 100644
1154 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
1155 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
1156 +@@ -39,7 +39,7 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
1157 + #define NVM_OFF_DIS_GRE_VER_CHECK 171
1158 + #define NVM_OFF_ENABLE_SRIOV 401
1159 +
1160 +-#define BNXT_MSIX_VEC_MAX 1280
1161 ++#define BNXT_MSIX_VEC_MAX 512
1162 + #define BNXT_MSIX_VEC_MIN_MAX 128
1163 +
1164 + enum bnxt_nvm_dir_type {
1165 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
1166 +index f6f3454d6059..1046b22220a3 100644
1167 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
1168 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
1169 +@@ -85,11 +85,10 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
1170 + if (old_setting == setting)
1171 + return 0;
1172 +
1173 +- func_flags = vf->func_flags;
1174 + if (setting)
1175 +- func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
1176 ++ func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
1177 + else
1178 +- func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
1179 ++ func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
1180 + /*TODO: if the driver supports VLAN filter on guest VLAN,
1181 + * the spoof check should also include vlan anti-spoofing
1182 + */
1183 +@@ -98,7 +97,6 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
1184 + req.flags = cpu_to_le32(func_flags);
1185 + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1186 + if (!rc) {
1187 +- vf->func_flags = func_flags;
1188 + if (setting)
1189 + vf->flags |= BNXT_VF_SPOOFCHK;
1190 + else
1191 +@@ -230,7 +228,6 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
1192 + memcpy(vf->mac_addr, mac, ETH_ALEN);
1193 + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
1194 + req.fid = cpu_to_le16(vf->fw_fid);
1195 +- req.flags = cpu_to_le32(vf->func_flags);
1196 + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
1197 + memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
1198 + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1199 +@@ -268,7 +265,6 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
1200 +
1201 + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
1202 + req.fid = cpu_to_le16(vf->fw_fid);
1203 +- req.flags = cpu_to_le32(vf->func_flags);
1204 + req.dflt_vlan = cpu_to_le16(vlan_tag);
1205 + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
1206 + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1207 +@@ -307,7 +303,6 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
1208 + return 0;
1209 + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
1210 + req.fid = cpu_to_le16(vf->fw_fid);
1211 +- req.flags = cpu_to_le32(vf->func_flags);
1212 + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
1213 + req.max_bw = cpu_to_le32(max_tx_rate);
1214 + req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
1215 +@@ -479,7 +474,6 @@ static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
1216 + vf = &bp->pf.vf[vf_id];
1217 + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
1218 + req.fid = cpu_to_le16(vf->fw_fid);
1219 +- req.flags = cpu_to_le32(vf->func_flags);
1220 +
1221 + if (is_valid_ether_addr(vf->mac_addr)) {
1222 + req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
1223 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
1224 +index 234c13ebbc41..1ec19d9fab00 100644
1225 +--- a/drivers/net/ethernet/cadence/macb_main.c
1226 ++++ b/drivers/net/ethernet/cadence/macb_main.c
1227 +@@ -334,8 +334,10 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1228 + int status;
1229 +
1230 + status = pm_runtime_get_sync(&bp->pdev->dev);
1231 +- if (status < 0)
1232 ++ if (status < 0) {
1233 ++ pm_runtime_put_noidle(&bp->pdev->dev);
1234 + goto mdio_pm_exit;
1235 ++ }
1236 +
1237 + status = macb_mdio_wait_for_idle(bp);
1238 + if (status < 0)
1239 +@@ -367,8 +369,10 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1240 + int status;
1241 +
1242 + status = pm_runtime_get_sync(&bp->pdev->dev);
1243 +- if (status < 0)
1244 ++ if (status < 0) {
1245 ++ pm_runtime_put_noidle(&bp->pdev->dev);
1246 + goto mdio_pm_exit;
1247 ++ }
1248 +
1249 + status = macb_mdio_wait_for_idle(bp);
1250 + if (status < 0)
1251 +@@ -3691,8 +3695,10 @@ static int at91ether_open(struct net_device *dev)
1252 + int ret;
1253 +
1254 + ret = pm_runtime_get_sync(&lp->pdev->dev);
1255 +- if (ret < 0)
1256 ++ if (ret < 0) {
1257 ++ pm_runtime_put_noidle(&lp->pdev->dev);
1258 + return ret;
1259 ++ }
1260 +
1261 + /* Clear internal statistics */
1262 + ctl = macb_readl(lp, NCR);
1263 +@@ -4048,15 +4054,9 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
1264 +
1265 + static int fu540_c000_init(struct platform_device *pdev)
1266 + {
1267 +- struct resource *res;
1268 +-
1269 +- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1270 +- if (!res)
1271 +- return -ENODEV;
1272 +-
1273 +- mgmt->reg = ioremap(res->start, resource_size(res));
1274 +- if (!mgmt->reg)
1275 +- return -ENOMEM;
1276 ++ mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
1277 ++ if (IS_ERR(mgmt->reg))
1278 ++ return PTR_ERR(mgmt->reg);
1279 +
1280 + return macb_init(pdev);
1281 + }
1282 +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
1283 +index 35478cba2aa5..4344a59c823f 100644
1284 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
1285 ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
1286 +@@ -1422,6 +1422,9 @@ int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
1287 + struct mvpp2_ethtool_fs *efs;
1288 + int ret;
1289 +
1290 ++ if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
1291 ++ return -EINVAL;
1292 ++
1293 + efs = port->rfs_rules[info->fs.location];
1294 + if (!efs)
1295 + return -EINVAL;
1296 +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1297 +index ef44c6979a31..373b8c832850 100644
1298 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1299 ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1300 +@@ -4319,6 +4319,8 @@ static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
1301 +
1302 + if (!mvpp22_rss_is_supported())
1303 + return -EOPNOTSUPP;
1304 ++ if (rss_context >= MVPP22_N_RSS_TABLES)
1305 ++ return -EINVAL;
1306 +
1307 + if (hfunc)
1308 + *hfunc = ETH_RSS_HASH_CRC32;
1309 +diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
1310 +index d44ac666e730..87c2e8de6102 100644
1311 +--- a/drivers/net/ethernet/mellanox/mlx4/main.c
1312 ++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
1313 +@@ -2550,6 +2550,7 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
1314 +
1315 + if (!err || err == -ENOSPC) {
1316 + priv->def_counter[port] = idx;
1317 ++ err = 0;
1318 + } else if (err == -ENOENT) {
1319 + err = 0;
1320 + continue;
1321 +@@ -2600,7 +2601,8 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage)
1322 + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
1323 + if (!err)
1324 + *idx = get_param_l(&out_param);
1325 +-
1326 ++ if (WARN_ON(err == -ENOSPC))
1327 ++ err = -EINVAL;
1328 + return err;
1329 + }
1330 + return __mlx4_counter_alloc(dev, idx);
1331 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1332 +index ea934cd02448..08048a2d7259 100644
1333 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1334 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1335 +@@ -888,7 +888,6 @@ static void cmd_work_handler(struct work_struct *work)
1336 + }
1337 +
1338 + cmd->ent_arr[ent->idx] = ent;
1339 +- set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
1340 + lay = get_inst(cmd, ent->idx);
1341 + ent->lay = lay;
1342 + memset(lay, 0, sizeof(*lay));
1343 +@@ -910,6 +909,7 @@ static void cmd_work_handler(struct work_struct *work)
1344 +
1345 + if (ent->callback)
1346 + schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
1347 ++ set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
1348 +
1349 + /* Skip sending command to fw if internal error */
1350 + if (pci_channel_offline(dev->pdev) ||
1351 +@@ -922,6 +922,10 @@ static void cmd_work_handler(struct work_struct *work)
1352 + MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
1353 +
1354 + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
1355 ++ /* no doorbell, no need to keep the entry */
1356 ++ free_ent(cmd, ent->idx);
1357 ++ if (ent->callback)
1358 ++ free_cmd(ent);
1359 + return;
1360 + }
1361 +
1362 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
1363 +index 095ec7b1399d..7c77378accf0 100644
1364 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
1365 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
1366 +@@ -689,6 +689,12 @@ static void dr_cq_event(struct mlx5_core_cq *mcq,
1367 + pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn);
1368 + }
1369 +
1370 ++static void dr_cq_complete(struct mlx5_core_cq *mcq,
1371 ++ struct mlx5_eqe *eqe)
1372 ++{
1373 ++ pr_err("CQ completion CQ: #%u\n", mcq->cqn);
1374 ++}
1375 ++
1376 + static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
1377 + struct mlx5_uars_page *uar,
1378 + size_t ncqe)
1379 +@@ -750,6 +756,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
1380 + mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);
1381 +
1382 + cq->mcq.event = dr_cq_event;
1383 ++ cq->mcq.comp = dr_cq_complete;
1384 +
1385 + err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
1386 + kvfree(in);
1387 +@@ -761,7 +768,12 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
1388 + cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
1389 + cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
1390 + *cq->mcq.set_ci_db = 0;
1391 +- *cq->mcq.arm_db = 0;
1392 ++
1393 ++ /* set no-zero value, in order to avoid the HW to run db-recovery on
1394 ++ * CQ that used in polling mode.
1395 ++ */
1396 ++ *cq->mcq.arm_db = cpu_to_be32(2 << 28);
1397 ++
1398 + cq->mcq.vector = 0;
1399 + cq->mcq.irqn = irqn;
1400 + cq->mcq.uar = uar;
1401 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
1402 +index e993159e8e4c..295b27112d36 100644
1403 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
1404 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
1405 +@@ -986,8 +986,9 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
1406 + unsigned int priority,
1407 + struct mlxsw_afk_element_usage *elusage)
1408 + {
1409 ++ struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2;
1410 + struct mlxsw_sp_acl_tcam_vregion *vregion;
1411 +- struct mlxsw_sp_acl_tcam_vchunk *vchunk;
1412 ++ struct list_head *pos;
1413 + int err;
1414 +
1415 + if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
1416 +@@ -1025,7 +1026,14 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
1417 + }
1418 +
1419 + mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
1420 +- list_add_tail(&vchunk->list, &vregion->vchunk_list);
1421 ++
1422 ++ /* Position the vchunk inside the list according to priority */
1423 ++ list_for_each(pos, &vregion->vchunk_list) {
1424 ++ vchunk2 = list_entry(pos, typeof(*vchunk2), list);
1425 ++ if (vchunk2->priority > priority)
1426 ++ break;
1427 ++ }
1428 ++ list_add_tail(&vchunk->list, pos);
1429 + mutex_unlock(&vregion->lock);
1430 +
1431 + return vchunk;
1432 +diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
1433 +index 9183b3e85d21..354efffac0f9 100644
1434 +--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
1435 ++++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
1436 +@@ -283,6 +283,7 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn,
1437 + if (!nfp_nsp_has_hwinfo_lookup(nsp)) {
1438 + nfp_warn(pf->cpp, "NSP doesn't support PF MAC generation\n");
1439 + eth_hw_addr_random(nn->dp.netdev);
1440 ++ nfp_nsp_close(nsp);
1441 + return;
1442 + }
1443 +
1444 +diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
1445 +index 12466a72cefc..aab0cf57c654 100644
1446 +--- a/drivers/net/ethernet/toshiba/tc35815.c
1447 ++++ b/drivers/net/ethernet/toshiba/tc35815.c
1448 +@@ -644,7 +644,7 @@ static int tc_mii_probe(struct net_device *dev)
1449 + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
1450 + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
1451 + }
1452 +- linkmode_and(phydev->supported, phydev->supported, mask);
1453 ++ linkmode_andnot(phydev->supported, phydev->supported, mask);
1454 + linkmode_copy(phydev->advertising, phydev->supported);
1455 +
1456 + lp->link = 0;
1457 +diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
1458 +index a0abc729f0ca..4c86a73db475 100644
1459 +--- a/drivers/net/macsec.c
1460 ++++ b/drivers/net/macsec.c
1461 +@@ -1309,7 +1309,8 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
1462 + struct crypto_aead *tfm;
1463 + int ret;
1464 +
1465 +- tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
1466 ++ /* Pick a sync gcm(aes) cipher to ensure order is preserved. */
1467 ++ tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
1468 +
1469 + if (IS_ERR(tfm))
1470 + return tfm;
1471 +diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
1472 +index 8f241b57fcf6..1c75b2627ca8 100644
1473 +--- a/drivers/net/phy/dp83640.c
1474 ++++ b/drivers/net/phy/dp83640.c
1475 +@@ -1119,7 +1119,7 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
1476 + goto out;
1477 + }
1478 + dp83640_clock_init(clock, bus);
1479 +- list_add_tail(&phyter_clocks, &clock->list);
1480 ++ list_add_tail(&clock->list, &phyter_clocks);
1481 + out:
1482 + mutex_unlock(&phyter_clocks_lock);
1483 +
1484 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1485 +index 6c738a271257..4bb8552a00d3 100644
1486 +--- a/drivers/net/usb/qmi_wwan.c
1487 ++++ b/drivers/net/usb/qmi_wwan.c
1488 +@@ -1359,6 +1359,7 @@ static const struct usb_device_id products[] = {
1489 + {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
1490 + {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
1491 + {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
1492 ++ {QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */
1493 + {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
1494 + {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
1495 + {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
1496 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
1497 +index 31b7dcd791c2..f0e0af3aa714 100644
1498 +--- a/drivers/nvme/host/core.c
1499 ++++ b/drivers/nvme/host/core.c
1500 +@@ -1071,8 +1071,17 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
1501 +
1502 + status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
1503 + NVME_IDENTIFY_DATA_SIZE);
1504 +- if (status)
1505 ++ if (status) {
1506 ++ dev_warn(ctrl->device,
1507 ++ "Identify Descriptors failed (%d)\n", status);
1508 ++ /*
1509 ++ * Don't treat an error as fatal, as we potentially already
1510 ++ * have a NGUID or EUI-64.
1511 ++ */
1512 ++ if (status > 0 && !(status & NVME_SC_DNR))
1513 ++ status = 0;
1514 + goto free_data;
1515 ++ }
1516 +
1517 + for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
1518 + struct nvme_ns_id_desc *cur = data + pos;
1519 +@@ -1730,26 +1739,15 @@ static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
1520 + static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
1521 + struct nvme_id_ns *id, struct nvme_ns_ids *ids)
1522 + {
1523 +- int ret = 0;
1524 +-
1525 + memset(ids, 0, sizeof(*ids));
1526 +
1527 + if (ctrl->vs >= NVME_VS(1, 1, 0))
1528 + memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
1529 + if (ctrl->vs >= NVME_VS(1, 2, 0))
1530 + memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
1531 +- if (ctrl->vs >= NVME_VS(1, 3, 0)) {
1532 +- /* Don't treat error as fatal we potentially
1533 +- * already have a NGUID or EUI-64
1534 +- */
1535 +- ret = nvme_identify_ns_descs(ctrl, nsid, ids);
1536 +- if (ret)
1537 +- dev_warn(ctrl->device,
1538 +- "Identify Descriptors failed (%d)\n", ret);
1539 +- if (ret > 0)
1540 +- ret = 0;
1541 +- }
1542 +- return ret;
1543 ++ if (ctrl->vs >= NVME_VS(1, 3, 0))
1544 ++ return nvme_identify_ns_descs(ctrl, nsid, ids);
1545 ++ return 0;
1546 + }
1547 +
1548 + static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
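
The nvme hunk moves the "soft failure" policy into nvme_identify_ns_descs() itself: a positive status without the DNR (do-not-retry) bit is downgraded to success, so the caller keeps whatever NGUID/EUI-64 it already collected. A compact sketch of that filter; NVME_SC_DNR is 0x4000 in the kernel headers, and the rest of the harness is simplified.

#include <stdio.h>

#define NVME_SC_DNR 0x4000

static int filter_identify_status(int status)
{
	/* positive status without DNR: soft failure, report success */
	if (status > 0 && !(status & NVME_SC_DNR))
		return 0;
	return status;	/* success, negative errno, or DNR: pass through */
}

int main(void)
{
	printf("%#x\n", filter_identify_status(0x0002));	/* 0: retriable, ignored */
	printf("%#x\n", filter_identify_status(0x4002));	/* 0x4002: DNR, stays fatal */
	return 0;
}
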
1549 +diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c
1550 +index 13179f063a61..6f9c0d18d9ce 100644
1551 +--- a/drivers/staging/gasket/gasket_core.c
1552 ++++ b/drivers/staging/gasket/gasket_core.c
1553 +@@ -926,6 +926,10 @@ do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma,
1554 + gasket_get_bar_index(gasket_dev,
1555 + (vma->vm_pgoff << PAGE_SHIFT) +
1556 + driver_desc->legacy_mmap_address_offset);
1557 ++
1558 ++ if (bar_index < 0)
1559 ++ return DO_MAP_REGION_INVALID;
1560 ++
1561 + phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset;
1562 + while (mapped_bytes < map_length) {
1563 + /*
1564 +diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
1565 +index fe098cf14e6a..3cb9aacfe0b2 100644
1566 +--- a/drivers/tty/serial/xilinx_uartps.c
1567 ++++ b/drivers/tty/serial/xilinx_uartps.c
1568 +@@ -1445,6 +1445,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
1569 + cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS;
1570 + #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
1571 + cdns_uart_uart_driver.cons = &cdns_uart_console;
1572 ++ cdns_uart_console.index = id;
1573 + #endif
1574 +
1575 + rc = uart_register_driver(&cdns_uart_uart_driver);
1576 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
1577 +index 8b3ecef50394..fd0361d72738 100644
1578 +--- a/drivers/tty/vt/vt.c
1579 ++++ b/drivers/tty/vt/vt.c
1580 +@@ -365,9 +365,14 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
1581 + return uniscr;
1582 + }
1583 +
1584 ++static void vc_uniscr_free(struct uni_screen *uniscr)
1585 ++{
1586 ++ vfree(uniscr);
1587 ++}
1588 ++
1589 + static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr)
1590 + {
1591 +- vfree(vc->vc_uni_screen);
1592 ++ vc_uniscr_free(vc->vc_uni_screen);
1593 + vc->vc_uni_screen = new_uniscr;
1594 + }
1595 +
1596 +@@ -1230,7 +1235,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
1597 + err = resize_screen(vc, new_cols, new_rows, user);
1598 + if (err) {
1599 + kfree(newscreen);
1600 +- kfree(new_uniscr);
1601 ++ vc_uniscr_free(new_uniscr);
1602 + return err;
1603 + }
1604 +
1605 +diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
1606 +index af648ba6544d..46105457e1ca 100644
1607 +--- a/drivers/usb/chipidea/ci_hdrc_msm.c
1608 ++++ b/drivers/usb/chipidea/ci_hdrc_msm.c
1609 +@@ -114,7 +114,7 @@ static int ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
1610 + hw_write_id_reg(ci, HS_PHY_GENCONFIG_2,
1611 + HS_PHY_ULPI_TX_PKT_EN_CLR_FIX, 0);
1612 +
1613 +- if (!IS_ERR(ci->platdata->vbus_extcon.edev)) {
1614 ++ if (!IS_ERR(ci->platdata->vbus_extcon.edev) || ci->role_switch) {
1615 + hw_write_id_reg(ci, HS_PHY_GENCONFIG_2,
1616 + HS_PHY_SESS_VLD_CTRL_EN,
1617 + HS_PHY_SESS_VLD_CTRL_EN);
1618 +diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
1619 +index 633550ec3025..f29c3a936a08 100644
1620 +--- a/drivers/usb/serial/garmin_gps.c
1621 ++++ b/drivers/usb/serial/garmin_gps.c
1622 +@@ -1138,8 +1138,8 @@ static void garmin_read_process(struct garmin_data *garmin_data_p,
1623 + send it directly to the tty port */
1624 + if (garmin_data_p->flags & FLAGS_QUEUING) {
1625 + pkt_add(garmin_data_p, data, data_length);
1626 +- } else if (bulk_data ||
1627 +- getLayerId(data) == GARMIN_LAYERID_APPL) {
1628 ++ } else if (bulk_data || (data_length >= sizeof(u32) &&
1629 ++ getLayerId(data) == GARMIN_LAYERID_APPL)) {
1630 +
1631 + spin_lock_irqsave(&garmin_data_p->lock, flags);
1632 + garmin_data_p->flags |= APP_RESP_SEEN;
1633 +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
1634 +index 613f91add03d..ce0401d3137f 100644
1635 +--- a/drivers/usb/serial/qcserial.c
1636 ++++ b/drivers/usb/serial/qcserial.c
1637 +@@ -173,6 +173,7 @@ static const struct usb_device_id id_table[] = {
1638 + {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
1639 + {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
1640 + {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
1641 ++ {DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */
1642 + {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */
1643 + {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */
1644 + {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */
1645 +diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
1646 +index 1b23741036ee..37157ed9a881 100644
1647 +--- a/drivers/usb/storage/unusual_uas.h
1648 ++++ b/drivers/usb/storage/unusual_uas.h
1649 +@@ -28,6 +28,13 @@
1650 + * and don't forget to CC: the USB development list <linux-usb@×××××××××××.org>
1651 + */
1652 +
1653 ++/* Reported-by: Julian Groß <julian.g@××××××.de> */
1654 ++UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
1655 ++ "LaCie",
1656 ++ "2Big Quadra USB3",
1657 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1658 ++ US_FL_NO_REPORT_OPCODES),
1659 ++
1660 + /*
1661 + * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
1662 + * commands in UAS mode. Observed with the 1.28 firmware; are there others?
1663 +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
1664 +index 8c1f04c3a684..b79fe6549df6 100644
1665 +--- a/fs/ceph/mds_client.c
1666 ++++ b/fs/ceph/mds_client.c
1667 +@@ -3072,8 +3072,7 @@ static void handle_session(struct ceph_mds_session *session,
1668 + void *end = p + msg->front.iov_len;
1669 + struct ceph_mds_session_head *h;
1670 + u32 op;
1671 +- u64 seq;
1672 +- unsigned long features = 0;
1673 ++ u64 seq, features = 0;
1674 + int wake = 0;
1675 + bool blacklisted = false;
1676 +
1677 +@@ -3092,9 +3091,8 @@ static void handle_session(struct ceph_mds_session *session,
1678 + goto bad;
1679 + /* version >= 3, feature bits */
1680 + ceph_decode_32_safe(&p, end, len, bad);
1681 +- ceph_decode_need(&p, end, len, bad);
1682 +- memcpy(&features, p, min_t(size_t, len, sizeof(features)));
1683 +- p += len;
1684 ++ ceph_decode_64_safe(&p, end, features, bad);
1685 ++ p += len - sizeof(features);
1686 + }
1687 +
1688 + mutex_lock(&mdsc->mutex);
1689 +diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
1690 +index de56dee60540..19507e2fdb57 100644
1691 +--- a/fs/ceph/quota.c
1692 ++++ b/fs/ceph/quota.c
1693 +@@ -159,8 +159,8 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc,
1694 + }
1695 +
1696 + if (IS_ERR(in)) {
1697 +- pr_warn("Can't lookup inode %llx (err: %ld)\n",
1698 +- realm->ino, PTR_ERR(in));
1699 ++ dout("Can't lookup inode %llx (err: %ld)\n",
1700 ++ realm->ino, PTR_ERR(in));
1701 + qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */
1702 + } else {
1703 + qri->timeout = 0;
1704 +diff --git a/fs/coredump.c b/fs/coredump.c
1705 +index d25bad2ed061..5c0375e7440f 100644
1706 +--- a/fs/coredump.c
1707 ++++ b/fs/coredump.c
1708 +@@ -788,6 +788,14 @@ void do_coredump(const kernel_siginfo_t *siginfo)
1709 + if (displaced)
1710 + put_files_struct(displaced);
1711 + if (!dump_interrupted()) {
1712 ++ /*
1713 ++ * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would
1714 ++ * have this set to NULL.
1715 ++ */
1716 ++ if (!cprm.file) {
1717 ++ pr_info("Core dump to |%s disabled\n", cn.corename);
1718 ++ goto close_fail;
1719 ++ }
1720 + file_start_write(cprm.file);
1721 + core_dumped = binfmt->core_dump(&cprm);
1722 + file_end_write(cprm.file);
1723 +diff --git a/fs/eventpoll.c b/fs/eventpoll.c
1724 +index 33cff49769cc..6307c1d883e0 100644
1725 +--- a/fs/eventpoll.c
1726 ++++ b/fs/eventpoll.c
1727 +@@ -1176,6 +1176,10 @@ static inline bool chain_epi_lockless(struct epitem *epi)
1728 + {
1729 + struct eventpoll *ep = epi->ep;
1730 +
1731 ++ /* Fast preliminary check */
1732 ++ if (epi->next != EP_UNACTIVE_PTR)
1733 ++ return false;
1734 ++
1735 + /* Check that the same epi has not been just chained from another CPU */
1736 + if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
1737 + return false;
1738 +@@ -1242,16 +1246,12 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
1739 + * chained in ep->ovflist and requeued later on.
1740 + */
1741 + if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
1742 +- if (epi->next == EP_UNACTIVE_PTR &&
1743 +- chain_epi_lockless(epi))
1744 ++ if (chain_epi_lockless(epi))
1745 ++ ep_pm_stay_awake_rcu(epi);
1746 ++ } else if (!ep_is_linked(epi)) {
1747 ++ /* In the usual case, add event to ready list. */
1748 ++ if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
1749 + ep_pm_stay_awake_rcu(epi);
1750 +- goto out_unlock;
1751 +- }
1752 +-
1753 +- /* If this file is already in the ready list we exit soon */
1754 +- if (!ep_is_linked(epi) &&
1755 +- list_add_tail_lockless(&epi->rdllink, &ep->rdllist)) {
1756 +- ep_pm_stay_awake_rcu(epi);
1757 + }
1758 +
1759 + /*
1760 +@@ -1827,7 +1827,6 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
1761 + {
1762 + int res = 0, eavail, timed_out = 0;
1763 + u64 slack = 0;
1764 +- bool waiter = false;
1765 + wait_queue_entry_t wait;
1766 + ktime_t expires, *to = NULL;
1767 +
1768 +@@ -1872,21 +1871,23 @@ fetch_events:
1769 + */
1770 + ep_reset_busy_poll_napi_id(ep);
1771 +
1772 +- /*
1773 +- * We don't have any available event to return to the caller. We need
1774 +- * to sleep here, and we will be woken by ep_poll_callback() when events
1775 +- * become available.
1776 +- */
1777 +- if (!waiter) {
1778 +- waiter = true;
1779 +- init_waitqueue_entry(&wait, current);
1780 +-
1781 ++ do {
1782 ++ /*
1783 ++ * Internally init_wait() uses autoremove_wake_function(),
1784 ++		 * so the wait entry is removed from the wait queue on each
1785 ++		 * wakeup. Why is this important? With several waiters,
1786 ++		 * each new wakeup hits the next waiter, giving it the
1787 ++		 * chance to harvest new events; otherwise a wakeup can be
1788 ++		 * lost. This is also good performance-wise, because on the
1789 ++		 * normal wakeup path there is no need to call __remove_wait_queue()
1790 ++		 * explicitly, so ep->lock is not taken, which would otherwise
1791 ++		 * stall event delivery.
1792 ++ */
1793 ++ init_wait(&wait);
1794 + write_lock_irq(&ep->lock);
1795 + __add_wait_queue_exclusive(&ep->wq, &wait);
1796 + write_unlock_irq(&ep->lock);
1797 +- }
1798 +
1799 +- for (;;) {
1800 + /*
1801 + * We don't want to sleep if the ep_poll_callback() sends us
1802 + * a wakeup in between. That's why we set the task state
1803 +@@ -1916,10 +1917,20 @@ fetch_events:
1804 + timed_out = 1;
1805 + break;
1806 + }
1807 +- }
1808 ++
1809 ++ /* We were woken up, thus go and try to harvest some events */
1810 ++ eavail = 1;
1811 ++
1812 ++ } while (0);
1813 +
1814 + __set_current_state(TASK_RUNNING);
1815 +
1816 ++ if (!list_empty_careful(&wait.entry)) {
1817 ++ write_lock_irq(&ep->lock);
1818 ++ __remove_wait_queue(&ep->wq, &wait);
1819 ++ write_unlock_irq(&ep->lock);
1820 ++ }
1821 ++
1822 + send_events:
1823 + /*
1824 + * Try to transfer events to user space. In case we get 0 events and
1825 +@@ -1930,12 +1941,6 @@ send_events:
1826 + !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
1827 + goto fetch_events;
1828 +
1829 +- if (waiter) {
1830 +- write_lock_irq(&ep->lock);
1831 +- __remove_wait_queue(&ep->wq, &wait);
1832 +- write_unlock_irq(&ep->lock);
1833 +- }
1834 +-
1835 + return res;
1836 + }
1837 +
1838 +diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
1839 +index 5778d1347b35..f5d30573f4a9 100644
1840 +--- a/fs/notify/fanotify/fanotify.c
1841 ++++ b/fs/notify/fanotify/fanotify.c
1842 +@@ -26,7 +26,7 @@ static bool should_merge(struct fsnotify_event *old_fsn,
1843 + old = FANOTIFY_E(old_fsn);
1844 + new = FANOTIFY_E(new_fsn);
1845 +
1846 +- if (old_fsn->inode != new_fsn->inode || old->pid != new->pid ||
1847 ++ if (old_fsn->objectid != new_fsn->objectid || old->pid != new->pid ||
1848 + old->fh_type != new->fh_type || old->fh_len != new->fh_len)
1849 + return false;
1850 +
1851 +@@ -314,7 +314,12 @@ struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
1852 + if (!event)
1853 + goto out;
1854 + init: __maybe_unused
1855 +- fsnotify_init_event(&event->fse, inode);
1856 ++ /*
1857 ++ * Use the victim inode instead of the watching inode as the id for
1858 ++	 * the event queue, so that an event reported on the parent is merged with
1859 ++	 * an event reported on the child when both directory and child watches exist.
1860 ++ */
1861 ++ fsnotify_init_event(&event->fse, (unsigned long)id);
1862 + event->mask = mask;
1863 + if (FAN_GROUP_FLAG(group, FAN_REPORT_TID))
1864 + event->pid = get_pid(task_pid(current));
1865 +diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
1866 +index d510223d302c..589dee962993 100644
1867 +--- a/fs/notify/inotify/inotify_fsnotify.c
1868 ++++ b/fs/notify/inotify/inotify_fsnotify.c
1869 +@@ -39,7 +39,7 @@ static bool event_compare(struct fsnotify_event *old_fsn,
1870 + if (old->mask & FS_IN_IGNORED)
1871 + return false;
1872 + if ((old->mask == new->mask) &&
1873 +- (old_fsn->inode == new_fsn->inode) &&
1874 ++ (old_fsn->objectid == new_fsn->objectid) &&
1875 + (old->name_len == new->name_len) &&
1876 + (!old->name_len || !strcmp(old->name, new->name)))
1877 + return true;
1878 +@@ -118,7 +118,7 @@ int inotify_handle_event(struct fsnotify_group *group,
1879 + mask &= ~IN_ISDIR;
1880 +
1881 + fsn_event = &event->fse;
1882 +- fsnotify_init_event(fsn_event, inode);
1883 ++ fsnotify_init_event(fsn_event, (unsigned long)inode);
1884 + event->mask = mask;
1885 + event->wd = i_mark->wd;
1886 + event->sync_cookie = cookie;
1887 +diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
1888 +index 107537a543fd..81ffc8629fc4 100644
1889 +--- a/fs/notify/inotify/inotify_user.c
1890 ++++ b/fs/notify/inotify/inotify_user.c
1891 +@@ -635,7 +635,7 @@ static struct fsnotify_group *inotify_new_group(unsigned int max_events)
1892 + return ERR_PTR(-ENOMEM);
1893 + }
1894 + group->overflow_event = &oevent->fse;
1895 +- fsnotify_init_event(group->overflow_event, NULL);
1896 ++ fsnotify_init_event(group->overflow_event, 0);
1897 + oevent->mask = FS_Q_OVERFLOW;
1898 + oevent->wd = -1;
1899 + oevent->sync_cookie = 0;
1900 +diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
1901 +index 4fc87dee005a..2849bdbb3acb 100644
1902 +--- a/include/linux/backing-dev-defs.h
1903 ++++ b/include/linux/backing-dev-defs.h
1904 +@@ -220,6 +220,7 @@ struct backing_dev_info {
1905 + wait_queue_head_t wb_waitq;
1906 +
1907 + struct device *dev;
1908 ++ char dev_name[64];
1909 + struct device *owner;
1910 +
1911 + struct timer_list laptop_mode_wb_timer;
1912 +diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
1913 +index f88197c1ffc2..c9ad5c3b7b4b 100644
1914 +--- a/include/linux/backing-dev.h
1915 ++++ b/include/linux/backing-dev.h
1916 +@@ -505,13 +505,6 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi)
1917 + (1 << WB_async_congested));
1918 + }
1919 +
1920 +-extern const char *bdi_unknown_name;
1921 +-
1922 +-static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
1923 +-{
1924 +- if (!bdi || !bdi->dev)
1925 +- return bdi_unknown_name;
1926 +- return dev_name(bdi->dev);
1927 +-}
1928 ++const char *bdi_dev_name(struct backing_dev_info *bdi);
1929 +
1930 + #endif /* _LINUX_BACKING_DEV_H */
1931 +diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
1932 +index 1915bdba2fad..64cfb5446f4d 100644
1933 +--- a/include/linux/fsnotify_backend.h
1934 ++++ b/include/linux/fsnotify_backend.h
1935 +@@ -133,8 +133,7 @@ struct fsnotify_ops {
1936 + */
1937 + struct fsnotify_event {
1938 + struct list_head list;
1939 +- /* inode may ONLY be dereferenced during handle_event(). */
1940 +- struct inode *inode; /* either the inode the event happened to or its parent */
1941 ++ unsigned long objectid; /* identifier for queue merges */
1942 + };
1943 +
1944 + /*
1945 +@@ -500,10 +499,10 @@ extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
1946 + extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info);
1947 +
1948 + static inline void fsnotify_init_event(struct fsnotify_event *event,
1949 +- struct inode *inode)
1950 ++ unsigned long objectid)
1951 + {
1952 + INIT_LIST_HEAD(&event->list);
1953 +- event->inode = inode;
1954 ++ event->objectid = objectid;
1955 + }
1956 +
1957 + #else
1958 +diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
1959 +index 0d1fe9297ac6..6f6ade63b04c 100644
1960 +--- a/include/linux/virtio_net.h
1961 ++++ b/include/linux/virtio_net.h
1962 +@@ -3,6 +3,8 @@
1963 + #define _LINUX_VIRTIO_NET_H
1964 +
1965 + #include <linux/if_vlan.h>
1966 ++#include <uapi/linux/tcp.h>
1967 ++#include <uapi/linux/udp.h>
1968 + #include <uapi/linux/virtio_net.h>
1969 +
1970 + static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
1971 +@@ -28,17 +30,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
1972 + bool little_endian)
1973 + {
1974 + unsigned int gso_type = 0;
1975 ++ unsigned int thlen = 0;
1976 ++ unsigned int ip_proto;
1977 +
1978 + if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1979 + switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1980 + case VIRTIO_NET_HDR_GSO_TCPV4:
1981 + gso_type = SKB_GSO_TCPV4;
1982 ++ ip_proto = IPPROTO_TCP;
1983 ++ thlen = sizeof(struct tcphdr);
1984 + break;
1985 + case VIRTIO_NET_HDR_GSO_TCPV6:
1986 + gso_type = SKB_GSO_TCPV6;
1987 ++ ip_proto = IPPROTO_TCP;
1988 ++ thlen = sizeof(struct tcphdr);
1989 + break;
1990 + case VIRTIO_NET_HDR_GSO_UDP:
1991 + gso_type = SKB_GSO_UDP;
1992 ++ ip_proto = IPPROTO_UDP;
1993 ++ thlen = sizeof(struct udphdr);
1994 + break;
1995 + default:
1996 + return -EINVAL;
1997 +@@ -57,16 +67,22 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
1998 +
1999 + if (!skb_partial_csum_set(skb, start, off))
2000 + return -EINVAL;
2001 ++
2002 ++ if (skb_transport_offset(skb) + thlen > skb_headlen(skb))
2003 ++ return -EINVAL;
2004 + } else {
2005 + /* gso packets without NEEDS_CSUM do not set transport_offset.
2006 + * probe and drop if does not match one of the above types.
2007 + */
2008 + if (gso_type && skb->network_header) {
2009 ++ struct flow_keys_basic keys;
2010 ++
2011 + if (!skb->protocol)
2012 + virtio_net_hdr_set_proto(skb, hdr);
2013 + retry:
2014 +- skb_probe_transport_header(skb);
2015 +- if (!skb_transport_header_was_set(skb)) {
2016 ++ if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
2017 ++ NULL, 0, 0, 0,
2018 ++ 0)) {
2019 + /* UFO does not specify ipv4 or 6: try both */
2020 + if (gso_type & SKB_GSO_UDP &&
2021 + skb->protocol == htons(ETH_P_IP)) {
2022 +@@ -75,6 +91,12 @@ retry:
2023 + }
2024 + return -EINVAL;
2025 + }
2026 ++
2027 ++ if (keys.control.thoff + thlen > skb_headlen(skb) ||
2028 ++ keys.basic.ip_proto != ip_proto)
2029 ++ return -EINVAL;
2030 ++
2031 ++ skb_set_transport_header(skb, keys.control.thoff);
2032 + }
2033 + }
2034 +
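
The virtio_net changes reduce to one rule: before any GSO metadata is trusted, the claimed transport-header offset plus the minimum L4 header must fit inside the bytes actually present (skb_headlen()). A hedged userspace sketch of that bound; struct pkt and the sample sizes are invented for illustration.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pkt {
	size_t headlen;		/* bytes available in the linear buffer */
	size_t transport_off;	/* claimed offset of the L4 header */
};

static bool gso_header_fits(const struct pkt *p, size_t l4_hdr_len)
{
	/* reject when the offset plus the header would run past the data we hold */
	return p->transport_off + l4_hdr_len <= p->headlen;
}

int main(void)
{
	struct pkt ok  = { .headlen = 66, .transport_off = 34 };
	struct pkt bad = { .headlen = 40, .transport_off = 34 };
	size_t tcp_min = 20;	/* sizeof(struct tcphdr) without options */

	printf("ok:  %d\n", gso_header_fits(&ok, tcp_min));	/* 1 */
	printf("bad: %d\n", gso_header_fits(&bad, tcp_min));	/* 0 */
	return 0;
}
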
2035 +diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
2036 +index c8e2bebd8d93..0f0d1efe06dd 100644
2037 +--- a/include/net/inet_ecn.h
2038 ++++ b/include/net/inet_ecn.h
2039 +@@ -99,6 +99,20 @@ static inline int IP_ECN_set_ce(struct iphdr *iph)
2040 + return 1;
2041 + }
2042 +
2043 ++static inline int IP_ECN_set_ect1(struct iphdr *iph)
2044 ++{
2045 ++ u32 check = (__force u32)iph->check;
2046 ++
2047 ++ if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0)
2048 ++ return 0;
2049 ++
2050 ++ check += (__force u16)htons(0x100);
2051 ++
2052 ++ iph->check = (__force __sum16)(check + (check>=0xFFFF));
2053 ++ iph->tos ^= INET_ECN_MASK;
2054 ++ return 1;
2055 ++}
2056 ++
2057 + static inline void IP_ECN_clear(struct iphdr *iph)
2058 + {
2059 + iph->tos &= ~INET_ECN_MASK;
2060 +@@ -134,6 +148,22 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
2061 + return 1;
2062 + }
2063 +
2064 ++static inline int IP6_ECN_set_ect1(struct sk_buff *skb, struct ipv6hdr *iph)
2065 ++{
2066 ++ __be32 from, to;
2067 ++
2068 ++ if ((ipv6_get_dsfield(iph) & INET_ECN_MASK) != INET_ECN_ECT_0)
2069 ++ return 0;
2070 ++
2071 ++ from = *(__be32 *)iph;
2072 ++ to = from ^ htonl(INET_ECN_MASK << 20);
2073 ++ *(__be32 *)iph = to;
2074 ++ if (skb->ip_summed == CHECKSUM_COMPLETE)
2075 ++ skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
2076 ++ (__force __wsum)to);
2077 ++ return 1;
2078 ++}
2079 ++
2080 + static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner)
2081 + {
2082 + dscp &= ~INET_ECN_MASK;
2083 +@@ -159,6 +189,25 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
2084 + return 0;
2085 + }
2086 +
2087 ++static inline int INET_ECN_set_ect1(struct sk_buff *skb)
2088 ++{
2089 ++ switch (skb->protocol) {
2090 ++ case cpu_to_be16(ETH_P_IP):
2091 ++ if (skb_network_header(skb) + sizeof(struct iphdr) <=
2092 ++ skb_tail_pointer(skb))
2093 ++ return IP_ECN_set_ect1(ip_hdr(skb));
2094 ++ break;
2095 ++
2096 ++ case cpu_to_be16(ETH_P_IPV6):
2097 ++ if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
2098 ++ skb_tail_pointer(skb))
2099 ++ return IP6_ECN_set_ect1(skb, ipv6_hdr(skb));
2100 ++ break;
2101 ++ }
2102 ++
2103 ++ return 0;
2104 ++}
2105 ++
2106 + /*
2107 + * RFC 6040 4.2
2108 + * To decapsulate the inner header at the tunnel egress, a compliant
2109 +@@ -208,8 +257,12 @@ static inline int INET_ECN_decapsulate(struct sk_buff *skb,
2110 + int rc;
2111 +
2112 + rc = __INET_ECN_decapsulate(outer, inner, &set_ce);
2113 +- if (!rc && set_ce)
2114 +- INET_ECN_set_ce(skb);
2115 ++ if (!rc) {
2116 ++ if (set_ce)
2117 ++ INET_ECN_set_ce(skb);
2118 ++ else if ((outer & INET_ECN_MASK) == INET_ECN_ECT_1)
2119 ++ INET_ECN_set_ect1(skb);
2120 ++ }
2121 +
2122 + return rc;
2123 + }
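
The inet_ecn.h additions complete the RFC 6040 decapsulation behaviour: CE on the outer header is still copied to an ECT inner header, and ECT(1) on the outer header is now also copied to an inner header still carrying ECT(0). The decision alone can be modelled in a few lines of plain C (drops, checksum updates and header parsing are deliberately left out):

#include <stdio.h>

enum ecn { NOT_ECT = 0, ECT_1 = 1, ECT_0 = 2, CE = 3 };

static enum ecn decapsulated_inner(enum ecn outer, enum ecn inner)
{
	if (outer == CE && (inner == ECT_0 || inner == ECT_1))
		return CE;		/* congestion mark propagates inward */
	if (outer == ECT_1 && inner == ECT_0)
		return ECT_1;		/* the newly propagated case */
	return inner;			/* otherwise the inner header is kept */
}

int main(void)
{
	printf("%d\n", decapsulated_inner(CE, ECT_0));		/* 3 */
	printf("%d\n", decapsulated_inner(ECT_1, ECT_0));	/* 1 */
	printf("%d\n", decapsulated_inner(ECT_0, ECT_0));	/* 2 */
	return 0;
}
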
2124 +diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
2125 +index 4b5656c71abc..bd0f1595bdc7 100644
2126 +--- a/include/net/ip6_fib.h
2127 ++++ b/include/net/ip6_fib.h
2128 +@@ -177,6 +177,7 @@ struct fib6_info {
2129 + struct rt6_info {
2130 + struct dst_entry dst;
2131 + struct fib6_info __rcu *from;
2132 ++ int sernum;
2133 +
2134 + struct rt6key rt6i_dst;
2135 + struct rt6key rt6i_src;
2136 +@@ -260,6 +261,9 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
2137 + struct fib6_info *from;
2138 + u32 cookie = 0;
2139 +
2140 ++ if (rt->sernum)
2141 ++ return rt->sernum;
2142 ++
2143 + rcu_read_lock();
2144 +
2145 + from = rcu_dereference(rt->from);
2146 +diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
2147 +index c7e15a213ef2..0fca98a3d2d3 100644
2148 +--- a/include/net/net_namespace.h
2149 ++++ b/include/net/net_namespace.h
2150 +@@ -428,6 +428,13 @@ static inline int rt_genid_ipv4(struct net *net)
2151 + return atomic_read(&net->ipv4.rt_genid);
2152 + }
2153 +
2154 ++#if IS_ENABLED(CONFIG_IPV6)
2155 ++static inline int rt_genid_ipv6(const struct net *net)
2156 ++{
2157 ++ return atomic_read(&net->ipv6.fib6_sernum);
2158 ++}
2159 ++#endif
2160 ++
2161 + static inline void rt_genid_bump_ipv4(struct net *net)
2162 + {
2163 + atomic_inc(&net->ipv4.rt_genid);
2164 +diff --git a/ipc/mqueue.c b/ipc/mqueue.c
2165 +index 3d920ff15c80..2ea0c08188e6 100644
2166 +--- a/ipc/mqueue.c
2167 ++++ b/ipc/mqueue.c
2168 +@@ -82,6 +82,7 @@ struct mqueue_inode_info {
2169 +
2170 + struct sigevent notify;
2171 + struct pid *notify_owner;
2172 ++ u32 notify_self_exec_id;
2173 + struct user_namespace *notify_user_ns;
2174 + struct user_struct *user; /* user who created, for accounting */
2175 + struct sock *notify_sock;
2176 +@@ -709,28 +710,44 @@ static void __do_notify(struct mqueue_inode_info *info)
2177 + * synchronously. */
2178 + if (info->notify_owner &&
2179 + info->attr.mq_curmsgs == 1) {
2180 +- struct kernel_siginfo sig_i;
2181 + switch (info->notify.sigev_notify) {
2182 + case SIGEV_NONE:
2183 + break;
2184 +- case SIGEV_SIGNAL:
2185 +- /* sends signal */
2186 ++ case SIGEV_SIGNAL: {
2187 ++ struct kernel_siginfo sig_i;
2188 ++ struct task_struct *task;
2189 ++
2190 ++ /* do_mq_notify() accepts sigev_signo == 0, why?? */
2191 ++ if (!info->notify.sigev_signo)
2192 ++ break;
2193 +
2194 + clear_siginfo(&sig_i);
2195 + sig_i.si_signo = info->notify.sigev_signo;
2196 + sig_i.si_errno = 0;
2197 + sig_i.si_code = SI_MESGQ;
2198 + sig_i.si_value = info->notify.sigev_value;
2199 +- /* map current pid/uid into info->owner's namespaces */
2200 + rcu_read_lock();
2201 ++ /* map current pid/uid into info->owner's namespaces */
2202 + sig_i.si_pid = task_tgid_nr_ns(current,
2203 + ns_of_pid(info->notify_owner));
2204 +- sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
2205 ++ sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
2206 ++ current_uid());
2207 ++ /*
2208 ++			 * We can't use kill_pid_info(): this signal should
2209 ++			 * bypass check_kill_permission(). It comes from the kernel,
2210 ++			 * but si_fromuser() can't know this.
2211 ++ * We do check the self_exec_id, to avoid sending
2212 ++ * signals to programs that don't expect them.
2213 ++ */
2214 ++ task = pid_task(info->notify_owner, PIDTYPE_TGID);
2215 ++ if (task && task->self_exec_id ==
2216 ++ info->notify_self_exec_id) {
2217 ++ do_send_sig_info(info->notify.sigev_signo,
2218 ++ &sig_i, task, PIDTYPE_TGID);
2219 ++ }
2220 + rcu_read_unlock();
2221 +-
2222 +- kill_pid_info(info->notify.sigev_signo,
2223 +- &sig_i, info->notify_owner);
2224 + break;
2225 ++ }
2226 + case SIGEV_THREAD:
2227 + set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
2228 + netlink_sendskb(info->notify_sock, info->notify_cookie);
2229 +@@ -1315,6 +1332,7 @@ retry:
2230 + info->notify.sigev_signo = notification->sigev_signo;
2231 + info->notify.sigev_value = notification->sigev_value;
2232 + info->notify.sigev_notify = SIGEV_SIGNAL;
2233 ++ info->notify_self_exec_id = current->self_exec_id;
2234 + break;
2235 + }
2236 +
2237 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2238 +index c6ccaf6c62f7..721947b9962d 100644
2239 +--- a/kernel/trace/trace.c
2240 ++++ b/kernel/trace/trace.c
2241 +@@ -8318,6 +8318,19 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
2242 + */
2243 + allocate_snapshot = false;
2244 + #endif
2245 ++
2246 ++ /*
2247 ++ * Because of some magic with the way alloc_percpu() works on
2248 ++ * x86_64, we need to synchronize the pgd of all the tables,
2249 ++	 * otherwise trace events that fire in x86_64 page fault
2250 ++	 * handlers can fault again when they touch alloc_percpu()'d
2251 ++	 * memory whose mapping has not yet been synced into the current
2252 ++	 * page tables. We also need to audit all other alloc_percpu()
2253 ++	 * and vmalloc() calls in tracing, because something might get
2254 ++	 * triggered within a page fault trace event!
2255 ++ */
2256 ++ vmalloc_sync_mappings();
2257 ++
2258 + return 0;
2259 + }
2260 +
2261 +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
2262 +index 2f0f7fcee73e..fba4b48451f6 100644
2263 +--- a/kernel/trace/trace_kprobe.c
2264 ++++ b/kernel/trace/trace_kprobe.c
2265 +@@ -454,7 +454,7 @@ static bool __within_notrace_func(unsigned long addr)
2266 +
2267 + static bool within_notrace_func(struct trace_kprobe *tk)
2268 + {
2269 +- unsigned long addr = addr = trace_kprobe_address(tk);
2270 ++ unsigned long addr = trace_kprobe_address(tk);
2271 + char symname[KSYM_NAME_LEN], *p;
2272 +
2273 + if (!__within_notrace_func(addr))
2274 +diff --git a/kernel/umh.c b/kernel/umh.c
2275 +index 7f255b5a8845..11bf5eea474c 100644
2276 +--- a/kernel/umh.c
2277 ++++ b/kernel/umh.c
2278 +@@ -544,6 +544,11 @@ EXPORT_SYMBOL_GPL(fork_usermode_blob);
2279 + * Runs a user-space application. The application is started
2280 + * asynchronously if wait is not set, and runs as a child of system workqueues.
2281 + * (ie. it runs with full root capabilities and optimized affinity).
2282 ++ *
2283 ++ * Note: successful return value does not guarantee the helper was called at
2284 ++ * Note: a successful return value does not guarantee the helper was called at
2285 ++ * UMH_WAIT_* wait modes as STATIC_USERMODEHELPER_PATH="" turns all helpers
2286 ++ * into a successful no-op.
2287 + */
2288 + int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
2289 + {
2290 +diff --git a/mm/backing-dev.c b/mm/backing-dev.c
2291 +index 62f05f605fb5..3f2480e4c5af 100644
2292 +--- a/mm/backing-dev.c
2293 ++++ b/mm/backing-dev.c
2294 +@@ -21,7 +21,7 @@ struct backing_dev_info noop_backing_dev_info = {
2295 + EXPORT_SYMBOL_GPL(noop_backing_dev_info);
2296 +
2297 + static struct class *bdi_class;
2298 +-const char *bdi_unknown_name = "(unknown)";
2299 ++static const char *bdi_unknown_name = "(unknown)";
2300 +
2301 + /*
2302 + * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
2303 +@@ -938,7 +938,8 @@ int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
2304 + if (bdi->dev) /* The driver needs to use separate queues per device */
2305 + return 0;
2306 +
2307 +- dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args);
2308 ++ vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args);
2309 ++ dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name);
2310 + if (IS_ERR(dev))
2311 + return PTR_ERR(dev);
2312 +
2313 +@@ -1043,6 +1044,14 @@ void bdi_put(struct backing_dev_info *bdi)
2314 + }
2315 + EXPORT_SYMBOL(bdi_put);
2316 +
2317 ++const char *bdi_dev_name(struct backing_dev_info *bdi)
2318 ++{
2319 ++ if (!bdi || !bdi->dev)
2320 ++ return bdi_unknown_name;
2321 ++ return bdi->dev_name;
2322 ++}
2323 ++EXPORT_SYMBOL_GPL(bdi_dev_name);
2324 ++
2325 + static wait_queue_head_t congestion_wqh[2] = {
2326 + __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
2327 + __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
2328 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
2329 +index 8159000781be..0d6f3ea86738 100644
2330 +--- a/mm/memcontrol.c
2331 ++++ b/mm/memcontrol.c
2332 +@@ -5101,19 +5101,22 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
2333 + unsigned int size;
2334 + int node;
2335 + int __maybe_unused i;
2336 ++ long error = -ENOMEM;
2337 +
2338 + size = sizeof(struct mem_cgroup);
2339 + size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
2340 +
2341 + memcg = kzalloc(size, GFP_KERNEL);
2342 + if (!memcg)
2343 +- return NULL;
2344 ++ return ERR_PTR(error);
2345 +
2346 + memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
2347 + 1, MEM_CGROUP_ID_MAX,
2348 + GFP_KERNEL);
2349 +- if (memcg->id.id < 0)
2350 ++ if (memcg->id.id < 0) {
2351 ++ error = memcg->id.id;
2352 + goto fail;
2353 ++ }
2354 +
2355 + memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu);
2356 + if (!memcg->vmstats_local)
2357 +@@ -5158,7 +5161,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
2358 + fail:
2359 + mem_cgroup_id_remove(memcg);
2360 + __mem_cgroup_free(memcg);
2361 +- return NULL;
2362 ++ return ERR_PTR(error);
2363 + }
2364 +
2365 + static struct cgroup_subsys_state * __ref
2366 +@@ -5169,8 +5172,8 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
2367 + long error = -ENOMEM;
2368 +
2369 + memcg = mem_cgroup_alloc();
2370 +- if (!memcg)
2371 +- return ERR_PTR(error);
2372 ++ if (IS_ERR(memcg))
2373 ++ return ERR_CAST(memcg);
2374 +
2375 + memcg->high = PAGE_COUNTER_MAX;
2376 + memcg->soft_limit = PAGE_COUNTER_MAX;
2377 +@@ -5220,7 +5223,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
2378 + fail:
2379 + mem_cgroup_id_remove(memcg);
2380 + mem_cgroup_free(memcg);
2381 +- return ERR_PTR(-ENOMEM);
2382 ++ return ERR_PTR(error);
2383 + }
2384 +
2385 + static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
2386 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2387 +index d387ca74cb5a..98d5c940facd 100644
2388 +--- a/mm/page_alloc.c
2389 ++++ b/mm/page_alloc.c
2390 +@@ -1555,6 +1555,7 @@ void set_zone_contiguous(struct zone *zone)
2391 + if (!__pageblock_pfn_to_page(block_start_pfn,
2392 + block_end_pfn, zone))
2393 + return;
2394 ++ cond_resched();
2395 + }
2396 +
2397 + /* We confirm that there is no hole */
2398 +@@ -2350,6 +2351,14 @@ static inline void boost_watermark(struct zone *zone)
2399 +
2400 + if (!watermark_boost_factor)
2401 + return;
2402 ++ /*
2403 ++ * Don't bother in zones that are unlikely to produce results.
2404 ++ * On small machines, including kdump capture kernels running
2405 ++ * in a small area, boosting the watermark can cause an out of
2406 ++ * memory situation immediately.
2407 ++ */
2408 ++ if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
2409 ++ return;
2410 +
2411 + max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2412 + watermark_boost_factor, 10000);
2413 +diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
2414 +index 8033f24f506c..a9e7540c5691 100644
2415 +--- a/net/batman-adv/bat_v_ogm.c
2416 ++++ b/net/batman-adv/bat_v_ogm.c
2417 +@@ -897,7 +897,7 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
2418 +
2419 + orig_node = batadv_v_ogm_orig_get(bat_priv, ogm_packet->orig);
2420 + if (!orig_node)
2421 +- return;
2422 ++ goto out;
2423 +
2424 + neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming,
2425 + ethhdr->h_source);
2426 +diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
2427 +index 580609389f0f..70e3b161c663 100644
2428 +--- a/net/batman-adv/network-coding.c
2429 ++++ b/net/batman-adv/network-coding.c
2430 +@@ -1009,15 +1009,8 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
2431 + */
2432 + static u8 batadv_nc_random_weight_tq(u8 tq)
2433 + {
2434 +- u8 rand_val, rand_tq;
2435 +-
2436 +- get_random_bytes(&rand_val, sizeof(rand_val));
2437 +-
2438 + /* randomize the estimated packet loss (max TQ - estimated TQ) */
2439 +- rand_tq = rand_val * (BATADV_TQ_MAX_VALUE - tq);
2440 +-
2441 +- /* normalize the randomized packet loss */
2442 +- rand_tq /= BATADV_TQ_MAX_VALUE;
2443 ++ u8 rand_tq = prandom_u32_max(BATADV_TQ_MAX_VALUE + 1 - tq);
2444 +
2445 + /* convert to (randomized) estimated tq again */
2446 + return BATADV_TQ_MAX_VALUE - rand_tq;
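
The batadv change swaps a hand-rolled scaling for prandom_u32_max(), which returns a uniform value in [0, n). The replaced code also had a concrete arithmetic bug worth spelling out: the intermediate product was stored in a u8, so it wrapped before the normalizing division. A short demonstration with arbitrary numbers:

#include <stdint.h>
#include <stdio.h>

#define TQ_MAX 255

int main(void)
{
	uint8_t rand_val = 200, tq = 55;

	uint8_t wrapped = rand_val * (TQ_MAX - tq);		/* 40000 truncated to 64 */
	uint8_t old_result = wrapped / TQ_MAX;			/* 0 */

	unsigned int intended = rand_val * (TQ_MAX - tq) / TQ_MAX;	/* 156 */

	printf("old: %u, intended: %u\n", old_result, intended);
	return 0;
}
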
2447 +diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
2448 +index e5bbc28ed12c..079a13493880 100644
2449 +--- a/net/batman-adv/sysfs.c
2450 ++++ b/net/batman-adv/sysfs.c
2451 +@@ -1150,7 +1150,7 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
2452 + ret = batadv_parse_throughput(net_dev, buff, "throughput_override",
2453 + &tp_override);
2454 + if (!ret)
2455 +- return count;
2456 ++ goto out;
2457 +
2458 + old_tp_override = atomic_read(&hard_iface->bat_v.throughput_override);
2459 + if (old_tp_override == tp_override)
2460 +@@ -1190,6 +1190,7 @@ static ssize_t batadv_show_throughput_override(struct kobject *kobj,
2461 +
2462 + tp_override = atomic_read(&hard_iface->bat_v.throughput_override);
2463 +
2464 ++ batadv_hardif_put(hard_iface);
2465 + return sprintf(buff, "%u.%u MBit\n", tp_override / 10,
2466 + tp_override % 10);
2467 + }
2468 +diff --git a/net/core/devlink.c b/net/core/devlink.c
2469 +index 4c25f1aa2d37..5667cae57072 100644
2470 +--- a/net/core/devlink.c
2471 ++++ b/net/core/devlink.c
2472 +@@ -3907,6 +3907,11 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
2473 + end_offset = nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]);
2474 + end_offset += nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]);
2475 + dump = false;
2476 ++
2477 ++ if (start_offset == end_offset) {
2478 ++ err = 0;
2479 ++ goto nla_put_failure;
2480 ++ }
2481 + }
2482 +
2483 + err = devlink_nl_region_read_snapshot_fill(skb, devlink,
2484 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
2485 +index 920784a9b7ff..7b40d12f0c22 100644
2486 +--- a/net/core/neighbour.c
2487 ++++ b/net/core/neighbour.c
2488 +@@ -1954,6 +1954,9 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
2489 + NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
2490 + }
2491 +
2492 ++ if (protocol)
2493 ++ neigh->protocol = protocol;
2494 ++
2495 + if (ndm->ndm_flags & NTF_EXT_LEARNED)
2496 + flags |= NEIGH_UPDATE_F_EXT_LEARNED;
2497 +
2498 +@@ -1967,9 +1970,6 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
2499 + err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
2500 + NETLINK_CB(skb).portid, extack);
2501 +
2502 +- if (protocol)
2503 +- neigh->protocol = protocol;
2504 +-
2505 + neigh_release(neigh);
2506 +
2507 + out:
2508 +diff --git a/net/dsa/master.c b/net/dsa/master.c
2509 +index 3255dfc97f86..be0b4ed3b7d8 100644
2510 +--- a/net/dsa/master.c
2511 ++++ b/net/dsa/master.c
2512 +@@ -259,7 +259,8 @@ static void dsa_master_ndo_teardown(struct net_device *dev)
2513 + {
2514 + struct dsa_port *cpu_dp = dev->dsa_ptr;
2515 +
2516 +- dev->netdev_ops = cpu_dp->orig_ndo_ops;
2517 ++ if (cpu_dp->orig_ndo_ops)
2518 ++ dev->netdev_ops = cpu_dp->orig_ndo_ops;
2519 + cpu_dp->orig_ndo_ops = NULL;
2520 + }
2521 +
2522 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2523 +index 894c7370c1bd..c81d8e9e5169 100644
2524 +--- a/net/ipv6/route.c
2525 ++++ b/net/ipv6/route.c
2526 +@@ -1388,9 +1388,18 @@ static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
2527 + }
2528 + ip6_rt_copy_init(pcpu_rt, res);
2529 + pcpu_rt->rt6i_flags |= RTF_PCPU;
2530 ++
2531 ++ if (f6i->nh)
2532 ++ pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev));
2533 ++
2534 + return pcpu_rt;
2535 + }
2536 +
2537 ++static bool rt6_is_valid(const struct rt6_info *rt6)
2538 ++{
2539 ++ return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev));
2540 ++}
2541 ++
2542 + /* It should be called with rcu_read_lock() acquired */
2543 + static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
2544 + {
2545 +@@ -1398,6 +1407,19 @@ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
2546 +
2547 + pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);
2548 +
2549 ++ if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) {
2550 ++ struct rt6_info *prev, **p;
2551 ++
2552 ++ p = this_cpu_ptr(res->nh->rt6i_pcpu);
2553 ++ prev = xchg(p, NULL);
2554 ++ if (prev) {
2555 ++ dst_dev_put(&prev->dst);
2556 ++ dst_release(&prev->dst);
2557 ++ }
2558 ++
2559 ++ pcpu_rt = NULL;
2560 ++ }
2561 ++
2562 + return pcpu_rt;
2563 + }
2564 +
2565 +@@ -2599,6 +2621,9 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
2566 +
2567 + rt = container_of(dst, struct rt6_info, dst);
2568 +
2569 ++ if (rt->sernum)
2570 ++ return rt6_is_valid(rt) ? dst : NULL;
2571 ++
2572 + rcu_read_lock();
2573 +
2574 + /* All IPV6 dsts are created with ->obsolete set to the value
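
The sernum plumbing above (together with the rt_genid_ipv6() helper added earlier) is a generation-counter cache: each per-cpu route remembers the fib6_sernum it was built under, and any later bump of that counter makes the cached entry fail validation and be rebuilt lazily on the next lookup. A rough userspace model of the idea, with invented names:

#include <stdio.h>

static unsigned int genid = 1;		/* stand-in for net->ipv6.fib6_sernum */

struct cached_route {
	int target;
	unsigned int sernum;		/* generation this entry was built under */
};

static struct cached_route build_route(int target)
{
	struct cached_route rt = { .target = target, .sernum = genid };

	return rt;
}

static int route_is_valid(const struct cached_route *rt)
{
	return rt->sernum == genid;
}

int main(void)
{
	struct cached_route rt = build_route(42);

	printf("before bump: %d\n", route_is_valid(&rt));	/* 1 */
	genid++;		/* a routing change bumps the generation */
	printf("after bump:  %d\n", route_is_valid(&rt));	/* 0 */

	if (!route_is_valid(&rt))
		rt = build_route(42);	/* rebuilt lazily on the next lookup */
	printf("after rebuild: %d\n", route_is_valid(&rt));	/* 1 */
	return 0;
}
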
2575 +diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c
2576 +index 3d816a1e5442..59151dc07fdc 100644
2577 +--- a/net/netfilter/nf_nat_proto.c
2578 ++++ b/net/netfilter/nf_nat_proto.c
2579 +@@ -68,15 +68,13 @@ static bool udp_manip_pkt(struct sk_buff *skb,
2580 + enum nf_nat_manip_type maniptype)
2581 + {
2582 + struct udphdr *hdr;
2583 +- bool do_csum;
2584 +
2585 + if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
2586 + return false;
2587 +
2588 + hdr = (struct udphdr *)(skb->data + hdroff);
2589 +- do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL;
2590 ++ __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, !!hdr->check);
2591 +
2592 +- __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, do_csum);
2593 + return true;
2594 + }
2595 +
2596 +diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
2597 +index 9f5dea0064ea..916a3c7f9eaf 100644
2598 +--- a/net/netfilter/nfnetlink_osf.c
2599 ++++ b/net/netfilter/nfnetlink_osf.c
2600 +@@ -165,12 +165,12 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
2601 + static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx,
2602 + const struct sk_buff *skb,
2603 + const struct iphdr *ip,
2604 +- unsigned char *opts)
2605 ++ unsigned char *opts,
2606 ++ struct tcphdr *_tcph)
2607 + {
2608 + const struct tcphdr *tcp;
2609 +- struct tcphdr _tcph;
2610 +
2611 +- tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph);
2612 ++ tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), _tcph);
2613 + if (!tcp)
2614 + return NULL;
2615 +
2616 +@@ -205,10 +205,11 @@ nf_osf_match(const struct sk_buff *skb, u_int8_t family,
2617 + int fmatch = FMATCH_WRONG;
2618 + struct nf_osf_hdr_ctx ctx;
2619 + const struct tcphdr *tcp;
2620 ++ struct tcphdr _tcph;
2621 +
2622 + memset(&ctx, 0, sizeof(ctx));
2623 +
2624 +- tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts);
2625 ++ tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph);
2626 + if (!tcp)
2627 + return false;
2628 +
2629 +@@ -265,10 +266,11 @@ bool nf_osf_find(const struct sk_buff *skb,
2630 + const struct nf_osf_finger *kf;
2631 + struct nf_osf_hdr_ctx ctx;
2632 + const struct tcphdr *tcp;
2633 ++ struct tcphdr _tcph;
2634 +
2635 + memset(&ctx, 0, sizeof(ctx));
2636 +
2637 +- tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts);
2638 ++ tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph);
2639 + if (!tcp)
2640 + return false;
2641 +
2642 +diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
2643 +index dba70377bbd9..4021f726b58f 100644
2644 +--- a/net/sched/sch_choke.c
2645 ++++ b/net/sched/sch_choke.c
2646 +@@ -323,7 +323,8 @@ static void choke_reset(struct Qdisc *sch)
2647 +
2648 + sch->q.qlen = 0;
2649 + sch->qstats.backlog = 0;
2650 +- memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
2651 ++ if (q->tab)
2652 ++ memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
2653 + q->head = q->tail = 0;
2654 + red_restart(&q->vars);
2655 + }
2656 +diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
2657 +index c261c0a18868..76d72c3f52ed 100644
2658 +--- a/net/sched/sch_fq_codel.c
2659 ++++ b/net/sched/sch_fq_codel.c
2660 +@@ -417,7 +417,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
2661 + q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
2662 +
2663 + if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
2664 +- q->drop_batch_size = min(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
2665 ++ q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
2666 +
2667 + if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
2668 + q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));
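
The fq_codel hunk is a one-word thinko: a lower bound needs max(), not min(), otherwise every configured batch size collapses to 1 (or 0). For completeness, a tiny illustration (the macros and values are arbitrary):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned int requested = 64;

	printf("min(1, 64) = %u\n", MIN(1U, requested));	/* old: always <= 1 */
	printf("max(1, 64) = %u\n", MAX(1U, requested));	/* fixed: floor of 1 */
	return 0;
}
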
2669 +diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
2670 +index c787d4d46017..5a6def5e4e6d 100644
2671 +--- a/net/sched/sch_sfq.c
2672 ++++ b/net/sched/sch_sfq.c
2673 +@@ -637,6 +637,15 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
2674 + if (ctl->divisor &&
2675 + (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
2676 + return -EINVAL;
2677 ++
2678 ++ /* slot->allot is a short, make sure quantum is not too big. */
2679 ++ if (ctl->quantum) {
2680 ++ unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum);
2681 ++
2682 ++ if (scaled <= 0 || scaled > SHRT_MAX)
2683 ++ return -EINVAL;
2684 ++ }
2685 ++
2686 + if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
2687 + ctl_v1->Wlog))
2688 + return -EINVAL;
2689 +diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c
2690 +index 0fb10abf7579..7a5e4c454715 100644
2691 +--- a/net/sched/sch_skbprio.c
2692 ++++ b/net/sched/sch_skbprio.c
2693 +@@ -169,6 +169,9 @@ static int skbprio_change(struct Qdisc *sch, struct nlattr *opt,
2694 + {
2695 + struct tc_skbprio_qopt *ctl = nla_data(opt);
2696 +
2697 ++ if (opt->nla_len != nla_attr_size(sizeof(*ctl)))
2698 ++ return -EINVAL;
2699 ++
2700 + sch->limit = ctl->limit;
2701 + return 0;
2702 + }
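
The skbprio hunk adds the usual netlink hygiene of validating the attribute's advertised length before dereferencing its payload as a struct. A simplified userspace sketch of the same idea; the attr layout below only mimics the "length + payload" shape and is not the real nlattr format.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct qopt {				/* stand-in for struct tc_skbprio_qopt */
	uint32_t limit;
};

struct attr {
	uint16_t len;			/* payload length the sender claims */
	unsigned char data[16];		/* payload bytes */
};

static int apply_qopt(const struct attr *a, uint32_t *limit)
{
	struct qopt opt;

	if (a->len != sizeof(opt))	/* reject short or oversized payloads */
		return -1;

	memcpy(&opt, a->data, sizeof(opt));
	*limit = opt.limit;
	return 0;
}

int main(void)
{
	struct attr good = { .len = sizeof(struct qopt) };
	struct attr bad  = { .len = 1 };	/* truncated payload */
	uint32_t limit = 0;

	printf("good: %d\n", apply_qopt(&good, &limit));	/* 0 */
	printf("bad:  %d\n", apply_qopt(&bad, &limit));		/* -1 */
	return 0;
}
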
2703 +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
2704 +index c6d83a64eac3..5625a9500f21 100644
2705 +--- a/net/sctp/sm_statefuns.c
2706 ++++ b/net/sctp/sm_statefuns.c
2707 +@@ -1865,7 +1865,7 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
2708 + */
2709 + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
2710 + return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
2711 +- SCTP_ST_CHUNK(0), NULL,
2712 ++ SCTP_ST_CHUNK(0), repl,
2713 + commands);
2714 + } else {
2715 + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
2716 +@@ -5470,7 +5470,7 @@ enum sctp_disposition sctp_sf_do_9_2_start_shutdown(
2717 + * in the Cumulative TSN Ack field the last sequential TSN it
2718 + * has received from the peer.
2719 + */
2720 +- reply = sctp_make_shutdown(asoc, NULL);
2721 ++ reply = sctp_make_shutdown(asoc, arg);
2722 + if (!reply)
2723 + goto nomem;
2724 +
2725 +@@ -6068,7 +6068,7 @@ enum sctp_disposition sctp_sf_autoclose_timer_expire(
2726 + disposition = SCTP_DISPOSITION_CONSUME;
2727 + if (sctp_outq_is_empty(&asoc->outqueue)) {
2728 + disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
2729 +- arg, commands);
2730 ++ NULL, commands);
2731 + }
2732 +
2733 + return disposition;
2734 +diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
2735 +index 3a12fc18239b..73dbed0c4b6b 100644
2736 +--- a/net/tipc/topsrv.c
2737 ++++ b/net/tipc/topsrv.c
2738 +@@ -402,10 +402,11 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
2739 + read_lock_bh(&sk->sk_callback_lock);
2740 + ret = tipc_conn_rcv_sub(srv, con, &s);
2741 + read_unlock_bh(&sk->sk_callback_lock);
2742 ++ if (!ret)
2743 ++ return 0;
2744 + }
2745 +- if (ret < 0)
2746 +- tipc_conn_close(con);
2747 +
2748 ++ tipc_conn_close(con);
2749 + return ret;
2750 + }
2751 +
2752 +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
2753 +index 41e9c2932b34..5513a08a4308 100644
2754 +--- a/net/tls/tls_sw.c
2755 ++++ b/net/tls/tls_sw.c
2756 +@@ -797,6 +797,8 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
2757 + *copied -= sk_msg_free(sk, msg);
2758 + tls_free_open_rec(sk);
2759 + }
2760 ++ if (psock)
2761 ++ sk_psock_put(sk, psock);
2762 + return err;
2763 + }
2764 + more_data:
2765 +@@ -2076,8 +2078,9 @@ static void tls_data_ready(struct sock *sk)
2766 + strp_data_ready(&ctx->strp);
2767 +
2768 + psock = sk_psock_get(sk);
2769 +- if (psock && !list_empty(&psock->ingress_msg)) {
2770 +- ctx->saved_data_ready(sk);
2771 ++ if (psock) {
2772 ++ if (!list_empty(&psock->ingress_msg))
2773 ++ ctx->saved_data_ready(sk);
2774 + sk_psock_put(sk, psock);
2775 + }
2776 + }
2777 +diff --git a/scripts/decodecode b/scripts/decodecode
2778 +index ba8b8d5834e6..fbdb325cdf4f 100755
2779 +--- a/scripts/decodecode
2780 ++++ b/scripts/decodecode
2781 +@@ -126,7 +126,7 @@ faultlinenum=$(( $(wc -l $T.oo | cut -d" " -f1) - \
2782 + faultline=`cat $T.dis | head -1 | cut -d":" -f2-`
2783 + faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'`
2784 +
2785 +-cat $T.oo | sed -e "${faultlinenum}s/^\(.*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/"
2786 ++cat $T.oo | sed -e "${faultlinenum}s/^\([^:]*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/"
2787 + echo
2788 + cat $T.aa
2789 + cleanup
2790 +diff --git a/tools/cgroup/iocost_monitor.py b/tools/cgroup/iocost_monitor.py
2791 +index f79b23582a1d..7e344a78a627 100644
2792 +--- a/tools/cgroup/iocost_monitor.py
2793 ++++ b/tools/cgroup/iocost_monitor.py
2794 +@@ -159,7 +159,12 @@ class IocgStat:
2795 + else:
2796 + self.inflight_pct = 0
2797 +
2798 +- self.debt_ms = iocg.abs_vdebt.counter.value_() / VTIME_PER_USEC / 1000
2799 ++ # vdebt used to be an atomic64_t and is now u64, support both
2800 ++ try:
2801 ++ self.debt_ms = iocg.abs_vdebt.counter.value_() / VTIME_PER_USEC / 1000
2802 ++ except:
2803 ++ self.debt_ms = iocg.abs_vdebt.value_() / VTIME_PER_USEC / 1000
2804 ++
2805 + self.use_delay = blkg.use_delay.counter.value_()
2806 + self.delay_ms = blkg.delay_nsec.counter.value_() / 1_000_000
2807 +
2808 +diff --git a/tools/objtool/check.c b/tools/objtool/check.c
2809 +index d6a971326f87..fcc6cd404f56 100644
2810 +--- a/tools/objtool/check.c
2811 ++++ b/tools/objtool/check.c
2812 +@@ -1402,7 +1402,7 @@ static int update_insn_state_regs(struct instruction *insn, struct insn_state *s
2813 + struct cfi_reg *cfa = &state->cfa;
2814 + struct stack_op *op = &insn->stack_op;
2815 +
2816 +- if (cfa->base != CFI_SP)
2817 ++ if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
2818 + return 0;
2819 +
2820 + /* push */
2821 +diff --git a/virt/kvm/arm/hyp/aarch32.c b/virt/kvm/arm/hyp/aarch32.c
2822 +index d31f267961e7..25c0e47d57cb 100644
2823 +--- a/virt/kvm/arm/hyp/aarch32.c
2824 ++++ b/virt/kvm/arm/hyp/aarch32.c
2825 +@@ -125,12 +125,16 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
2826 + */
2827 + void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
2828 + {
2829 ++ u32 pc = *vcpu_pc(vcpu);
2830 + bool is_thumb;
2831 +
2832 + is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
2833 + if (is_thumb && !is_wide_instr)
2834 +- *vcpu_pc(vcpu) += 2;
2835 ++ pc += 2;
2836 + else
2837 +- *vcpu_pc(vcpu) += 4;
2838 ++ pc += 4;
2839 ++
2840 ++ *vcpu_pc(vcpu) = pc;
2841 ++
2842 + kvm_adjust_itstate(vcpu);
2843 + }
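
The aarch32 change performs the PC advance in a u32 local so a 32-bit guest PC near the top of the address space wraps around instead of carrying into bit 32 of the 64-bit register file. The same effect in a standalone snippet:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t stored_pc = 0xfffffffeULL;	/* 32-bit guest PC kept in a 64-bit field */

	uint64_t wrong = stored_pc + 4;		/* 64-bit add carries into bit 32 */

	uint32_t pc = (uint32_t)stored_pc;
	pc += 4;				/* 32-bit add wraps as the guest expects */
	uint64_t right = pc;

	printf("64-bit add: %#llx\n", (unsigned long long)wrong);	/* 0x100000002 */
	printf("32-bit add: %#llx\n", (unsigned long long)right);	/* 0x2 */
	return 0;
}
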
2844 +diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
2845 +index 0d090482720d..7eacf00e5abe 100644
2846 +--- a/virt/kvm/arm/vgic/vgic-mmio.c
2847 ++++ b/virt/kvm/arm/vgic/vgic-mmio.c
2848 +@@ -389,7 +389,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
2849 + static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
2850 + {
2851 + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
2852 +- intid > VGIC_NR_PRIVATE_IRQS)
2853 ++ intid >= VGIC_NR_PRIVATE_IRQS)
2854 + kvm_arm_halt_guest(vcpu->kvm);
2855 + }
2856 +
2857 +@@ -397,7 +397,7 @@ static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
2858 + static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
2859 + {
2860 + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
2861 +- intid > VGIC_NR_PRIVATE_IRQS)
2862 ++ intid >= VGIC_NR_PRIVATE_IRQS)
2863 + kvm_arm_resume_guest(vcpu->kvm);
2864 + }
2865 +