From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.6 commit in: /
Date: Thu, 14 May 2020 11:34:35
Message-Id: 1589456051.ee77ed5cd54e726d06c811541727b80b2472cd96.mpagano@gentoo
1 commit: ee77ed5cd54e726d06c811541727b80b2472cd96
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu May 14 11:34:11 2020 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu May 14 11:34:11 2020 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ee77ed5c
7
8 Linux patch 5.6.13
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1012_linux-5.6.13.patch | 3958 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 3962 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index f4994be..6a6ec25 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -91,6 +91,10 @@ Patch: 1011_linux-5.6.12.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.6.12
23
24 +Patch: 1012_linux-5.6.13.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.6.13
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1012_linux-5.6.13.patch b/1012_linux-5.6.13.patch
33 new file mode 100644
34 index 0000000..cf736d2
35 --- /dev/null
36 +++ b/1012_linux-5.6.13.patch
37 @@ -0,0 +1,3958 @@
38 +diff --git a/Makefile b/Makefile
39 +index 97e4c4d9ac95..d252219666fd 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 6
46 +-SUBLEVEL = 12
47 ++SUBLEVEL = 13
48 + EXTRAVERSION =
49 + NAME = Kleptomaniac Octopus
50 +
51 +diff --git a/arch/arm/crypto/chacha-glue.c b/arch/arm/crypto/chacha-glue.c
52 +index 6fdb0ac62b3d..59da6c0b63b6 100644
53 +--- a/arch/arm/crypto/chacha-glue.c
54 ++++ b/arch/arm/crypto/chacha-glue.c
55 +@@ -91,9 +91,17 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
56 + return;
57 + }
58 +
59 +- kernel_neon_begin();
60 +- chacha_doneon(state, dst, src, bytes, nrounds);
61 +- kernel_neon_end();
62 ++ do {
63 ++ unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
64 ++
65 ++ kernel_neon_begin();
66 ++ chacha_doneon(state, dst, src, todo, nrounds);
67 ++ kernel_neon_end();
68 ++
69 ++ bytes -= todo;
70 ++ src += todo;
71 ++ dst += todo;
72 ++ } while (bytes);
73 + }
74 + EXPORT_SYMBOL(chacha_crypt_arch);
75 +
76 +diff --git a/arch/arm/crypto/nhpoly1305-neon-glue.c b/arch/arm/crypto/nhpoly1305-neon-glue.c
77 +index ae5aefc44a4d..ffa8d73fe722 100644
78 +--- a/arch/arm/crypto/nhpoly1305-neon-glue.c
79 ++++ b/arch/arm/crypto/nhpoly1305-neon-glue.c
80 +@@ -30,7 +30,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc,
81 + return crypto_nhpoly1305_update(desc, src, srclen);
82 +
83 + do {
84 +- unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
85 ++ unsigned int n = min_t(unsigned int, srclen, SZ_4K);
86 +
87 + kernel_neon_begin();
88 + crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon);
89 +diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c
90 +index ceec04ec2f40..13cfef4ae22e 100644
91 +--- a/arch/arm/crypto/poly1305-glue.c
92 ++++ b/arch/arm/crypto/poly1305-glue.c
93 +@@ -160,13 +160,20 @@ void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
94 + unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);
95 +
96 + if (static_branch_likely(&have_neon) && do_neon) {
97 +- kernel_neon_begin();
98 +- poly1305_blocks_neon(&dctx->h, src, len, 1);
99 +- kernel_neon_end();
100 ++ do {
101 ++ unsigned int todo = min_t(unsigned int, len, SZ_4K);
102 ++
103 ++ kernel_neon_begin();
104 ++ poly1305_blocks_neon(&dctx->h, src, todo, 1);
105 ++ kernel_neon_end();
106 ++
107 ++ len -= todo;
108 ++ src += todo;
109 ++ } while (len);
110 + } else {
111 + poly1305_blocks_arm(&dctx->h, src, len, 1);
112 ++ src += len;
113 + }
114 +- src += len;
115 + nbytes %= POLY1305_BLOCK_SIZE;
116 + }
117 +
118 +diff --git a/arch/arm64/crypto/chacha-neon-glue.c b/arch/arm64/crypto/chacha-neon-glue.c
119 +index 37ca3e889848..af2bbca38e70 100644
120 +--- a/arch/arm64/crypto/chacha-neon-glue.c
121 ++++ b/arch/arm64/crypto/chacha-neon-glue.c
122 +@@ -87,9 +87,17 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
123 + !crypto_simd_usable())
124 + return chacha_crypt_generic(state, dst, src, bytes, nrounds);
125 +
126 +- kernel_neon_begin();
127 +- chacha_doneon(state, dst, src, bytes, nrounds);
128 +- kernel_neon_end();
129 ++ do {
130 ++ unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
131 ++
132 ++ kernel_neon_begin();
133 ++ chacha_doneon(state, dst, src, todo, nrounds);
134 ++ kernel_neon_end();
135 ++
136 ++ bytes -= todo;
137 ++ src += todo;
138 ++ dst += todo;
139 ++ } while (bytes);
140 + }
141 + EXPORT_SYMBOL(chacha_crypt_arch);
142 +
143 +diff --git a/arch/arm64/crypto/nhpoly1305-neon-glue.c b/arch/arm64/crypto/nhpoly1305-neon-glue.c
144 +index 895d3727c1fb..c5405e6a6db7 100644
145 +--- a/arch/arm64/crypto/nhpoly1305-neon-glue.c
146 ++++ b/arch/arm64/crypto/nhpoly1305-neon-glue.c
147 +@@ -30,7 +30,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc,
148 + return crypto_nhpoly1305_update(desc, src, srclen);
149 +
150 + do {
151 +- unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
152 ++ unsigned int n = min_t(unsigned int, srclen, SZ_4K);
153 +
154 + kernel_neon_begin();
155 + crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon);
156 +diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c
157 +index e97b092f56b8..f33ada70c4ed 100644
158 +--- a/arch/arm64/crypto/poly1305-glue.c
159 ++++ b/arch/arm64/crypto/poly1305-glue.c
160 +@@ -143,13 +143,20 @@ void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
161 + unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);
162 +
163 + if (static_branch_likely(&have_neon) && crypto_simd_usable()) {
164 +- kernel_neon_begin();
165 +- poly1305_blocks_neon(&dctx->h, src, len, 1);
166 +- kernel_neon_end();
167 ++ do {
168 ++ unsigned int todo = min_t(unsigned int, len, SZ_4K);
169 ++
170 ++ kernel_neon_begin();
171 ++ poly1305_blocks_neon(&dctx->h, src, todo, 1);
172 ++ kernel_neon_end();
173 ++
174 ++ len -= todo;
175 ++ src += todo;
176 ++ } while (len);
177 + } else {
178 + poly1305_blocks(&dctx->h, src, len, 1);
179 ++ src += len;
180 + }
181 +- src += len;
182 + nbytes %= POLY1305_BLOCK_SIZE;
183 + }
184 +
185 +diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
186 +index 2bd92301d32f..6194cb3309d0 100644
187 +--- a/arch/arm64/kvm/guest.c
188 ++++ b/arch/arm64/kvm/guest.c
189 +@@ -201,6 +201,13 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
190 + }
191 +
192 + memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
193 ++
194 ++ if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
195 ++ int i;
196 ++
197 ++ for (i = 0; i < 16; i++)
198 ++ *vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i);
199 ++ }
200 + out:
201 + return err;
202 + }
203 +diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
204 +index bbeb6a5a6ba6..0be3355e3499 100644
205 +--- a/arch/arm64/mm/hugetlbpage.c
206 ++++ b/arch/arm64/mm/hugetlbpage.c
207 +@@ -230,6 +230,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
208 + ptep = (pte_t *)pudp;
209 + } else if (sz == (CONT_PTE_SIZE)) {
210 + pmdp = pmd_alloc(mm, pudp, addr);
211 ++ if (!pmdp)
212 ++ return NULL;
213 +
214 + WARN_ON(addr & (sz - 1));
215 + /*
216 +diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
217 +index fab855963c73..157924baa191 100644
218 +--- a/arch/riscv/mm/init.c
219 ++++ b/arch/riscv/mm/init.c
220 +@@ -149,7 +149,8 @@ void __init setup_bootmem(void)
221 + memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
222 +
223 + set_max_mapnr(PFN_DOWN(mem_size));
224 +- max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
225 ++ max_pfn = PFN_DOWN(memblock_end_of_DRAM());
226 ++ max_low_pfn = max_pfn;
227 +
228 + #ifdef CONFIG_BLK_DEV_INITRD
229 + setup_initrd();
230 +diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
231 +index ed52ffa8d5d4..560310e29e27 100644
232 +--- a/arch/s390/kvm/priv.c
233 ++++ b/arch/s390/kvm/priv.c
234 +@@ -626,10 +626,12 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
235 + * available for the guest are AQIC and TAPQ with the t bit set
236 + * since we do not set IC.3 (FIII) we currently will only intercept
237 + * the AQIC function code.
238 ++ * Note: running nested under z/VM can result in intercepts for other
239 ++ * function codes, e.g. PQAP(QCI). We do not support this and bail out.
240 + */
241 + reg0 = vcpu->run->s.regs.gprs[0];
242 + fc = (reg0 >> 24) & 0xff;
243 +- if (WARN_ON_ONCE(fc != 0x03))
244 ++ if (fc != 0x03)
245 + return -EOPNOTSUPP;
246 +
247 + /* PQAP instruction is allowed for guest kernel only */
248 +diff --git a/arch/x86/crypto/blake2s-glue.c b/arch/x86/crypto/blake2s-glue.c
249 +index 06ef2d4a4701..6737bcea1fa1 100644
250 +--- a/arch/x86/crypto/blake2s-glue.c
251 ++++ b/arch/x86/crypto/blake2s-glue.c
252 +@@ -32,16 +32,16 @@ void blake2s_compress_arch(struct blake2s_state *state,
253 + const u32 inc)
254 + {
255 + /* SIMD disables preemption, so relax after processing each page. */
256 +- BUILD_BUG_ON(PAGE_SIZE / BLAKE2S_BLOCK_SIZE < 8);
257 ++ BUILD_BUG_ON(SZ_4K / BLAKE2S_BLOCK_SIZE < 8);
258 +
259 + if (!static_branch_likely(&blake2s_use_ssse3) || !crypto_simd_usable()) {
260 + blake2s_compress_generic(state, block, nblocks, inc);
261 + return;
262 + }
263 +
264 +- for (;;) {
265 ++ do {
266 + const size_t blocks = min_t(size_t, nblocks,
267 +- PAGE_SIZE / BLAKE2S_BLOCK_SIZE);
268 ++ SZ_4K / BLAKE2S_BLOCK_SIZE);
269 +
270 + kernel_fpu_begin();
271 + if (IS_ENABLED(CONFIG_AS_AVX512) &&
272 +@@ -52,10 +52,8 @@ void blake2s_compress_arch(struct blake2s_state *state,
273 + kernel_fpu_end();
274 +
275 + nblocks -= blocks;
276 +- if (!nblocks)
277 +- break;
278 + block += blocks * BLAKE2S_BLOCK_SIZE;
279 +- }
280 ++ } while (nblocks);
281 + }
282 + EXPORT_SYMBOL(blake2s_compress_arch);
283 +
284 +diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
285 +index 68a74953efaf..ebf2cd7ff2f0 100644
286 +--- a/arch/x86/crypto/chacha_glue.c
287 ++++ b/arch/x86/crypto/chacha_glue.c
288 +@@ -154,9 +154,17 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
289 + bytes <= CHACHA_BLOCK_SIZE)
290 + return chacha_crypt_generic(state, dst, src, bytes, nrounds);
291 +
292 +- kernel_fpu_begin();
293 +- chacha_dosimd(state, dst, src, bytes, nrounds);
294 +- kernel_fpu_end();
295 ++ do {
296 ++ unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
297 ++
298 ++ kernel_fpu_begin();
299 ++ chacha_dosimd(state, dst, src, todo, nrounds);
300 ++ kernel_fpu_end();
301 ++
302 ++ bytes -= todo;
303 ++ src += todo;
304 ++ dst += todo;
305 ++ } while (bytes);
306 + }
307 + EXPORT_SYMBOL(chacha_crypt_arch);
308 +
309 +diff --git a/arch/x86/crypto/nhpoly1305-avx2-glue.c b/arch/x86/crypto/nhpoly1305-avx2-glue.c
310 +index f7567cbd35b6..80fcb85736e1 100644
311 +--- a/arch/x86/crypto/nhpoly1305-avx2-glue.c
312 ++++ b/arch/x86/crypto/nhpoly1305-avx2-glue.c
313 +@@ -29,7 +29,7 @@ static int nhpoly1305_avx2_update(struct shash_desc *desc,
314 + return crypto_nhpoly1305_update(desc, src, srclen);
315 +
316 + do {
317 +- unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
318 ++ unsigned int n = min_t(unsigned int, srclen, SZ_4K);
319 +
320 + kernel_fpu_begin();
321 + crypto_nhpoly1305_update_helper(desc, src, n, _nh_avx2);
322 +diff --git a/arch/x86/crypto/nhpoly1305-sse2-glue.c b/arch/x86/crypto/nhpoly1305-sse2-glue.c
323 +index a661ede3b5cf..cc6b7c1a2705 100644
324 +--- a/arch/x86/crypto/nhpoly1305-sse2-glue.c
325 ++++ b/arch/x86/crypto/nhpoly1305-sse2-glue.c
326 +@@ -29,7 +29,7 @@ static int nhpoly1305_sse2_update(struct shash_desc *desc,
327 + return crypto_nhpoly1305_update(desc, src, srclen);
328 +
329 + do {
330 +- unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
331 ++ unsigned int n = min_t(unsigned int, srclen, SZ_4K);
332 +
333 + kernel_fpu_begin();
334 + crypto_nhpoly1305_update_helper(desc, src, n, _nh_sse2);
335 +diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
336 +index 79bb58737d52..61b2bc8b6986 100644
337 +--- a/arch/x86/crypto/poly1305_glue.c
338 ++++ b/arch/x86/crypto/poly1305_glue.c
339 +@@ -91,8 +91,8 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len,
340 + struct poly1305_arch_internal *state = ctx;
341 +
342 + /* SIMD disables preemption, so relax after processing each page. */
343 +- BUILD_BUG_ON(PAGE_SIZE < POLY1305_BLOCK_SIZE ||
344 +- PAGE_SIZE % POLY1305_BLOCK_SIZE);
345 ++ BUILD_BUG_ON(SZ_4K < POLY1305_BLOCK_SIZE ||
346 ++ SZ_4K % POLY1305_BLOCK_SIZE);
347 +
348 + if (!IS_ENABLED(CONFIG_AS_AVX) || !static_branch_likely(&poly1305_use_avx) ||
349 + (len < (POLY1305_BLOCK_SIZE * 18) && !state->is_base2_26) ||
350 +@@ -102,8 +102,8 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len,
351 + return;
352 + }
353 +
354 +- for (;;) {
355 +- const size_t bytes = min_t(size_t, len, PAGE_SIZE);
356 ++ do {
357 ++ const size_t bytes = min_t(size_t, len, SZ_4K);
358 +
359 + kernel_fpu_begin();
360 + if (IS_ENABLED(CONFIG_AS_AVX512) && static_branch_likely(&poly1305_use_avx512))
361 +@@ -113,11 +113,10 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len,
362 + else
363 + poly1305_blocks_avx(ctx, inp, bytes, padbit);
364 + kernel_fpu_end();
365 ++
366 + len -= bytes;
367 +- if (!len)
368 +- break;
369 + inp += bytes;
370 +- }
371 ++ } while (len);
372 + }
373 +
374 + static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
375 +diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
376 +index 0789e13ece90..1c7f13bb6728 100644
377 +--- a/arch/x86/entry/calling.h
378 ++++ b/arch/x86/entry/calling.h
379 +@@ -98,13 +98,6 @@ For 32-bit we have the following conventions - kernel is built with
380 + #define SIZEOF_PTREGS 21*8
381 +
382 + .macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
383 +- /*
384 +- * Push registers and sanitize registers of values that a
385 +- * speculation attack might otherwise want to exploit. The
386 +- * lower registers are likely clobbered well before they
387 +- * could be put to use in a speculative execution gadget.
388 +- * Interleave XOR with PUSH for better uop scheduling:
389 +- */
390 + .if \save_ret
391 + pushq %rsi /* pt_regs->si */
392 + movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */
393 +@@ -114,34 +107,43 @@ For 32-bit we have the following conventions - kernel is built with
394 + pushq %rsi /* pt_regs->si */
395 + .endif
396 + pushq \rdx /* pt_regs->dx */
397 +- xorl %edx, %edx /* nospec dx */
398 + pushq %rcx /* pt_regs->cx */
399 +- xorl %ecx, %ecx /* nospec cx */
400 + pushq \rax /* pt_regs->ax */
401 + pushq %r8 /* pt_regs->r8 */
402 +- xorl %r8d, %r8d /* nospec r8 */
403 + pushq %r9 /* pt_regs->r9 */
404 +- xorl %r9d, %r9d /* nospec r9 */
405 + pushq %r10 /* pt_regs->r10 */
406 +- xorl %r10d, %r10d /* nospec r10 */
407 + pushq %r11 /* pt_regs->r11 */
408 +- xorl %r11d, %r11d /* nospec r11*/
409 + pushq %rbx /* pt_regs->rbx */
410 +- xorl %ebx, %ebx /* nospec rbx*/
411 + pushq %rbp /* pt_regs->rbp */
412 +- xorl %ebp, %ebp /* nospec rbp*/
413 + pushq %r12 /* pt_regs->r12 */
414 +- xorl %r12d, %r12d /* nospec r12*/
415 + pushq %r13 /* pt_regs->r13 */
416 +- xorl %r13d, %r13d /* nospec r13*/
417 + pushq %r14 /* pt_regs->r14 */
418 +- xorl %r14d, %r14d /* nospec r14*/
419 + pushq %r15 /* pt_regs->r15 */
420 +- xorl %r15d, %r15d /* nospec r15*/
421 + UNWIND_HINT_REGS
422 ++
423 + .if \save_ret
424 + pushq %rsi /* return address on top of stack */
425 + .endif
426 ++
427 ++ /*
428 ++ * Sanitize registers of values that a speculation attack might
429 ++ * otherwise want to exploit. The lower registers are likely clobbered
430 ++ * well before they could be put to use in a speculative execution
431 ++ * gadget.
432 ++ */
433 ++ xorl %edx, %edx /* nospec dx */
434 ++ xorl %ecx, %ecx /* nospec cx */
435 ++ xorl %r8d, %r8d /* nospec r8 */
436 ++ xorl %r9d, %r9d /* nospec r9 */
437 ++ xorl %r10d, %r10d /* nospec r10 */
438 ++ xorl %r11d, %r11d /* nospec r11 */
439 ++ xorl %ebx, %ebx /* nospec rbx */
440 ++ xorl %ebp, %ebp /* nospec rbp */
441 ++ xorl %r12d, %r12d /* nospec r12 */
442 ++ xorl %r13d, %r13d /* nospec r13 */
443 ++ xorl %r14d, %r14d /* nospec r14 */
444 ++ xorl %r15d, %r15d /* nospec r15 */
445 ++
446 + .endm
447 +
448 + .macro POP_REGS pop_rdi=1 skip_r11rcx=0
449 +diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
450 +index f2bb91e87877..faa53fee0663 100644
451 +--- a/arch/x86/entry/entry_64.S
452 ++++ b/arch/x86/entry/entry_64.S
453 +@@ -249,7 +249,6 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
454 + */
455 + syscall_return_via_sysret:
456 + /* rcx and r11 are already restored (see code above) */
457 +- UNWIND_HINT_EMPTY
458 + POP_REGS pop_rdi=0 skip_r11rcx=1
459 +
460 + /*
461 +@@ -258,6 +257,7 @@ syscall_return_via_sysret:
462 + */
463 + movq %rsp, %rdi
464 + movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
465 ++ UNWIND_HINT_EMPTY
466 +
467 + pushq RSP-RDI(%rdi) /* RSP */
468 + pushq (%rdi) /* RDI */
469 +@@ -279,8 +279,7 @@ SYM_CODE_END(entry_SYSCALL_64)
470 + * %rdi: prev task
471 + * %rsi: next task
472 + */
473 +-SYM_CODE_START(__switch_to_asm)
474 +- UNWIND_HINT_FUNC
475 ++SYM_FUNC_START(__switch_to_asm)
476 + /*
477 + * Save callee-saved registers
478 + * This must match the order in inactive_task_frame
479 +@@ -321,7 +320,7 @@ SYM_CODE_START(__switch_to_asm)
480 + popq %rbp
481 +
482 + jmp __switch_to
483 +-SYM_CODE_END(__switch_to_asm)
484 ++SYM_FUNC_END(__switch_to_asm)
485 +
486 + /*
487 + * A newly forked process directly context switches into this address.
488 +@@ -512,7 +511,7 @@ SYM_CODE_END(spurious_entries_start)
489 + * +----------------------------------------------------+
490 + */
491 + SYM_CODE_START(interrupt_entry)
492 +- UNWIND_HINT_FUNC
493 ++ UNWIND_HINT_IRET_REGS offset=16
494 + ASM_CLAC
495 + cld
496 +
497 +@@ -544,9 +543,9 @@ SYM_CODE_START(interrupt_entry)
498 + pushq 5*8(%rdi) /* regs->eflags */
499 + pushq 4*8(%rdi) /* regs->cs */
500 + pushq 3*8(%rdi) /* regs->ip */
501 ++ UNWIND_HINT_IRET_REGS
502 + pushq 2*8(%rdi) /* regs->orig_ax */
503 + pushq 8(%rdi) /* return address */
504 +- UNWIND_HINT_FUNC
505 +
506 + movq (%rdi), %rdi
507 + jmp 2f
508 +@@ -637,6 +636,7 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
509 + */
510 + movq %rsp, %rdi
511 + movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
512 ++ UNWIND_HINT_EMPTY
513 +
514 + /* Copy the IRET frame to the trampoline stack. */
515 + pushq 6*8(%rdi) /* SS */
516 +@@ -1739,7 +1739,7 @@ SYM_CODE_START(rewind_stack_do_exit)
517 +
518 + movq PER_CPU_VAR(cpu_current_top_of_stack), %rax
519 + leaq -PTREGS_SIZE(%rax), %rsp
520 +- UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE
521 ++ UNWIND_HINT_REGS
522 +
523 + call do_exit
524 + SYM_CODE_END(rewind_stack_do_exit)
525 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
526 +index d79b40cd8283..7ba99c0759cf 100644
527 +--- a/arch/x86/include/asm/kvm_host.h
528 ++++ b/arch/x86/include/asm/kvm_host.h
529 +@@ -1664,8 +1664,8 @@ void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
530 + static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
531 + {
532 + /* We can only post Fixed and LowPrio IRQs */
533 +- return (irq->delivery_mode == dest_Fixed ||
534 +- irq->delivery_mode == dest_LowestPrio);
535 ++ return (irq->delivery_mode == APIC_DM_FIXED ||
536 ++ irq->delivery_mode == APIC_DM_LOWEST);
537 + }
538 +
539 + static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
540 +diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
541 +index 499578f7e6d7..70fc159ebe69 100644
542 +--- a/arch/x86/include/asm/unwind.h
543 ++++ b/arch/x86/include/asm/unwind.h
544 +@@ -19,7 +19,7 @@ struct unwind_state {
545 + #if defined(CONFIG_UNWINDER_ORC)
546 + bool signal, full_regs;
547 + unsigned long sp, bp, ip;
548 +- struct pt_regs *regs;
549 ++ struct pt_regs *regs, *prev_regs;
550 + #elif defined(CONFIG_UNWINDER_FRAME_POINTER)
551 + bool got_irq;
552 + unsigned long *bp, *orig_sp, ip;
553 +diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
554 +index e9cc182aa97e..80537dcbddef 100644
555 +--- a/arch/x86/kernel/unwind_orc.c
556 ++++ b/arch/x86/kernel/unwind_orc.c
557 +@@ -142,9 +142,6 @@ static struct orc_entry *orc_find(unsigned long ip)
558 + {
559 + static struct orc_entry *orc;
560 +
561 +- if (!orc_init)
562 +- return NULL;
563 +-
564 + if (ip == 0)
565 + return &null_orc_entry;
566 +
567 +@@ -381,9 +378,38 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr
568 + return true;
569 + }
570 +
571 ++/*
572 ++ * If state->regs is non-NULL, and points to a full pt_regs, just get the reg
573 ++ * value from state->regs.
574 ++ *
575 ++ * Otherwise, if state->regs just points to IRET regs, and the previous frame
576 ++ * had full regs, it's safe to get the value from the previous regs. This can
577 ++ * happen when early/late IRQ entry code gets interrupted by an NMI.
578 ++ */
579 ++static bool get_reg(struct unwind_state *state, unsigned int reg_off,
580 ++ unsigned long *val)
581 ++{
582 ++ unsigned int reg = reg_off/8;
583 ++
584 ++ if (!state->regs)
585 ++ return false;
586 ++
587 ++ if (state->full_regs) {
588 ++ *val = ((unsigned long *)state->regs)[reg];
589 ++ return true;
590 ++ }
591 ++
592 ++ if (state->prev_regs) {
593 ++ *val = ((unsigned long *)state->prev_regs)[reg];
594 ++ return true;
595 ++ }
596 ++
597 ++ return false;
598 ++}
599 ++
600 + bool unwind_next_frame(struct unwind_state *state)
601 + {
602 +- unsigned long ip_p, sp, orig_ip = state->ip, prev_sp = state->sp;
603 ++ unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp;
604 + enum stack_type prev_type = state->stack_info.type;
605 + struct orc_entry *orc;
606 + bool indirect = false;
607 +@@ -445,39 +471,35 @@ bool unwind_next_frame(struct unwind_state *state)
608 + break;
609 +
610 + case ORC_REG_R10:
611 +- if (!state->regs || !state->full_regs) {
612 ++ if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) {
613 + orc_warn("missing regs for base reg R10 at ip %pB\n",
614 + (void *)state->ip);
615 + goto err;
616 + }
617 +- sp = state->regs->r10;
618 + break;
619 +
620 + case ORC_REG_R13:
621 +- if (!state->regs || !state->full_regs) {
622 ++ if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) {
623 + orc_warn("missing regs for base reg R13 at ip %pB\n",
624 + (void *)state->ip);
625 + goto err;
626 + }
627 +- sp = state->regs->r13;
628 + break;
629 +
630 + case ORC_REG_DI:
631 +- if (!state->regs || !state->full_regs) {
632 ++ if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) {
633 + orc_warn("missing regs for base reg DI at ip %pB\n",
634 + (void *)state->ip);
635 + goto err;
636 + }
637 +- sp = state->regs->di;
638 + break;
639 +
640 + case ORC_REG_DX:
641 +- if (!state->regs || !state->full_regs) {
642 ++ if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) {
643 + orc_warn("missing regs for base reg DX at ip %pB\n",
644 + (void *)state->ip);
645 + goto err;
646 + }
647 +- sp = state->regs->dx;
648 + break;
649 +
650 + default:
651 +@@ -504,6 +526,7 @@ bool unwind_next_frame(struct unwind_state *state)
652 +
653 + state->sp = sp;
654 + state->regs = NULL;
655 ++ state->prev_regs = NULL;
656 + state->signal = false;
657 + break;
658 +
659 +@@ -515,6 +538,7 @@ bool unwind_next_frame(struct unwind_state *state)
660 + }
661 +
662 + state->regs = (struct pt_regs *)sp;
663 ++ state->prev_regs = NULL;
664 + state->full_regs = true;
665 + state->signal = true;
666 + break;
667 +@@ -526,6 +550,8 @@ bool unwind_next_frame(struct unwind_state *state)
668 + goto err;
669 + }
670 +
671 ++ if (state->full_regs)
672 ++ state->prev_regs = state->regs;
673 + state->regs = (void *)sp - IRET_FRAME_OFFSET;
674 + state->full_regs = false;
675 + state->signal = true;
676 +@@ -534,14 +560,14 @@ bool unwind_next_frame(struct unwind_state *state)
677 + default:
678 + orc_warn("unknown .orc_unwind entry type %d for ip %pB\n",
679 + orc->type, (void *)orig_ip);
680 +- break;
681 ++ goto err;
682 + }
683 +
684 + /* Find BP: */
685 + switch (orc->bp_reg) {
686 + case ORC_REG_UNDEFINED:
687 +- if (state->regs && state->full_regs)
688 +- state->bp = state->regs->bp;
689 ++ if (get_reg(state, offsetof(struct pt_regs, bp), &tmp))
690 ++ state->bp = tmp;
691 + break;
692 +
693 + case ORC_REG_PREV_SP:
694 +@@ -585,6 +611,9 @@ EXPORT_SYMBOL_GPL(unwind_next_frame);
695 + void __unwind_start(struct unwind_state *state, struct task_struct *task,
696 + struct pt_regs *regs, unsigned long *first_frame)
697 + {
698 ++ if (!orc_init)
699 ++ goto done;
700 ++
701 + memset(state, 0, sizeof(*state));
702 + state->task = task;
703 +
704 +@@ -651,7 +680,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
705 + /* Otherwise, skip ahead to the user-specified starting frame: */
706 + while (!unwind_done(state) &&
707 + (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
708 +- state->sp <= (unsigned long)first_frame))
709 ++ state->sp < (unsigned long)first_frame))
710 + unwind_next_frame(state);
711 +
712 + return;
713 +diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
714 +index 750ff0b29404..d057376bd3d3 100644
715 +--- a/arch/x86/kvm/ioapic.c
716 ++++ b/arch/x86/kvm/ioapic.c
717 +@@ -225,12 +225,12 @@ static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
718 + }
719 +
720 + /*
721 +- * AMD SVM AVIC accelerate EOI write and do not trap,
722 +- * in-kernel IOAPIC will not be able to receive the EOI.
723 +- * In this case, we do lazy update of the pending EOI when
724 +- * trying to set IOAPIC irq.
725 ++ * AMD SVM AVIC accelerate EOI write iff the interrupt is edge
726 ++ * triggered, in which case the in-kernel IOAPIC will not be able
727 ++ * to receive the EOI. In this case, we do a lazy update of the
728 ++ * pending EOI when trying to set IOAPIC irq.
729 + */
730 +- if (kvm_apicv_activated(ioapic->kvm))
731 ++ if (edge && kvm_apicv_activated(ioapic->kvm))
732 + ioapic_lazy_update_eoi(ioapic, irq);
733 +
734 + /*
735 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
736 +index 451377533bcb..c974c49221eb 100644
737 +--- a/arch/x86/kvm/svm.c
738 ++++ b/arch/x86/kvm/svm.c
739 +@@ -1886,7 +1886,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
740 + return NULL;
741 +
742 + /* Pin the user virtual address. */
743 +- npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
744 ++ npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
745 + if (npinned != npages) {
746 + pr_err("SEV: Failure locking %lu pages.\n", npages);
747 + goto err;
748 +diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
749 +index 861ae40e7144..99410f372c41 100644
750 +--- a/arch/x86/kvm/vmx/vmenter.S
751 ++++ b/arch/x86/kvm/vmx/vmenter.S
752 +@@ -86,6 +86,9 @@ SYM_FUNC_START(vmx_vmexit)
753 + /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
754 + FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
755 +
756 ++ /* Clear RFLAGS.CF and RFLAGS.ZF to preserve VM-Exit, i.e. !VM-Fail. */
757 ++ or $1, %_ASM_AX
758 ++
759 + pop %_ASM_AX
760 + .Lvmexit_skip_rsb:
761 + #endif
762 +diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
763 +index c4aedd00c1ba..7ab317e3184e 100644
764 +--- a/arch/x86/mm/pat/set_memory.c
765 ++++ b/arch/x86/mm/pat/set_memory.c
766 +@@ -42,7 +42,8 @@ struct cpa_data {
767 + unsigned long pfn;
768 + unsigned int flags;
769 + unsigned int force_split : 1,
770 +- force_static_prot : 1;
771 ++ force_static_prot : 1,
772 ++ force_flush_all : 1;
773 + struct page **pages;
774 + };
775 +
776 +@@ -352,10 +353,10 @@ static void cpa_flush(struct cpa_data *data, int cache)
777 + return;
778 + }
779 +
780 +- if (cpa->numpages <= tlb_single_page_flush_ceiling)
781 +- on_each_cpu(__cpa_flush_tlb, cpa, 1);
782 +- else
783 ++ if (cpa->force_flush_all || cpa->numpages > tlb_single_page_flush_ceiling)
784 + flush_tlb_all();
785 ++ else
786 ++ on_each_cpu(__cpa_flush_tlb, cpa, 1);
787 +
788 + if (!cache)
789 + return;
790 +@@ -1595,6 +1596,8 @@ static int cpa_process_alias(struct cpa_data *cpa)
791 + alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
792 + alias_cpa.curpage = 0;
793 +
794 ++ cpa->force_flush_all = 1;
795 ++
796 + ret = __change_page_attr_set_clr(&alias_cpa, 0);
797 + if (ret)
798 + return ret;
799 +@@ -1615,6 +1618,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
800 + alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
801 + alias_cpa.curpage = 0;
802 +
803 ++ cpa->force_flush_all = 1;
804 + /*
805 + * The high mapping range is imprecise, so ignore the
806 + * return value.
807 +diff --git a/block/blk-iocost.c b/block/blk-iocost.c
808 +index 2dc5dc54e257..d083f7704082 100644
809 +--- a/block/blk-iocost.c
810 ++++ b/block/blk-iocost.c
811 +@@ -469,7 +469,7 @@ struct ioc_gq {
812 + */
813 + atomic64_t vtime;
814 + atomic64_t done_vtime;
815 +- atomic64_t abs_vdebt;
816 ++ u64 abs_vdebt;
817 + u64 last_vtime;
818 +
819 + /*
820 +@@ -1145,7 +1145,7 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now)
821 + struct iocg_wake_ctx ctx = { .iocg = iocg };
822 + u64 margin_ns = (u64)(ioc->period_us *
823 + WAITQ_TIMER_MARGIN_PCT / 100) * NSEC_PER_USEC;
824 +- u64 abs_vdebt, vdebt, vshortage, expires, oexpires;
825 ++ u64 vdebt, vshortage, expires, oexpires;
826 + s64 vbudget;
827 + u32 hw_inuse;
828 +
829 +@@ -1155,18 +1155,15 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now)
830 + vbudget = now->vnow - atomic64_read(&iocg->vtime);
831 +
832 + /* pay off debt */
833 +- abs_vdebt = atomic64_read(&iocg->abs_vdebt);
834 +- vdebt = abs_cost_to_cost(abs_vdebt, hw_inuse);
835 ++ vdebt = abs_cost_to_cost(iocg->abs_vdebt, hw_inuse);
836 + if (vdebt && vbudget > 0) {
837 + u64 delta = min_t(u64, vbudget, vdebt);
838 + u64 abs_delta = min(cost_to_abs_cost(delta, hw_inuse),
839 +- abs_vdebt);
840 ++ iocg->abs_vdebt);
841 +
842 + atomic64_add(delta, &iocg->vtime);
843 + atomic64_add(delta, &iocg->done_vtime);
844 +- atomic64_sub(abs_delta, &iocg->abs_vdebt);
845 +- if (WARN_ON_ONCE(atomic64_read(&iocg->abs_vdebt) < 0))
846 +- atomic64_set(&iocg->abs_vdebt, 0);
847 ++ iocg->abs_vdebt -= abs_delta;
848 + }
849 +
850 + /*
851 +@@ -1222,12 +1219,18 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
852 + u64 expires, oexpires;
853 + u32 hw_inuse;
854 +
855 ++ lockdep_assert_held(&iocg->waitq.lock);
856 ++
857 + /* debt-adjust vtime */
858 + current_hweight(iocg, NULL, &hw_inuse);
859 +- vtime += abs_cost_to_cost(atomic64_read(&iocg->abs_vdebt), hw_inuse);
860 ++ vtime += abs_cost_to_cost(iocg->abs_vdebt, hw_inuse);
861 +
862 +- /* clear or maintain depending on the overage */
863 +- if (time_before_eq64(vtime, now->vnow)) {
864 ++ /*
865 ++ * Clear or maintain depending on the overage. Non-zero vdebt is what
866 ++ * guarantees that @iocg is online and future iocg_kick_delay() will
867 ++ * clear use_delay. Don't leave it on when there's no vdebt.
868 ++ */
869 ++ if (!iocg->abs_vdebt || time_before_eq64(vtime, now->vnow)) {
870 + blkcg_clear_delay(blkg);
871 + return false;
872 + }
873 +@@ -1261,9 +1264,12 @@ static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer)
874 + {
875 + struct ioc_gq *iocg = container_of(timer, struct ioc_gq, delay_timer);
876 + struct ioc_now now;
877 ++ unsigned long flags;
878 +
879 ++ spin_lock_irqsave(&iocg->waitq.lock, flags);
880 + ioc_now(iocg->ioc, &now);
881 + iocg_kick_delay(iocg, &now, 0);
882 ++ spin_unlock_irqrestore(&iocg->waitq.lock, flags);
883 +
884 + return HRTIMER_NORESTART;
885 + }
886 +@@ -1371,14 +1377,13 @@ static void ioc_timer_fn(struct timer_list *timer)
887 + * should have woken up in the last period and expire idle iocgs.
888 + */
889 + list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
890 +- if (!waitqueue_active(&iocg->waitq) &&
891 +- !atomic64_read(&iocg->abs_vdebt) && !iocg_is_idle(iocg))
892 ++ if (!waitqueue_active(&iocg->waitq) && iocg->abs_vdebt &&
893 ++ !iocg_is_idle(iocg))
894 + continue;
895 +
896 + spin_lock(&iocg->waitq.lock);
897 +
898 +- if (waitqueue_active(&iocg->waitq) ||
899 +- atomic64_read(&iocg->abs_vdebt)) {
900 ++ if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt) {
901 + /* might be oversleeping vtime / hweight changes, kick */
902 + iocg_kick_waitq(iocg, &now);
903 + iocg_kick_delay(iocg, &now, 0);
904 +@@ -1721,28 +1726,49 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
905 + * tests are racy but the races aren't systemic - we only miss once
906 + * in a while which is fine.
907 + */
908 +- if (!waitqueue_active(&iocg->waitq) &&
909 +- !atomic64_read(&iocg->abs_vdebt) &&
910 ++ if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
911 + time_before_eq64(vtime + cost, now.vnow)) {
912 + iocg_commit_bio(iocg, bio, cost);
913 + return;
914 + }
915 +
916 + /*
917 +- * We're over budget. If @bio has to be issued regardless,
918 +- * remember the abs_cost instead of advancing vtime.
919 +- * iocg_kick_waitq() will pay off the debt before waking more IOs.
920 ++ * We activated above but w/o any synchronization. Deactivation is
921 ++ * synchronized with waitq.lock and we won't get deactivated as long
922 ++ * as we're waiting or has debt, so we're good if we're activated
923 ++ * here. In the unlikely case that we aren't, just issue the IO.
924 ++ */
925 ++ spin_lock_irq(&iocg->waitq.lock);
926 ++
927 ++ if (unlikely(list_empty(&iocg->active_list))) {
928 ++ spin_unlock_irq(&iocg->waitq.lock);
929 ++ iocg_commit_bio(iocg, bio, cost);
930 ++ return;
931 ++ }
932 ++
933 ++ /*
934 ++ * We're over budget. If @bio has to be issued regardless, remember
935 ++ * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
936 ++ * off the debt before waking more IOs.
937 ++ *
938 + * This way, the debt is continuously paid off each period with the
939 +- * actual budget available to the cgroup. If we just wound vtime,
940 +- * we would incorrectly use the current hw_inuse for the entire
941 +- * amount which, for example, can lead to the cgroup staying
942 +- * blocked for a long time even with substantially raised hw_inuse.
943 ++ * actual budget available to the cgroup. If we just wound vtime, we
944 ++ * would incorrectly use the current hw_inuse for the entire amount
945 ++ * which, for example, can lead to the cgroup staying blocked for a
946 ++ * long time even with substantially raised hw_inuse.
947 ++ *
948 ++ * An iocg with vdebt should stay online so that the timer can keep
949 ++ * deducting its vdebt and [de]activate use_delay mechanism
950 ++ * accordingly. We don't want to race against the timer trying to
951 ++ * clear them and leave @iocg inactive w/ dangling use_delay heavily
952 ++ * penalizing the cgroup and its descendants.
953 + */
954 + if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
955 +- atomic64_add(abs_cost, &iocg->abs_vdebt);
956 ++ iocg->abs_vdebt += abs_cost;
957 + if (iocg_kick_delay(iocg, &now, cost))
958 + blkcg_schedule_throttle(rqos->q,
959 + (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
960 ++ spin_unlock_irq(&iocg->waitq.lock);
961 + return;
962 + }
963 +
964 +@@ -1759,20 +1785,6 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
965 + * All waiters are on iocg->waitq and the wait states are
966 + * synchronized using waitq.lock.
967 + */
968 +- spin_lock_irq(&iocg->waitq.lock);
969 +-
970 +- /*
971 +- * We activated above but w/o any synchronization. Deactivation is
972 +- * synchronized with waitq.lock and we won't get deactivated as
973 +- * long as we're waiting, so we're good if we're activated here.
974 +- * In the unlikely case that we are deactivated, just issue the IO.
975 +- */
976 +- if (unlikely(list_empty(&iocg->active_list))) {
977 +- spin_unlock_irq(&iocg->waitq.lock);
978 +- iocg_commit_bio(iocg, bio, cost);
979 +- return;
980 +- }
981 +-
982 + init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
983 + wait.wait.private = current;
984 + wait.bio = bio;
985 +@@ -1804,6 +1816,7 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
986 + struct ioc_now now;
987 + u32 hw_inuse;
988 + u64 abs_cost, cost;
989 ++ unsigned long flags;
990 +
991 + /* bypass if disabled or for root cgroup */
992 + if (!ioc->enabled || !iocg->level)
993 +@@ -1823,15 +1836,28 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
994 + iocg->cursor = bio_end;
995 +
996 + /*
997 +- * Charge if there's enough vtime budget and the existing request
998 +- * has cost assigned. Otherwise, account it as debt. See debt
999 +- * handling in ioc_rqos_throttle() for details.
1000 ++ * Charge if there's enough vtime budget and the existing request has
1001 ++ * cost assigned.
1002 + */
1003 + if (rq->bio && rq->bio->bi_iocost_cost &&
1004 +- time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow))
1005 ++ time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
1006 + iocg_commit_bio(iocg, bio, cost);
1007 +- else
1008 +- atomic64_add(abs_cost, &iocg->abs_vdebt);
1009 ++ return;
1010 ++ }
1011 ++
1012 ++ /*
1013 ++ * Otherwise, account it as debt if @iocg is online, which it should
1014 ++ * be for the vast majority of cases. See debt handling in
1015 ++ * ioc_rqos_throttle() for details.
1016 ++ */
1017 ++ spin_lock_irqsave(&iocg->waitq.lock, flags);
1018 ++ if (likely(!list_empty(&iocg->active_list))) {
1019 ++ iocg->abs_vdebt += abs_cost;
1020 ++ iocg_kick_delay(iocg, &now, cost);
1021 ++ } else {
1022 ++ iocg_commit_bio(iocg, bio, cost);
1023 ++ }
1024 ++ spin_unlock_irqrestore(&iocg->waitq.lock, flags);
1025 + }
1026 +
1027 + static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
1028 +@@ -2001,7 +2027,6 @@ static void ioc_pd_init(struct blkg_policy_data *pd)
1029 + iocg->ioc = ioc;
1030 + atomic64_set(&iocg->vtime, now.vnow);
1031 + atomic64_set(&iocg->done_vtime, now.vnow);
1032 +- atomic64_set(&iocg->abs_vdebt, 0);
1033 + atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
1034 + INIT_LIST_HEAD(&iocg->active_list);
1035 + iocg->hweight_active = HWEIGHT_WHOLE;
1036 +diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
1037 +index fe1523664816..8558b629880b 100644
1038 +--- a/drivers/amba/bus.c
1039 ++++ b/drivers/amba/bus.c
1040 +@@ -645,6 +645,7 @@ static void amba_device_initialize(struct amba_device *dev, const char *name)
1041 + dev->dev.release = amba_device_release;
1042 + dev->dev.bus = &amba_bustype;
1043 + dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
1044 ++ dev->dev.dma_parms = &dev->dma_parms;
1045 + dev->res.name = dev_name(&dev->dev);
1046 + }
1047 +
1048 +diff --git a/drivers/base/platform.c b/drivers/base/platform.c
1049 +index b5ce7b085795..c81b68d5d66d 100644
1050 +--- a/drivers/base/platform.c
1051 ++++ b/drivers/base/platform.c
1052 +@@ -361,6 +361,8 @@ struct platform_object {
1053 + */
1054 + static void setup_pdev_dma_masks(struct platform_device *pdev)
1055 + {
1056 ++ pdev->dev.dma_parms = &pdev->dma_parms;
1057 ++
1058 + if (!pdev->dev.coherent_dma_mask)
1059 + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
1060 + if (!pdev->dev.dma_mask) {
1061 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1062 +index f184cdca938d..5fcbacddb9b0 100644
1063 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1064 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1065 +@@ -3325,15 +3325,12 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
1066 + }
1067 + }
1068 +
1069 +- amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
1070 +- amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
1071 +-
1072 +- amdgpu_amdkfd_suspend(adev);
1073 +-
1074 + amdgpu_ras_suspend(adev);
1075 +
1076 + r = amdgpu_device_ip_suspend_phase1(adev);
1077 +
1078 ++ amdgpu_amdkfd_suspend(adev);
1079 ++
1080 + /* evict vram memory */
1081 + amdgpu_bo_evict_vram(adev);
1082 +
1083 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
1084 +index e310d67c399a..1b0bca9587d0 100644
1085 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
1086 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
1087 +@@ -3034,25 +3034,32 @@ validate_out:
1088 + return out;
1089 + }
1090 +
1091 +-
1092 +-bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
1093 +- bool fast_validate)
1094 ++/*
1095 ++ * This must be noinline to ensure anything that deals with FP registers
1096 ++ * is contained within this call; previously our compiling with hard-float
1097 ++ * would result in fp instructions being emitted outside of the boundaries
1098 ++ * of the DC_FP_START/END macros, which makes sense as the compiler has no
1099 ++ * idea about what is wrapped and what is not
1100 ++ *
1101 ++ * This is largely just a workaround to avoid breakage introduced with 5.6,
1102 ++ * ideally all fp-using code should be moved into its own file, only that
1103 ++ * should be compiled with hard-float, and all code exported from there
1104 ++ * should be strictly wrapped with DC_FP_START/END
1105 ++ */
1106 ++static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
1107 ++ struct dc_state *context, bool fast_validate)
1108 + {
1109 + bool voltage_supported = false;
1110 + bool full_pstate_supported = false;
1111 + bool dummy_pstate_supported = false;
1112 + double p_state_latency_us;
1113 +
1114 +- DC_FP_START();
1115 + p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
1116 + context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
1117 + dc->debug.disable_dram_clock_change_vactive_support;
1118 +
1119 + if (fast_validate) {
1120 +- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, true);
1121 +-
1122 +- DC_FP_END();
1123 +- return voltage_supported;
1124 ++ return dcn20_validate_bandwidth_internal(dc, context, true);
1125 + }
1126 +
1127 + // Best case, we support full UCLK switch latency
1128 +@@ -3081,7 +3088,15 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
1129 +
1130 + restore_dml_state:
1131 + context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
1132 ++ return voltage_supported;
1133 ++}
1134 +
1135 ++bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
1136 ++ bool fast_validate)
1137 ++{
1138 ++ bool voltage_supported = false;
1139 ++ DC_FP_START();
1140 ++ voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
1141 + DC_FP_END();
1142 + return voltage_supported;
1143 + }
1144 +diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
1145 +index 6d47ef7b148c..bcba2f024842 100644
1146 +--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
1147 ++++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
1148 +@@ -843,6 +843,7 @@ static const struct of_device_id ingenic_drm_of_match[] = {
1149 + { .compatible = "ingenic,jz4770-lcd", .data = &jz4770_soc_info },
1150 + { /* sentinel */ },
1151 + };
1152 ++MODULE_DEVICE_TABLE(of, ingenic_drm_of_match);
1153 +
1154 + static struct platform_driver ingenic_drm_driver = {
1155 + .driver = {
1156 +diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
1157 +index c7bc9db5b192..17a638f15082 100644
1158 +--- a/drivers/hid/usbhid/hid-core.c
1159 ++++ b/drivers/hid/usbhid/hid-core.c
1160 +@@ -682,16 +682,21 @@ static int usbhid_open(struct hid_device *hid)
1161 + struct usbhid_device *usbhid = hid->driver_data;
1162 + int res;
1163 +
1164 ++ mutex_lock(&usbhid->mutex);
1165 ++
1166 + set_bit(HID_OPENED, &usbhid->iofl);
1167 +
1168 +- if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
1169 +- return 0;
1170 ++ if (hid->quirks & HID_QUIRK_ALWAYS_POLL) {
1171 ++ res = 0;
1172 ++ goto Done;
1173 ++ }
1174 +
1175 + res = usb_autopm_get_interface(usbhid->intf);
1176 + /* the device must be awake to reliably request remote wakeup */
1177 + if (res < 0) {
1178 + clear_bit(HID_OPENED, &usbhid->iofl);
1179 +- return -EIO;
1180 ++ res = -EIO;
1181 ++ goto Done;
1182 + }
1183 +
1184 + usbhid->intf->needs_remote_wakeup = 1;
1185 +@@ -725,6 +730,9 @@ static int usbhid_open(struct hid_device *hid)
1186 + msleep(50);
1187 +
1188 + clear_bit(HID_RESUME_RUNNING, &usbhid->iofl);
1189 ++
1190 ++ Done:
1191 ++ mutex_unlock(&usbhid->mutex);
1192 + return res;
1193 + }
1194 +
1195 +@@ -732,6 +740,8 @@ static void usbhid_close(struct hid_device *hid)
1196 + {
1197 + struct usbhid_device *usbhid = hid->driver_data;
1198 +
1199 ++ mutex_lock(&usbhid->mutex);
1200 ++
1201 + /*
1202 + * Make sure we don't restart data acquisition due to
1203 + * a resumption we no longer care about by avoiding racing
1204 +@@ -743,12 +753,13 @@ static void usbhid_close(struct hid_device *hid)
1205 + clear_bit(HID_IN_POLLING, &usbhid->iofl);
1206 + spin_unlock_irq(&usbhid->lock);
1207 +
1208 +- if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
1209 +- return;
1210 ++ if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) {
1211 ++ hid_cancel_delayed_stuff(usbhid);
1212 ++ usb_kill_urb(usbhid->urbin);
1213 ++ usbhid->intf->needs_remote_wakeup = 0;
1214 ++ }
1215 +
1216 +- hid_cancel_delayed_stuff(usbhid);
1217 +- usb_kill_urb(usbhid->urbin);
1218 +- usbhid->intf->needs_remote_wakeup = 0;
1219 ++ mutex_unlock(&usbhid->mutex);
1220 + }
1221 +
1222 + /*
1223 +@@ -1057,6 +1068,8 @@ static int usbhid_start(struct hid_device *hid)
1224 + unsigned int n, insize = 0;
1225 + int ret;
1226 +
1227 ++ mutex_lock(&usbhid->mutex);
1228 ++
1229 + clear_bit(HID_DISCONNECTED, &usbhid->iofl);
1230 +
1231 + usbhid->bufsize = HID_MIN_BUFFER_SIZE;
1232 +@@ -1177,6 +1190,8 @@ static int usbhid_start(struct hid_device *hid)
1233 + usbhid_set_leds(hid);
1234 + device_set_wakeup_enable(&dev->dev, 1);
1235 + }
1236 ++
1237 ++ mutex_unlock(&usbhid->mutex);
1238 + return 0;
1239 +
1240 + fail:
1241 +@@ -1187,6 +1202,7 @@ fail:
1242 + usbhid->urbout = NULL;
1243 + usbhid->urbctrl = NULL;
1244 + hid_free_buffers(dev, hid);
1245 ++ mutex_unlock(&usbhid->mutex);
1246 + return ret;
1247 + }
1248 +
1249 +@@ -1202,6 +1218,8 @@ static void usbhid_stop(struct hid_device *hid)
1250 + usbhid->intf->needs_remote_wakeup = 0;
1251 + }
1252 +
1253 ++ mutex_lock(&usbhid->mutex);
1254 ++
1255 + clear_bit(HID_STARTED, &usbhid->iofl);
1256 + spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */
1257 + set_bit(HID_DISCONNECTED, &usbhid->iofl);
1258 +@@ -1222,6 +1240,8 @@ static void usbhid_stop(struct hid_device *hid)
1259 + usbhid->urbout = NULL;
1260 +
1261 + hid_free_buffers(hid_to_usb_dev(hid), hid);
1262 ++
1263 ++ mutex_unlock(&usbhid->mutex);
1264 + }
1265 +
1266 + static int usbhid_power(struct hid_device *hid, int lvl)
1267 +@@ -1382,6 +1402,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
1268 + INIT_WORK(&usbhid->reset_work, hid_reset);
1269 + timer_setup(&usbhid->io_retry, hid_retry_timeout, 0);
1270 + spin_lock_init(&usbhid->lock);
1271 ++ mutex_init(&usbhid->mutex);
1272 +
1273 + ret = hid_add_device(hid);
1274 + if (ret) {
1275 +diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
1276 +index 8620408bd7af..75fe85d3d27a 100644
1277 +--- a/drivers/hid/usbhid/usbhid.h
1278 ++++ b/drivers/hid/usbhid/usbhid.h
1279 +@@ -80,6 +80,7 @@ struct usbhid_device {
1280 + dma_addr_t outbuf_dma; /* Output buffer dma */
1281 + unsigned long last_out; /* record of last output for timeouts */
1282 +
1283 ++ struct mutex mutex; /* start/stop/open/close */
1284 + spinlock_t lock; /* fifo spinlock */
1285 + unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */
1286 + struct timer_list io_retry; /* Retry timer */
1287 +diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
1288 +index 5ded94b7bf68..cd71e7133944 100644
1289 +--- a/drivers/hid/wacom_sys.c
1290 ++++ b/drivers/hid/wacom_sys.c
1291 +@@ -319,9 +319,11 @@ static void wacom_feature_mapping(struct hid_device *hdev,
1292 + data[0] = field->report->id;
1293 + ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
1294 + data, n, WAC_CMD_RETRIES);
1295 +- if (ret == n) {
1296 ++ if (ret == n && features->type == HID_GENERIC) {
1297 + ret = hid_report_raw_event(hdev,
1298 + HID_FEATURE_REPORT, data, n, 0);
1299 ++ } else if (ret == 2 && features->type != HID_GENERIC) {
1300 ++ features->touch_max = data[1];
1301 + } else {
1302 + features->touch_max = 16;
1303 + hid_warn(hdev, "wacom_feature_mapping: "
1304 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
1305 +index d99a9d407671..1c96809b51c9 100644
1306 +--- a/drivers/hid/wacom_wac.c
1307 ++++ b/drivers/hid/wacom_wac.c
1308 +@@ -1427,11 +1427,13 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
1309 + {
1310 + struct input_dev *pad_input = wacom->pad_input;
1311 + unsigned char *data = wacom->data;
1312 ++ int nbuttons = wacom->features.numbered_buttons;
1313 +
1314 +- int buttons = data[282] | ((data[281] & 0x40) << 2);
1315 ++ int expresskeys = data[282];
1316 ++ int center = (data[281] & 0x40) >> 6;
1317 + int ring = data[285] & 0x7F;
1318 + bool ringstatus = data[285] & 0x80;
1319 +- bool prox = buttons || ringstatus;
1320 ++ bool prox = expresskeys || center || ringstatus;
1321 +
1322 + /* Fix touchring data: userspace expects 0 at left and increasing clockwise */
1323 + ring = 71 - ring;
1324 +@@ -1439,7 +1441,8 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
1325 + if (ring > 71)
1326 + ring -= 72;
1327 +
1328 +- wacom_report_numbered_buttons(pad_input, 9, buttons);
1329 ++ wacom_report_numbered_buttons(pad_input, nbuttons,
1330 ++ expresskeys | (center << (nbuttons - 1)));
1331 +
1332 + input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0);
1333 +
1334 +@@ -2637,9 +2640,25 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
1335 + case HID_DG_TIPSWITCH:
1336 + hid_data->last_slot_field = equivalent_usage;
1337 + break;
1338 ++ case HID_DG_CONTACTCOUNT:
1339 ++ hid_data->cc_report = report->id;
1340 ++ hid_data->cc_index = i;
1341 ++ hid_data->cc_value_index = j;
1342 ++ break;
1343 + }
1344 + }
1345 + }
1346 ++
1347 ++ if (hid_data->cc_report != 0 &&
1348 ++ hid_data->cc_index >= 0) {
1349 ++ struct hid_field *field = report->field[hid_data->cc_index];
1350 ++ int value = field->value[hid_data->cc_value_index];
1351 ++ if (value)
1352 ++ hid_data->num_expected = value;
1353 ++ }
1354 ++ else {
1355 ++ hid_data->num_expected = wacom_wac->features.touch_max;
1356 ++ }
1357 + }
1358 +
1359 + static void wacom_wac_finger_report(struct hid_device *hdev,
1360 +@@ -2649,7 +2668,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev,
1361 + struct wacom_wac *wacom_wac = &wacom->wacom_wac;
1362 + struct input_dev *input = wacom_wac->touch_input;
1363 + unsigned touch_max = wacom_wac->features.touch_max;
1364 +- struct hid_data *hid_data = &wacom_wac->hid_data;
1365 +
1366 + /* If more packets of data are expected, give us a chance to
1367 + * process them rather than immediately syncing a partial
1368 +@@ -2663,7 +2681,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev,
1369 +
1370 + input_sync(input);
1371 + wacom_wac->hid_data.num_received = 0;
1372 +- hid_data->num_expected = 0;
1373 +
1374 + /* keep touch state for pen event */
1375 + wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(wacom_wac);
1376 +@@ -2738,73 +2755,12 @@ static void wacom_report_events(struct hid_device *hdev,
1377 + }
1378 + }
1379 +
1380 +-static void wacom_set_num_expected(struct hid_device *hdev,
1381 +- struct hid_report *report,
1382 +- int collection_index,
1383 +- struct hid_field *field,
1384 +- int field_index)
1385 +-{
1386 +- struct wacom *wacom = hid_get_drvdata(hdev);
1387 +- struct wacom_wac *wacom_wac = &wacom->wacom_wac;
1388 +- struct hid_data *hid_data = &wacom_wac->hid_data;
1389 +- unsigned int original_collection_level =
1390 +- hdev->collection[collection_index].level;
1391 +- bool end_collection = false;
1392 +- int i;
1393 +-
1394 +- if (hid_data->num_expected)
1395 +- return;
1396 +-
1397 +- // find the contact count value for this segment
1398 +- for (i = field_index; i < report->maxfield && !end_collection; i++) {
1399 +- struct hid_field *field = report->field[i];
1400 +- unsigned int field_level =
1401 +- hdev->collection[field->usage[0].collection_index].level;
1402 +- unsigned int j;
1403 +-
1404 +- if (field_level != original_collection_level)
1405 +- continue;
1406 +-
1407 +- for (j = 0; j < field->maxusage; j++) {
1408 +- struct hid_usage *usage = &field->usage[j];
1409 +-
1410 +- if (usage->collection_index != collection_index) {
1411 +- end_collection = true;
1412 +- break;
1413 +- }
1414 +- if (wacom_equivalent_usage(usage->hid) == HID_DG_CONTACTCOUNT) {
1415 +- hid_data->cc_report = report->id;
1416 +- hid_data->cc_index = i;
1417 +- hid_data->cc_value_index = j;
1418 +-
1419 +- if (hid_data->cc_report != 0 &&
1420 +- hid_data->cc_index >= 0) {
1421 +-
1422 +- struct hid_field *field =
1423 +- report->field[hid_data->cc_index];
1424 +- int value =
1425 +- field->value[hid_data->cc_value_index];
1426 +-
1427 +- if (value)
1428 +- hid_data->num_expected = value;
1429 +- }
1430 +- }
1431 +- }
1432 +- }
1433 +-
1434 +- if (hid_data->cc_report == 0 || hid_data->cc_index < 0)
1435 +- hid_data->num_expected = wacom_wac->features.touch_max;
1436 +-}
1437 +-
1438 + static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *report,
1439 + int collection_index, struct hid_field *field,
1440 + int field_index)
1441 + {
1442 + struct wacom *wacom = hid_get_drvdata(hdev);
1443 +
1444 +- if (WACOM_FINGER_FIELD(field))
1445 +- wacom_set_num_expected(hdev, report, collection_index, field,
1446 +- field_index);
1447 + wacom_report_events(hdev, report, collection_index, field_index);
1448 +
1449 + /*
1450 +diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
1451 +index 5eed75cd121f..e5dcbe80cf85 100644
1452 +--- a/drivers/iommu/virtio-iommu.c
1453 ++++ b/drivers/iommu/virtio-iommu.c
1454 +@@ -453,7 +453,7 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
1455 + if (!region)
1456 + return -ENOMEM;
1457 +
1458 +- list_add(&vdev->resv_regions, &region->list);
1459 ++ list_add(&region->list, &vdev->resv_regions);
1460 + return 0;
1461 + }
1462 +
1463 +diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
1464 +index 668418d7ea77..f620442addf5 100644
1465 +--- a/drivers/misc/mei/hw-me.c
1466 ++++ b/drivers/misc/mei/hw-me.c
1467 +@@ -1465,6 +1465,13 @@ static const struct mei_cfg mei_me_pch12_cfg = {
1468 + MEI_CFG_DMA_128,
1469 + };
1470 +
1471 ++/* LBG with quirk for SPS Firmware exclusion */
1472 ++static const struct mei_cfg mei_me_pch12_sps_cfg = {
1473 ++ MEI_CFG_PCH8_HFS,
1474 ++ MEI_CFG_FW_VER_SUPP,
1475 ++ MEI_CFG_FW_SPS,
1476 ++};
1477 ++
1478 + /* Tiger Lake and newer devices */
1479 + static const struct mei_cfg mei_me_pch15_cfg = {
1480 + MEI_CFG_PCH8_HFS,
1481 +@@ -1487,6 +1494,7 @@ static const struct mei_cfg *const mei_cfg_list[] = {
1482 + [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
1483 + [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
1484 + [MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
1485 ++ [MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg,
1486 + [MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
1487 + };
1488 +
1489 +diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
1490 +index 4a8d4dcd5a91..b6b94e211464 100644
1491 +--- a/drivers/misc/mei/hw-me.h
1492 ++++ b/drivers/misc/mei/hw-me.h
1493 +@@ -80,6 +80,9 @@ struct mei_me_hw {
1494 + * servers platforms with quirk for
1495 + * SPS firmware exclusion.
1496 + * @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer
1497 ++ * @MEI_ME_PCH12_SPS_CFG: Platform Controller Hub Gen12 and newer
1498 ++ * servers platforms with quirk for
1499 ++ * SPS firmware exclusion.
1500 + * @MEI_ME_PCH15_CFG: Platform Controller Hub Gen15 and newer
1501 + * @MEI_ME_NUM_CFG: Upper Sentinel.
1502 + */
1503 +@@ -93,6 +96,7 @@ enum mei_cfg_idx {
1504 + MEI_ME_PCH8_CFG,
1505 + MEI_ME_PCH8_SPS_CFG,
1506 + MEI_ME_PCH12_CFG,
1507 ++ MEI_ME_PCH12_SPS_CFG,
1508 + MEI_ME_PCH15_CFG,
1509 + MEI_ME_NUM_CFG,
1510 + };
1511 +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
1512 +index 2eb7b2968e5d..0dd2922aa06d 100644
1513 +--- a/drivers/misc/mei/pci-me.c
1514 ++++ b/drivers/misc/mei/pci-me.c
1515 +@@ -79,7 +79,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
1516 + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
1517 + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
1518 + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
1519 +- {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)},
1520 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_CFG)},
1521 +
1522 + {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
1523 + {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
1524 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1525 +index d28b406a26b1..d0ddd08c4112 100644
1526 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1527 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1528 +@@ -6662,7 +6662,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
1529 + int rc;
1530 +
1531 + if (!mem_size)
1532 +- return 0;
1533 ++ return -EINVAL;
1534 +
1535 + ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
1536 + if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
1537 +@@ -9794,6 +9794,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
1538 + netdev_features_t features)
1539 + {
1540 + struct bnxt *bp = netdev_priv(dev);
1541 ++ netdev_features_t vlan_features;
1542 +
1543 + if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
1544 + features &= ~NETIF_F_NTUPLE;
1545 +@@ -9810,12 +9811,14 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
1546 + /* Both CTAG and STAG VLAN accelaration on the RX side have to be
1547 + * turned on or off together.
1548 + */
1549 +- if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
1550 +- (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
1551 ++ vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX |
1552 ++ NETIF_F_HW_VLAN_STAG_RX);
1553 ++ if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX |
1554 ++ NETIF_F_HW_VLAN_STAG_RX)) {
1555 + if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
1556 + features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
1557 + NETIF_F_HW_VLAN_STAG_RX);
1558 +- else
1559 ++ else if (vlan_features)
1560 + features |= NETIF_F_HW_VLAN_CTAG_RX |
1561 + NETIF_F_HW_VLAN_STAG_RX;
1562 + }
1563 +@@ -12173,12 +12176,15 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
1564 + bnxt_ulp_start(bp, err);
1565 + }
1566 +
1567 +- if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
1568 +- dev_close(netdev);
1569 ++ if (result != PCI_ERS_RESULT_RECOVERED) {
1570 ++ if (netif_running(netdev))
1571 ++ dev_close(netdev);
1572 ++ pci_disable_device(pdev);
1573 ++ }
1574 +
1575 + rtnl_unlock();
1576 +
1577 +- return PCI_ERS_RESULT_RECOVERED;
1578 ++ return result;
1579 + }
1580 +
1581 + /**
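The bnxt_fix_features() hunk above keeps the CTAG and STAG RX acceleration bits in lockstep and -- this is the actual fix -- only re-enables the pair when at least one of them was requested, instead of unconditionally. A small sketch of that normalization with plain bitmasks follows; FLAG_CTAG_RX/FLAG_STAG_RX are made-up stand-ins for the NETIF_F_* constants.

/* "Both VLAN RX offload bits on, or both off" normalization sketch. */
#include <stdio.h>
#include <stdint.h>

#define FLAG_CTAG_RX (1u << 0)
#define FLAG_STAG_RX (1u << 1)
#define VLAN_RX_BOTH (FLAG_CTAG_RX | FLAG_STAG_RX)

static uint32_t fix_vlan_features(uint32_t requested, uint32_t current_features)
{
	uint32_t vlan = requested & VLAN_RX_BOTH;

	if (vlan != VLAN_RX_BOTH) {
		if (current_features & FLAG_CTAG_RX)
			requested &= ~VLAN_RX_BOTH;   /* turn both off */
		else if (vlan)
			requested |= VLAN_RX_BOTH;    /* one requested: turn both on */
		/* neither requested: leave both off (the added "else if") */
	}
	return requested;
}

int main(void)
{
	printf("%#x\n", fix_vlan_features(FLAG_CTAG_RX, 0));  /* 0x3 */
	printf("%#x\n", fix_vlan_features(0, FLAG_CTAG_RX));  /* 0   */
	printf("%#x\n", fix_vlan_features(0, 0));              /* 0   */
	return 0;
}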
1582 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
1583 +index 63b170658532..ef0268649822 100644
1584 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
1585 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
1586 +@@ -1064,7 +1064,6 @@ struct bnxt_vf_info {
1587 + #define BNXT_VF_LINK_FORCED 0x4
1588 + #define BNXT_VF_LINK_UP 0x8
1589 + #define BNXT_VF_TRUST 0x10
1590 +- u32 func_flags; /* func cfg flags */
1591 + u32 min_tx_rate;
1592 + u32 max_tx_rate;
1593 + void *hwrm_cmd_req_addr;
1594 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
1595 +index 95f893f2a74d..d5c8bd49383a 100644
1596 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
1597 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
1598 +@@ -43,7 +43,7 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
1599 + #define BNXT_NVM_CFG_VER_BITS 24
1600 + #define BNXT_NVM_CFG_VER_BYTES 4
1601 +
1602 +-#define BNXT_MSIX_VEC_MAX 1280
1603 ++#define BNXT_MSIX_VEC_MAX 512
1604 + #define BNXT_MSIX_VEC_MIN_MAX 128
1605 +
1606 + enum bnxt_nvm_dir_type {
1607 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
1608 +index 2aba1e02a8f4..1259d135c9cc 100644
1609 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
1610 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
1611 +@@ -85,11 +85,10 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
1612 + if (old_setting == setting)
1613 + return 0;
1614 +
1615 +- func_flags = vf->func_flags;
1616 + if (setting)
1617 +- func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
1618 ++ func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
1619 + else
1620 +- func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
1621 ++ func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
1622 + /*TODO: if the driver supports VLAN filter on guest VLAN,
1623 + * the spoof check should also include vlan anti-spoofing
1624 + */
1625 +@@ -98,7 +97,6 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
1626 + req.flags = cpu_to_le32(func_flags);
1627 + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1628 + if (!rc) {
1629 +- vf->func_flags = func_flags;
1630 + if (setting)
1631 + vf->flags |= BNXT_VF_SPOOFCHK;
1632 + else
1633 +@@ -230,7 +228,6 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
1634 + memcpy(vf->mac_addr, mac, ETH_ALEN);
1635 + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
1636 + req.fid = cpu_to_le16(vf->fw_fid);
1637 +- req.flags = cpu_to_le32(vf->func_flags);
1638 + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
1639 + memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
1640 + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1641 +@@ -268,7 +265,6 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
1642 +
1643 + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
1644 + req.fid = cpu_to_le16(vf->fw_fid);
1645 +- req.flags = cpu_to_le32(vf->func_flags);
1646 + req.dflt_vlan = cpu_to_le16(vlan_tag);
1647 + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
1648 + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1649 +@@ -307,7 +303,6 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
1650 + return 0;
1651 + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
1652 + req.fid = cpu_to_le16(vf->fw_fid);
1653 +- req.flags = cpu_to_le32(vf->func_flags);
1654 + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
1655 + req.max_bw = cpu_to_le32(max_tx_rate);
1656 + req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
1657 +@@ -479,7 +474,6 @@ static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
1658 + vf = &bp->pf.vf[vf_id];
1659 + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
1660 + req.fid = cpu_to_le16(vf->fw_fid);
1661 +- req.flags = cpu_to_le32(vf->func_flags);
1662 +
1663 + if (is_valid_ether_addr(vf->mac_addr)) {
1664 + req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
1665 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
1666 +index b3a51935e8e0..f42382c2ecd0 100644
1667 +--- a/drivers/net/ethernet/cadence/macb_main.c
1668 ++++ b/drivers/net/ethernet/cadence/macb_main.c
1669 +@@ -334,8 +334,10 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1670 + int status;
1671 +
1672 + status = pm_runtime_get_sync(&bp->pdev->dev);
1673 +- if (status < 0)
1674 ++ if (status < 0) {
1675 ++ pm_runtime_put_noidle(&bp->pdev->dev);
1676 + goto mdio_pm_exit;
1677 ++ }
1678 +
1679 + status = macb_mdio_wait_for_idle(bp);
1680 + if (status < 0)
1681 +@@ -386,8 +388,10 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1682 + int status;
1683 +
1684 + status = pm_runtime_get_sync(&bp->pdev->dev);
1685 +- if (status < 0)
1686 ++ if (status < 0) {
1687 ++ pm_runtime_put_noidle(&bp->pdev->dev);
1688 + goto mdio_pm_exit;
1689 ++ }
1690 +
1691 + status = macb_mdio_wait_for_idle(bp);
1692 + if (status < 0)
1693 +@@ -3803,8 +3807,10 @@ static int at91ether_open(struct net_device *dev)
1694 + int ret;
1695 +
1696 + ret = pm_runtime_get_sync(&lp->pdev->dev);
1697 +- if (ret < 0)
1698 ++ if (ret < 0) {
1699 ++ pm_runtime_put_noidle(&lp->pdev->dev);
1700 + return ret;
1701 ++ }
1702 +
1703 + /* Clear internal statistics */
1704 + ctl = macb_readl(lp, NCR);
1705 +@@ -4159,15 +4165,9 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
1706 +
1707 + static int fu540_c000_init(struct platform_device *pdev)
1708 + {
1709 +- struct resource *res;
1710 +-
1711 +- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1712 +- if (!res)
1713 +- return -ENODEV;
1714 +-
1715 +- mgmt->reg = ioremap(res->start, resource_size(res));
1716 +- if (!mgmt->reg)
1717 +- return -ENOMEM;
1718 ++ mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
1719 ++ if (IS_ERR(mgmt->reg))
1720 ++ return PTR_ERR(mgmt->reg);
1721 +
1722 + return macb_init(pdev);
1723 + }
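The macb hunks all apply the same pm_runtime rule: pm_runtime_get_sync() bumps the device usage count even when it returns an error, so a failing call must still be balanced with pm_runtime_put_noidle() before bailing out. The runnable userspace sketch below models that "acquire always counts, so the error path must release" contract with a hypothetical acquire()/release_noidle() pair; it is an analogy, not the kernel API.

/* acquire() increments the usage count even on failure, so callers must
 * drop the reference on the error path to keep the count balanced. */
#include <stdio.h>

struct fake_dev {
	int usage_count;
	int broken;                     /* force acquire() to fail when set */
};

static int acquire(struct fake_dev *d)
{
	d->usage_count++;               /* counted even if we then fail */
	return d->broken ? -5 : 0;
}

static void release_noidle(struct fake_dev *d)
{
	d->usage_count--;
}

static int do_mdio_read(struct fake_dev *d)
{
	int status = acquire(d);

	if (status < 0) {
		release_noidle(d);      /* the call the patch adds */
		return status;
	}
	/* ... talk to the hardware ... */
	release_noidle(d);
	return 0;
}

int main(void)
{
	struct fake_dev dev = { .usage_count = 0, .broken = 1 };

	do_mdio_read(&dev);
	printf("usage_count after failed read: %d\n", dev.usage_count); /* 0 */
	return 0;
}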
1724 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
1725 +index cab3d17e0e1a..d6eebd640753 100644
1726 +--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
1727 ++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
1728 +@@ -2202,6 +2202,9 @@ static void ethofld_hard_xmit(struct net_device *dev,
1729 + if (unlikely(skip_eotx_wr)) {
1730 + start = (u64 *)wr;
1731 + eosw_txq->state = next_state;
1732 ++ eosw_txq->cred -= wrlen16;
1733 ++ eosw_txq->ncompl++;
1734 ++ eosw_txq->last_compl = 0;
1735 + goto write_wr_headers;
1736 + }
1737 +
1738 +@@ -2360,6 +2363,34 @@ netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
1739 + return cxgb4_eth_xmit(skb, dev);
1740 + }
1741 +
1742 ++static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq)
1743 ++{
1744 ++ int pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
1745 ++ int pidx = eosw_txq->pidx;
1746 ++ struct sk_buff *skb;
1747 ++
1748 ++ if (!pktcount)
1749 ++ return;
1750 ++
1751 ++ if (pktcount < 0)
1752 ++ pktcount += eosw_txq->ndesc;
1753 ++
1754 ++ while (pktcount--) {
1755 ++ pidx--;
1756 ++ if (pidx < 0)
1757 ++ pidx += eosw_txq->ndesc;
1758 ++
1759 ++ skb = eosw_txq->desc[pidx].skb;
1760 ++ if (skb) {
1761 ++ dev_consume_skb_any(skb);
1762 ++ eosw_txq->desc[pidx].skb = NULL;
1763 ++ eosw_txq->inuse--;
1764 ++ }
1765 ++ }
1766 ++
1767 ++ eosw_txq->pidx = eosw_txq->last_pidx + 1;
1768 ++}
1769 ++
1770 + /**
1771 + * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
1772 + * @dev - netdevice
1773 +@@ -2435,9 +2466,11 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
1774 + FW_FLOWC_MNEM_EOSTATE_CLOSING :
1775 + FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);
1776 +
1777 +- eosw_txq->cred -= len16;
1778 +- eosw_txq->ncompl++;
1779 +- eosw_txq->last_compl = 0;
1780 ++ /* Free up any pending skbs to ensure there's room for
1781 ++ * termination FLOWC.
1782 ++ */
1783 ++ if (tc == FW_SCHED_CLS_NONE)
1784 ++ eosw_txq_flush_pending_skbs(eosw_txq);
1785 +
1786 + ret = eosw_txq_enqueue(eosw_txq, skb);
1787 + if (ret) {
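The new eosw_txq_flush_pending_skbs() above walks backwards from the producer index over the not-yet-sent descriptors, wrapping the index when it goes negative, and frees whatever it finds. Below is a self-contained sketch of that backwards walk over a small ring; the desc[]/skb fields are made up and sized down for illustration.

/* Walk backwards from the producer index over pending ring entries,
 * wrapping at the ring size, and release each pending entry. */
#include <stdio.h>
#include <stdlib.h>

#define NDESC 8

struct desc { char *skb; };

struct ring {
	struct desc desc[NDESC];
	int pidx;          /* producer index */
	int last_pidx;     /* index of the last descriptor already handled */
	int inuse;
};

static void flush_pending(struct ring *q)
{
	int pktcount = q->pidx - q->last_pidx;
	int pidx = q->pidx;

	if (!pktcount)
		return;
	if (pktcount < 0)              /* pidx wrapped past last_pidx */
		pktcount += NDESC;

	while (pktcount--) {
		if (--pidx < 0)
			pidx += NDESC;
		if (q->desc[pidx].skb) {
			free(q->desc[pidx].skb);
			q->desc[pidx].skb = NULL;
			q->inuse--;
		}
	}
	q->pidx = (q->last_pidx + 1) % NDESC;
}

int main(void)
{
	struct ring q = { .pidx = 2, .last_pidx = 6, .inuse = 0 };

	/* Four pending entries: 6, 7, 0, 1 (the range wraps around the ring). */
	for (int i = 6; i != 2; i = (i + 1) % NDESC) {
		q.desc[i].skb = malloc(16);
		q.inuse++;
	}
	flush_pending(&q);
	printf("inuse=%d pidx=%d\n", q.inuse, q.pidx);   /* inuse=0 pidx=7 */
	return 0;
}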
1788 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
1789 +index ebc635f8a4cc..15f37c5b8dc1 100644
1790 +--- a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
1791 ++++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
1792 +@@ -74,8 +74,8 @@ err_pci_mem_reg:
1793 + pci_disable_device(pdev);
1794 + err_pci_enable:
1795 + err_mdiobus_alloc:
1796 +- iounmap(port_regs);
1797 + err_hw_alloc:
1798 ++ iounmap(port_regs);
1799 + err_ioremap:
1800 + return err;
1801 + }
1802 +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
1803 +index 35478cba2aa5..4344a59c823f 100644
1804 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
1805 ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
1806 +@@ -1422,6 +1422,9 @@ int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
1807 + struct mvpp2_ethtool_fs *efs;
1808 + int ret;
1809 +
1810 ++ if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
1811 ++ return -EINVAL;
1812 ++
1813 + efs = port->rfs_rules[info->fs.location];
1814 + if (!efs)
1815 + return -EINVAL;
1816 +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1817 +index 72133cbe55d4..eb78a948bee3 100644
1818 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1819 ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1820 +@@ -4325,6 +4325,8 @@ static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
1821 +
1822 + if (!mvpp22_rss_is_supported())
1823 + return -EOPNOTSUPP;
1824 ++ if (rss_context >= MVPP22_N_RSS_TABLES)
1825 ++ return -EINVAL;
1826 +
1827 + if (hfunc)
1828 + *hfunc = ETH_RSS_HASH_CRC32;
1829 +diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
1830 +index 5716c3d2bb86..c72c4e1ea383 100644
1831 +--- a/drivers/net/ethernet/mellanox/mlx4/main.c
1832 ++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
1833 +@@ -2550,6 +2550,7 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
1834 +
1835 + if (!err || err == -ENOSPC) {
1836 + priv->def_counter[port] = idx;
1837 ++ err = 0;
1838 + } else if (err == -ENOENT) {
1839 + err = 0;
1840 + continue;
1841 +@@ -2600,7 +2601,8 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage)
1842 + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
1843 + if (!err)
1844 + *idx = get_param_l(&out_param);
1845 +-
1846 ++ if (WARN_ON(err == -ENOSPC))
1847 ++ err = -EINVAL;
1848 + return err;
1849 + }
1850 + return __mlx4_counter_alloc(dev, idx);
1851 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1852 +index 34cba97f7bf4..cede5bdfd598 100644
1853 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1854 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1855 +@@ -888,7 +888,6 @@ static void cmd_work_handler(struct work_struct *work)
1856 + }
1857 +
1858 + cmd->ent_arr[ent->idx] = ent;
1859 +- set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
1860 + lay = get_inst(cmd, ent->idx);
1861 + ent->lay = lay;
1862 + memset(lay, 0, sizeof(*lay));
1863 +@@ -910,6 +909,7 @@ static void cmd_work_handler(struct work_struct *work)
1864 +
1865 + if (ent->callback)
1866 + schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
1867 ++ set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
1868 +
1869 + /* Skip sending command to fw if internal error */
1870 + if (pci_channel_offline(dev->pdev) ||
1871 +@@ -922,6 +922,10 @@ static void cmd_work_handler(struct work_struct *work)
1872 + MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
1873 +
1874 + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
1875 ++ /* no doorbell, no need to keep the entry */
1876 ++ free_ent(cmd, ent->idx);
1877 ++ if (ent->callback)
1878 ++ free_cmd(ent);
1879 + return;
1880 + }
1881 +
1882 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1883 +index ffc193c4ad43..2ad0d09cc9bd 100644
1884 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1885 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1886 +@@ -1692,19 +1692,14 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
1887 +
1888 + static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
1889 + {
1890 +- int err = mlx5e_init_rep_rx(priv);
1891 +-
1892 +- if (err)
1893 +- return err;
1894 +-
1895 + mlx5e_create_q_counters(priv);
1896 +- return 0;
1897 ++ return mlx5e_init_rep_rx(priv);
1898 + }
1899 +
1900 + static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
1901 + {
1902 +- mlx5e_destroy_q_counters(priv);
1903 + mlx5e_cleanup_rep_rx(priv);
1904 ++ mlx5e_destroy_q_counters(priv);
1905 + }
1906 +
1907 + static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
1908 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
1909 +index 095ec7b1399d..7c77378accf0 100644
1910 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
1911 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
1912 +@@ -689,6 +689,12 @@ static void dr_cq_event(struct mlx5_core_cq *mcq,
1913 + pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn);
1914 + }
1915 +
1916 ++static void dr_cq_complete(struct mlx5_core_cq *mcq,
1917 ++ struct mlx5_eqe *eqe)
1918 ++{
1919 ++ pr_err("CQ completion CQ: #%u\n", mcq->cqn);
1920 ++}
1921 ++
1922 + static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
1923 + struct mlx5_uars_page *uar,
1924 + size_t ncqe)
1925 +@@ -750,6 +756,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
1926 + mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);
1927 +
1928 + cq->mcq.event = dr_cq_event;
1929 ++ cq->mcq.comp = dr_cq_complete;
1930 +
1931 + err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
1932 + kvfree(in);
1933 +@@ -761,7 +768,12 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
1934 + cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
1935 + cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
1936 + *cq->mcq.set_ci_db = 0;
1937 +- *cq->mcq.arm_db = 0;
1938 ++
1939 ++ /* set no-zero value, in order to avoid the HW to run db-recovery on
1940 ++ * CQ that used in polling mode.
1941 ++ */
1942 ++ *cq->mcq.arm_db = cpu_to_be32(2 << 28);
1943 ++
1944 + cq->mcq.vector = 0;
1945 + cq->mcq.irqn = irqn;
1946 + cq->mcq.uar = uar;
1947 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
1948 +index e993159e8e4c..295b27112d36 100644
1949 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
1950 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
1951 +@@ -986,8 +986,9 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
1952 + unsigned int priority,
1953 + struct mlxsw_afk_element_usage *elusage)
1954 + {
1955 ++ struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2;
1956 + struct mlxsw_sp_acl_tcam_vregion *vregion;
1957 +- struct mlxsw_sp_acl_tcam_vchunk *vchunk;
1958 ++ struct list_head *pos;
1959 + int err;
1960 +
1961 + if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
1962 +@@ -1025,7 +1026,14 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
1963 + }
1964 +
1965 + mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
1966 +- list_add_tail(&vchunk->list, &vregion->vchunk_list);
1967 ++
1968 ++ /* Position the vchunk inside the list according to priority */
1969 ++ list_for_each(pos, &vregion->vchunk_list) {
1970 ++ vchunk2 = list_entry(pos, typeof(*vchunk2), list);
1971 ++ if (vchunk2->priority > priority)
1972 ++ break;
1973 ++ }
1974 ++ list_add_tail(&vchunk->list, pos);
1975 + mutex_unlock(&vregion->lock);
1976 +
1977 + return vchunk;
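The mlxsw change above stops blindly appending new vchunks and instead scans the list for the first entry with a higher priority, inserting the new one just before it (after any entries of equal priority). A small, runnable sketch of that "insert before the first higher-priority node" walk over a singly linked list, with a hypothetical node layout rather than the mlxsw structures:

/* Keep the list sorted in ascending priority order by inserting each new
 * node in front of the first existing node with a strictly higher priority. */
#include <stdio.h>
#include <stdlib.h>

struct vchunk {
	unsigned int priority;
	struct vchunk *next;
};

static void insert_by_priority(struct vchunk **head, struct vchunk *new)
{
	struct vchunk **pos = head;

	while (*pos && (*pos)->priority <= new->priority)
		pos = &(*pos)->next;
	new->next = *pos;          /* lands before the first higher priority */
	*pos = new;
}

static struct vchunk *make(unsigned int prio)
{
	struct vchunk *c = calloc(1, sizeof(*c));

	c->priority = prio;
	return c;
}

int main(void)
{
	struct vchunk *head = NULL, *c;
	unsigned int prios[] = { 30, 10, 20, 15 };

	for (unsigned int i = 0; i < 4; i++)
		insert_by_priority(&head, make(prios[i]));

	for (c = head; c; c = c->next)
		printf("%u ", c->priority);   /* 10 15 20 30 */
	printf("\n");

	while (head) {
		c = head;
		head = head->next;
		free(c);
	}
	return 0;
}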
1978 +diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
1979 +index 9183b3e85d21..354efffac0f9 100644
1980 +--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
1981 ++++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
1982 +@@ -283,6 +283,7 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn,
1983 + if (!nfp_nsp_has_hwinfo_lookup(nsp)) {
1984 + nfp_warn(pf->cpp, "NSP doesn't support PF MAC generation\n");
1985 + eth_hw_addr_random(nn->dp.netdev);
1986 ++ nfp_nsp_close(nsp);
1987 + return;
1988 + }
1989 +
1990 +diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
1991 +index 3fd43d30b20d..a1066fbb93b5 100644
1992 +--- a/drivers/net/ethernet/toshiba/tc35815.c
1993 ++++ b/drivers/net/ethernet/toshiba/tc35815.c
1994 +@@ -643,7 +643,7 @@ static int tc_mii_probe(struct net_device *dev)
1995 + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
1996 + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
1997 + }
1998 +- linkmode_and(phydev->supported, phydev->supported, mask);
1999 ++ linkmode_andnot(phydev->supported, phydev->supported, mask);
2000 + linkmode_copy(phydev->advertising, phydev->supported);
2001 +
2002 + lp->link = 0;
2003 +diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
2004 +index 35aa7b0a2aeb..11028ef8be4e 100644
2005 +--- a/drivers/net/macsec.c
2006 ++++ b/drivers/net/macsec.c
2007 +@@ -1226,7 +1226,8 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
2008 + struct crypto_aead *tfm;
2009 + int ret;
2010 +
2011 +- tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
2012 ++ /* Pick a sync gcm(aes) cipher to ensure order is preserved. */
2013 ++ tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
2014 +
2015 + if (IS_ERR(tfm))
2016 + return tfm;
2017 +diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
2018 +index ac72a324fcd1..b1d771325c57 100644
2019 +--- a/drivers/net/phy/dp83640.c
2020 ++++ b/drivers/net/phy/dp83640.c
2021 +@@ -1120,7 +1120,7 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
2022 + goto out;
2023 + }
2024 + dp83640_clock_init(clock, bus);
2025 +- list_add_tail(&phyter_clocks, &clock->list);
2026 ++ list_add_tail(&clock->list, &phyter_clocks);
2027 + out:
2028 + mutex_unlock(&phyter_clocks_lock);
2029 +
2030 +diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
2031 +index 64c9f3bba2cd..e2658dace15d 100644
2032 +--- a/drivers/net/phy/marvell10g.c
2033 ++++ b/drivers/net/phy/marvell10g.c
2034 +@@ -44,6 +44,9 @@ enum {
2035 + MV_PCS_PAIRSWAP_AB = 0x0002,
2036 + MV_PCS_PAIRSWAP_NONE = 0x0003,
2037 +
2038 ++ /* Temperature read register (88E2110 only) */
2039 ++ MV_PCS_TEMP = 0x8042,
2040 ++
2041 + /* These registers appear at 0x800X and 0xa00X - the 0xa00X control
2042 + * registers appear to set themselves to the 0x800X when AN is
2043 + * restarted, but status registers appear readable from either.
2044 +@@ -54,6 +57,7 @@ enum {
2045 + /* Vendor2 MMD registers */
2046 + MV_V2_PORT_CTRL = 0xf001,
2047 + MV_V2_PORT_CTRL_PWRDOWN = 0x0800,
2048 ++ /* Temperature control/read registers (88X3310 only) */
2049 + MV_V2_TEMP_CTRL = 0xf08a,
2050 + MV_V2_TEMP_CTRL_MASK = 0xc000,
2051 + MV_V2_TEMP_CTRL_SAMPLE = 0x0000,
2052 +@@ -79,6 +83,24 @@ static umode_t mv3310_hwmon_is_visible(const void *data,
2053 + return 0;
2054 + }
2055 +
2056 ++static int mv3310_hwmon_read_temp_reg(struct phy_device *phydev)
2057 ++{
2058 ++ return phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP);
2059 ++}
2060 ++
2061 ++static int mv2110_hwmon_read_temp_reg(struct phy_device *phydev)
2062 ++{
2063 ++ return phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_TEMP);
2064 ++}
2065 ++
2066 ++static int mv10g_hwmon_read_temp_reg(struct phy_device *phydev)
2067 ++{
2068 ++ if (phydev->drv->phy_id == MARVELL_PHY_ID_88X3310)
2069 ++ return mv3310_hwmon_read_temp_reg(phydev);
2070 ++ else /* MARVELL_PHY_ID_88E2110 */
2071 ++ return mv2110_hwmon_read_temp_reg(phydev);
2072 ++}
2073 ++
2074 + static int mv3310_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
2075 + u32 attr, int channel, long *value)
2076 + {
2077 +@@ -91,7 +113,7 @@ static int mv3310_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
2078 + }
2079 +
2080 + if (type == hwmon_temp && attr == hwmon_temp_input) {
2081 +- temp = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP);
2082 ++ temp = mv10g_hwmon_read_temp_reg(phydev);
2083 + if (temp < 0)
2084 + return temp;
2085 +
2086 +@@ -144,6 +166,9 @@ static int mv3310_hwmon_config(struct phy_device *phydev, bool enable)
2087 + u16 val;
2088 + int ret;
2089 +
2090 ++ if (phydev->drv->phy_id != MARVELL_PHY_ID_88X3310)
2091 ++ return 0;
2092 ++
2093 + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP,
2094 + MV_V2_TEMP_UNKNOWN);
2095 + if (ret < 0)
2096 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2097 +index 6c738a271257..4bb8552a00d3 100644
2098 +--- a/drivers/net/usb/qmi_wwan.c
2099 ++++ b/drivers/net/usb/qmi_wwan.c
2100 +@@ -1359,6 +1359,7 @@ static const struct usb_device_id products[] = {
2101 + {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
2102 + {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
2103 + {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
2104 ++ {QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */
2105 + {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
2106 + {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
2107 + {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
2108 +diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c
2109 +index 5c964fcb994e..71b8e80b58e1 100644
2110 +--- a/drivers/net/wireguard/queueing.c
2111 ++++ b/drivers/net/wireguard/queueing.c
2112 +@@ -35,8 +35,10 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
2113 + if (multicore) {
2114 + queue->worker = wg_packet_percpu_multicore_worker_alloc(
2115 + function, queue);
2116 +- if (!queue->worker)
2117 ++ if (!queue->worker) {
2118 ++ ptr_ring_cleanup(&queue->ring, NULL);
2119 + return -ENOMEM;
2120 ++ }
2121 + } else {
2122 + INIT_WORK(&queue->work, function);
2123 + }
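The wireguard queueing fix above is the usual C resource-unwinding rule: once ptr_ring_init() has succeeded, a later failure in the same function must tear that ring down again before returning. A minimal, generic sketch of the pattern with two allocations (not the wireguard API):

/* If the second allocation fails, undo the first before returning, so the
 * caller never sees a half-initialized object. */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct queue {
	void **ring;
	void *worker;
};

static int queue_init(struct queue *q, size_t len, int multicore)
{
	q->ring = calloc(len, sizeof(*q->ring));
	if (!q->ring)
		return -ENOMEM;

	if (multicore) {
		q->worker = malloc(64);     /* stands in for the per-cpu worker */
		if (!q->worker) {
			free(q->ring);      /* the cleanup the patch adds */
			q->ring = NULL;
			return -ENOMEM;
		}
	} else {
		q->worker = NULL;
	}
	return 0;
}

int main(void)
{
	struct queue q;

	printf("init: %d\n", queue_init(&q, 1024, 1));
	free(q.worker);
	free(q.ring);
	return 0;
}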
2124 +diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
2125 +index da3b782ab7d3..2566e13a292d 100644
2126 +--- a/drivers/net/wireguard/receive.c
2127 ++++ b/drivers/net/wireguard/receive.c
2128 +@@ -393,13 +393,11 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
2129 + len = ntohs(ip_hdr(skb)->tot_len);
2130 + if (unlikely(len < sizeof(struct iphdr)))
2131 + goto dishonest_packet_size;
2132 +- if (INET_ECN_is_ce(PACKET_CB(skb)->ds))
2133 +- IP_ECN_set_ce(ip_hdr(skb));
2134 ++ INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ip_hdr(skb)->tos);
2135 + } else if (skb->protocol == htons(ETH_P_IPV6)) {
2136 + len = ntohs(ipv6_hdr(skb)->payload_len) +
2137 + sizeof(struct ipv6hdr);
2138 +- if (INET_ECN_is_ce(PACKET_CB(skb)->ds))
2139 +- IP6_ECN_set_ce(skb, ipv6_hdr(skb));
2140 ++ INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ipv6_get_dsfield(ipv6_hdr(skb)));
2141 + } else {
2142 + goto dishonest_packet_type;
2143 + }
2144 +@@ -518,6 +516,8 @@ void wg_packet_decrypt_worker(struct work_struct *work)
2145 + &PACKET_CB(skb)->keypair->receiving)) ?
2146 + PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
2147 + wg_queue_enqueue_per_peer_napi(skb, state);
2148 ++ if (need_resched())
2149 ++ cond_resched();
2150 + }
2151 + }
2152 +
2153 +diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
2154 +index 7348c10cbae3..e8a7d0a0cb88 100644
2155 +--- a/drivers/net/wireguard/send.c
2156 ++++ b/drivers/net/wireguard/send.c
2157 +@@ -281,6 +281,8 @@ void wg_packet_tx_worker(struct work_struct *work)
2158 +
2159 + wg_noise_keypair_put(keypair, false);
2160 + wg_peer_put(peer);
2161 ++ if (need_resched())
2162 ++ cond_resched();
2163 + }
2164 + }
2165 +
2166 +@@ -305,6 +307,8 @@ void wg_packet_encrypt_worker(struct work_struct *work)
2167 + wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first,
2168 + state);
2169 +
2170 ++ if (need_resched())
2171 ++ cond_resched();
2172 + }
2173 + }
2174 +
2175 +diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
2176 +index b0d6541582d3..f9018027fc13 100644
2177 +--- a/drivers/net/wireguard/socket.c
2178 ++++ b/drivers/net/wireguard/socket.c
2179 +@@ -76,12 +76,6 @@ static int send4(struct wg_device *wg, struct sk_buff *skb,
2180 + net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
2181 + wg->dev->name, &endpoint->addr, ret);
2182 + goto err;
2183 +- } else if (unlikely(rt->dst.dev == skb->dev)) {
2184 +- ip_rt_put(rt);
2185 +- ret = -ELOOP;
2186 +- net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n",
2187 +- wg->dev->name, &endpoint->addr);
2188 +- goto err;
2189 + }
2190 + if (cache)
2191 + dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
2192 +@@ -149,12 +143,6 @@ static int send6(struct wg_device *wg, struct sk_buff *skb,
2193 + net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
2194 + wg->dev->name, &endpoint->addr, ret);
2195 + goto err;
2196 +- } else if (unlikely(dst->dev == skb->dev)) {
2197 +- dst_release(dst);
2198 +- ret = -ELOOP;
2199 +- net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n",
2200 +- wg->dev->name, &endpoint->addr);
2201 +- goto err;
2202 + }
2203 + if (cache)
2204 + dst_cache_set_ip6(cache, dst, &fl.saddr);
2205 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
2206 +index fb4c35a43065..84f20369d846 100644
2207 +--- a/drivers/nvme/host/core.c
2208 ++++ b/drivers/nvme/host/core.c
2209 +@@ -1075,8 +1075,17 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
2210 +
2211 + status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
2212 + NVME_IDENTIFY_DATA_SIZE);
2213 +- if (status)
2214 ++ if (status) {
2215 ++ dev_warn(ctrl->device,
2216 ++ "Identify Descriptors failed (%d)\n", status);
2217 ++ /*
2218 ++ * Don't treat an error as fatal, as we potentially already
2219 ++ * have a NGUID or EUI-64.
2220 ++ */
2221 ++ if (status > 0 && !(status & NVME_SC_DNR))
2222 ++ status = 0;
2223 + goto free_data;
2224 ++ }
2225 +
2226 + for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
2227 + struct nvme_ns_id_desc *cur = data + pos;
2228 +@@ -1734,26 +1743,15 @@ static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
2229 + static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
2230 + struct nvme_id_ns *id, struct nvme_ns_ids *ids)
2231 + {
2232 +- int ret = 0;
2233 +-
2234 + memset(ids, 0, sizeof(*ids));
2235 +
2236 + if (ctrl->vs >= NVME_VS(1, 1, 0))
2237 + memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
2238 + if (ctrl->vs >= NVME_VS(1, 2, 0))
2239 + memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
2240 +- if (ctrl->vs >= NVME_VS(1, 3, 0)) {
2241 +- /* Don't treat error as fatal we potentially
2242 +- * already have a NGUID or EUI-64
2243 +- */
2244 +- ret = nvme_identify_ns_descs(ctrl, nsid, ids);
2245 +- if (ret)
2246 +- dev_warn(ctrl->device,
2247 +- "Identify Descriptors failed (%d)\n", ret);
2248 +- if (ret > 0)
2249 +- ret = 0;
2250 +- }
2251 +- return ret;
2252 ++ if (ctrl->vs >= NVME_VS(1, 3, 0))
2253 ++ return nvme_identify_ns_descs(ctrl, nsid, ids);
2254 ++ return 0;
2255 + }
2256 +
2257 + static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
2258 +diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c
2259 +index be6b50f454b4..d3f255c740e9 100644
2260 +--- a/drivers/staging/gasket/gasket_core.c
2261 ++++ b/drivers/staging/gasket/gasket_core.c
2262 +@@ -926,6 +926,10 @@ do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma,
2263 + gasket_get_bar_index(gasket_dev,
2264 + (vma->vm_pgoff << PAGE_SHIFT) +
2265 + driver_desc->legacy_mmap_address_offset);
2266 ++
2267 ++ if (bar_index < 0)
2268 ++ return DO_MAP_REGION_INVALID;
2269 ++
2270 + phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset;
2271 + while (mapped_bytes < map_length) {
2272 + /*
2273 +diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
2274 +index b341fc60c4ba..114fbe51527c 100644
2275 +--- a/drivers/thunderbolt/usb4.c
2276 ++++ b/drivers/thunderbolt/usb4.c
2277 +@@ -182,6 +182,9 @@ static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
2278 + return ret;
2279 +
2280 + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
2281 ++ if (ret)
2282 ++ return ret;
2283 ++
2284 + if (val & ROUTER_CS_26_ONS)
2285 + return -EOPNOTSUPP;
2286 +
2287 +diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
2288 +index 7a9b360b0438..1d8b6993a435 100644
2289 +--- a/drivers/tty/serial/xilinx_uartps.c
2290 ++++ b/drivers/tty/serial/xilinx_uartps.c
2291 +@@ -1471,6 +1471,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
2292 + cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS;
2293 + #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
2294 + cdns_uart_uart_driver.cons = &cdns_uart_console;
2295 ++ cdns_uart_console.index = id;
2296 + #endif
2297 +
2298 + rc = uart_register_driver(&cdns_uart_uart_driver);
2299 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
2300 +index cc1a04191365..699d8b56cbe7 100644
2301 +--- a/drivers/tty/vt/vt.c
2302 ++++ b/drivers/tty/vt/vt.c
2303 +@@ -365,9 +365,14 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
2304 + return uniscr;
2305 + }
2306 +
2307 ++static void vc_uniscr_free(struct uni_screen *uniscr)
2308 ++{
2309 ++ vfree(uniscr);
2310 ++}
2311 ++
2312 + static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr)
2313 + {
2314 +- vfree(vc->vc_uni_screen);
2315 ++ vc_uniscr_free(vc->vc_uni_screen);
2316 + vc->vc_uni_screen = new_uniscr;
2317 + }
2318 +
2319 +@@ -1230,7 +1235,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
2320 + err = resize_screen(vc, new_cols, new_rows, user);
2321 + if (err) {
2322 + kfree(newscreen);
2323 +- kfree(new_uniscr);
2324 ++ vc_uniscr_free(new_uniscr);
2325 + return err;
2326 + }
2327 +
2328 +diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
2329 +index af648ba6544d..46105457e1ca 100644
2330 +--- a/drivers/usb/chipidea/ci_hdrc_msm.c
2331 ++++ b/drivers/usb/chipidea/ci_hdrc_msm.c
2332 +@@ -114,7 +114,7 @@ static int ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
2333 + hw_write_id_reg(ci, HS_PHY_GENCONFIG_2,
2334 + HS_PHY_ULPI_TX_PKT_EN_CLR_FIX, 0);
2335 +
2336 +- if (!IS_ERR(ci->platdata->vbus_extcon.edev)) {
2337 ++ if (!IS_ERR(ci->platdata->vbus_extcon.edev) || ci->role_switch) {
2338 + hw_write_id_reg(ci, HS_PHY_GENCONFIG_2,
2339 + HS_PHY_SESS_VLD_CTRL_EN,
2340 + HS_PHY_SESS_VLD_CTRL_EN);
2341 +diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
2342 +index ffd984142171..d63072fee099 100644
2343 +--- a/drivers/usb/serial/garmin_gps.c
2344 ++++ b/drivers/usb/serial/garmin_gps.c
2345 +@@ -1138,8 +1138,8 @@ static void garmin_read_process(struct garmin_data *garmin_data_p,
2346 + send it directly to the tty port */
2347 + if (garmin_data_p->flags & FLAGS_QUEUING) {
2348 + pkt_add(garmin_data_p, data, data_length);
2349 +- } else if (bulk_data ||
2350 +- getLayerId(data) == GARMIN_LAYERID_APPL) {
2351 ++ } else if (bulk_data || (data_length >= sizeof(u32) &&
2352 ++ getLayerId(data) == GARMIN_LAYERID_APPL)) {
2353 +
2354 + spin_lock_irqsave(&garmin_data_p->lock, flags);
2355 + garmin_data_p->flags |= APP_RESP_SEEN;
2356 +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
2357 +index 613f91add03d..ce0401d3137f 100644
2358 +--- a/drivers/usb/serial/qcserial.c
2359 ++++ b/drivers/usb/serial/qcserial.c
2360 +@@ -173,6 +173,7 @@ static const struct usb_device_id id_table[] = {
2361 + {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
2362 + {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
2363 + {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
2364 ++ {DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */

2365 + {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */
2366 + {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */
2367 + {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */
2368 +diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
2369 +index 1b23741036ee..37157ed9a881 100644
2370 +--- a/drivers/usb/storage/unusual_uas.h
2371 ++++ b/drivers/usb/storage/unusual_uas.h
2372 +@@ -28,6 +28,13 @@
2373 + * and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
2374 + */
2375 +
2376 ++/* Reported-by: Julian Groß <julian.g@××××××.de> */
2377 ++UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
2378 ++ "LaCie",
2379 ++ "2Big Quadra USB3",
2380 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2381 ++ US_FL_NO_REPORT_OPCODES),
2382 ++
2383 + /*
2384 + * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
2385 + * commands in UAS mode. Observed with the 1.28 firmware; are there others?
2386 +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
2387 +index bbbbddf71326..da7d5c9e3133 100644
2388 +--- a/fs/ceph/mds_client.c
2389 ++++ b/fs/ceph/mds_client.c
2390 +@@ -3116,8 +3116,7 @@ static void handle_session(struct ceph_mds_session *session,
2391 + void *end = p + msg->front.iov_len;
2392 + struct ceph_mds_session_head *h;
2393 + u32 op;
2394 +- u64 seq;
2395 +- unsigned long features = 0;
2396 ++ u64 seq, features = 0;
2397 + int wake = 0;
2398 + bool blacklisted = false;
2399 +
2400 +@@ -3136,9 +3135,8 @@ static void handle_session(struct ceph_mds_session *session,
2401 + goto bad;
2402 + /* version >= 3, feature bits */
2403 + ceph_decode_32_safe(&p, end, len, bad);
2404 +- ceph_decode_need(&p, end, len, bad);
2405 +- memcpy(&features, p, min_t(size_t, len, sizeof(features)));
2406 +- p += len;
2407 ++ ceph_decode_64_safe(&p, end, features, bad);
2408 ++ p += len - sizeof(features);
2409 + }
2410 +
2411 + mutex_lock(&mdsc->mutex);
2412 +diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
2413 +index de56dee60540..19507e2fdb57 100644
2414 +--- a/fs/ceph/quota.c
2415 ++++ b/fs/ceph/quota.c
2416 +@@ -159,8 +159,8 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc,
2417 + }
2418 +
2419 + if (IS_ERR(in)) {
2420 +- pr_warn("Can't lookup inode %llx (err: %ld)\n",
2421 +- realm->ino, PTR_ERR(in));
2422 ++ dout("Can't lookup inode %llx (err: %ld)\n",
2423 ++ realm->ino, PTR_ERR(in));
2424 + qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */
2425 + } else {
2426 + qri->timeout = 0;
2427 +diff --git a/fs/coredump.c b/fs/coredump.c
2428 +index 408418e6aa13..478a0d810136 100644
2429 +--- a/fs/coredump.c
2430 ++++ b/fs/coredump.c
2431 +@@ -788,6 +788,14 @@ void do_coredump(const kernel_siginfo_t *siginfo)
2432 + if (displaced)
2433 + put_files_struct(displaced);
2434 + if (!dump_interrupted()) {
2435 ++ /*
2436 ++ * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would
2437 ++ * have this set to NULL.
2438 ++ */
2439 ++ if (!cprm.file) {
2440 ++ pr_info("Core dump to |%s disabled\n", cn.corename);
2441 ++ goto close_fail;
2442 ++ }
2443 + file_start_write(cprm.file);
2444 + core_dumped = binfmt->core_dump(&cprm);
2445 + file_end_write(cprm.file);
2446 +diff --git a/fs/eventpoll.c b/fs/eventpoll.c
2447 +index eee3c92a9ebf..b0a097274cfe 100644
2448 +--- a/fs/eventpoll.c
2449 ++++ b/fs/eventpoll.c
2450 +@@ -1149,6 +1149,10 @@ static inline bool chain_epi_lockless(struct epitem *epi)
2451 + {
2452 + struct eventpoll *ep = epi->ep;
2453 +
2454 ++ /* Fast preliminary check */
2455 ++ if (epi->next != EP_UNACTIVE_PTR)
2456 ++ return false;
2457 ++
2458 + /* Check that the same epi has not been just chained from another CPU */
2459 + if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
2460 + return false;
2461 +@@ -1215,16 +1219,12 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
2462 + * chained in ep->ovflist and requeued later on.
2463 + */
2464 + if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
2465 +- if (epi->next == EP_UNACTIVE_PTR &&
2466 +- chain_epi_lockless(epi))
2467 ++ if (chain_epi_lockless(epi))
2468 ++ ep_pm_stay_awake_rcu(epi);
2469 ++ } else if (!ep_is_linked(epi)) {
2470 ++ /* In the usual case, add event to ready list. */
2471 ++ if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
2472 + ep_pm_stay_awake_rcu(epi);
2473 +- goto out_unlock;
2474 +- }
2475 +-
2476 +- /* If this file is already in the ready list we exit soon */
2477 +- if (!ep_is_linked(epi) &&
2478 +- list_add_tail_lockless(&epi->rdllink, &ep->rdllist)) {
2479 +- ep_pm_stay_awake_rcu(epi);
2480 + }
2481 +
2482 + /*
2483 +@@ -1800,7 +1800,6 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
2484 + {
2485 + int res = 0, eavail, timed_out = 0;
2486 + u64 slack = 0;
2487 +- bool waiter = false;
2488 + wait_queue_entry_t wait;
2489 + ktime_t expires, *to = NULL;
2490 +
2491 +@@ -1845,21 +1844,23 @@ fetch_events:
2492 + */
2493 + ep_reset_busy_poll_napi_id(ep);
2494 +
2495 +- /*
2496 +- * We don't have any available event to return to the caller. We need
2497 +- * to sleep here, and we will be woken by ep_poll_callback() when events
2498 +- * become available.
2499 +- */
2500 +- if (!waiter) {
2501 +- waiter = true;
2502 +- init_waitqueue_entry(&wait, current);
2503 +-
2504 ++ do {
2505 ++ /*
2506 ++ * Internally init_wait() uses autoremove_wake_function(),
2507 ++ * thus wait entry is removed from the wait queue on each
2508 ++ * wakeup. Why it is important? In case of several waiters
2509 ++ * each new wakeup will hit the next waiter, giving it the
2510 ++ * chance to harvest new event. Otherwise wakeup can be
2511 ++ * lost. This is also good performance-wise, because on
2512 ++ * normal wakeup path no need to call __remove_wait_queue()
2513 ++ * explicitly, thus ep->lock is not taken, which halts the
2514 ++ * event delivery.
2515 ++ */
2516 ++ init_wait(&wait);
2517 + write_lock_irq(&ep->lock);
2518 + __add_wait_queue_exclusive(&ep->wq, &wait);
2519 + write_unlock_irq(&ep->lock);
2520 +- }
2521 +
2522 +- for (;;) {
2523 + /*
2524 + * We don't want to sleep if the ep_poll_callback() sends us
2525 + * a wakeup in between. That's why we set the task state
2526 +@@ -1889,10 +1890,20 @@ fetch_events:
2527 + timed_out = 1;
2528 + break;
2529 + }
2530 +- }
2531 ++
2532 ++ /* We were woken up, thus go and try to harvest some events */
2533 ++ eavail = 1;
2534 ++
2535 ++ } while (0);
2536 +
2537 + __set_current_state(TASK_RUNNING);
2538 +
2539 ++ if (!list_empty_careful(&wait.entry)) {
2540 ++ write_lock_irq(&ep->lock);
2541 ++ __remove_wait_queue(&ep->wq, &wait);
2542 ++ write_unlock_irq(&ep->lock);
2543 ++ }
2544 ++
2545 + send_events:
2546 + /*
2547 + * Try to transfer events to user space. In case we get 0 events and
2548 +@@ -1903,12 +1914,6 @@ send_events:
2549 + !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
2550 + goto fetch_events;
2551 +
2552 +- if (waiter) {
2553 +- write_lock_irq(&ep->lock);
2554 +- __remove_wait_queue(&ep->wq, &wait);
2555 +- write_unlock_irq(&ep->lock);
2556 +- }
2557 +-
2558 + return res;
2559 + }
2560 +
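The ep_poll() rework above re-adds the waiter on every pass, relies on the wait entry auto-removing itself on wakeup, and then re-checks for events after being woken. In userspace the analogous discipline is the classic "wait on a condition variable in a loop and re-test the predicate after every wakeup" pattern, sketched below with pthreads; this is an analogy, not the epoll internals. Build with: cc demo.c -lpthread.

/* Re-check the predicate after every wakeup; a wakeup is only a hint that
 * there *may* be work, much like ep_poll() going back to harvest events. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int nready;

static void *producer(void *arg)
{
	(void)arg;
	usleep(10000);
	pthread_mutex_lock(&lock);
	nready = 3;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);

	pthread_mutex_lock(&lock);
	while (nready == 0)                 /* woken? re-check the condition */
		pthread_cond_wait(&cond, &lock);
	printf("harvested %d events\n", nready);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}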
2561 +diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
2562 +index 7ea4f6fa173b..4b9002f0e84c 100644
2563 +--- a/fs/ext4/ext4_jbd2.h
2564 ++++ b/fs/ext4/ext4_jbd2.h
2565 +@@ -512,6 +512,9 @@ static inline int ext4_should_dioread_nolock(struct inode *inode)
2566 + return 0;
2567 + if (ext4_should_journal_data(inode))
2568 + return 0;
2569 ++ /* temporary fix to prevent generic/422 test failures */
2570 ++ if (!test_opt(inode->i_sb, DELALLOC))
2571 ++ return 0;
2572 + return 1;
2573 + }
2574 +
2575 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
2576 +index 446158ab507d..70796de7c468 100644
2577 +--- a/fs/ext4/super.c
2578 ++++ b/fs/ext4/super.c
2579 +@@ -2181,6 +2181,14 @@ static int parse_options(char *options, struct super_block *sb,
2580 + }
2581 + }
2582 + #endif
2583 ++ if (test_opt(sb, DIOREAD_NOLOCK)) {
2584 ++ int blocksize =
2585 ++ BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
2586 ++ if (blocksize < PAGE_SIZE)
2587 ++ ext4_msg(sb, KERN_WARNING, "Warning: mounting with an "
2588 ++ "experimental mount option 'dioread_nolock' "
2589 ++ "for blocksize < PAGE_SIZE");
2590 ++ }
2591 + return 1;
2592 + }
2593 +
2594 +@@ -3787,7 +3795,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2595 + set_opt(sb, NO_UID32);
2596 + /* xattr user namespace & acls are now defaulted on */
2597 + set_opt(sb, XATTR_USER);
2598 +- set_opt(sb, DIOREAD_NOLOCK);
2599 + #ifdef CONFIG_EXT4_FS_POSIX_ACL
2600 + set_opt(sb, POSIX_ACL);
2601 + #endif
2602 +@@ -3837,6 +3844,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2603 + sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
2604 +
2605 + blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
2606 ++
2607 ++ if (blocksize == PAGE_SIZE)
2608 ++ set_opt(sb, DIOREAD_NOLOCK);
2609 ++
2610 + if (blocksize < EXT4_MIN_BLOCK_SIZE ||
2611 + blocksize > EXT4_MAX_BLOCK_SIZE) {
2612 + ext4_msg(sb, KERN_ERR,
2613 +diff --git a/fs/io_uring.c b/fs/io_uring.c
2614 +index 38b25f599896..9690c845a3e4 100644
2615 +--- a/fs/io_uring.c
2616 ++++ b/fs/io_uring.c
2617 +@@ -696,8 +696,6 @@ static const struct io_op_def io_op_defs[] = {
2618 + .needs_file = 1,
2619 + },
2620 + [IORING_OP_OPENAT] = {
2621 +- .needs_file = 1,
2622 +- .fd_non_neg = 1,
2623 + .file_table = 1,
2624 + .needs_fs = 1,
2625 + },
2626 +@@ -711,8 +709,6 @@ static const struct io_op_def io_op_defs[] = {
2627 + },
2628 + [IORING_OP_STATX] = {
2629 + .needs_mm = 1,
2630 +- .needs_file = 1,
2631 +- .fd_non_neg = 1,
2632 + .needs_fs = 1,
2633 + .file_table = 1,
2634 + },
2635 +@@ -743,8 +739,6 @@ static const struct io_op_def io_op_defs[] = {
2636 + .unbound_nonreg_file = 1,
2637 + },
2638 + [IORING_OP_OPENAT2] = {
2639 +- .needs_file = 1,
2640 +- .fd_non_neg = 1,
2641 + .file_table = 1,
2642 + .needs_fs = 1,
2643 + },
2644 +diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
2645 +index 5778d1347b35..f5d30573f4a9 100644
2646 +--- a/fs/notify/fanotify/fanotify.c
2647 ++++ b/fs/notify/fanotify/fanotify.c
2648 +@@ -26,7 +26,7 @@ static bool should_merge(struct fsnotify_event *old_fsn,
2649 + old = FANOTIFY_E(old_fsn);
2650 + new = FANOTIFY_E(new_fsn);
2651 +
2652 +- if (old_fsn->inode != new_fsn->inode || old->pid != new->pid ||
2653 ++ if (old_fsn->objectid != new_fsn->objectid || old->pid != new->pid ||
2654 + old->fh_type != new->fh_type || old->fh_len != new->fh_len)
2655 + return false;
2656 +
2657 +@@ -314,7 +314,12 @@ struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
2658 + if (!event)
2659 + goto out;
2660 + init: __maybe_unused
2661 +- fsnotify_init_event(&event->fse, inode);
2662 ++ /*
2663 ++ * Use the victim inode instead of the watching inode as the id for
2664 ++ * event queue, so event reported on parent is merged with event
2665 ++ * reported on child when both directory and child watches exist.
2666 ++ */
2667 ++ fsnotify_init_event(&event->fse, (unsigned long)id);
2668 + event->mask = mask;
2669 + if (FAN_GROUP_FLAG(group, FAN_REPORT_TID))
2670 + event->pid = get_pid(task_pid(current));
2671 +diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
2672 +index d510223d302c..589dee962993 100644
2673 +--- a/fs/notify/inotify/inotify_fsnotify.c
2674 ++++ b/fs/notify/inotify/inotify_fsnotify.c
2675 +@@ -39,7 +39,7 @@ static bool event_compare(struct fsnotify_event *old_fsn,
2676 + if (old->mask & FS_IN_IGNORED)
2677 + return false;
2678 + if ((old->mask == new->mask) &&
2679 +- (old_fsn->inode == new_fsn->inode) &&
2680 ++ (old_fsn->objectid == new_fsn->objectid) &&
2681 + (old->name_len == new->name_len) &&
2682 + (!old->name_len || !strcmp(old->name, new->name)))
2683 + return true;
2684 +@@ -118,7 +118,7 @@ int inotify_handle_event(struct fsnotify_group *group,
2685 + mask &= ~IN_ISDIR;
2686 +
2687 + fsn_event = &event->fse;
2688 +- fsnotify_init_event(fsn_event, inode);
2689 ++ fsnotify_init_event(fsn_event, (unsigned long)inode);
2690 + event->mask = mask;
2691 + event->wd = i_mark->wd;
2692 + event->sync_cookie = cookie;
2693 +diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
2694 +index 107537a543fd..81ffc8629fc4 100644
2695 +--- a/fs/notify/inotify/inotify_user.c
2696 ++++ b/fs/notify/inotify/inotify_user.c
2697 +@@ -635,7 +635,7 @@ static struct fsnotify_group *inotify_new_group(unsigned int max_events)
2698 + return ERR_PTR(-ENOMEM);
2699 + }
2700 + group->overflow_event = &oevent->fse;
2701 +- fsnotify_init_event(group->overflow_event, NULL);
2702 ++ fsnotify_init_event(group->overflow_event, 0);
2703 + oevent->mask = FS_Q_OVERFLOW;
2704 + oevent->wd = -1;
2705 + oevent->sync_cookie = 0;
2706 +diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
2707 +index 26f0ecf401ea..0bbfd647f5c6 100644
2708 +--- a/include/linux/amba/bus.h
2709 ++++ b/include/linux/amba/bus.h
2710 +@@ -65,6 +65,7 @@ struct amba_device {
2711 + struct device dev;
2712 + struct resource res;
2713 + struct clk *pclk;
2714 ++ struct device_dma_parameters dma_parms;
2715 + unsigned int periphid;
2716 + unsigned int cid;
2717 + struct amba_cs_uci_id uci;
2718 +diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
2719 +index 4fc87dee005a..2849bdbb3acb 100644
2720 +--- a/include/linux/backing-dev-defs.h
2721 ++++ b/include/linux/backing-dev-defs.h
2722 +@@ -220,6 +220,7 @@ struct backing_dev_info {
2723 + wait_queue_head_t wb_waitq;
2724 +
2725 + struct device *dev;
2726 ++ char dev_name[64];
2727 + struct device *owner;
2728 +
2729 + struct timer_list laptop_mode_wb_timer;
2730 +diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
2731 +index f88197c1ffc2..c9ad5c3b7b4b 100644
2732 +--- a/include/linux/backing-dev.h
2733 ++++ b/include/linux/backing-dev.h
2734 +@@ -505,13 +505,6 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi)
2735 + (1 << WB_async_congested));
2736 + }
2737 +
2738 +-extern const char *bdi_unknown_name;
2739 +-
2740 +-static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
2741 +-{
2742 +- if (!bdi || !bdi->dev)
2743 +- return bdi_unknown_name;
2744 +- return dev_name(bdi->dev);
2745 +-}
2746 ++const char *bdi_dev_name(struct backing_dev_info *bdi);
2747 +
2748 + #endif /* _LINUX_BACKING_DEV_H */
2749 +diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
2750 +index 1915bdba2fad..64cfb5446f4d 100644
2751 +--- a/include/linux/fsnotify_backend.h
2752 ++++ b/include/linux/fsnotify_backend.h
2753 +@@ -133,8 +133,7 @@ struct fsnotify_ops {
2754 + */
2755 + struct fsnotify_event {
2756 + struct list_head list;
2757 +- /* inode may ONLY be dereferenced during handle_event(). */
2758 +- struct inode *inode; /* either the inode the event happened to or its parent */
2759 ++ unsigned long objectid; /* identifier for queue merges */
2760 + };
2761 +
2762 + /*
2763 +@@ -500,10 +499,10 @@ extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
2764 + extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info);
2765 +
2766 + static inline void fsnotify_init_event(struct fsnotify_event *event,
2767 +- struct inode *inode)
2768 ++ unsigned long objectid)
2769 + {
2770 + INIT_LIST_HEAD(&event->list);
2771 +- event->inode = inode;
2772 ++ event->objectid = objectid;
2773 + }
2774 +
2775 + #else
2776 +diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
2777 +index 041bfa412aa0..81900b3cbe37 100644
2778 +--- a/include/linux/platform_device.h
2779 ++++ b/include/linux/platform_device.h
2780 +@@ -25,6 +25,7 @@ struct platform_device {
2781 + bool id_auto;
2782 + struct device dev;
2783 + u64 platform_dma_mask;
2784 ++ struct device_dma_parameters dma_parms;
2785 + u32 num_resources;
2786 + struct resource *resource;
2787 +
2788 +diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
2789 +index 0d1fe9297ac6..6f6ade63b04c 100644
2790 +--- a/include/linux/virtio_net.h
2791 ++++ b/include/linux/virtio_net.h
2792 +@@ -3,6 +3,8 @@
2793 + #define _LINUX_VIRTIO_NET_H
2794 +
2795 + #include <linux/if_vlan.h>
2796 ++#include <uapi/linux/tcp.h>
2797 ++#include <uapi/linux/udp.h>
2798 + #include <uapi/linux/virtio_net.h>
2799 +
2800 + static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
2801 +@@ -28,17 +30,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
2802 + bool little_endian)
2803 + {
2804 + unsigned int gso_type = 0;
2805 ++ unsigned int thlen = 0;
2806 ++ unsigned int ip_proto;
2807 +
2808 + if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2809 + switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2810 + case VIRTIO_NET_HDR_GSO_TCPV4:
2811 + gso_type = SKB_GSO_TCPV4;
2812 ++ ip_proto = IPPROTO_TCP;
2813 ++ thlen = sizeof(struct tcphdr);
2814 + break;
2815 + case VIRTIO_NET_HDR_GSO_TCPV6:
2816 + gso_type = SKB_GSO_TCPV6;
2817 ++ ip_proto = IPPROTO_TCP;
2818 ++ thlen = sizeof(struct tcphdr);
2819 + break;
2820 + case VIRTIO_NET_HDR_GSO_UDP:
2821 + gso_type = SKB_GSO_UDP;
2822 ++ ip_proto = IPPROTO_UDP;
2823 ++ thlen = sizeof(struct udphdr);
2824 + break;
2825 + default:
2826 + return -EINVAL;
2827 +@@ -57,16 +67,22 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
2828 +
2829 + if (!skb_partial_csum_set(skb, start, off))
2830 + return -EINVAL;
2831 ++
2832 ++ if (skb_transport_offset(skb) + thlen > skb_headlen(skb))
2833 ++ return -EINVAL;
2834 + } else {
2835 + /* gso packets without NEEDS_CSUM do not set transport_offset.
2836 + * probe and drop if does not match one of the above types.
2837 + */
2838 + if (gso_type && skb->network_header) {
2839 ++ struct flow_keys_basic keys;
2840 ++
2841 + if (!skb->protocol)
2842 + virtio_net_hdr_set_proto(skb, hdr);
2843 + retry:
2844 +- skb_probe_transport_header(skb);
2845 +- if (!skb_transport_header_was_set(skb)) {
2846 ++ if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
2847 ++ NULL, 0, 0, 0,
2848 ++ 0)) {
2849 + /* UFO does not specify ipv4 or 6: try both */
2850 + if (gso_type & SKB_GSO_UDP &&
2851 + skb->protocol == htons(ETH_P_IP)) {
2852 +@@ -75,6 +91,12 @@ retry:
2853 + }
2854 + return -EINVAL;
2855 + }
2856 ++
2857 ++ if (keys.control.thoff + thlen > skb_headlen(skb) ||
2858 ++ keys.basic.ip_proto != ip_proto)
2859 ++ return -EINVAL;
2860 ++
2861 ++ skb_set_transport_header(skb, keys.control.thoff);
2862 + }
2863 + }
2864 +
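The virtio_net_hdr_to_skb() hardening above boils down to one rule: before trusting a GSO packet, verify that the claimed transport header offset plus the minimal TCP/UDP header actually fits inside the linear data, and that the dissected protocol matches the announced GSO type. A tiny sketch of that bounds check on a plain byte buffer, with made-up field names and no skb involved:

/* Reject a packet whose claimed transport header would run past the buffer,
 * or whose protocol does not match the announced GSO type. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct pkt {
	const uint8_t *data;
	size_t len;           /* bytes of linear data available */
	size_t thoff;         /* dissected transport header offset */
	uint8_t ip_proto;     /* dissected L4 protocol */
};

static int validate_gso(const struct pkt *p, uint8_t want_proto, size_t thlen)
{
	if (p->thoff + thlen > p->len)
		return -1;        /* header would overrun the buffer */
	if (p->ip_proto != want_proto)
		return -1;        /* e.g. GSO says TCP but the packet is UDP */
	return 0;
}

int main(void)
{
	uint8_t buf[54] = { 0 };
	struct pkt ok  = { buf, sizeof(buf), 34, 6 };   /* Ethernet+IPv4, TCP */
	struct pkt bad = { buf, 40,          34, 6 };   /* TCP header overruns */

	printf("ok:  %d\n", validate_gso(&ok, 6, 20));  /* 0  */
	printf("bad: %d\n", validate_gso(&bad, 6, 20)); /* -1 */
	return 0;
}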
2865 +diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
2866 +index c8e2bebd8d93..0f0d1efe06dd 100644
2867 +--- a/include/net/inet_ecn.h
2868 ++++ b/include/net/inet_ecn.h
2869 +@@ -99,6 +99,20 @@ static inline int IP_ECN_set_ce(struct iphdr *iph)
2870 + return 1;
2871 + }
2872 +
2873 ++static inline int IP_ECN_set_ect1(struct iphdr *iph)
2874 ++{
2875 ++ u32 check = (__force u32)iph->check;
2876 ++
2877 ++ if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0)
2878 ++ return 0;
2879 ++
2880 ++ check += (__force u16)htons(0x100);
2881 ++
2882 ++ iph->check = (__force __sum16)(check + (check>=0xFFFF));
2883 ++ iph->tos ^= INET_ECN_MASK;
2884 ++ return 1;
2885 ++}
2886 ++
2887 + static inline void IP_ECN_clear(struct iphdr *iph)
2888 + {
2889 + iph->tos &= ~INET_ECN_MASK;
2890 +@@ -134,6 +148,22 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
2891 + return 1;
2892 + }
2893 +
2894 ++static inline int IP6_ECN_set_ect1(struct sk_buff *skb, struct ipv6hdr *iph)
2895 ++{
2896 ++ __be32 from, to;
2897 ++
2898 ++ if ((ipv6_get_dsfield(iph) & INET_ECN_MASK) != INET_ECN_ECT_0)
2899 ++ return 0;
2900 ++
2901 ++ from = *(__be32 *)iph;
2902 ++ to = from ^ htonl(INET_ECN_MASK << 20);
2903 ++ *(__be32 *)iph = to;
2904 ++ if (skb->ip_summed == CHECKSUM_COMPLETE)
2905 ++ skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
2906 ++ (__force __wsum)to);
2907 ++ return 1;
2908 ++}
2909 ++
2910 + static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner)
2911 + {
2912 + dscp &= ~INET_ECN_MASK;
2913 +@@ -159,6 +189,25 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
2914 + return 0;
2915 + }
2916 +
2917 ++static inline int INET_ECN_set_ect1(struct sk_buff *skb)
2918 ++{
2919 ++ switch (skb->protocol) {
2920 ++ case cpu_to_be16(ETH_P_IP):
2921 ++ if (skb_network_header(skb) + sizeof(struct iphdr) <=
2922 ++ skb_tail_pointer(skb))
2923 ++ return IP_ECN_set_ect1(ip_hdr(skb));
2924 ++ break;
2925 ++
2926 ++ case cpu_to_be16(ETH_P_IPV6):
2927 ++ if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
2928 ++ skb_tail_pointer(skb))
2929 ++ return IP6_ECN_set_ect1(skb, ipv6_hdr(skb));
2930 ++ break;
2931 ++ }
2932 ++
2933 ++ return 0;
2934 ++}
2935 ++
2936 + /*
2937 + * RFC 6040 4.2
2938 + * To decapsulate the inner header at the tunnel egress, a compliant
2939 +@@ -208,8 +257,12 @@ static inline int INET_ECN_decapsulate(struct sk_buff *skb,
2940 + int rc;
2941 +
2942 + rc = __INET_ECN_decapsulate(outer, inner, &set_ce);
2943 +- if (!rc && set_ce)
2944 +- INET_ECN_set_ce(skb);
2945 ++ if (!rc) {
2946 ++ if (set_ce)
2947 ++ INET_ECN_set_ce(skb);
2948 ++ else if ((outer & INET_ECN_MASK) == INET_ECN_ECT_1)
2949 ++ INET_ECN_set_ect1(skb);
2950 ++ }
2951 +
2952 + return rc;
2953 + }
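The new IP_ECN_set_ect1() above flips the ECN bits in the IPv4 tos field and patches the header checksum incrementally instead of recomputing it; the kernel uses a hard-coded constant-delta form of the RFC 1624 arithmetic. The runnable sketch below recomputes a header checksum from scratch and compares it with an RFC 1624-style incremental update after toggling the ECN bits; the header words are made up.

/* Incremental one's-complement checksum update (HC' = ~(~HC + ~m + m'))
 * after changing one 16-bit header word, verified against a full recompute. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint16_t csum_full(const uint16_t *words, size_t n, size_t skip)
{
	uint32_t sum = 0;

	for (size_t i = 0; i < n; i++)
		if (i != skip)          /* checksum field counts as zero */
			sum += words[i];
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

static uint16_t csum_update(uint16_t check, uint16_t old_word, uint16_t new_word)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old_word;
	sum += new_word;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* 20-byte IPv4 header as ten 16-bit words; word 0 holds version/ihl
	 * and tos, word 5 is the checksum field. tos = 0x02 is ECT(0). */
	uint16_t hdr[10] = { 0x4502, 0x0054, 0x1234, 0x4000, 0x4011,
			     0,      0xc0a8, 0x0001, 0xc0a8, 0x0002 };
	uint16_t old_w0, incremental, recomputed;

	hdr[5] = csum_full(hdr, 10, 5);

	old_w0 = hdr[0];
	hdr[0] ^= 0x0003;               /* flip the ECN bits: ECT(0) -> ECT(1) */
	incremental = csum_update(hdr[5], old_w0, hdr[0]);
	recomputed  = csum_full(hdr, 10, 5);

	printf("incremental=%#06x recomputed=%#06x %s\n",
	       incremental, recomputed,
	       incremental == recomputed ? "(match)" : "(MISMATCH)");
	return 0;
}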
2954 +diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
2955 +index fd60a8ac02ee..98ec56e2fae2 100644
2956 +--- a/include/net/ip6_fib.h
2957 ++++ b/include/net/ip6_fib.h
2958 +@@ -204,6 +204,7 @@ struct fib6_info {
2959 + struct rt6_info {
2960 + struct dst_entry dst;
2961 + struct fib6_info __rcu *from;
2962 ++ int sernum;
2963 +
2964 + struct rt6key rt6i_dst;
2965 + struct rt6key rt6i_src;
2966 +@@ -292,6 +293,9 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
2967 + struct fib6_info *from;
2968 + u32 cookie = 0;
2969 +
2970 ++ if (rt->sernum)
2971 ++ return rt->sernum;
2972 ++
2973 + rcu_read_lock();
2974 +
2975 + from = rcu_dereference(rt->from);
2976 +diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
2977 +index 854d39ef1ca3..9cdb67e3a553 100644
2978 +--- a/include/net/net_namespace.h
2979 ++++ b/include/net/net_namespace.h
2980 +@@ -432,6 +432,13 @@ static inline int rt_genid_ipv4(const struct net *net)
2981 + return atomic_read(&net->ipv4.rt_genid);
2982 + }
2983 +
2984 ++#if IS_ENABLED(CONFIG_IPV6)
2985 ++static inline int rt_genid_ipv6(const struct net *net)
2986 ++{
2987 ++ return atomic_read(&net->ipv6.fib6_sernum);
2988 ++}
2989 ++#endif
2990 ++
2991 + static inline void rt_genid_bump_ipv4(struct net *net)
2992 + {
2993 + atomic_inc(&net->ipv4.rt_genid);
2994 +diff --git a/ipc/mqueue.c b/ipc/mqueue.c
2995 +index 49a05ba3000d..3ba0ea3d5920 100644
2996 +--- a/ipc/mqueue.c
2997 ++++ b/ipc/mqueue.c
2998 +@@ -142,6 +142,7 @@ struct mqueue_inode_info {
2999 +
3000 + struct sigevent notify;
3001 + struct pid *notify_owner;
3002 ++ u32 notify_self_exec_id;
3003 + struct user_namespace *notify_user_ns;
3004 + struct user_struct *user; /* user who created, for accounting */
3005 + struct sock *notify_sock;
3006 +@@ -774,28 +775,44 @@ static void __do_notify(struct mqueue_inode_info *info)
3007 + * synchronously. */
3008 + if (info->notify_owner &&
3009 + info->attr.mq_curmsgs == 1) {
3010 +- struct kernel_siginfo sig_i;
3011 + switch (info->notify.sigev_notify) {
3012 + case SIGEV_NONE:
3013 + break;
3014 +- case SIGEV_SIGNAL:
3015 +- /* sends signal */
3016 ++ case SIGEV_SIGNAL: {
3017 ++ struct kernel_siginfo sig_i;
3018 ++ struct task_struct *task;
3019 ++
3020 ++ /* do_mq_notify() accepts sigev_signo == 0, why?? */
3021 ++ if (!info->notify.sigev_signo)
3022 ++ break;
3023 +
3024 + clear_siginfo(&sig_i);
3025 + sig_i.si_signo = info->notify.sigev_signo;
3026 + sig_i.si_errno = 0;
3027 + sig_i.si_code = SI_MESGQ;
3028 + sig_i.si_value = info->notify.sigev_value;
3029 +- /* map current pid/uid into info->owner's namespaces */
3030 + rcu_read_lock();
3031 ++ /* map current pid/uid into info->owner's namespaces */
3032 + sig_i.si_pid = task_tgid_nr_ns(current,
3033 + ns_of_pid(info->notify_owner));
3034 +- sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
3035 ++ sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
3036 ++ current_uid());
3037 ++ /*
3038 ++ * We can't use kill_pid_info(), this signal should
3039 ++ * bypass check_kill_permission(). It is from kernel
3040 ++ * but si_fromuser() can't know this.
3041 ++ * We do check the self_exec_id, to avoid sending
3042 ++ * signals to programs that don't expect them.
3043 ++ */
3044 ++ task = pid_task(info->notify_owner, PIDTYPE_TGID);
3045 ++ if (task && task->self_exec_id ==
3046 ++ info->notify_self_exec_id) {
3047 ++ do_send_sig_info(info->notify.sigev_signo,
3048 ++ &sig_i, task, PIDTYPE_TGID);
3049 ++ }
3050 + rcu_read_unlock();
3051 +-
3052 +- kill_pid_info(info->notify.sigev_signo,
3053 +- &sig_i, info->notify_owner);
3054 + break;
3055 ++ }
3056 + case SIGEV_THREAD:
3057 + set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
3058 + netlink_sendskb(info->notify_sock, info->notify_cookie);
3059 +@@ -1384,6 +1401,7 @@ retry:
3060 + info->notify.sigev_signo = notification->sigev_signo;
3061 + info->notify.sigev_value = notification->sigev_value;
3062 + info->notify.sigev_notify = SIGEV_SIGNAL;
3063 ++ info->notify_self_exec_id = current->self_exec_id;
3064 + break;
3065 + }
3066 +
3067 +diff --git a/kernel/trace/preemptirq_delay_test.c b/kernel/trace/preemptirq_delay_test.c
3068 +index 31c0fad4cb9e..c4c86de63cf9 100644
3069 +--- a/kernel/trace/preemptirq_delay_test.c
3070 ++++ b/kernel/trace/preemptirq_delay_test.c
3071 +@@ -113,22 +113,42 @@ static int preemptirq_delay_run(void *data)
3072 +
3073 + for (i = 0; i < s; i++)
3074 + (testfuncs[i])(i);
3075 ++
3076 ++ set_current_state(TASK_INTERRUPTIBLE);
3077 ++ while (!kthread_should_stop()) {
3078 ++ schedule();
3079 ++ set_current_state(TASK_INTERRUPTIBLE);
3080 ++ }
3081 ++
3082 ++ __set_current_state(TASK_RUNNING);
3083 ++
3084 + return 0;
3085 + }
3086 +
3087 +-static struct task_struct *preemptirq_start_test(void)
3088 ++static int preemptirq_run_test(void)
3089 + {
3090 ++ struct task_struct *task;
3091 ++
3092 + char task_name[50];
3093 +
3094 + snprintf(task_name, sizeof(task_name), "%s_test", test_mode);
3095 +- return kthread_run(preemptirq_delay_run, NULL, task_name);
3096 ++ task = kthread_run(preemptirq_delay_run, NULL, task_name);
3097 ++ if (IS_ERR(task))
3098 ++ return PTR_ERR(task);
3099 ++ if (task)
3100 ++ kthread_stop(task);
3101 ++ return 0;
3102 + }
3103 +
3104 +
3105 + static ssize_t trigger_store(struct kobject *kobj, struct kobj_attribute *attr,
3106 + const char *buf, size_t count)
3107 + {
3108 +- preemptirq_start_test();
3109 ++ ssize_t ret;
3110 ++
3111 ++ ret = preemptirq_run_test();
3112 ++ if (ret)
3113 ++ return ret;
3114 + return count;
3115 + }
3116 +
3117 +@@ -148,11 +168,9 @@ static struct kobject *preemptirq_delay_kobj;
3118 +
3119 + static int __init preemptirq_delay_init(void)
3120 + {
3121 +- struct task_struct *test_task;
3122 + int retval;
3123 +
3124 +- test_task = preemptirq_start_test();
3125 +- retval = PTR_ERR_OR_ZERO(test_task);
3126 ++ retval = preemptirq_run_test();
3127 + if (retval != 0)
3128 + return retval;
3129 +
3130 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
3131 +index 6b11e4e2150c..5f0aa5d66e22 100644
3132 +--- a/kernel/trace/trace.c
3133 ++++ b/kernel/trace/trace.c
3134 +@@ -8452,6 +8452,19 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
3135 + */
3136 + allocate_snapshot = false;
3137 + #endif
3138 ++
3139 ++ /*
3140 ++ * Because of some magic with the way alloc_percpu() works on
3141 ++ * x86_64, we need to synchronize the pgd of all the tables,
3142 ++ * otherwise the trace events that happen in x86_64 page fault
3143 ++ * handlers can't cope with the chance that alloc_percpu()'d
3144 ++ * memory might be touched in the page fault trace
3145 ++ * event. Oh, and we need to audit all other alloc_percpu() and vmalloc()
3146 ++ * calls in tracing, because something might get triggered within a
3147 ++ * page fault trace event!
3148 ++ */
3149 ++ vmalloc_sync_mappings();
3150 ++
3151 + return 0;
3152 + }
3153 +
3154 +diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
3155 +index 06d7feb5255f..9de29bb45a27 100644
3156 +--- a/kernel/trace/trace_boot.c
3157 ++++ b/kernel/trace/trace_boot.c
3158 +@@ -95,24 +95,20 @@ trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
3159 + struct xbc_node *anode;
3160 + char buf[MAX_BUF_LEN];
3161 + const char *val;
3162 +- int ret;
3163 ++ int ret = 0;
3164 +
3165 +- kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
3166 ++ xbc_node_for_each_array_value(node, "probes", anode, val) {
3167 ++ kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
3168 +
3169 +- ret = kprobe_event_gen_cmd_start(&cmd, event, NULL);
3170 +- if (ret)
3171 +- return ret;
3172 ++ ret = kprobe_event_gen_cmd_start(&cmd, event, val);
3173 ++ if (ret)
3174 ++ break;
3175 +
3176 +- xbc_node_for_each_array_value(node, "probes", anode, val) {
3177 +- ret = kprobe_event_add_field(&cmd, val);
3178 ++ ret = kprobe_event_gen_cmd_end(&cmd);
3179 + if (ret)
3180 +- return ret;
3181 ++ pr_err("Failed to add probe: %s\n", buf);
3182 + }
3183 +
3184 +- ret = kprobe_event_gen_cmd_end(&cmd);
3185 +- if (ret)
3186 +- pr_err("Failed to add probe: %s\n", buf);
3187 +-
3188 + return ret;
3189 + }
3190 + #else
3191 +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
3192 +index d0568af4a0ef..35989383ae11 100644
3193 +--- a/kernel/trace/trace_kprobe.c
3194 ++++ b/kernel/trace/trace_kprobe.c
3195 +@@ -453,7 +453,7 @@ static bool __within_notrace_func(unsigned long addr)
3196 +
3197 + static bool within_notrace_func(struct trace_kprobe *tk)
3198 + {
3199 +- unsigned long addr = addr = trace_kprobe_address(tk);
3200 ++ unsigned long addr = trace_kprobe_address(tk);
3201 + char symname[KSYM_NAME_LEN], *p;
3202 +
3203 + if (!__within_notrace_func(addr))
3204 +@@ -940,6 +940,9 @@ EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
3205 + * complete command or only the first part of it; in the latter case,
3206 + * kprobe_event_add_fields() can be used to add more fields following this.
3207 + *
3208 ++ * Unlike synth_event_gen_cmd_start(), @loc must be specified. This
3209 ++ * returns -EINVAL if @loc == NULL.
3210 ++ *
3211 + * Return: 0 if successful, error otherwise.
3212 + */
3213 + int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
3214 +@@ -953,6 +956,9 @@ int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
3215 + if (cmd->type != DYNEVENT_TYPE_KPROBE)
3216 + return -EINVAL;
3217 +
3218 ++ if (!loc)
3219 ++ return -EINVAL;
3220 ++
3221 + if (kretprobe)
3222 + snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
3223 + else
3224 +diff --git a/kernel/umh.c b/kernel/umh.c
3225 +index 7f255b5a8845..11bf5eea474c 100644
3226 +--- a/kernel/umh.c
3227 ++++ b/kernel/umh.c
3228 +@@ -544,6 +544,11 @@ EXPORT_SYMBOL_GPL(fork_usermode_blob);
3229 + * Runs a user-space application. The application is started
3230 + * asynchronously if wait is not set, and runs as a child of system workqueues.
3231 + * (ie. it runs with full root capabilities and optimized affinity).
3232 ++ *
3233 ++ * Note: successful return value does not guarantee the helper was called at
3234 ++ * all. You can't rely on sub_info->{init,cleanup} being called even for
3235 ++ * UMH_WAIT_* wait modes as STATIC_USERMODEHELPER_PATH="" turns all helpers
3236 ++ * into a successful no-op.
3237 + */
3238 + int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
3239 + {
3240 +diff --git a/mm/backing-dev.c b/mm/backing-dev.c
3241 +index 62f05f605fb5..3f2480e4c5af 100644
3242 +--- a/mm/backing-dev.c
3243 ++++ b/mm/backing-dev.c
3244 +@@ -21,7 +21,7 @@ struct backing_dev_info noop_backing_dev_info = {
3245 + EXPORT_SYMBOL_GPL(noop_backing_dev_info);
3246 +
3247 + static struct class *bdi_class;
3248 +-const char *bdi_unknown_name = "(unknown)";
3249 ++static const char *bdi_unknown_name = "(unknown)";
3250 +
3251 + /*
3252 + * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
3253 +@@ -938,7 +938,8 @@ int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
3254 + if (bdi->dev) /* The driver needs to use separate queues per device */
3255 + return 0;
3256 +
3257 +- dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args);
3258 ++ vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args);
3259 ++ dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name);
3260 + if (IS_ERR(dev))
3261 + return PTR_ERR(dev);
3262 +
3263 +@@ -1043,6 +1044,14 @@ void bdi_put(struct backing_dev_info *bdi)
3264 + }
3265 + EXPORT_SYMBOL(bdi_put);
3266 +
3267 ++const char *bdi_dev_name(struct backing_dev_info *bdi)
3268 ++{
3269 ++ if (!bdi || !bdi->dev)
3270 ++ return bdi_unknown_name;
3271 ++ return bdi->dev_name;
3272 ++}
3273 ++EXPORT_SYMBOL_GPL(bdi_dev_name);
3274 ++
3275 + static wait_queue_head_t congestion_wqh[2] = {
3276 + __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
3277 + __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
3278 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
3279 +index 615d73acd0da..537eae162ed3 100644
3280 +--- a/mm/memcontrol.c
3281 ++++ b/mm/memcontrol.c
3282 +@@ -4977,19 +4977,22 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
3283 + unsigned int size;
3284 + int node;
3285 + int __maybe_unused i;
3286 ++ long error = -ENOMEM;
3287 +
3288 + size = sizeof(struct mem_cgroup);
3289 + size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
3290 +
3291 + memcg = kzalloc(size, GFP_KERNEL);
3292 + if (!memcg)
3293 +- return NULL;
3294 ++ return ERR_PTR(error);
3295 +
3296 + memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
3297 + 1, MEM_CGROUP_ID_MAX,
3298 + GFP_KERNEL);
3299 +- if (memcg->id.id < 0)
3300 ++ if (memcg->id.id < 0) {
3301 ++ error = memcg->id.id;
3302 + goto fail;
3303 ++ }
3304 +
3305 + memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu);
3306 + if (!memcg->vmstats_local)
3307 +@@ -5033,7 +5036,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
3308 + fail:
3309 + mem_cgroup_id_remove(memcg);
3310 + __mem_cgroup_free(memcg);
3311 +- return NULL;
3312 ++ return ERR_PTR(error);
3313 + }
3314 +
3315 + static struct cgroup_subsys_state * __ref
3316 +@@ -5044,8 +5047,8 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
3317 + long error = -ENOMEM;
3318 +
3319 + memcg = mem_cgroup_alloc();
3320 +- if (!memcg)
3321 +- return ERR_PTR(error);
3322 ++ if (IS_ERR(memcg))
3323 ++ return ERR_CAST(memcg);
3324 +
3325 + memcg->high = PAGE_COUNTER_MAX;
3326 + memcg->soft_limit = PAGE_COUNTER_MAX;
3327 +@@ -5095,7 +5098,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
3328 + fail:
3329 + mem_cgroup_id_remove(memcg);
3330 + mem_cgroup_free(memcg);
3331 +- return ERR_PTR(-ENOMEM);
3332 ++ return ERR_PTR(error);
3333 + }
3334 +
3335 + static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
3336 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
3337 +index 3c4eb750a199..a97de355a13c 100644
3338 +--- a/mm/page_alloc.c
3339 ++++ b/mm/page_alloc.c
3340 +@@ -1555,6 +1555,7 @@ void set_zone_contiguous(struct zone *zone)
3341 + if (!__pageblock_pfn_to_page(block_start_pfn,
3342 + block_end_pfn, zone))
3343 + return;
3344 ++ cond_resched();
3345 + }
3346 +
3347 + /* We confirm that there is no hole */
3348 +@@ -2350,6 +2351,14 @@ static inline void boost_watermark(struct zone *zone)
3349 +
3350 + if (!watermark_boost_factor)
3351 + return;
3352 ++ /*
3353 ++ * Don't bother in zones that are unlikely to produce results.
3354 ++ * On small machines, including kdump capture kernels running
3355 ++ * in a small area, boosting the watermark can cause an out of
3356 ++ * memory situation immediately.
3357 ++ */
3358 ++ if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
3359 ++ return;
3360 +
3361 + max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
3362 + watermark_boost_factor, 10000);
3363 +diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
3364 +index 969466218999..80b87b1f4e3a 100644
3365 +--- a/net/batman-adv/bat_v_ogm.c
3366 ++++ b/net/batman-adv/bat_v_ogm.c
3367 +@@ -893,7 +893,7 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
3368 +
3369 + orig_node = batadv_v_ogm_orig_get(bat_priv, ogm_packet->orig);
3370 + if (!orig_node)
3371 +- return;
3372 ++ goto out;
3373 +
3374 + neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming,
3375 + ethhdr->h_source);
3376 +diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
3377 +index 8f0717c3f7b5..b0469d15da0e 100644
3378 +--- a/net/batman-adv/network-coding.c
3379 ++++ b/net/batman-adv/network-coding.c
3380 +@@ -1009,15 +1009,8 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
3381 + */
3382 + static u8 batadv_nc_random_weight_tq(u8 tq)
3383 + {
3384 +- u8 rand_val, rand_tq;
3385 +-
3386 +- get_random_bytes(&rand_val, sizeof(rand_val));
3387 +-
3388 + /* randomize the estimated packet loss (max TQ - estimated TQ) */
3389 +- rand_tq = rand_val * (BATADV_TQ_MAX_VALUE - tq);
3390 +-
3391 +- /* normalize the randomized packet loss */
3392 +- rand_tq /= BATADV_TQ_MAX_VALUE;
3393 ++ u8 rand_tq = prandom_u32_max(BATADV_TQ_MAX_VALUE + 1 - tq);
3394 +
3395 + /* convert to (randomized) estimated tq again */
3396 + return BATADV_TQ_MAX_VALUE - rand_tq;
3397 +diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
3398 +index c45962d8527b..0f962dcd239e 100644
3399 +--- a/net/batman-adv/sysfs.c
3400 ++++ b/net/batman-adv/sysfs.c
3401 +@@ -1150,7 +1150,7 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
3402 + ret = batadv_parse_throughput(net_dev, buff, "throughput_override",
3403 + &tp_override);
3404 + if (!ret)
3405 +- return count;
3406 ++ goto out;
3407 +
3408 + old_tp_override = atomic_read(&hard_iface->bat_v.throughput_override);
3409 + if (old_tp_override == tp_override)
3410 +@@ -1190,6 +1190,7 @@ static ssize_t batadv_show_throughput_override(struct kobject *kobj,
3411 +
3412 + tp_override = atomic_read(&hard_iface->bat_v.throughput_override);
3413 +
3414 ++ batadv_hardif_put(hard_iface);
3415 + return sprintf(buff, "%u.%u MBit\n", tp_override / 10,
3416 + tp_override % 10);
3417 + }
3418 +diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
3419 +index 43dab4066f91..a0f5dbee8f9c 100644
3420 +--- a/net/bridge/br_netlink.c
3421 ++++ b/net/bridge/br_netlink.c
3422 +@@ -612,6 +612,7 @@ int br_process_vlan_info(struct net_bridge *br,
3423 + v - 1, rtm_cmd);
3424 + v_change_start = 0;
3425 + }
3426 ++ cond_resched();
3427 + }
3428 + /* v_change_start is set only if the last/whole range changed */
3429 + if (v_change_start)
3430 +diff --git a/net/core/devlink.c b/net/core/devlink.c
3431 +index b831c5545d6a..b4e26b702352 100644
3432 +--- a/net/core/devlink.c
3433 ++++ b/net/core/devlink.c
3434 +@@ -4030,6 +4030,11 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
3435 + end_offset = nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]);
3436 + end_offset += nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]);
3437 + dump = false;
3438 ++
3439 ++ if (start_offset == end_offset) {
3440 ++ err = 0;
3441 ++ goto nla_put_failure;
3442 ++ }
3443 + }
3444 +
3445 + err = devlink_nl_region_read_snapshot_fill(skb, devlink,
3446 +@@ -5029,6 +5034,7 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
3447 + {
3448 + enum devlink_health_reporter_state prev_health_state;
3449 + struct devlink *devlink = reporter->devlink;
3450 ++ unsigned long recover_ts_threshold;
3451 +
3452 + /* write a log message of the current error */
3453 + WARN_ON(!msg);
3454 +@@ -5039,10 +5045,12 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
3455 + devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
3456 +
3457 + /* abort if the previous error wasn't recovered */
3458 ++ recover_ts_threshold = reporter->last_recovery_ts +
3459 ++ msecs_to_jiffies(reporter->graceful_period);
3460 + if (reporter->auto_recover &&
3461 + (prev_health_state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY ||
3462 +- jiffies - reporter->last_recovery_ts <
3463 +- msecs_to_jiffies(reporter->graceful_period))) {
3464 ++ (reporter->last_recovery_ts && reporter->recovery_count &&
3465 ++ time_is_after_jiffies(recover_ts_threshold)))) {
3466 + trace_devlink_health_recover_aborted(devlink,
3467 + reporter->ops->name,
3468 + reporter->health_state,
3469 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
3470 +index 789a73aa7bd8..04953e5f2530 100644
3471 +--- a/net/core/neighbour.c
3472 ++++ b/net/core/neighbour.c
3473 +@@ -1954,6 +1954,9 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
3474 + NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
3475 + }
3476 +
3477 ++ if (protocol)
3478 ++ neigh->protocol = protocol;
3479 ++
3480 + if (ndm->ndm_flags & NTF_EXT_LEARNED)
3481 + flags |= NEIGH_UPDATE_F_EXT_LEARNED;
3482 +
3483 +@@ -1967,9 +1970,6 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
3484 + err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
3485 + NETLINK_CB(skb).portid, extack);
3486 +
3487 +- if (protocol)
3488 +- neigh->protocol = protocol;
3489 +-
3490 + neigh_release(neigh);
3491 +
3492 + out:
3493 +diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
3494 +index e7c30b472034..154b639d27b8 100644
3495 +--- a/net/dsa/dsa2.c
3496 ++++ b/net/dsa/dsa2.c
3497 +@@ -459,7 +459,7 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
3498 + list_for_each_entry(dp, &dst->ports, list) {
3499 + err = dsa_port_setup(dp);
3500 + if (err)
3501 +- goto teardown;
3502 ++ continue;
3503 + }
3504 +
3505 + return 0;
3506 +diff --git a/net/dsa/master.c b/net/dsa/master.c
3507 +index bd44bde272f4..4f5219e2e63c 100644
3508 +--- a/net/dsa/master.c
3509 ++++ b/net/dsa/master.c
3510 +@@ -289,7 +289,8 @@ static void dsa_master_ndo_teardown(struct net_device *dev)
3511 + {
3512 + struct dsa_port *cpu_dp = dev->dsa_ptr;
3513 +
3514 +- dev->netdev_ops = cpu_dp->orig_ndo_ops;
3515 ++ if (cpu_dp->orig_ndo_ops)
3516 ++ dev->netdev_ops = cpu_dp->orig_ndo_ops;
3517 + cpu_dp->orig_ndo_ops = NULL;
3518 + }
3519 +
3520 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
3521 +index 2931224b674e..42d0596dd398 100644
3522 +--- a/net/ipv6/route.c
3523 ++++ b/net/ipv6/route.c
3524 +@@ -1388,9 +1388,18 @@ static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
3525 + }
3526 + ip6_rt_copy_init(pcpu_rt, res);
3527 + pcpu_rt->rt6i_flags |= RTF_PCPU;
3528 ++
3529 ++ if (f6i->nh)
3530 ++ pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev));
3531 ++
3532 + return pcpu_rt;
3533 + }
3534 +
3535 ++static bool rt6_is_valid(const struct rt6_info *rt6)
3536 ++{
3537 ++ return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev));
3538 ++}
3539 ++
3540 + /* It should be called with rcu_read_lock() acquired */
3541 + static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
3542 + {
3543 +@@ -1398,6 +1407,19 @@ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
3544 +
3545 + pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);
3546 +
3547 ++ if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) {
3548 ++ struct rt6_info *prev, **p;
3549 ++
3550 ++ p = this_cpu_ptr(res->nh->rt6i_pcpu);
3551 ++ prev = xchg(p, NULL);
3552 ++ if (prev) {
3553 ++ dst_dev_put(&prev->dst);
3554 ++ dst_release(&prev->dst);
3555 ++ }
3556 ++
3557 ++ pcpu_rt = NULL;
3558 ++ }
3559 ++
3560 + return pcpu_rt;
3561 + }
3562 +
3563 +@@ -2596,6 +2618,9 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
3564 +
3565 + rt = container_of(dst, struct rt6_info, dst);
3566 +
3567 ++ if (rt->sernum)
3568 ++ return rt6_is_valid(rt) ? dst : NULL;
3569 ++
3570 + rcu_read_lock();
3571 +
3572 + /* All IPV6 dsts are created with ->obsolete set to the value
3573 +diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c
3574 +index 3d816a1e5442..59151dc07fdc 100644
3575 +--- a/net/netfilter/nf_nat_proto.c
3576 ++++ b/net/netfilter/nf_nat_proto.c
3577 +@@ -68,15 +68,13 @@ static bool udp_manip_pkt(struct sk_buff *skb,
3578 + enum nf_nat_manip_type maniptype)
3579 + {
3580 + struct udphdr *hdr;
3581 +- bool do_csum;
3582 +
3583 + if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
3584 + return false;
3585 +
3586 + hdr = (struct udphdr *)(skb->data + hdroff);
3587 +- do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL;
3588 ++ __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, !!hdr->check);
3589 +
3590 +- __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, do_csum);
3591 + return true;
3592 + }
3593 +
3594 +diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
3595 +index 9f5dea0064ea..916a3c7f9eaf 100644
3596 +--- a/net/netfilter/nfnetlink_osf.c
3597 ++++ b/net/netfilter/nfnetlink_osf.c
3598 +@@ -165,12 +165,12 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
3599 + static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx,
3600 + const struct sk_buff *skb,
3601 + const struct iphdr *ip,
3602 +- unsigned char *opts)
3603 ++ unsigned char *opts,
3604 ++ struct tcphdr *_tcph)
3605 + {
3606 + const struct tcphdr *tcp;
3607 +- struct tcphdr _tcph;
3608 +
3609 +- tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph);
3610 ++ tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), _tcph);
3611 + if (!tcp)
3612 + return NULL;
3613 +
3614 +@@ -205,10 +205,11 @@ nf_osf_match(const struct sk_buff *skb, u_int8_t family,
3615 + int fmatch = FMATCH_WRONG;
3616 + struct nf_osf_hdr_ctx ctx;
3617 + const struct tcphdr *tcp;
3618 ++ struct tcphdr _tcph;
3619 +
3620 + memset(&ctx, 0, sizeof(ctx));
3621 +
3622 +- tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts);
3623 ++ tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph);
3624 + if (!tcp)
3625 + return false;
3626 +
3627 +@@ -265,10 +266,11 @@ bool nf_osf_find(const struct sk_buff *skb,
3628 + const struct nf_osf_finger *kf;
3629 + struct nf_osf_hdr_ctx ctx;
3630 + const struct tcphdr *tcp;
3631 ++ struct tcphdr _tcph;
3632 +
3633 + memset(&ctx, 0, sizeof(ctx));
3634 +
3635 +- tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts);
3636 ++ tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph);
3637 + if (!tcp)
3638 + return false;
3639 +
3640 +diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
3641 +index a36974e9c601..1bcf8fbfd40e 100644
3642 +--- a/net/sched/sch_choke.c
3643 ++++ b/net/sched/sch_choke.c
3644 +@@ -323,7 +323,8 @@ static void choke_reset(struct Qdisc *sch)
3645 +
3646 + sch->q.qlen = 0;
3647 + sch->qstats.backlog = 0;
3648 +- memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
3649 ++ if (q->tab)
3650 ++ memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
3651 + q->head = q->tail = 0;
3652 + red_restart(&q->vars);
3653 + }
3654 +diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
3655 +index 968519ff36e9..436160be9c18 100644
3656 +--- a/net/sched/sch_fq_codel.c
3657 ++++ b/net/sched/sch_fq_codel.c
3658 +@@ -416,7 +416,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
3659 + q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
3660 +
3661 + if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
3662 +- q->drop_batch_size = min(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
3663 ++ q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
3664 +
3665 + if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
3666 + q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));
3667 +diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
3668 +index c787d4d46017..5a6def5e4e6d 100644
3669 +--- a/net/sched/sch_sfq.c
3670 ++++ b/net/sched/sch_sfq.c
3671 +@@ -637,6 +637,15 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
3672 + if (ctl->divisor &&
3673 + (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
3674 + return -EINVAL;
3675 ++
3676 ++ /* slot->allot is a short, make sure quantum is not too big. */
3677 ++ if (ctl->quantum) {
3678 ++ unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum);
3679 ++
3680 ++ if (scaled <= 0 || scaled > SHRT_MAX)
3681 ++ return -EINVAL;
3682 ++ }
3683 ++
3684 + if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
3685 + ctl_v1->Wlog))
3686 + return -EINVAL;
3687 +diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c
3688 +index 0fb10abf7579..7a5e4c454715 100644
3689 +--- a/net/sched/sch_skbprio.c
3690 ++++ b/net/sched/sch_skbprio.c
3691 +@@ -169,6 +169,9 @@ static int skbprio_change(struct Qdisc *sch, struct nlattr *opt,
3692 + {
3693 + struct tc_skbprio_qopt *ctl = nla_data(opt);
3694 +
3695 ++ if (opt->nla_len != nla_attr_size(sizeof(*ctl)))
3696 ++ return -EINVAL;
3697 ++
3698 + sch->limit = ctl->limit;
3699 + return 0;
3700 + }
3701 +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
3702 +index 6a16af4b1ef6..26788f4a3b9e 100644
3703 +--- a/net/sctp/sm_statefuns.c
3704 ++++ b/net/sctp/sm_statefuns.c
3705 +@@ -1865,7 +1865,7 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
3706 + */
3707 + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
3708 + return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
3709 +- SCTP_ST_CHUNK(0), NULL,
3710 ++ SCTP_ST_CHUNK(0), repl,
3711 + commands);
3712 + } else {
3713 + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
3714 +@@ -5470,7 +5470,7 @@ enum sctp_disposition sctp_sf_do_9_2_start_shutdown(
3715 + * in the Cumulative TSN Ack field the last sequential TSN it
3716 + * has received from the peer.
3717 + */
3718 +- reply = sctp_make_shutdown(asoc, NULL);
3719 ++ reply = sctp_make_shutdown(asoc, arg);
3720 + if (!reply)
3721 + goto nomem;
3722 +
3723 +@@ -6068,7 +6068,7 @@ enum sctp_disposition sctp_sf_autoclose_timer_expire(
3724 + disposition = SCTP_DISPOSITION_CONSUME;
3725 + if (sctp_outq_is_empty(&asoc->outqueue)) {
3726 + disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
3727 +- arg, commands);
3728 ++ NULL, commands);
3729 + }
3730 +
3731 + return disposition;
3732 +diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
3733 +index 3a12fc18239b..73dbed0c4b6b 100644
3734 +--- a/net/tipc/topsrv.c
3735 ++++ b/net/tipc/topsrv.c
3736 +@@ -402,10 +402,11 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
3737 + read_lock_bh(&sk->sk_callback_lock);
3738 + ret = tipc_conn_rcv_sub(srv, con, &s);
3739 + read_unlock_bh(&sk->sk_callback_lock);
3740 ++ if (!ret)
3741 ++ return 0;
3742 + }
3743 +- if (ret < 0)
3744 +- tipc_conn_close(con);
3745 +
3746 ++ tipc_conn_close(con);
3747 + return ret;
3748 + }
3749 +
3750 +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
3751 +index c98e602a1a2d..e23f94a5549b 100644
3752 +--- a/net/tls/tls_sw.c
3753 ++++ b/net/tls/tls_sw.c
3754 +@@ -800,6 +800,8 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
3755 + *copied -= sk_msg_free(sk, msg);
3756 + tls_free_open_rec(sk);
3757 + }
3758 ++ if (psock)
3759 ++ sk_psock_put(sk, psock);
3760 + return err;
3761 + }
3762 + more_data:
3763 +@@ -2081,8 +2083,9 @@ static void tls_data_ready(struct sock *sk)
3764 + strp_data_ready(&ctx->strp);
3765 +
3766 + psock = sk_psock_get(sk);
3767 +- if (psock && !list_empty(&psock->ingress_msg)) {
3768 +- ctx->saved_data_ready(sk);
3769 ++ if (psock) {
3770 ++ if (!list_empty(&psock->ingress_msg))
3771 ++ ctx->saved_data_ready(sk);
3772 + sk_psock_put(sk, psock);
3773 + }
3774 + }
3775 +diff --git a/scripts/decodecode b/scripts/decodecode
3776 +index ba8b8d5834e6..fbdb325cdf4f 100755
3777 +--- a/scripts/decodecode
3778 ++++ b/scripts/decodecode
3779 +@@ -126,7 +126,7 @@ faultlinenum=$(( $(wc -l $T.oo | cut -d" " -f1) - \
3780 + faultline=`cat $T.dis | head -1 | cut -d":" -f2-`
3781 + faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'`
3782 +
3783 +-cat $T.oo | sed -e "${faultlinenum}s/^\(.*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/"
3784 ++cat $T.oo | sed -e "${faultlinenum}s/^\([^:]*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/"
3785 + echo
3786 + cat $T.aa
3787 + cleanup
3788 +diff --git a/tools/cgroup/iocost_monitor.py b/tools/cgroup/iocost_monitor.py
3789 +index 7427a5ee761b..9d8e9613008a 100644
3790 +--- a/tools/cgroup/iocost_monitor.py
3791 ++++ b/tools/cgroup/iocost_monitor.py
3792 +@@ -159,7 +159,12 @@ class IocgStat:
3793 + else:
3794 + self.inflight_pct = 0
3795 +
3796 +- self.debt_ms = iocg.abs_vdebt.counter.value_() / VTIME_PER_USEC / 1000
3797 ++ # vdebt used to be an atomic64_t and is now u64, support both
3798 ++ try:
3799 ++ self.debt_ms = iocg.abs_vdebt.counter.value_() / VTIME_PER_USEC / 1000
3800 ++ except:
3801 ++ self.debt_ms = iocg.abs_vdebt.value_() / VTIME_PER_USEC / 1000
3802 ++
3803 + self.use_delay = blkg.use_delay.counter.value_()
3804 + self.delay_ms = blkg.delay_nsec.counter.value_() / 1_000_000
3805 +
3806 +diff --git a/tools/objtool/check.c b/tools/objtool/check.c
3807 +index 95c485d3d4d8..f9ffb548b4fa 100644
3808 +--- a/tools/objtool/check.c
3809 ++++ b/tools/objtool/check.c
3810 +@@ -1403,7 +1403,7 @@ static int update_insn_state_regs(struct instruction *insn, struct insn_state *s
3811 + struct cfi_reg *cfa = &state->cfa;
3812 + struct stack_op *op = &insn->stack_op;
3813 +
3814 +- if (cfa->base != CFI_SP)
3815 ++ if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
3816 + return 0;
3817 +
3818 + /* push */
3819 +diff --git a/tools/testing/selftests/net/tcp_mmap.c b/tools/testing/selftests/net/tcp_mmap.c
3820 +index 35505b31e5cc..4555f88252ba 100644
3821 +--- a/tools/testing/selftests/net/tcp_mmap.c
3822 ++++ b/tools/testing/selftests/net/tcp_mmap.c
3823 +@@ -165,9 +165,10 @@ void *child_thread(void *arg)
3824 + socklen_t zc_len = sizeof(zc);
3825 + int res;
3826 +
3827 ++ memset(&zc, 0, sizeof(zc));
3828 + zc.address = (__u64)((unsigned long)addr);
3829 + zc.length = chunk_size;
3830 +- zc.recv_skip_hint = 0;
3831 ++
3832 + res = getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE,
3833 + &zc, &zc_len);
3834 + if (res == -1)
3835 +@@ -281,12 +282,14 @@ static void setup_sockaddr(int domain, const char *str_addr,
3836 + static void do_accept(int fdlisten)
3837 + {
3838 + pthread_attr_t attr;
3839 ++ int rcvlowat;
3840 +
3841 + pthread_attr_init(&attr);
3842 + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
3843 +
3844 ++ rcvlowat = chunk_size;
3845 + if (setsockopt(fdlisten, SOL_SOCKET, SO_RCVLOWAT,
3846 +- &chunk_size, sizeof(chunk_size)) == -1) {
3847 ++ &rcvlowat, sizeof(rcvlowat)) == -1) {
3848 + perror("setsockopt SO_RCVLOWAT");
3849 + }
3850 +
3851 +diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
3852 +index 936e1ca9410e..17a1f53ceba0 100755
3853 +--- a/tools/testing/selftests/wireguard/netns.sh
3854 ++++ b/tools/testing/selftests/wireguard/netns.sh
3855 +@@ -48,8 +48,11 @@ cleanup() {
3856 + exec 2>/dev/null
3857 + printf "$orig_message_cost" > /proc/sys/net/core/message_cost
3858 + ip0 link del dev wg0
3859 ++ ip0 link del dev wg1
3860 + ip1 link del dev wg0
3861 ++ ip1 link del dev wg1
3862 + ip2 link del dev wg0
3863 ++ ip2 link del dev wg1
3864 + local to_kill="$(ip netns pids $netns0) $(ip netns pids $netns1) $(ip netns pids $netns2)"
3865 + [[ -n $to_kill ]] && kill $to_kill
3866 + pp ip netns del $netns1
3867 +@@ -77,18 +80,20 @@ ip0 link set wg0 netns $netns2
3868 + key1="$(pp wg genkey)"
3869 + key2="$(pp wg genkey)"
3870 + key3="$(pp wg genkey)"
3871 ++key4="$(pp wg genkey)"
3872 + pub1="$(pp wg pubkey <<<"$key1")"
3873 + pub2="$(pp wg pubkey <<<"$key2")"
3874 + pub3="$(pp wg pubkey <<<"$key3")"
3875 ++pub4="$(pp wg pubkey <<<"$key4")"
3876 + psk="$(pp wg genpsk)"
3877 + [[ -n $key1 && -n $key2 && -n $psk ]]
3878 +
3879 + configure_peers() {
3880 + ip1 addr add 192.168.241.1/24 dev wg0
3881 +- ip1 addr add fd00::1/24 dev wg0
3882 ++ ip1 addr add fd00::1/112 dev wg0
3883 +
3884 + ip2 addr add 192.168.241.2/24 dev wg0
3885 +- ip2 addr add fd00::2/24 dev wg0
3886 ++ ip2 addr add fd00::2/112 dev wg0
3887 +
3888 + n1 wg set wg0 \
3889 + private-key <(echo "$key1") \
3890 +@@ -230,9 +235,38 @@ n1 ping -W 1 -c 1 192.168.241.2
3891 + n1 wg set wg0 private-key <(echo "$key3")
3892 + n2 wg set wg0 peer "$pub3" preshared-key <(echo "$psk") allowed-ips 192.168.241.1/32 peer "$pub1" remove
3893 + n1 ping -W 1 -c 1 192.168.241.2
3894 ++n2 wg set wg0 peer "$pub3" remove
3895 ++
3896 ++# Test that we can route wg through wg
3897 ++ip1 addr flush dev wg0
3898 ++ip2 addr flush dev wg0
3899 ++ip1 addr add fd00::5:1/112 dev wg0
3900 ++ip2 addr add fd00::5:2/112 dev wg0
3901 ++n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk") allowed-ips fd00::5:2/128 endpoint 127.0.0.1:2
3902 ++n2 wg set wg0 private-key <(echo "$key2") listen-port 2 peer "$pub1" preshared-key <(echo "$psk") allowed-ips fd00::5:1/128 endpoint 127.212.121.99:9998
3903 ++ip1 link add wg1 type wireguard
3904 ++ip2 link add wg1 type wireguard
3905 ++ip1 addr add 192.168.241.1/24 dev wg1
3906 ++ip1 addr add fd00::1/112 dev wg1
3907 ++ip2 addr add 192.168.241.2/24 dev wg1
3908 ++ip2 addr add fd00::2/112 dev wg1
3909 ++ip1 link set mtu 1340 up dev wg1
3910 ++ip2 link set mtu 1340 up dev wg1
3911 ++n1 wg set wg1 listen-port 5 private-key <(echo "$key3") peer "$pub4" allowed-ips 192.168.241.2/32,fd00::2/128 endpoint [fd00::5:2]:5
3912 ++n2 wg set wg1 listen-port 5 private-key <(echo "$key4") peer "$pub3" allowed-ips 192.168.241.1/32,fd00::1/128 endpoint [fd00::5:1]:5
3913 ++tests
3914 ++# Try to set up a routing loop between the two namespaces
3915 ++ip1 link set netns $netns0 dev wg1
3916 ++ip0 addr add 192.168.241.1/24 dev wg1
3917 ++ip0 link set up dev wg1
3918 ++n0 ping -W 1 -c 1 192.168.241.2
3919 ++n1 wg set wg0 peer "$pub2" endpoint 192.168.241.2:7
3920 ++ip2 link del wg0
3921 ++ip2 link del wg1
3922 ++! n0 ping -W 1 -c 10 -f 192.168.241.2 || false # Should not crash kernel
3923 +
3924 ++ip0 link del wg1
3925 + ip1 link del wg0
3926 +-ip2 link del wg0
3927 +
3928 + # Test using NAT. We now change the topology to this:
3929 + # ┌────────────────────────────────────────┐ ┌────────────────────────────────────────────────┐ ┌────────────────────────────────────────┐
3930 +@@ -282,6 +316,20 @@ pp sleep 3
3931 + n2 ping -W 1 -c 1 192.168.241.1
3932 + n1 wg set wg0 peer "$pub2" persistent-keepalive 0
3933 +
3934 ++# Test that onion routing works, even when it loops
3935 ++n1 wg set wg0 peer "$pub3" allowed-ips 192.168.242.2/32 endpoint 192.168.241.2:5
3936 ++ip1 addr add 192.168.242.1/24 dev wg0
3937 ++ip2 link add wg1 type wireguard
3938 ++ip2 addr add 192.168.242.2/24 dev wg1
3939 ++n2 wg set wg1 private-key <(echo "$key3") listen-port 5 peer "$pub1" allowed-ips 192.168.242.1/32
3940 ++ip2 link set wg1 up
3941 ++n1 ping -W 1 -c 1 192.168.242.2
3942 ++ip2 link del wg1
3943 ++n1 wg set wg0 peer "$pub3" endpoint 192.168.242.2:5
3944 ++! n1 ping -W 1 -c 1 192.168.242.2 || false # Should not crash kernel
3945 ++n1 wg set wg0 peer "$pub3" remove
3946 ++ip1 addr del 192.168.242.1/24 dev wg0
3947 ++
3948 + # Do a wg-quick(8)-style policy routing for the default route, making sure vethc has a v6 address to tease out bugs.
3949 + ip1 -6 addr add fc00::9/96 dev vethc
3950 + ip1 -6 route add default via fc00::1
3951 +diff --git a/virt/kvm/arm/hyp/aarch32.c b/virt/kvm/arm/hyp/aarch32.c
3952 +index d31f267961e7..25c0e47d57cb 100644
3953 +--- a/virt/kvm/arm/hyp/aarch32.c
3954 ++++ b/virt/kvm/arm/hyp/aarch32.c
3955 +@@ -125,12 +125,16 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
3956 + */
3957 + void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
3958 + {
3959 ++ u32 pc = *vcpu_pc(vcpu);
3960 + bool is_thumb;
3961 +
3962 + is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
3963 + if (is_thumb && !is_wide_instr)
3964 +- *vcpu_pc(vcpu) += 2;
3965 ++ pc += 2;
3966 + else
3967 +- *vcpu_pc(vcpu) += 4;
3968 ++ pc += 4;
3969 ++
3970 ++ *vcpu_pc(vcpu) = pc;
3971 ++
3972 + kvm_adjust_itstate(vcpu);
3973 + }
3974 +diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
3975 +index 97fb2a40e6ba..e7abd05ea896 100644
3976 +--- a/virt/kvm/arm/vgic/vgic-mmio.c
3977 ++++ b/virt/kvm/arm/vgic/vgic-mmio.c
3978 +@@ -368,7 +368,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
3979 + static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
3980 + {
3981 + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
3982 +- intid > VGIC_NR_PRIVATE_IRQS)
3983 ++ intid >= VGIC_NR_PRIVATE_IRQS)
3984 + kvm_arm_halt_guest(vcpu->kvm);
3985 + }
3986 +
3987 +@@ -376,7 +376,7 @@ static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
3988 + static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
3989 + {
3990 + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
3991 +- intid > VGIC_NR_PRIVATE_IRQS)
3992 ++ intid >= VGIC_NR_PRIVATE_IRQS)
3993 + kvm_arm_resume_guest(vcpu->kvm);
3994 + }
3995 +