Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.17 commit in: /
Date: Mon, 06 Jun 2022 11:02:21
Message-Id: 1654513293.2959c9bb7685e717881e8ce4a25992c5f9dcdeda.mpagano@gentoo
1 commit: 2959c9bb7685e717881e8ce4a25992c5f9dcdeda
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Mon Jun 6 11:01:33 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Mon Jun 6 11:01:33 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2959c9bb
7
8 Linux patch 5.17.13
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1012_linux-5.17.13.patch | 2619 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2623 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index ecb45bb4..aa088efb 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -91,6 +91,10 @@ Patch: 1011_linux-5.17.12.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.17.12
23
24 +Patch: 1012_linux-5.17.13.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.17.13
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1012_linux-5.17.13.patch b/1012_linux-5.17.13.patch
33 new file mode 100644
34 index 00000000..84bd9083
35 --- /dev/null
36 +++ b/1012_linux-5.17.13.patch
37 @@ -0,0 +1,2619 @@
38 +diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
39 +index 31ea120ce531c..0c76d4ede04b6 100644
40 +--- a/Documentation/process/submitting-patches.rst
41 ++++ b/Documentation/process/submitting-patches.rst
42 +@@ -77,7 +77,7 @@ as you intend it to.
43 +
44 + The maintainer will thank you if you write your patch description in a
45 + form which can be easily pulled into Linux's source code management
46 +-system, ``git``, as a "commit log". See :ref:`explicit_in_reply_to`.
47 ++system, ``git``, as a "commit log". See :ref:`the_canonical_patch_format`.
48 +
49 + Solve only one problem per patch. If your description starts to get
50 + long, that's a sign that you probably need to split up your patch.
51 +diff --git a/Makefile b/Makefile
52 +index 25c44dda0ef37..d38228d336bf6 100644
53 +--- a/Makefile
54 ++++ b/Makefile
55 +@@ -1,7 +1,7 @@
56 + # SPDX-License-Identifier: GPL-2.0
57 + VERSION = 5
58 + PATCHLEVEL = 17
59 +-SUBLEVEL = 12
60 ++SUBLEVEL = 13
61 + EXTRAVERSION =
62 + NAME = Superb Owl
63 +
64 +diff --git a/arch/arm/boot/dts/s5pv210-aries.dtsi b/arch/arm/boot/dts/s5pv210-aries.dtsi
65 +index 160f8cd9a68da..2f57100a011a3 100644
66 +--- a/arch/arm/boot/dts/s5pv210-aries.dtsi
67 ++++ b/arch/arm/boot/dts/s5pv210-aries.dtsi
68 +@@ -895,7 +895,7 @@
69 + device-wakeup-gpios = <&gpg3 4 GPIO_ACTIVE_HIGH>;
70 + interrupt-parent = <&gph2>;
71 + interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
72 +- interrupt-names = "host-wake";
73 ++ interrupt-names = "host-wakeup";
74 + };
75 + };
76 +
77 +diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
78 +index 25d8aff273a10..4dd5f3b08aaa4 100644
79 +--- a/arch/arm64/kvm/arm.c
80 ++++ b/arch/arm64/kvm/arm.c
81 +@@ -1496,7 +1496,8 @@ static int kvm_init_vector_slots(void)
82 + base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
83 + kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);
84 +
85 +- if (kvm_system_needs_idmapped_vectors() && !has_vhe()) {
86 ++ if (kvm_system_needs_idmapped_vectors() &&
87 ++ !is_protected_kvm_enabled()) {
88 + err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
89 + __BP_HARDEN_HYP_VECS_SZ, &base);
90 + if (err)
91 +diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
92 +index e414ca44839fd..0cb20ee6a632c 100644
93 +--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
94 ++++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
95 +@@ -360,13 +360,15 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
96 + static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
97 + struct kvm *kvm, unsigned long *gfn)
98 + {
99 +- struct kvmppc_uvmem_slot *p;
100 ++ struct kvmppc_uvmem_slot *p = NULL, *iter;
101 + bool ret = false;
102 + unsigned long i;
103 +
104 +- list_for_each_entry(p, &kvm->arch.uvmem_pfns, list)
105 +- if (*gfn >= p->base_pfn && *gfn < p->base_pfn + p->nr_pfns)
106 ++ list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
107 ++ if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
108 ++ p = iter;
109 + break;
110 ++ }
111 + if (!p)
112 + return ret;
113 + /*
114 +diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
115 +index ac96f9b2d64b3..1c14bcce88f27 100644
116 +--- a/arch/x86/include/asm/uaccess.h
117 ++++ b/arch/x86/include/asm/uaccess.h
118 +@@ -409,6 +409,103 @@ do { \
119 +
120 + #endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
121 +
122 ++#ifdef CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
123 ++#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label) ({ \
124 ++ bool success; \
125 ++ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
126 ++ __typeof__(*(_ptr)) __old = *_old; \
127 ++ __typeof__(*(_ptr)) __new = (_new); \
128 ++ asm_volatile_goto("\n" \
129 ++ "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
130 ++ _ASM_EXTABLE_UA(1b, %l[label]) \
131 ++ : CC_OUT(z) (success), \
132 ++ [ptr] "+m" (*_ptr), \
133 ++ [old] "+a" (__old) \
134 ++ : [new] ltype (__new) \
135 ++ : "memory" \
136 ++ : label); \
137 ++ if (unlikely(!success)) \
138 ++ *_old = __old; \
139 ++ likely(success); })
140 ++
141 ++#ifdef CONFIG_X86_32
142 ++#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label) ({ \
143 ++ bool success; \
144 ++ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
145 ++ __typeof__(*(_ptr)) __old = *_old; \
146 ++ __typeof__(*(_ptr)) __new = (_new); \
147 ++ asm_volatile_goto("\n" \
148 ++ "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
149 ++ _ASM_EXTABLE_UA(1b, %l[label]) \
150 ++ : CC_OUT(z) (success), \
151 ++ "+A" (__old), \
152 ++ [ptr] "+m" (*_ptr) \
153 ++ : "b" ((u32)__new), \
154 ++ "c" ((u32)((u64)__new >> 32)) \
155 ++ : "memory" \
156 ++ : label); \
157 ++ if (unlikely(!success)) \
158 ++ *_old = __old; \
159 ++ likely(success); })
160 ++#endif // CONFIG_X86_32
161 ++#else // !CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
162 ++#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label) ({ \
163 ++ int __err = 0; \
164 ++ bool success; \
165 ++ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
166 ++ __typeof__(*(_ptr)) __old = *_old; \
167 ++ __typeof__(*(_ptr)) __new = (_new); \
168 ++ asm volatile("\n" \
169 ++ "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
170 ++ CC_SET(z) \
171 ++ "2:\n" \
172 ++ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, \
173 ++ %[errout]) \
174 ++ : CC_OUT(z) (success), \
175 ++ [errout] "+r" (__err), \
176 ++ [ptr] "+m" (*_ptr), \
177 ++ [old] "+a" (__old) \
178 ++ : [new] ltype (__new) \
179 ++ : "memory", "cc"); \
180 ++ if (unlikely(__err)) \
181 ++ goto label; \
182 ++ if (unlikely(!success)) \
183 ++ *_old = __old; \
184 ++ likely(success); })
185 ++
186 ++#ifdef CONFIG_X86_32
187 ++/*
188 ++ * Unlike the normal CMPXCHG, hardcode ECX for both success/fail and error.
189 ++ * There are only six GPRs available and four (EAX, EBX, ECX, and EDX) are
190 ++ * hardcoded by CMPXCHG8B, leaving only ESI and EDI. If the compiler uses
191 ++ * both ESI and EDI for the memory operand, compilation will fail if the error
192 ++ * is an input+output as there will be no register available for input.
193 ++ */
194 ++#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label) ({ \
195 ++ int __result; \
196 ++ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
197 ++ __typeof__(*(_ptr)) __old = *_old; \
198 ++ __typeof__(*(_ptr)) __new = (_new); \
199 ++ asm volatile("\n" \
200 ++ "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
201 ++ "mov $0, %%ecx\n\t" \
202 ++ "setz %%cl\n" \
203 ++ "2:\n" \
204 ++ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %%ecx) \
205 ++ : [result]"=c" (__result), \
206 ++ "+A" (__old), \
207 ++ [ptr] "+m" (*_ptr) \
208 ++ : "b" ((u32)__new), \
209 ++ "c" ((u32)((u64)__new >> 32)) \
210 ++ : "memory", "cc"); \
211 ++ if (unlikely(__result < 0)) \
212 ++ goto label; \
213 ++ if (unlikely(!__result)) \
214 ++ *_old = __old; \
215 ++ likely(__result); })
216 ++#endif // CONFIG_X86_32
217 ++#endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
218 ++
219 + /* FIXME: this hack is definitely wrong -AK */
220 + struct __large_struct { unsigned long buf[100]; };
221 + #define __m(x) (*(struct __large_struct __user *)(x))
222 +@@ -501,6 +598,51 @@ do { \
223 + } while (0)
224 + #endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
225 +
226 ++extern void __try_cmpxchg_user_wrong_size(void);
227 ++
228 ++#ifndef CONFIG_X86_32
229 ++#define __try_cmpxchg64_user_asm(_ptr, _oldp, _nval, _label) \
230 ++ __try_cmpxchg_user_asm("q", "r", (_ptr), (_oldp), (_nval), _label)
231 ++#endif
232 ++
233 ++/*
234 ++ * Force the pointer to u<size> to match the size expected by the asm helper.
235 ++ * clang/LLVM compiles all cases and only discards the unused paths after
236 ++ * processing errors, which breaks i386 if the pointer is an 8-byte value.
237 ++ */
238 ++#define unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({ \
239 ++ bool __ret; \
240 ++ __chk_user_ptr(_ptr); \
241 ++ switch (sizeof(*(_ptr))) { \
242 ++ case 1: __ret = __try_cmpxchg_user_asm("b", "q", \
243 ++ (__force u8 *)(_ptr), (_oldp), \
244 ++ (_nval), _label); \
245 ++ break; \
246 ++ case 2: __ret = __try_cmpxchg_user_asm("w", "r", \
247 ++ (__force u16 *)(_ptr), (_oldp), \
248 ++ (_nval), _label); \
249 ++ break; \
250 ++ case 4: __ret = __try_cmpxchg_user_asm("l", "r", \
251 ++ (__force u32 *)(_ptr), (_oldp), \
252 ++ (_nval), _label); \
253 ++ break; \
254 ++ case 8: __ret = __try_cmpxchg64_user_asm((__force u64 *)(_ptr), (_oldp),\
255 ++ (_nval), _label); \
256 ++ break; \
257 ++ default: __try_cmpxchg_user_wrong_size(); \
258 ++ } \
259 ++ __ret; })
260 ++
261 ++/* "Returns" 0 on success, 1 on failure, -EFAULT if the access faults. */
262 ++#define __try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({ \
263 ++ int __ret = -EFAULT; \
264 ++ __uaccess_begin_nospec(); \
265 ++ __ret = !unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label); \
266 ++_label: \
267 ++ __uaccess_end(); \
268 ++ __ret; \
269 ++ })
270 ++
271 + /*
272 + * We want the unsafe accessors to always be inlined and use
273 + * the error labels - thus the macro games.
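
For context, a minimal usage sketch of the new __try_cmpxchg_user() helper added above. The helper and its return convention (0 on success, 1 when the compare fails and the old value is refreshed, -EFAULT when the access faults) come from the hunk; the surrounding function and names are hypothetical:

	static int bump_user_seq(u64 __user *uptr)
	{
		u64 old;
		int ret;

		/* Snapshot the current value with an ordinary uaccess helper. */
		if (get_user(old, uptr))
			return -EFAULT;

		do {
			/*
			 * 0: exchanged, 1: lost a race (old now holds the
			 * current value), -EFAULT: the user access faulted.
			 */
			ret = __try_cmpxchg_user(uptr, &old, old + 1, fault);
		} while (ret == 1);

		return ret;
	}
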
274 +diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
275 +index 7c63a1911fae9..3c24e6124d955 100644
276 +--- a/arch/x86/kernel/cpu/sgx/encl.c
277 ++++ b/arch/x86/kernel/cpu/sgx/encl.c
278 +@@ -12,6 +12,92 @@
279 + #include "encls.h"
280 + #include "sgx.h"
281 +
282 ++#define PCMDS_PER_PAGE (PAGE_SIZE / sizeof(struct sgx_pcmd))
283 ++/*
284 ++ * 32 PCMD entries share a PCMD page. PCMD_FIRST_MASK is used to
285 ++ * determine the page index associated with the first PCMD entry
286 ++ * within a PCMD page.
287 ++ */
288 ++#define PCMD_FIRST_MASK GENMASK(4, 0)
289 ++
290 ++/**
291 ++ * reclaimer_writing_to_pcmd() - Query if any enclave page associated with
292 ++ * a PCMD page is in process of being reclaimed.
293 ++ * @encl: Enclave to which PCMD page belongs
294 ++ * @start_addr: Address of enclave page using first entry within the PCMD page
295 ++ *
296 ++ * When an enclave page is reclaimed some Paging Crypto MetaData (PCMD) is
297 ++ * stored. The PCMD data of a reclaimed enclave page contains enough
298 ++ * information for the processor to verify the page at the time
299 ++ * it is loaded back into the Enclave Page Cache (EPC).
300 ++ *
301 ++ * The backing storage to which enclave pages are reclaimed is laid out as
302 ++ * follows:
303 ++ * Encrypted enclave pages:SECS page:PCMD pages
304 ++ *
305 ++ * Each PCMD page contains the PCMD metadata of
306 ++ * PAGE_SIZE/sizeof(struct sgx_pcmd) enclave pages.
307 ++ *
308 ++ * A PCMD page can only be truncated if it is (a) empty, and (b) not in the
309 ++ * process of getting data (and thus soon being non-empty). (b) is tested with
310 ++ * a check if an enclave page sharing the PCMD page is in the process of being
311 ++ * reclaimed.
312 ++ *
313 ++ * The reclaimer sets the SGX_ENCL_PAGE_BEING_RECLAIMED flag when it
314 ++ * intends to reclaim that enclave page - it means that the PCMD page
315 ++ * associated with that enclave page is about to get some data and thus
316 ++ * even if the PCMD page is empty, it should not be truncated.
317 ++ *
318 ++ * Context: Enclave mutex (&sgx_encl->lock) must be held.
319 ++ * Return: 1 if the reclaimer is about to write to the PCMD page
320 ++ * 0 if the reclaimer has no intention to write to the PCMD page
321 ++ */
322 ++static int reclaimer_writing_to_pcmd(struct sgx_encl *encl,
323 ++ unsigned long start_addr)
324 ++{
325 ++ int reclaimed = 0;
326 ++ int i;
327 ++
328 ++ /*
329 ++ * PCMD_FIRST_MASK is based on number of PCMD entries within
330 ++ * PCMD page being 32.
331 ++ */
332 ++ BUILD_BUG_ON(PCMDS_PER_PAGE != 32);
333 ++
334 ++ for (i = 0; i < PCMDS_PER_PAGE; i++) {
335 ++ struct sgx_encl_page *entry;
336 ++ unsigned long addr;
337 ++
338 ++ addr = start_addr + i * PAGE_SIZE;
339 ++
340 ++ /*
341 ++ * Stop when reaching the SECS page - it does not
342 ++ * have a page_array entry and its reclaim is
343 ++ * started and completed with enclave mutex held so
344 ++ * it does not use the SGX_ENCL_PAGE_BEING_RECLAIMED
345 ++ * flag.
346 ++ */
347 ++ if (addr == encl->base + encl->size)
348 ++ break;
349 ++
350 ++ entry = xa_load(&encl->page_array, PFN_DOWN(addr));
351 ++ if (!entry)
352 ++ continue;
353 ++
354 ++ /*
355 ++ * VA page slot ID uses same bit as the flag so it is important
356 ++ * to ensure that the page is not already in backing store.
357 ++ */
358 ++ if (entry->epc_page &&
359 ++ (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)) {
360 ++ reclaimed = 1;
361 ++ break;
362 ++ }
363 ++ }
364 ++
365 ++ return reclaimed;
366 ++}
367 ++
368 + /*
369 + * Calculate byte offset of a PCMD struct associated with an enclave page. PCMD's
370 + * follow right after the EPC data in the backing storage. In addition to the
371 +@@ -47,6 +133,7 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
372 + unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
373 + struct sgx_encl *encl = encl_page->encl;
374 + pgoff_t page_index, page_pcmd_off;
375 ++ unsigned long pcmd_first_page;
376 + struct sgx_pageinfo pginfo;
377 + struct sgx_backing b;
378 + bool pcmd_page_empty;
379 +@@ -58,6 +145,11 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
380 + else
381 + page_index = PFN_DOWN(encl->size);
382 +
383 ++ /*
384 ++ * Address of enclave page using the first entry within the PCMD page.
385 ++ */
386 ++ pcmd_first_page = PFN_PHYS(page_index & ~PCMD_FIRST_MASK) + encl->base;
387 ++
388 + page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
389 +
390 + ret = sgx_encl_get_backing(encl, page_index, &b);
391 +@@ -84,6 +176,7 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
392 + }
393 +
394 + memset(pcmd_page + b.pcmd_offset, 0, sizeof(struct sgx_pcmd));
395 ++ set_page_dirty(b.pcmd);
396 +
397 + /*
398 + * The area for the PCMD in the page was zeroed above. Check if the
399 +@@ -94,12 +187,20 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
400 + kunmap_atomic(pcmd_page);
401 + kunmap_atomic((void *)(unsigned long)pginfo.contents);
402 +
403 +- sgx_encl_put_backing(&b, false);
404 ++ get_page(b.pcmd);
405 ++ sgx_encl_put_backing(&b);
406 +
407 + sgx_encl_truncate_backing_page(encl, page_index);
408 +
409 +- if (pcmd_page_empty)
410 ++ if (pcmd_page_empty && !reclaimer_writing_to_pcmd(encl, pcmd_first_page)) {
411 + sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off));
412 ++ pcmd_page = kmap_atomic(b.pcmd);
413 ++ if (memchr_inv(pcmd_page, 0, PAGE_SIZE))
414 ++ pr_warn("PCMD page not empty after truncate.\n");
415 ++ kunmap_atomic(pcmd_page);
416 ++ }
417 ++
418 ++ put_page(b.pcmd);
419 +
420 + return ret;
421 + }
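
As a worked example of the PCMD index arithmetic above (illustrative only; the 128-byte struct sgx_pcmd follows from the BUILD_BUG_ON(PCMDS_PER_PAGE != 32) check with 4 KiB pages):

	/*
	 * PCMDS_PER_PAGE  = PAGE_SIZE / sizeof(struct sgx_pcmd) = 4096 / 128 = 32
	 * PCMD_FIRST_MASK = GENMASK(4, 0) = 0x1f
	 *
	 * For an enclave page with page_index = 0x47, the shared PCMD page is
	 * the one whose first entry belongs to page_index 0x40, so:
	 *
	 *   pcmd_first_page = PFN_PHYS(0x47 & ~0x1f) + encl->base
	 *                   = (0x40 << PAGE_SHIFT) + encl->base
	 *
	 * reclaimer_writing_to_pcmd() then walks the 32 enclave-page addresses
	 * starting at pcmd_first_page to decide whether the now-empty PCMD
	 * page may really be truncated.
	 */
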
422 +@@ -645,15 +746,9 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
423 + /**
424 + * sgx_encl_put_backing() - Unpin the backing storage
425 + * @backing: data for accessing backing storage for the page
426 +- * @do_write: mark pages dirty
427 + */
428 +-void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write)
429 ++void sgx_encl_put_backing(struct sgx_backing *backing)
430 + {
431 +- if (do_write) {
432 +- set_page_dirty(backing->pcmd);
433 +- set_page_dirty(backing->contents);
434 +- }
435 +-
436 + put_page(backing->pcmd);
437 + put_page(backing->contents);
438 + }
439 +diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
440 +index fec43ca65065b..d44e7372151f0 100644
441 +--- a/arch/x86/kernel/cpu/sgx/encl.h
442 ++++ b/arch/x86/kernel/cpu/sgx/encl.h
443 +@@ -107,7 +107,7 @@ void sgx_encl_release(struct kref *ref);
444 + int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm);
445 + int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
446 + struct sgx_backing *backing);
447 +-void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write);
448 ++void sgx_encl_put_backing(struct sgx_backing *backing);
449 + int sgx_encl_test_and_clear_young(struct mm_struct *mm,
450 + struct sgx_encl_page *page);
451 +
452 +diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
453 +index 8e4bc6453d263..ab4ec54bbdd94 100644
454 +--- a/arch/x86/kernel/cpu/sgx/main.c
455 ++++ b/arch/x86/kernel/cpu/sgx/main.c
456 +@@ -191,6 +191,8 @@ static int __sgx_encl_ewb(struct sgx_epc_page *epc_page, void *va_slot,
457 + backing->pcmd_offset;
458 +
459 + ret = __ewb(&pginfo, sgx_get_epc_virt_addr(epc_page), va_slot);
460 ++ set_page_dirty(backing->pcmd);
461 ++ set_page_dirty(backing->contents);
462 +
463 + kunmap_atomic((void *)(unsigned long)(pginfo.metadata -
464 + backing->pcmd_offset));
465 +@@ -308,6 +310,7 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
466 + sgx_encl_ewb(epc_page, backing);
467 + encl_page->epc_page = NULL;
468 + encl->secs_child_cnt--;
469 ++ sgx_encl_put_backing(backing);
470 +
471 + if (!encl->secs_child_cnt && test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) {
472 + ret = sgx_encl_get_backing(encl, PFN_DOWN(encl->size),
473 +@@ -320,7 +323,7 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
474 + sgx_encl_free_epc_page(encl->secs.epc_page);
475 + encl->secs.epc_page = NULL;
476 +
477 +- sgx_encl_put_backing(&secs_backing, true);
478 ++ sgx_encl_put_backing(&secs_backing);
479 + }
480 +
481 + out:
482 +@@ -379,11 +382,14 @@ static void sgx_reclaim_pages(void)
483 + goto skip;
484 +
485 + page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);
486 ++
487 ++ mutex_lock(&encl_page->encl->lock);
488 + ret = sgx_encl_get_backing(encl_page->encl, page_index, &backing[i]);
489 +- if (ret)
490 ++ if (ret) {
491 ++ mutex_unlock(&encl_page->encl->lock);
492 + goto skip;
493 ++ }
494 +
495 +- mutex_lock(&encl_page->encl->lock);
496 + encl_page->desc |= SGX_ENCL_PAGE_BEING_RECLAIMED;
497 + mutex_unlock(&encl_page->encl->lock);
498 + continue;
499 +@@ -411,7 +417,6 @@ skip:
500 +
501 + encl_page = epc_page->owner;
502 + sgx_reclaimer_write(epc_page, &backing[i]);
503 +- sgx_encl_put_backing(&backing[i], true);
504 +
505 + kref_put(&encl_page->encl->refcount, sgx_encl_release);
506 + epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
507 +diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
508 +index 5290d64723086..98e6c29e17a48 100644
509 +--- a/arch/x86/kernel/fpu/core.c
510 ++++ b/arch/x86/kernel/fpu/core.c
511 +@@ -14,6 +14,8 @@
512 + #include <asm/traps.h>
513 + #include <asm/irq_regs.h>
514 +
515 ++#include <uapi/asm/kvm.h>
516 ++
517 + #include <linux/hardirq.h>
518 + #include <linux/pkeys.h>
519 + #include <linux/vmalloc.h>
520 +@@ -232,7 +234,20 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
521 + gfpu->fpstate = fpstate;
522 + gfpu->xfeatures = fpu_user_cfg.default_features;
523 + gfpu->perm = fpu_user_cfg.default_features;
524 +- gfpu->uabi_size = fpu_user_cfg.default_size;
525 ++
526 ++ /*
527 ++ * KVM sets the FP+SSE bits in the XSAVE header when copying FPU state
528 ++ * to userspace, even when XSAVE is unsupported, so that restoring FPU
529 ++ * state on a different CPU that does support XSAVE can cleanly load
530 ++ * the incoming state using its natural XSAVE. In other words, KVM's
531 ++ * uABI size may be larger than this host's default size. Conversely,
532 ++ * the default size should never be larger than KVM's base uABI size;
533 ++ * all features that can expand the uABI size must be opt-in.
534 ++ */
535 ++ gfpu->uabi_size = sizeof(struct kvm_xsave);
536 ++ if (WARN_ON_ONCE(fpu_user_cfg.default_size > gfpu->uabi_size))
537 ++ gfpu->uabi_size = fpu_user_cfg.default_size;
538 ++
539 + fpu_init_guest_permissions(gfpu);
540 +
541 + return true;
542 +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
543 +index 4c2a158bb6c4f..015df76bc9145 100644
544 +--- a/arch/x86/kernel/kvm.c
545 ++++ b/arch/x86/kernel/kvm.c
546 +@@ -191,7 +191,7 @@ void kvm_async_pf_task_wake(u32 token)
547 + {
548 + u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
549 + struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
550 +- struct kvm_task_sleep_node *n;
551 ++ struct kvm_task_sleep_node *n, *dummy = NULL;
552 +
553 + if (token == ~0) {
554 + apf_task_wake_all();
555 +@@ -203,28 +203,41 @@ again:
556 + n = _find_apf_task(b, token);
557 + if (!n) {
558 + /*
559 +- * async PF was not yet handled.
560 +- * Add dummy entry for the token.
561 ++ * Async #PF not yet handled, add a dummy entry for the token.
562 ++ * Allocating the token must be down outside of the raw lock
563 ++ * as the allocator is preemptible on PREEMPT_RT kernels.
564 + */
565 +- n = kzalloc(sizeof(*n), GFP_ATOMIC);
566 +- if (!n) {
567 ++ if (!dummy) {
568 ++ raw_spin_unlock(&b->lock);
569 ++ dummy = kzalloc(sizeof(*dummy), GFP_ATOMIC);
570 ++
571 + /*
572 +- * Allocation failed! Busy wait while other cpu
573 +- * handles async PF.
574 ++ * Continue looping on allocation failure, eventually
575 ++ * the async #PF will be handled and allocating a new
576 ++ * node will be unnecessary.
577 ++ */
578 ++ if (!dummy)
579 ++ cpu_relax();
580 ++
581 ++ /*
582 ++ * Recheck for async #PF completion before enqueueing
583 ++ * the dummy token to avoid duplicate list entries.
584 + */
585 +- raw_spin_unlock(&b->lock);
586 +- cpu_relax();
587 + goto again;
588 + }
589 +- n->token = token;
590 +- n->cpu = smp_processor_id();
591 +- init_swait_queue_head(&n->wq);
592 +- hlist_add_head(&n->link, &b->list);
593 ++ dummy->token = token;
594 ++ dummy->cpu = smp_processor_id();
595 ++ init_swait_queue_head(&dummy->wq);
596 ++ hlist_add_head(&dummy->link, &b->list);
597 ++ dummy = NULL;
598 + } else {
599 + apf_task_wake_one(n);
600 + }
601 + raw_spin_unlock(&b->lock);
602 +- return;
603 ++
604 ++ /* A dummy token might be allocated and ultimately not used. */
605 ++ if (dummy)
606 ++ kfree(dummy);
607 + }
608 + EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
609 +
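
For readability, a rough non-diff sketch of the allocate-outside-the-lock pattern the hunk above switches to. The structure and the lookup/wake helpers are placeholders, not kernel APIs; only kzalloc/kfree, the raw spinlock calls and cpu_relax() are real:

	struct sleep_head { raw_spinlock_t lock; struct hlist_head list; };
	struct sleep_node { struct hlist_node link; u32 token; };

	static void task_wake(struct sleep_head *b, u32 token)
	{
		struct sleep_node *n, *dummy = NULL;

	again:
		raw_spin_lock(&b->lock);
		n = find_node(b, token);		/* placeholder lookup */
		if (!n) {
			if (!dummy) {
				/*
				 * Allocate with the raw lock dropped: the
				 * allocator is preemptible on PREEMPT_RT.
				 */
				raw_spin_unlock(&b->lock);
				dummy = kzalloc(sizeof(*dummy), GFP_ATOMIC);
				if (!dummy)
					cpu_relax();	/* keep retrying */
				goto again;		/* re-check under the lock */
			}
			dummy->token = token;
			hlist_add_head(&dummy->link, &b->list);
			dummy = NULL;			/* node consumed */
		} else {
			wake_node(n);			/* placeholder wakeup */
		}
		raw_spin_unlock(&b->lock);

		kfree(dummy);				/* harmless if NULL */
	}
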
610 +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
611 +index 495329ae6b1b2..f03fa385b2e28 100644
612 +--- a/arch/x86/kvm/mmu/mmu.c
613 ++++ b/arch/x86/kvm/mmu/mmu.c
614 +@@ -1894,17 +1894,14 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
615 + &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \
616 + if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
617 +
618 +-static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
619 ++static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
620 + struct list_head *invalid_list)
621 + {
622 + int ret = vcpu->arch.mmu->sync_page(vcpu, sp);
623 +
624 +- if (ret < 0) {
625 ++ if (ret < 0)
626 + kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
627 +- return false;
628 +- }
629 +-
630 +- return !!ret;
631 ++ return ret;
632 + }
633 +
634 + static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
635 +@@ -2033,7 +2030,7 @@ static int mmu_sync_children(struct kvm_vcpu *vcpu,
636 +
637 + for_each_sp(pages, sp, parents, i) {
638 + kvm_unlink_unsync_page(vcpu->kvm, sp);
639 +- flush |= kvm_sync_page(vcpu, sp, &invalid_list);
640 ++ flush |= kvm_sync_page(vcpu, sp, &invalid_list) > 0;
641 + mmu_pages_clear_parents(&parents);
642 + }
643 + if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
644 +@@ -2074,6 +2071,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
645 + struct hlist_head *sp_list;
646 + unsigned quadrant;
647 + struct kvm_mmu_page *sp;
648 ++ int ret;
649 + int collisions = 0;
650 + LIST_HEAD(invalid_list);
651 +
652 +@@ -2126,11 +2124,13 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
653 + * If the sync fails, the page is zapped. If so, break
654 + * in order to rebuild it.
655 + */
656 +- if (!kvm_sync_page(vcpu, sp, &invalid_list))
657 ++ ret = kvm_sync_page(vcpu, sp, &invalid_list);
658 ++ if (ret < 0)
659 + break;
660 +
661 + WARN_ON(!list_empty(&invalid_list));
662 +- kvm_flush_remote_tlbs(vcpu->kvm);
663 ++ if (ret > 0)
664 ++ kvm_flush_remote_tlbs(vcpu->kvm);
665 + }
666 +
667 + __clear_sp_write_flooding_count(sp);
668 +diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
669 +index 3821d5140ea31..8aba3c5be4627 100644
670 +--- a/arch/x86/kvm/mmu/paging_tmpl.h
671 ++++ b/arch/x86/kvm/mmu/paging_tmpl.h
672 +@@ -144,42 +144,6 @@ static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
673 + FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte);
674 + }
675 +
676 +-static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
677 +- pt_element_t __user *ptep_user, unsigned index,
678 +- pt_element_t orig_pte, pt_element_t new_pte)
679 +-{
680 +- signed char r;
681 +-
682 +- if (!user_access_begin(ptep_user, sizeof(pt_element_t)))
683 +- return -EFAULT;
684 +-
685 +-#ifdef CMPXCHG
686 +- asm volatile("1:" LOCK_PREFIX CMPXCHG " %[new], %[ptr]\n"
687 +- "setnz %b[r]\n"
688 +- "2:"
689 +- _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
690 +- : [ptr] "+m" (*ptep_user),
691 +- [old] "+a" (orig_pte),
692 +- [r] "=q" (r)
693 +- : [new] "r" (new_pte)
694 +- : "memory");
695 +-#else
696 +- asm volatile("1:" LOCK_PREFIX "cmpxchg8b %[ptr]\n"
697 +- "setnz %b[r]\n"
698 +- "2:"
699 +- _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
700 +- : [ptr] "+m" (*ptep_user),
701 +- [old] "+A" (orig_pte),
702 +- [r] "=q" (r)
703 +- : [new_lo] "b" ((u32)new_pte),
704 +- [new_hi] "c" ((u32)(new_pte >> 32))
705 +- : "memory");
706 +-#endif
707 +-
708 +- user_access_end();
709 +- return r;
710 +-}
711 +-
712 + static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
713 + struct kvm_mmu_page *sp, u64 *spte,
714 + u64 gpte)
715 +@@ -278,7 +242,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
716 + if (unlikely(!walker->pte_writable[level - 1]))
717 + continue;
718 +
719 +- ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
720 ++ ret = __try_cmpxchg_user(ptep_user, &orig_pte, pte, fault);
721 + if (ret)
722 + return ret;
723 +
724 +diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
725 +index 39d280e7e80ef..26140579456b1 100644
726 +--- a/arch/x86/kvm/svm/nested.c
727 ++++ b/arch/x86/kvm/svm/nested.c
728 +@@ -790,9 +790,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
729 + struct kvm_host_map map;
730 + int rc;
731 +
732 +- /* Triple faults in L2 should never escape. */
733 +- WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));
734 +-
735 + rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
736 + if (rc) {
737 + if (rc == -EINVAL)
738 +diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
739 +index 76e6411d4dde1..730d887f53ac2 100644
740 +--- a/arch/x86/kvm/svm/sev.c
741 ++++ b/arch/x86/kvm/svm/sev.c
742 +@@ -684,7 +684,7 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
743 + if (params.len > SEV_FW_BLOB_MAX_SIZE)
744 + return -EINVAL;
745 +
746 +- blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
747 ++ blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);
748 + if (!blob)
749 + return -ENOMEM;
750 +
751 +@@ -804,7 +804,7 @@ static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
752 + if (!IS_ALIGNED(dst_paddr, 16) ||
753 + !IS_ALIGNED(paddr, 16) ||
754 + !IS_ALIGNED(size, 16)) {
755 +- tpage = (void *)alloc_page(GFP_KERNEL);
756 ++ tpage = (void *)alloc_page(GFP_KERNEL | __GFP_ZERO);
757 + if (!tpage)
758 + return -ENOMEM;
759 +
760 +@@ -1090,7 +1090,7 @@ static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
761 + if (params.len > SEV_FW_BLOB_MAX_SIZE)
762 + return -EINVAL;
763 +
764 +- blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
765 ++ blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);
766 + if (!blob)
767 + return -ENOMEM;
768 +
769 +@@ -1172,7 +1172,7 @@ static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
770 + return -EINVAL;
771 +
772 + /* allocate the memory to hold the session data blob */
773 +- session_data = kmalloc(params.session_len, GFP_KERNEL_ACCOUNT);
774 ++ session_data = kzalloc(params.session_len, GFP_KERNEL_ACCOUNT);
775 + if (!session_data)
776 + return -ENOMEM;
777 +
778 +@@ -1296,11 +1296,11 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
779 +
780 + /* allocate memory for header and transport buffer */
781 + ret = -ENOMEM;
782 +- hdr = kmalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
783 ++ hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
784 + if (!hdr)
785 + goto e_unpin;
786 +
787 +- trans_data = kmalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
788 ++ trans_data = kzalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
789 + if (!trans_data)
790 + goto e_free_hdr;
791 +
792 +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
793 +index 896ddf7392365..3237d804564b1 100644
794 +--- a/arch/x86/kvm/vmx/nested.c
795 ++++ b/arch/x86/kvm/vmx/nested.c
796 +@@ -4518,9 +4518,6 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
797 + /* trying to cancel vmlaunch/vmresume is a bug */
798 + WARN_ON_ONCE(vmx->nested.nested_run_pending);
799 +
800 +- /* Similarly, triple faults in L2 should never escape. */
801 +- WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));
802 +-
803 + if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
804 + /*
805 + * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
806 +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
807 +index 267d6dc4b8186..c87be7c52cc26 100644
808 +--- a/arch/x86/kvm/vmx/vmx.c
809 ++++ b/arch/x86/kvm/vmx/vmx.c
810 +@@ -7858,7 +7858,7 @@ static unsigned int vmx_handle_intel_pt_intr(void)
811 + struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
812 +
813 + /* '0' on failure so that the !PT case can use a RET0 static call. */
814 +- if (!kvm_arch_pmi_in_guest(vcpu))
815 ++ if (!vcpu || !kvm_handling_nmi_from_guest(vcpu))
816 + return 0;
817 +
818 + kvm_make_request(KVM_REQ_PMI, vcpu);
819 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
820 +index 23d176cd12a4f..5204283da7987 100644
821 +--- a/arch/x86/kvm/x86.c
822 ++++ b/arch/x86/kvm/x86.c
823 +@@ -7168,15 +7168,8 @@ static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
824 + exception, &write_emultor);
825 + }
826 +
827 +-#define CMPXCHG_TYPE(t, ptr, old, new) \
828 +- (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
829 +-
830 +-#ifdef CONFIG_X86_64
831 +-# define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
832 +-#else
833 +-# define CMPXCHG64(ptr, old, new) \
834 +- (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
835 +-#endif
836 ++#define emulator_try_cmpxchg_user(t, ptr, old, new) \
837 ++ (__try_cmpxchg_user((t __user *)(ptr), (t *)(old), *(t *)(new), efault ## t))
838 +
839 + static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
840 + unsigned long addr,
841 +@@ -7185,12 +7178,11 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
842 + unsigned int bytes,
843 + struct x86_exception *exception)
844 + {
845 +- struct kvm_host_map map;
846 + struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
847 + u64 page_line_mask;
848 ++ unsigned long hva;
849 + gpa_t gpa;
850 +- char *kaddr;
851 +- bool exchanged;
852 ++ int r;
853 +
854 + /* guests cmpxchg8b have to be emulated atomically */
855 + if (bytes > 8 || (bytes & (bytes - 1)))
856 +@@ -7214,31 +7206,32 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
857 + if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask))
858 + goto emul_write;
859 +
860 +- if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
861 ++ hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa));
862 ++ if (kvm_is_error_hva(hva))
863 + goto emul_write;
864 +
865 +- kaddr = map.hva + offset_in_page(gpa);
866 ++ hva += offset_in_page(gpa);
867 +
868 + switch (bytes) {
869 + case 1:
870 +- exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
871 ++ r = emulator_try_cmpxchg_user(u8, hva, old, new);
872 + break;
873 + case 2:
874 +- exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
875 ++ r = emulator_try_cmpxchg_user(u16, hva, old, new);
876 + break;
877 + case 4:
878 +- exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
879 ++ r = emulator_try_cmpxchg_user(u32, hva, old, new);
880 + break;
881 + case 8:
882 +- exchanged = CMPXCHG64(kaddr, old, new);
883 ++ r = emulator_try_cmpxchg_user(u64, hva, old, new);
884 + break;
885 + default:
886 + BUG();
887 + }
888 +
889 +- kvm_vcpu_unmap(vcpu, &map, true);
890 +-
891 +- if (!exchanged)
892 ++ if (r < 0)
893 ++ goto emul_write;
894 ++ if (r)
895 + return X86EMUL_CMPXCHG_FAILED;
896 +
897 + kvm_page_track_write(vcpu, gpa, new, bytes);
898 +@@ -8176,7 +8169,7 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
899 + }
900 + EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
901 +
902 +-static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
903 ++static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, int *r)
904 + {
905 + if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
906 + (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
907 +@@ -8245,25 +8238,23 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
908 + }
909 +
910 + /*
911 +- * Decode to be emulated instruction. Return EMULATION_OK if success.
912 ++ * Decode an instruction for emulation. The caller is responsible for handling
913 ++ * code breakpoints. Note, manually detecting code breakpoints is unnecessary
914 ++ * (and wrong) when emulating on an intercepted fault-like exception[*], as
915 ++ * code breakpoints have higher priority and thus have already been done by
916 ++ * hardware.
917 ++ *
918 ++ * [*] Except #MC, which is higher priority, but KVM should never emulate in
919 ++ * response to a machine check.
920 + */
921 + int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
922 + void *insn, int insn_len)
923 + {
924 +- int r = EMULATION_OK;
925 + struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
926 ++ int r;
927 +
928 + init_emulate_ctxt(vcpu);
929 +
930 +- /*
931 +- * We will reenter on the same instruction since we do not set
932 +- * complete_userspace_io. This does not handle watchpoints yet,
933 +- * those would be handled in the emulate_ops.
934 +- */
935 +- if (!(emulation_type & EMULTYPE_SKIP) &&
936 +- kvm_vcpu_check_breakpoint(vcpu, &r))
937 +- return r;
938 +-
939 + r = x86_decode_insn(ctxt, insn, insn_len, emulation_type);
940 +
941 + trace_kvm_emulate_insn_start(vcpu);
942 +@@ -8296,6 +8287,15 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
943 + if (!(emulation_type & EMULTYPE_NO_DECODE)) {
944 + kvm_clear_exception_queue(vcpu);
945 +
946 ++ /*
947 ++ * Return immediately if RIP hits a code breakpoint, such #DBs
948 ++ * are fault-like and are higher priority than any faults on
949 ++ * the code fetch itself.
950 ++ */
951 ++ if (!(emulation_type & EMULTYPE_SKIP) &&
952 ++ kvm_vcpu_check_code_breakpoint(vcpu, &r))
953 ++ return r;
954 ++
955 + r = x86_decode_emulated_instruction(vcpu, emulation_type,
956 + insn, insn_len);
957 + if (r != EMULATION_OK) {
958 +@@ -11655,20 +11655,15 @@ static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
959 + vcpu_put(vcpu);
960 + }
961 +
962 +-static void kvm_free_vcpus(struct kvm *kvm)
963 ++static void kvm_unload_vcpu_mmus(struct kvm *kvm)
964 + {
965 + unsigned long i;
966 + struct kvm_vcpu *vcpu;
967 +
968 +- /*
969 +- * Unpin any mmu pages first.
970 +- */
971 + kvm_for_each_vcpu(i, vcpu, kvm) {
972 + kvm_clear_async_pf_completion_queue(vcpu);
973 + kvm_unload_vcpu_mmu(vcpu);
974 + }
975 +-
976 +- kvm_destroy_vcpus(kvm);
977 + }
978 +
979 + void kvm_arch_sync_events(struct kvm *kvm)
980 +@@ -11774,11 +11769,12 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
981 + __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
982 + mutex_unlock(&kvm->slots_lock);
983 + }
984 ++ kvm_unload_vcpu_mmus(kvm);
985 + static_call_cond(kvm_x86_vm_destroy)(kvm);
986 + kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
987 + kvm_pic_destroy(kvm);
988 + kvm_ioapic_destroy(kvm);
989 +- kvm_free_vcpus(kvm);
990 ++ kvm_destroy_vcpus(kvm);
991 + kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
992 + kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
993 + kvm_mmu_uninit_vm(kvm);
994 +diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c
995 +index b32ffcaad9adf..f3c6b5e15e75b 100644
996 +--- a/crypto/ecrdsa.c
997 ++++ b/crypto/ecrdsa.c
998 +@@ -113,15 +113,15 @@ static int ecrdsa_verify(struct akcipher_request *req)
999 +
1000 + /* Step 1: verify that 0 < r < q, 0 < s < q */
1001 + if (vli_is_zero(r, ndigits) ||
1002 +- vli_cmp(r, ctx->curve->n, ndigits) == 1 ||
1003 ++ vli_cmp(r, ctx->curve->n, ndigits) >= 0 ||
1004 + vli_is_zero(s, ndigits) ||
1005 +- vli_cmp(s, ctx->curve->n, ndigits) == 1)
1006 ++ vli_cmp(s, ctx->curve->n, ndigits) >= 0)
1007 + return -EKEYREJECTED;
1008 +
1009 + /* Step 2: calculate hash (h) of the message (passed as input) */
1010 + /* Step 3: calculate e = h \mod q */
1011 + vli_from_le64(e, digest, ndigits);
1012 +- if (vli_cmp(e, ctx->curve->n, ndigits) == 1)
1013 ++ if (vli_cmp(e, ctx->curve->n, ndigits) >= 0)
1014 + vli_sub(e, e, ctx->curve->n, ndigits);
1015 + if (vli_is_zero(e, ndigits))
1016 + e[0] = 1;
1017 +@@ -137,7 +137,7 @@ static int ecrdsa_verify(struct akcipher_request *req)
1018 + /* Step 6: calculate point C = z_1P + z_2Q, and R = x_c \mod q */
1019 + ecc_point_mult_shamir(&cc, z1, &ctx->curve->g, z2, &ctx->pub_key,
1020 + ctx->curve);
1021 +- if (vli_cmp(cc.x, ctx->curve->n, ndigits) == 1)
1022 ++ if (vli_cmp(cc.x, ctx->curve->n, ndigits) >= 0)
1023 + vli_sub(cc.x, cc.x, ctx->curve->n, ndigits);
1024 +
1025 + /* Step 7: if R == r signature is valid */
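
A brief note on the comparison change above (assuming the usual kernel vli_cmp() convention of returning the sign of left minus right, i.e. -1, 0 or 1):

	/*
	 * vli_cmp(r, n) == 1   only rejects r > n, so r == n slipped through
	 * vli_cmp(r, n) >= 0   rejects r >= n, enforcing the required r < n
	 *
	 * Step 1 demands 0 < r < q and 0 < s < q, and the reductions of e and
	 * cc.x modulo q likewise need a greater-or-equal test to actually
	 * bring the value below q.
	 */
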
1026 +diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
1027 +index f6e91fb432a3b..eab34e24d9446 100644
1028 +--- a/drivers/bluetooth/hci_qca.c
1029 ++++ b/drivers/bluetooth/hci_qca.c
1030 +@@ -696,9 +696,9 @@ static int qca_close(struct hci_uart *hu)
1031 + skb_queue_purge(&qca->tx_wait_q);
1032 + skb_queue_purge(&qca->txq);
1033 + skb_queue_purge(&qca->rx_memdump_q);
1034 +- del_timer(&qca->tx_idle_timer);
1035 +- del_timer(&qca->wake_retrans_timer);
1036 + destroy_workqueue(qca->workqueue);
1037 ++ del_timer_sync(&qca->tx_idle_timer);
1038 ++ del_timer_sync(&qca->wake_retrans_timer);
1039 + qca->hu = NULL;
1040 +
1041 + kfree_skb(qca->rx_skb);
1042 +diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
1043 +index 4704fa553098b..04a3e23a4afc7 100644
1044 +--- a/drivers/char/tpm/tpm2-cmd.c
1045 ++++ b/drivers/char/tpm/tpm2-cmd.c
1046 +@@ -400,7 +400,16 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
1047 + if (!rc) {
1048 + out = (struct tpm2_get_cap_out *)
1049 + &buf.data[TPM_HEADER_SIZE];
1050 +- *value = be32_to_cpu(out->value);
1051 ++ /*
1052 ++ * To prevent failing boot up of some systems, Infineon TPM2.0
1053 ++ * returns SUCCESS on TPM2_Startup in field upgrade mode. Also
1054 ++ * the TPM2_Getcapability command returns a zero length list
1055 ++ * in field upgrade mode.
1056 ++ */
1057 ++ if (be32_to_cpu(out->property_cnt) > 0)
1058 ++ *value = be32_to_cpu(out->value);
1059 ++ else
1060 ++ rc = -ENODATA;
1061 + }
1062 + tpm_buf_destroy(&buf);
1063 + return rc;
1064 +diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
1065 +index 3af4c07a9342f..d3989b257f422 100644
1066 +--- a/drivers/char/tpm/tpm_ibmvtpm.c
1067 ++++ b/drivers/char/tpm/tpm_ibmvtpm.c
1068 +@@ -681,6 +681,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
1069 + if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
1070 + ibmvtpm->rtce_buf != NULL,
1071 + HZ)) {
1072 ++ rc = -ENODEV;
1073 + dev_err(dev, "CRQ response timed out\n");
1074 + goto init_irq_cleanup;
1075 + }
1076 +diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
1077 +index ca0361b2dbb07..f87aa2169e5f5 100644
1078 +--- a/drivers/crypto/caam/ctrl.c
1079 ++++ b/drivers/crypto/caam/ctrl.c
1080 +@@ -609,6 +609,13 @@ static bool check_version(struct fsl_mc_version *mc_version, u32 major,
1081 + }
1082 + #endif
1083 +
1084 ++static bool needs_entropy_delay_adjustment(void)
1085 ++{
1086 ++ if (of_machine_is_compatible("fsl,imx6sx"))
1087 ++ return true;
1088 ++ return false;
1089 ++}
1090 ++
1091 + /* Probe routine for CAAM top (controller) level */
1092 + static int caam_probe(struct platform_device *pdev)
1093 + {
1094 +@@ -855,6 +862,8 @@ static int caam_probe(struct platform_device *pdev)
1095 + * Also, if a handle was instantiated, do not change
1096 + * the TRNG parameters.
1097 + */
1098 ++ if (needs_entropy_delay_adjustment())
1099 ++ ent_delay = 12000;
1100 + if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
1101 + dev_info(dev,
1102 + "Entropy delay = %u\n",
1103 +@@ -871,6 +880,15 @@ static int caam_probe(struct platform_device *pdev)
1104 + */
1105 + ret = instantiate_rng(dev, inst_handles,
1106 + gen_sk);
1107 ++ /*
1108 ++ * Entropy delay is determined via TRNG characterization.
1109 ++ * TRNG characterization is run across different voltages
1110 ++ * and temperatures.
1111 ++ * If worst case value for ent_dly is identified,
1112 ++ * the loop can be skipped for that platform.
1113 ++ */
1114 ++ if (needs_entropy_delay_adjustment())
1115 ++ break;
1116 + if (ret == -EAGAIN)
1117 + /*
1118 + * if here, the loop will rerun,
1119 +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
1120 +index 12120474c80c7..b97a079a56aab 100644
1121 +--- a/drivers/gpu/drm/i915/intel_pm.c
1122 ++++ b/drivers/gpu/drm/i915/intel_pm.c
1123 +@@ -2876,7 +2876,7 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
1124 + }
1125 +
1126 + static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
1127 +- u16 wm[8])
1128 ++ u16 wm[])
1129 + {
1130 + struct intel_uncore *uncore = &dev_priv->uncore;
1131 +
1132 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1133 +index 78bd3ddda4426..aca7909c726d3 100644
1134 +--- a/drivers/hid/hid-ids.h
1135 ++++ b/drivers/hid/hid-ids.h
1136 +@@ -760,6 +760,7 @@
1137 + #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
1138 + #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
1139 + #define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
1140 ++#define USB_DEVICE_ID_LENOVO_X12_TAB 0x60fe
1141 + #define USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E 0x600e
1142 + #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
1143 + #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019
1144 +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
1145 +index 99eabfb4145b5..6bb3890b0f2c9 100644
1146 +--- a/drivers/hid/hid-multitouch.c
1147 ++++ b/drivers/hid/hid-multitouch.c
1148 +@@ -2034,6 +2034,12 @@ static const struct hid_device_id mt_devices[] = {
1149 + USB_VENDOR_ID_LENOVO,
1150 + USB_DEVICE_ID_LENOVO_X1_TAB3) },
1151 +
1152 ++ /* Lenovo X12 TAB Gen 1 */
1153 ++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
1154 ++ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
1155 ++ USB_VENDOR_ID_LENOVO,
1156 ++ USB_DEVICE_ID_LENOVO_X12_TAB) },
1157 ++
1158 + /* MosArt panels */
1159 + { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
1160 + MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
1161 +@@ -2178,6 +2184,9 @@ static const struct hid_device_id mt_devices[] = {
1162 + { .driver_data = MT_CLS_GOOGLE,
1163 + HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_GOOGLE,
1164 + USB_DEVICE_ID_GOOGLE_TOUCH_ROSE) },
1165 ++ { .driver_data = MT_CLS_GOOGLE,
1166 ++ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_GOOGLE,
1167 ++ USB_DEVICE_ID_GOOGLE_WHISKERS) },
1168 +
1169 + /* Generic MT device */
1170 + { HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
1171 +diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
1172 +index f4820fd3dc13e..9c78965360218 100644
1173 +--- a/drivers/i2c/busses/i2c-ismt.c
1174 ++++ b/drivers/i2c/busses/i2c-ismt.c
1175 +@@ -82,6 +82,7 @@
1176 +
1177 + #define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */
1178 + #define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */
1179 ++#define ISMT_LOG_ENTRIES 3 /* number of interrupt cause log entries */
1180 +
1181 + /* Hardware Descriptor Constants - Control Field */
1182 + #define ISMT_DESC_CWRL 0x01 /* Command/Write Length */
1183 +@@ -175,6 +176,8 @@ struct ismt_priv {
1184 + u8 head; /* ring buffer head pointer */
1185 + struct completion cmp; /* interrupt completion */
1186 + u8 buffer[I2C_SMBUS_BLOCK_MAX + 16]; /* temp R/W data buffer */
1187 ++ dma_addr_t log_dma;
1188 ++ u32 *log;
1189 + };
1190 +
1191 + static const struct pci_device_id ismt_ids[] = {
1192 +@@ -411,6 +414,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
1193 + memset(desc, 0, sizeof(struct ismt_desc));
1194 + desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
1195 +
1196 ++ /* Always clear the log entries */
1197 ++ memset(priv->log, 0, ISMT_LOG_ENTRIES * sizeof(u32));
1198 ++
1199 + /* Initialize common control bits */
1200 + if (likely(pci_dev_msi_enabled(priv->pci_dev)))
1201 + desc->control = ISMT_DESC_INT | ISMT_DESC_FAIR;
1202 +@@ -522,6 +528,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
1203 +
1204 + case I2C_SMBUS_BLOCK_PROC_CALL:
1205 + dev_dbg(dev, "I2C_SMBUS_BLOCK_PROC_CALL\n");
1206 ++ if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
1207 ++ return -EINVAL;
1208 ++
1209 + dma_size = I2C_SMBUS_BLOCK_MAX;
1210 + desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, 1);
1211 + desc->wr_len_cmd = data->block[0] + 1;
1212 +@@ -708,6 +717,8 @@ static void ismt_hw_init(struct ismt_priv *priv)
1213 + /* initialize the Master Descriptor Base Address (MDBA) */
1214 + writeq(priv->io_rng_dma, priv->smba + ISMT_MSTR_MDBA);
1215 +
1216 ++ writeq(priv->log_dma, priv->smba + ISMT_GR_SMTICL);
1217 ++
1218 + /* initialize the Master Control Register (MCTRL) */
1219 + writel(ISMT_MCTRL_MEIE, priv->smba + ISMT_MSTR_MCTRL);
1220 +
1221 +@@ -795,6 +806,12 @@ static int ismt_dev_init(struct ismt_priv *priv)
1222 + priv->head = 0;
1223 + init_completion(&priv->cmp);
1224 +
1225 ++ priv->log = dmam_alloc_coherent(&priv->pci_dev->dev,
1226 ++ ISMT_LOG_ENTRIES * sizeof(u32),
1227 ++ &priv->log_dma, GFP_KERNEL);
1228 ++ if (!priv->log)
1229 ++ return -ENOMEM;
1230 ++
1231 + return 0;
1232 + }
1233 +
1234 +diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
1235 +index 12c90aa0900e6..a77cd86fe75ed 100644
1236 +--- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c
1237 ++++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
1238 +@@ -213,6 +213,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
1239 + i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info;
1240 + i2c->adap.dev.parent = dev;
1241 + i2c->adap.dev.of_node = pdev->dev.of_node;
1242 ++ i2c->adap.dev.fwnode = dev->fwnode;
1243 + snprintf(i2c->adap.name, sizeof(i2c->adap.name),
1244 + "Cavium ThunderX i2c adapter at %s", dev_name(dev));
1245 + i2c_set_adapdata(&i2c->adap, i2c);
1246 +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
1247 +index f51aea71cb036..a97ac274d8754 100644
1248 +--- a/drivers/md/dm-crypt.c
1249 ++++ b/drivers/md/dm-crypt.c
1250 +@@ -3449,6 +3449,11 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
1251 + return DM_MAPIO_SUBMITTED;
1252 + }
1253 +
1254 ++static char hex2asc(unsigned char c)
1255 ++{
1256 ++ return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
1257 ++}
1258 ++
1259 + static void crypt_status(struct dm_target *ti, status_type_t type,
1260 + unsigned status_flags, char *result, unsigned maxlen)
1261 + {
1262 +@@ -3467,9 +3472,12 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
1263 + if (cc->key_size > 0) {
1264 + if (cc->key_string)
1265 + DMEMIT(":%u:%s", cc->key_size, cc->key_string);
1266 +- else
1267 +- for (i = 0; i < cc->key_size; i++)
1268 +- DMEMIT("%02x", cc->key[i]);
1269 ++ else {
1270 ++ for (i = 0; i < cc->key_size; i++) {
1271 ++ DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
1272 ++ hex2asc(cc->key[i] & 0xf));
1273 ++ }
1274 ++ }
1275 + } else
1276 + DMEMIT("-");
1277 +
1278 +diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
1279 +index ffe50be8b6875..afd127eef0c1d 100644
1280 +--- a/drivers/md/dm-integrity.c
1281 ++++ b/drivers/md/dm-integrity.c
1282 +@@ -4495,8 +4495,6 @@ try_smaller_buffer:
1283 + }
1284 +
1285 + if (should_write_sb) {
1286 +- int r;
1287 +-
1288 + init_journal(ic, 0, ic->journal_sections, 0);
1289 + r = dm_integrity_failed(ic);
1290 + if (unlikely(r)) {
1291 +diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
1292 +index 0e039a8c0bf2e..a3f2050b9c9b4 100644
1293 +--- a/drivers/md/dm-stats.c
1294 ++++ b/drivers/md/dm-stats.c
1295 +@@ -225,6 +225,7 @@ void dm_stats_cleanup(struct dm_stats *stats)
1296 + atomic_read(&shared->in_flight[READ]),
1297 + atomic_read(&shared->in_flight[WRITE]));
1298 + }
1299 ++ cond_resched();
1300 + }
1301 + dm_stat_free(&s->rcu_head);
1302 + }
1303 +@@ -330,6 +331,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
1304 + for (ni = 0; ni < n_entries; ni++) {
1305 + atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
1306 + atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
1307 ++ cond_resched();
1308 + }
1309 +
1310 + if (s->n_histogram_entries) {
1311 +@@ -342,6 +344,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
1312 + for (ni = 0; ni < n_entries; ni++) {
1313 + s->stat_shared[ni].tmp.histogram = hi;
1314 + hi += s->n_histogram_entries + 1;
1315 ++ cond_resched();
1316 + }
1317 + }
1318 +
1319 +@@ -362,6 +365,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
1320 + for (ni = 0; ni < n_entries; ni++) {
1321 + p[ni].histogram = hi;
1322 + hi += s->n_histogram_entries + 1;
1323 ++ cond_resched();
1324 + }
1325 + }
1326 + }
1327 +@@ -497,6 +501,7 @@ static int dm_stats_list(struct dm_stats *stats, const char *program,
1328 + }
1329 + DMEMIT("\n");
1330 + }
1331 ++ cond_resched();
1332 + }
1333 + mutex_unlock(&stats->mutex);
1334 +
1335 +@@ -774,6 +779,7 @@ static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
1336 + local_irq_enable();
1337 + }
1338 + }
1339 ++ cond_resched();
1340 + }
1341 + }
1342 +
1343 +@@ -889,6 +895,8 @@ static int dm_stats_print(struct dm_stats *stats, int id,
1344 +
1345 + if (unlikely(sz + 1 >= maxlen))
1346 + goto buffer_overflow;
1347 ++
1348 ++ cond_resched();
1349 + }
1350 +
1351 + if (clear)
1352 +diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
1353 +index 80133aae0db37..d6dbd47492a85 100644
1354 +--- a/drivers/md/dm-verity-target.c
1355 ++++ b/drivers/md/dm-verity-target.c
1356 +@@ -1312,6 +1312,7 @@ bad:
1357 +
1358 + static struct target_type verity_target = {
1359 + .name = "verity",
1360 ++ .features = DM_TARGET_IMMUTABLE,
1361 + .version = {1, 8, 0},
1362 + .module = THIS_MODULE,
1363 + .ctr = verity_ctr,
1364 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1365 +index ffe720c73b0a5..ad7a84a82938f 100644
1366 +--- a/drivers/md/raid5.c
1367 ++++ b/drivers/md/raid5.c
1368 +@@ -686,17 +686,17 @@ int raid5_calc_degraded(struct r5conf *conf)
1369 + return degraded;
1370 + }
1371 +
1372 +-static int has_failed(struct r5conf *conf)
1373 ++static bool has_failed(struct r5conf *conf)
1374 + {
1375 +- int degraded;
1376 ++ int degraded = conf->mddev->degraded;
1377 +
1378 +- if (conf->mddev->reshape_position == MaxSector)
1379 +- return conf->mddev->degraded > conf->max_degraded;
1380 ++ if (test_bit(MD_BROKEN, &conf->mddev->flags))
1381 ++ return true;
1382 +
1383 +- degraded = raid5_calc_degraded(conf);
1384 +- if (degraded > conf->max_degraded)
1385 +- return 1;
1386 +- return 0;
1387 ++ if (conf->mddev->reshape_position != MaxSector)
1388 ++ degraded = raid5_calc_degraded(conf);
1389 ++
1390 ++ return degraded > conf->max_degraded;
1391 + }
1392 +
1393 + struct stripe_head *
1394 +@@ -2877,34 +2877,31 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
1395 + unsigned long flags;
1396 + pr_debug("raid456: error called\n");
1397 +
1398 ++ pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n",
1399 ++ mdname(mddev), bdevname(rdev->bdev, b));
1400 ++
1401 + spin_lock_irqsave(&conf->device_lock, flags);
1402 ++ set_bit(Faulty, &rdev->flags);
1403 ++ clear_bit(In_sync, &rdev->flags);
1404 ++ mddev->degraded = raid5_calc_degraded(conf);
1405 +
1406 +- if (test_bit(In_sync, &rdev->flags) &&
1407 +- mddev->degraded == conf->max_degraded) {
1408 +- /*
1409 +- * Don't allow to achieve failed state
1410 +- * Don't try to recover this device
1411 +- */
1412 ++ if (has_failed(conf)) {
1413 ++ set_bit(MD_BROKEN, &conf->mddev->flags);
1414 + conf->recovery_disabled = mddev->recovery_disabled;
1415 +- spin_unlock_irqrestore(&conf->device_lock, flags);
1416 +- return;
1417 ++
1418 ++ pr_crit("md/raid:%s: Cannot continue operation (%d/%d failed).\n",
1419 ++ mdname(mddev), mddev->degraded, conf->raid_disks);
1420 ++ } else {
1421 ++ pr_crit("md/raid:%s: Operation continuing on %d devices.\n",
1422 ++ mdname(mddev), conf->raid_disks - mddev->degraded);
1423 + }
1424 +
1425 +- set_bit(Faulty, &rdev->flags);
1426 +- clear_bit(In_sync, &rdev->flags);
1427 +- mddev->degraded = raid5_calc_degraded(conf);
1428 + spin_unlock_irqrestore(&conf->device_lock, flags);
1429 + set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1430 +
1431 + set_bit(Blocked, &rdev->flags);
1432 + set_mask_bits(&mddev->sb_flags, 0,
1433 + BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1434 +- pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
1435 +- "md/raid:%s: Operation continuing on %d devices.\n",
1436 +- mdname(mddev),
1437 +- bdevname(rdev->bdev, b),
1438 +- mdname(mddev),
1439 +- conf->raid_disks - mddev->degraded);
1440 + r5c_update_on_rdev_error(mddev, rdev);
1441 + }
1442 +
1443 +diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c
1444 +index be3f6ea555597..84279a6808730 100644
1445 +--- a/drivers/media/i2c/imx412.c
1446 ++++ b/drivers/media/i2c/imx412.c
1447 +@@ -1011,7 +1011,7 @@ static int imx412_power_on(struct device *dev)
1448 + struct imx412 *imx412 = to_imx412(sd);
1449 + int ret;
1450 +
1451 +- gpiod_set_value_cansleep(imx412->reset_gpio, 1);
1452 ++ gpiod_set_value_cansleep(imx412->reset_gpio, 0);
1453 +
1454 + ret = clk_prepare_enable(imx412->inclk);
1455 + if (ret) {
1456 +@@ -1024,7 +1024,7 @@ static int imx412_power_on(struct device *dev)
1457 + return 0;
1458 +
1459 + error_reset:
1460 +- gpiod_set_value_cansleep(imx412->reset_gpio, 0);
1461 ++ gpiod_set_value_cansleep(imx412->reset_gpio, 1);
1462 +
1463 + return ret;
1464 + }
1465 +@@ -1040,10 +1040,10 @@ static int imx412_power_off(struct device *dev)
1466 + struct v4l2_subdev *sd = dev_get_drvdata(dev);
1467 + struct imx412 *imx412 = to_imx412(sd);
1468 +
1469 +- gpiod_set_value_cansleep(imx412->reset_gpio, 0);
1470 +-
1471 + clk_disable_unprepare(imx412->inclk);
1472 +
1473 ++ gpiod_set_value_cansleep(imx412->reset_gpio, 1);
1474 ++
1475 + return 0;
1476 + }
1477 +
1478 +diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
1479 +index caf48023f8ea5..5231818943c6e 100644
1480 +--- a/drivers/net/ethernet/faraday/ftgmac100.c
1481 ++++ b/drivers/net/ethernet/faraday/ftgmac100.c
1482 +@@ -1928,6 +1928,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
1483 + /* AST2400 doesn't have working HW checksum generation */
1484 + if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
1485 + netdev->hw_features &= ~NETIF_F_HW_CSUM;
1486 ++
1487 ++ /* AST2600 tx checksum with NCSI is broken */
1488 ++ if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac"))
1489 ++ netdev->hw_features &= ~NETIF_F_HW_CSUM;
1490 ++
1491 + if (np && of_get_property(np, "no-hw-checksum", NULL))
1492 + netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
1493 + netdev->features |= netdev->hw_features;
1494 +diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
1495 +index 2ecfc17544a6a..dde55ccae9d73 100644
1496 +--- a/drivers/net/ipa/ipa_endpoint.c
1497 ++++ b/drivers/net/ipa/ipa_endpoint.c
1498 +@@ -723,13 +723,15 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
1499 +
1500 + if (endpoint->data->aggregation) {
1501 + if (!endpoint->toward_ipa) {
1502 ++ u32 buffer_size;
1503 + bool close_eof;
1504 + u32 limit;
1505 +
1506 + val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
1507 + val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
1508 +
1509 +- limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
1510 ++ buffer_size = IPA_RX_BUFFER_SIZE - NET_SKB_PAD;
1511 ++ limit = ipa_aggr_size_kb(buffer_size);
1512 + val |= aggr_byte_limit_encoded(version, limit);
1513 +
1514 + limit = IPA_AGGR_TIME_LIMIT;
1515 +diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
1516 +index a491db46e3bd4..d9f6367b9993d 100644
1517 +--- a/drivers/nfc/pn533/pn533.c
1518 ++++ b/drivers/nfc/pn533/pn533.c
1519 +@@ -2787,13 +2787,14 @@ void pn53x_common_clean(struct pn533 *priv)
1520 + {
1521 + struct pn533_cmd *cmd, *n;
1522 +
1523 ++ /* delete the timer before cleanup the worker */
1524 ++ del_timer_sync(&priv->listen_timer);
1525 ++
1526 + flush_delayed_work(&priv->poll_work);
1527 + destroy_workqueue(priv->wq);
1528 +
1529 + skb_queue_purge(&priv->resp_q);
1530 +
1531 +- del_timer(&priv->listen_timer);
1532 +-
1533 + list_for_each_entry_safe(cmd, n, &priv->cmd_queue, queue) {
1534 + list_del(&cmd->queue);
1535 + kfree(cmd);
1536 +diff --git a/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c b/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
1537 +index 2801ca7062732..68a5b627fb9b2 100644
1538 +--- a/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
1539 ++++ b/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
1540 +@@ -204,7 +204,7 @@ static const struct sunxi_desc_pin suniv_f1c100s_pins[] = {
1541 + SUNXI_FUNCTION(0x0, "gpio_in"),
1542 + SUNXI_FUNCTION(0x1, "gpio_out"),
1543 + SUNXI_FUNCTION(0x2, "lcd"), /* D20 */
1544 +- SUNXI_FUNCTION(0x3, "lvds1"), /* RX */
1545 ++ SUNXI_FUNCTION(0x3, "uart2"), /* RX */
1546 + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)),
1547 + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
1548 + SUNXI_FUNCTION(0x0, "gpio_in"),
1549 +diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
1550 +index 03f1423071749..9f42f25fab920 100644
1551 +--- a/fs/exfat/balloc.c
1552 ++++ b/fs/exfat/balloc.c
1553 +@@ -148,7 +148,9 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync)
1554 + struct super_block *sb = inode->i_sb;
1555 + struct exfat_sb_info *sbi = EXFAT_SB(sb);
1556 +
1557 +- WARN_ON(clu < EXFAT_FIRST_CLUSTER);
1558 ++ if (!is_valid_cluster(sbi, clu))
1559 ++ return -EINVAL;
1560 ++
1561 + ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
1562 + i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
1563 + b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
1564 +@@ -166,7 +168,9 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
1565 + struct exfat_sb_info *sbi = EXFAT_SB(sb);
1566 + struct exfat_mount_options *opts = &sbi->options;
1567 +
1568 +- WARN_ON(clu < EXFAT_FIRST_CLUSTER);
1569 ++ if (!is_valid_cluster(sbi, clu))
1570 ++ return;
1571 ++
1572 + ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
1573 + i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
1574 + b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
1575 +diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
1576 +index 619e5b4bed100..0f2b1b196fa25 100644
1577 +--- a/fs/exfat/exfat_fs.h
1578 ++++ b/fs/exfat/exfat_fs.h
1579 +@@ -380,6 +380,12 @@ static inline int exfat_sector_to_cluster(struct exfat_sb_info *sbi,
1580 + EXFAT_RESERVED_CLUSTERS;
1581 + }
1582 +
1583 ++static inline bool is_valid_cluster(struct exfat_sb_info *sbi,
1584 ++ unsigned int clus)
1585 ++{
1586 ++ return clus >= EXFAT_FIRST_CLUSTER && clus < sbi->num_clusters;
1587 ++}
1588 ++
1589 + /* super.c */
1590 + int exfat_set_volume_dirty(struct super_block *sb);
1591 + int exfat_clear_volume_dirty(struct super_block *sb);
1592 +diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
1593 +index a3464e56a7e16..421c273531049 100644
1594 +--- a/fs/exfat/fatent.c
1595 ++++ b/fs/exfat/fatent.c
1596 +@@ -81,12 +81,6 @@ int exfat_ent_set(struct super_block *sb, unsigned int loc,
1597 + return 0;
1598 + }
1599 +
1600 +-static inline bool is_valid_cluster(struct exfat_sb_info *sbi,
1601 +- unsigned int clus)
1602 +-{
1603 +- return clus >= EXFAT_FIRST_CLUSTER && clus < sbi->num_clusters;
1604 +-}
1605 +-
1606 + int exfat_ent_get(struct super_block *sb, unsigned int loc,
1607 + unsigned int *content)
1608 + {
1609 +diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
1610 +index 465e39ff018d4..b3b8ac4a9ace6 100644
1611 +--- a/fs/nfs/internal.h
1612 ++++ b/fs/nfs/internal.h
1613 +@@ -827,6 +827,7 @@ static inline bool nfs_error_is_fatal_on_server(int err)
1614 + case 0:
1615 + case -ERESTARTSYS:
1616 + case -EINTR:
1617 ++ case -ENOMEM:
1618 + return false;
1619 + }
1620 + return nfs_error_is_fatal(err);
1621 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
1622 +index f3b71fd1d1341..8ad64b207b58a 100644
1623 +--- a/fs/nfsd/nfs4state.c
1624 ++++ b/fs/nfsd/nfs4state.c
1625 +@@ -7330,16 +7330,12 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
1626 + if (sop->so_is_open_owner || !same_owner_str(sop, owner))
1627 + continue;
1628 +
1629 +- /* see if there are still any locks associated with it */
1630 +- lo = lockowner(sop);
1631 +- list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
1632 +- if (check_for_locks(stp->st_stid.sc_file, lo)) {
1633 +- status = nfserr_locks_held;
1634 +- spin_unlock(&clp->cl_lock);
1635 +- return status;
1636 +- }
1637 ++ if (atomic_read(&sop->so_count) != 1) {
1638 ++ spin_unlock(&clp->cl_lock);
1639 ++ return nfserr_locks_held;
1640 + }
1641 +
1642 ++ lo = lockowner(sop);
1643 + nfs4_get_stateowner(sop);
1644 + break;
1645 + }
1646 +diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
1647 +index 29813200c7af9..e81246d9a08ea 100644
1648 +--- a/fs/ntfs3/super.c
1649 ++++ b/fs/ntfs3/super.c
1650 +@@ -668,9 +668,11 @@ static u32 format_size_gb(const u64 bytes, u32 *mb)
1651 +
1652 + static u32 true_sectors_per_clst(const struct NTFS_BOOT *boot)
1653 + {
1654 +- return boot->sectors_per_clusters <= 0x80
1655 +- ? boot->sectors_per_clusters
1656 +- : (1u << (0 - boot->sectors_per_clusters));
1657 ++ if (boot->sectors_per_clusters <= 0x80)
1658 ++ return boot->sectors_per_clusters;
1659 ++ if (boot->sectors_per_clusters >= 0xf4) /* limit shift to 2MB max */
1660 ++ return 1U << (0 - boot->sectors_per_clusters);
1661 ++ return -EINVAL;
1662 + }
1663 +
1664 + /*
1665 +@@ -713,6 +715,8 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
1666 +
1667 + /* cluster size: 512, 1K, 2K, 4K, ... 2M */
1668 + sct_per_clst = true_sectors_per_clst(boot);
1669 ++ if ((int)sct_per_clst < 0)
1670 ++ goto out;
1671 + if (!is_power_of_2(sct_per_clst))
1672 + goto out;
1673 +
1674 +diff --git a/fs/pipe.c b/fs/pipe.c
1675 +index 2667db9506e2f..1025f8ad1aa56 100644
1676 +--- a/fs/pipe.c
1677 ++++ b/fs/pipe.c
1678 +@@ -653,7 +653,7 @@ pipe_poll(struct file *filp, poll_table *wait)
1679 + unsigned int head, tail;
1680 +
1681 + /* Epoll has some historical nasty semantics, this enables them */
1682 +- pipe->poll_usage = 1;
1683 ++ WRITE_ONCE(pipe->poll_usage, true);
1684 +
1685 + /*
1686 + * Reading pipe state only -- no need for acquiring the semaphore.
1687 +@@ -1245,30 +1245,33 @@ unsigned int round_pipe_size(unsigned long size)
1688 +
1689 + /*
1690 + * Resize the pipe ring to a number of slots.
1691 ++ *
1692 ++ * Note the pipe can be reduced in capacity, but only if the current
1693 ++ * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
1694 ++ * returned instead.
1695 + */
1696 + int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
1697 + {
1698 + struct pipe_buffer *bufs;
1699 + unsigned int head, tail, mask, n;
1700 +
1701 +- /*
1702 +- * We can shrink the pipe, if arg is greater than the ring occupancy.
1703 +- * Since we don't expect a lot of shrink+grow operations, just free and
1704 +- * allocate again like we would do for growing. If the pipe currently
1705 +- * contains more buffers than arg, then return busy.
1706 +- */
1707 +- mask = pipe->ring_size - 1;
1708 +- head = pipe->head;
1709 +- tail = pipe->tail;
1710 +- n = pipe_occupancy(pipe->head, pipe->tail);
1711 +- if (nr_slots < n)
1712 +- return -EBUSY;
1713 +-
1714 + bufs = kcalloc(nr_slots, sizeof(*bufs),
1715 + GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
1716 + if (unlikely(!bufs))
1717 + return -ENOMEM;
1718 +
1719 ++ spin_lock_irq(&pipe->rd_wait.lock);
1720 ++ mask = pipe->ring_size - 1;
1721 ++ head = pipe->head;
1722 ++ tail = pipe->tail;
1723 ++
1724 ++ n = pipe_occupancy(head, tail);
1725 ++ if (nr_slots < n) {
1726 ++ spin_unlock_irq(&pipe->rd_wait.lock);
1727 ++ kfree(bufs);
1728 ++ return -EBUSY;
1729 ++ }
1730 ++
1731 + /*
1732 + * The pipe array wraps around, so just start the new one at zero
1733 + * and adjust the indices.
1734 +@@ -1300,6 +1303,8 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
1735 + pipe->tail = tail;
1736 + pipe->head = head;
1737 +
1738 ++ spin_unlock_irq(&pipe->rd_wait.lock);
1739 ++
1740 + /* This might have made more room for writers */
1741 + wake_up_interruptible(&pipe->wr_wait);
1742 + return 0;
1743 +diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
1744 +index 37b3906af8b19..40b561b09e47f 100644
1745 +--- a/include/linux/bpf_local_storage.h
1746 ++++ b/include/linux/bpf_local_storage.h
1747 +@@ -143,9 +143,9 @@ void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
1748 +
1749 + bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
1750 + struct bpf_local_storage_elem *selem,
1751 +- bool uncharge_omem);
1752 ++ bool uncharge_omem, bool use_trace_rcu);
1753 +
1754 +-void bpf_selem_unlink(struct bpf_local_storage_elem *selem);
1755 ++void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu);
1756 +
1757 + void bpf_selem_link_map(struct bpf_local_storage_map *smap,
1758 + struct bpf_local_storage_elem *selem);
1759 +diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
1760 +index c00c618ef290d..cb0fd633a6106 100644
1761 +--- a/include/linux/pipe_fs_i.h
1762 ++++ b/include/linux/pipe_fs_i.h
1763 +@@ -71,7 +71,7 @@ struct pipe_inode_info {
1764 + unsigned int files;
1765 + unsigned int r_counter;
1766 + unsigned int w_counter;
1767 +- unsigned int poll_usage;
1768 ++ bool poll_usage;
1769 + struct page *tmp_page;
1770 + struct fasync_struct *fasync_readers;
1771 + struct fasync_struct *fasync_writers;
1772 +diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
1773 +index 13807ea94cd2b..2d524782f53b7 100644
1774 +--- a/include/net/netfilter/nf_conntrack_core.h
1775 ++++ b/include/net/netfilter/nf_conntrack_core.h
1776 +@@ -58,8 +58,13 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
1777 + int ret = NF_ACCEPT;
1778 +
1779 + if (ct) {
1780 +- if (!nf_ct_is_confirmed(ct))
1781 ++ if (!nf_ct_is_confirmed(ct)) {
1782 + ret = __nf_conntrack_confirm(skb);
1783 ++
1784 ++ if (ret == NF_ACCEPT)
1785 ++ ct = (struct nf_conn *)skb_nfct(skb);
1786 ++ }
1787 ++
1788 + if (likely(ret == NF_ACCEPT))
1789 + nf_ct_deliver_cached_events(ct);
1790 + }
1791 +diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
1792 +index e29d9e3d853ea..0da52c3bcd4d8 100644
1793 +--- a/kernel/bpf/bpf_inode_storage.c
1794 ++++ b/kernel/bpf/bpf_inode_storage.c
1795 +@@ -90,7 +90,7 @@ void bpf_inode_storage_free(struct inode *inode)
1796 + */
1797 + bpf_selem_unlink_map(selem);
1798 + free_inode_storage = bpf_selem_unlink_storage_nolock(
1799 +- local_storage, selem, false);
1800 ++ local_storage, selem, false, false);
1801 + }
1802 + raw_spin_unlock_bh(&local_storage->lock);
1803 + rcu_read_unlock();
1804 +@@ -149,7 +149,7 @@ static int inode_storage_delete(struct inode *inode, struct bpf_map *map)
1805 + if (!sdata)
1806 + return -ENOENT;
1807 +
1808 +- bpf_selem_unlink(SELEM(sdata));
1809 ++ bpf_selem_unlink(SELEM(sdata), true);
1810 +
1811 + return 0;
1812 + }
1813 +diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
1814 +index 71de2a89869c8..79a8edfa7a2f4 100644
1815 +--- a/kernel/bpf/bpf_local_storage.c
1816 ++++ b/kernel/bpf/bpf_local_storage.c
1817 +@@ -106,7 +106,7 @@ static void bpf_selem_free_rcu(struct rcu_head *rcu)
1818 + */
1819 + bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
1820 + struct bpf_local_storage_elem *selem,
1821 +- bool uncharge_mem)
1822 ++ bool uncharge_mem, bool use_trace_rcu)
1823 + {
1824 + struct bpf_local_storage_map *smap;
1825 + bool free_local_storage;
1826 +@@ -150,11 +150,16 @@ bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
1827 + SDATA(selem))
1828 + RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);
1829 +
1830 +- call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_rcu);
1831 ++ if (use_trace_rcu)
1832 ++ call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_rcu);
1833 ++ else
1834 ++ kfree_rcu(selem, rcu);
1835 ++
1836 + return free_local_storage;
1837 + }
1838 +
1839 +-static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem)
1840 ++static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
1841 ++ bool use_trace_rcu)
1842 + {
1843 + struct bpf_local_storage *local_storage;
1844 + bool free_local_storage = false;
1845 +@@ -169,12 +174,16 @@ static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem)
1846 + raw_spin_lock_irqsave(&local_storage->lock, flags);
1847 + if (likely(selem_linked_to_storage(selem)))
1848 + free_local_storage = bpf_selem_unlink_storage_nolock(
1849 +- local_storage, selem, true);
1850 ++ local_storage, selem, true, use_trace_rcu);
1851 + raw_spin_unlock_irqrestore(&local_storage->lock, flags);
1852 +
1853 +- if (free_local_storage)
1854 +- call_rcu_tasks_trace(&local_storage->rcu,
1855 ++ if (free_local_storage) {
1856 ++ if (use_trace_rcu)
1857 ++ call_rcu_tasks_trace(&local_storage->rcu,
1858 + bpf_local_storage_free_rcu);
1859 ++ else
1860 ++ kfree_rcu(local_storage, rcu);
1861 ++ }
1862 + }
1863 +
1864 + void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
1865 +@@ -214,14 +223,14 @@ void bpf_selem_link_map(struct bpf_local_storage_map *smap,
1866 + raw_spin_unlock_irqrestore(&b->lock, flags);
1867 + }
1868 +
1869 +-void bpf_selem_unlink(struct bpf_local_storage_elem *selem)
1870 ++void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu)
1871 + {
1872 + /* Always unlink from map before unlinking from local_storage
1873 + * because selem will be freed after successfully unlinked from
1874 + * the local_storage.
1875 + */
1876 + bpf_selem_unlink_map(selem);
1877 +- __bpf_selem_unlink_storage(selem);
1878 ++ __bpf_selem_unlink_storage(selem, use_trace_rcu);
1879 + }
1880 +
1881 + struct bpf_local_storage_data *
1882 +@@ -454,7 +463,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
1883 + if (old_sdata) {
1884 + bpf_selem_unlink_map(SELEM(old_sdata));
1885 + bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
1886 +- false);
1887 ++ false, true);
1888 + }
1889 +
1890 + unlock:
1891 +@@ -532,7 +541,7 @@ void bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
1892 + migrate_disable();
1893 + __this_cpu_inc(*busy_counter);
1894 + }
1895 +- bpf_selem_unlink(selem);
1896 ++ bpf_selem_unlink(selem, false);
1897 + if (busy_counter) {
1898 + __this_cpu_dec(*busy_counter);
1899 + migrate_enable();
1900 +diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c
1901 +index 5da7bed0f5f6e..be6c533bb862f 100644
1902 +--- a/kernel/bpf/bpf_task_storage.c
1903 ++++ b/kernel/bpf/bpf_task_storage.c
1904 +@@ -102,7 +102,7 @@ void bpf_task_storage_free(struct task_struct *task)
1905 + */
1906 + bpf_selem_unlink_map(selem);
1907 + free_task_storage = bpf_selem_unlink_storage_nolock(
1908 +- local_storage, selem, false);
1909 ++ local_storage, selem, false, false);
1910 + }
1911 + raw_spin_unlock_irqrestore(&local_storage->lock, flags);
1912 + bpf_task_storage_unlock();
1913 +@@ -191,7 +191,7 @@ static int task_storage_delete(struct task_struct *task, struct bpf_map *map)
1914 + if (!sdata)
1915 + return -ENOENT;
1916 +
1917 +- bpf_selem_unlink(SELEM(sdata));
1918 ++ bpf_selem_unlink(SELEM(sdata), true);
1919 +
1920 + return 0;
1921 + }
1922 +diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
1923 +index de3e5bc6781fe..64c44eed8c078 100644
1924 +--- a/kernel/bpf/core.c
1925 ++++ b/kernel/bpf/core.c
1926 +@@ -1157,6 +1157,16 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1927 + insn = clone->insnsi;
1928 +
1929 + for (i = 0; i < insn_cnt; i++, insn++) {
1930 ++ if (bpf_pseudo_func(insn)) {
1931 ++ /* ld_imm64 with an address of bpf subprog is not
1932 ++ * a user controlled constant. Don't randomize it,
1933 ++ * since it will conflict with jit_subprogs() logic.
1934 ++ */
1935 ++ insn++;
1936 ++ i++;
1937 ++ continue;
1938 ++ }
1939 ++
1940 + /* We temporarily need to hold the original ld64 insn
1941 + * so that we can still access the first part in the
1942 + * second blinding run.
1943 +diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
1944 +index 2823dcefae10e..354d066ce6955 100644
1945 +--- a/kernel/bpf/stackmap.c
1946 ++++ b/kernel/bpf/stackmap.c
1947 +@@ -100,7 +100,6 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
1948 + return ERR_PTR(-E2BIG);
1949 +
1950 + cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
1951 +- cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
1952 + smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
1953 + if (!smap)
1954 + return ERR_PTR(-ENOMEM);
1955 +diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
1956 +index 5e7edf9130601..e854ebd0aa652 100644
1957 +--- a/kernel/bpf/trampoline.c
1958 ++++ b/kernel/bpf/trampoline.c
1959 +@@ -423,7 +423,7 @@ int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
1960 + {
1961 + enum bpf_tramp_prog_type kind;
1962 + int err = 0;
1963 +- int cnt;
1964 ++ int cnt = 0, i;
1965 +
1966 + kind = bpf_attach_type_to_tramp(prog);
1967 + mutex_lock(&tr->mutex);
1968 +@@ -434,7 +434,10 @@ int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
1969 + err = -EBUSY;
1970 + goto out;
1971 + }
1972 +- cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT];
1973 ++
1974 ++ for (i = 0; i < BPF_TRAMP_MAX; i++)
1975 ++ cnt += tr->progs_cnt[i];
1976 ++
1977 + if (kind == BPF_TRAMP_REPLACE) {
1978 + /* Cannot attach extension if fentry/fexit are in use. */
1979 + if (cnt) {
1980 +@@ -512,16 +515,19 @@ out:
1981 +
1982 + void bpf_trampoline_put(struct bpf_trampoline *tr)
1983 + {
1984 ++ int i;
1985 ++
1986 + if (!tr)
1987 + return;
1988 + mutex_lock(&trampoline_mutex);
1989 + if (!refcount_dec_and_test(&tr->refcnt))
1990 + goto out;
1991 + WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
1992 +- if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
1993 +- goto out;
1994 +- if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
1995 +- goto out;
1996 ++
1997 ++ for (i = 0; i < BPF_TRAMP_MAX; i++)
1998 ++ if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[i])))
1999 ++ goto out;
2000 ++
2001 + /* This code will be executed even when the last bpf_tramp_image
2002 + * is alive. All progs are detached from the trampoline and the
2003 + * trampoline image is patched with jmp into epilogue to skip
2004 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
2005 +index a39eedecc93a1..1d93e90b4e09f 100644
2006 +--- a/kernel/bpf/verifier.c
2007 ++++ b/kernel/bpf/verifier.c
2008 +@@ -4832,6 +4832,11 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
2009 + return check_packet_access(env, regno, reg->off, access_size,
2010 + zero_size_allowed);
2011 + case PTR_TO_MAP_KEY:
2012 ++ if (meta && meta->raw_mode) {
2013 ++ verbose(env, "R%d cannot write into %s\n", regno,
2014 ++ reg_type_str(env, reg->type));
2015 ++ return -EACCES;
2016 ++ }
2017 + return check_mem_region_access(env, regno, reg->off, access_size,
2018 + reg->map_ptr->key_size, false);
2019 + case PTR_TO_MAP_VALUE:
2020 +@@ -4842,13 +4847,23 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
2021 + return check_map_access(env, regno, reg->off, access_size,
2022 + zero_size_allowed);
2023 + case PTR_TO_MEM:
2024 ++ if (type_is_rdonly_mem(reg->type)) {
2025 ++ if (meta && meta->raw_mode) {
2026 ++ verbose(env, "R%d cannot write into %s\n", regno,
2027 ++ reg_type_str(env, reg->type));
2028 ++ return -EACCES;
2029 ++ }
2030 ++ }
2031 + return check_mem_region_access(env, regno, reg->off,
2032 + access_size, reg->mem_size,
2033 + zero_size_allowed);
2034 + case PTR_TO_BUF:
2035 + if (type_is_rdonly_mem(reg->type)) {
2036 +- if (meta && meta->raw_mode)
2037 ++ if (meta && meta->raw_mode) {
2038 ++ verbose(env, "R%d cannot write into %s\n", regno,
2039 ++ reg_type_str(env, reg->type));
2040 + return -EACCES;
2041 ++ }
2042 +
2043 + buf_info = "rdonly";
2044 + max_access = &env->prog->aux->max_rdonly_access;
2045 +diff --git a/lib/assoc_array.c b/lib/assoc_array.c
2046 +index 079c72e26493e..ca0b4f360c1a0 100644
2047 +--- a/lib/assoc_array.c
2048 ++++ b/lib/assoc_array.c
2049 +@@ -1461,6 +1461,7 @@ int assoc_array_gc(struct assoc_array *array,
2050 + struct assoc_array_ptr *cursor, *ptr;
2051 + struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
2052 + unsigned long nr_leaves_on_tree;
2053 ++ bool retained;
2054 + int keylen, slot, nr_free, next_slot, i;
2055 +
2056 + pr_devel("-->%s()\n", __func__);
2057 +@@ -1536,6 +1537,7 @@ continue_node:
2058 + goto descend;
2059 + }
2060 +
2061 ++retry_compress:
2062 + pr_devel("-- compress node %p --\n", new_n);
2063 +
2064 + /* Count up the number of empty slots in this node and work out the
2065 +@@ -1553,6 +1555,7 @@ continue_node:
2066 + pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
2067 +
2068 + /* See what we can fold in */
2069 ++ retained = false;
2070 + next_slot = 0;
2071 + for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
2072 + struct assoc_array_shortcut *s;
2073 +@@ -1602,9 +1605,14 @@ continue_node:
2074 + pr_devel("[%d] retain node %lu/%d [nx %d]\n",
2075 + slot, child->nr_leaves_on_branch, nr_free + 1,
2076 + next_slot);
2077 ++ retained = true;
2078 + }
2079 + }
2080 +
2081 ++ if (retained && new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
2082 ++ pr_devel("internal nodes remain despite enough space, retrying\n");
2083 ++ goto retry_compress;
2084 ++ }
2085 + pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
2086 +
2087 + nr_leaves_on_tree = new_n->nr_leaves_on_branch;
2088 +diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
2089 +index af9302141bcf6..e5c5315da2741 100644
2090 +--- a/lib/percpu-refcount.c
2091 ++++ b/lib/percpu-refcount.c
2092 +@@ -76,6 +76,7 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
2093 + data = kzalloc(sizeof(*ref->data), gfp);
2094 + if (!data) {
2095 + free_percpu((void __percpu *)ref->percpu_count_ptr);
2096 ++ ref->percpu_count_ptr = 0;
2097 + return -ENOMEM;
2098 + }
2099 +
2100 +diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
2101 +index 9152fbde33b50..5d5fc04385b8d 100644
2102 +--- a/mm/zsmalloc.c
2103 ++++ b/mm/zsmalloc.c
2104 +@@ -1718,11 +1718,40 @@ static enum fullness_group putback_zspage(struct size_class *class,
2105 + */
2106 + static void lock_zspage(struct zspage *zspage)
2107 + {
2108 +- struct page *page = get_first_page(zspage);
2109 ++ struct page *curr_page, *page;
2110 +
2111 +- do {
2112 +- lock_page(page);
2113 +- } while ((page = get_next_page(page)) != NULL);
2114 ++ /*
2115 ++ * Pages we haven't locked yet can be migrated off the list while we're
2116 ++ * trying to lock them, so we need to be careful and only attempt to
2117 ++ * lock each page under migrate_read_lock(). Otherwise, the page we lock
2118 ++ * may no longer belong to the zspage. This means that we may wait for
2119 ++ * the wrong page to unlock, so we must take a reference to the page
2120 ++ * prior to waiting for it to unlock outside migrate_read_lock().
2121 ++ */
2122 ++ while (1) {
2123 ++ migrate_read_lock(zspage);
2124 ++ page = get_first_page(zspage);
2125 ++ if (trylock_page(page))
2126 ++ break;
2127 ++ get_page(page);
2128 ++ migrate_read_unlock(zspage);
2129 ++ wait_on_page_locked(page);
2130 ++ put_page(page);
2131 ++ }
2132 ++
2133 ++ curr_page = page;
2134 ++ while ((page = get_next_page(curr_page))) {
2135 ++ if (trylock_page(page)) {
2136 ++ curr_page = page;
2137 ++ } else {
2138 ++ get_page(page);
2139 ++ migrate_read_unlock(zspage);
2140 ++ wait_on_page_locked(page);
2141 ++ put_page(page);
2142 ++ migrate_read_lock(zspage);
2143 ++ }
2144 ++ }
2145 ++ migrate_read_unlock(zspage);
2146 + }
2147 +
2148 + static int zs_init_fs_context(struct fs_context *fc)
2149 +diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
2150 +index d9c37fd108097..4fc5bf519ba59 100644
2151 +--- a/net/core/bpf_sk_storage.c
2152 ++++ b/net/core/bpf_sk_storage.c
2153 +@@ -40,7 +40,7 @@ static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
2154 + if (!sdata)
2155 + return -ENOENT;
2156 +
2157 +- bpf_selem_unlink(SELEM(sdata));
2158 ++ bpf_selem_unlink(SELEM(sdata), true);
2159 +
2160 + return 0;
2161 + }
2162 +@@ -75,8 +75,8 @@ void bpf_sk_storage_free(struct sock *sk)
2163 + * sk_storage.
2164 + */
2165 + bpf_selem_unlink_map(selem);
2166 +- free_sk_storage = bpf_selem_unlink_storage_nolock(sk_storage,
2167 +- selem, true);
2168 ++ free_sk_storage = bpf_selem_unlink_storage_nolock(
2169 ++ sk_storage, selem, true, false);
2170 + }
2171 + raw_spin_unlock_bh(&sk_storage->lock);
2172 + rcu_read_unlock();
2173 +diff --git a/net/core/filter.c b/net/core/filter.c
2174 +index af0bafe9dcce2..f8fbb5fa74f35 100644
2175 +--- a/net/core/filter.c
2176 ++++ b/net/core/filter.c
2177 +@@ -1687,7 +1687,7 @@ BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
2178 +
2179 + if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
2180 + return -EINVAL;
2181 +- if (unlikely(offset > 0xffff))
2182 ++ if (unlikely(offset > INT_MAX))
2183 + return -EFAULT;
2184 + if (unlikely(bpf_try_make_writable(skb, offset + len)))
2185 + return -EFAULT;
2186 +@@ -1722,7 +1722,7 @@ BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
2187 + {
2188 + void *ptr;
2189 +
2190 +- if (unlikely(offset > 0xffff))
2191 ++ if (unlikely(offset > INT_MAX))
2192 + goto err_clear;
2193 +
2194 + ptr = skb_header_pointer(skb, offset, len, to);
2195 +diff --git a/net/key/af_key.c b/net/key/af_key.c
2196 +index 92e9d75dba2f4..339d95df19d32 100644
2197 +--- a/net/key/af_key.c
2198 ++++ b/net/key/af_key.c
2199 +@@ -2900,7 +2900,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
2200 + break;
2201 + if (!aalg->pfkey_supported)
2202 + continue;
2203 +- if (aalg_tmpl_set(t, aalg))
2204 ++ if (aalg_tmpl_set(t, aalg) && aalg->available)
2205 + sz += sizeof(struct sadb_comb);
2206 + }
2207 + return sz + sizeof(struct sadb_prop);
2208 +@@ -2918,7 +2918,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
2209 + if (!ealg->pfkey_supported)
2210 + continue;
2211 +
2212 +- if (!(ealg_tmpl_set(t, ealg)))
2213 ++ if (!(ealg_tmpl_set(t, ealg) && ealg->available))
2214 + continue;
2215 +
2216 + for (k = 1; ; k++) {
2217 +@@ -2929,7 +2929,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
2218 + if (!aalg->pfkey_supported)
2219 + continue;
2220 +
2221 +- if (aalg_tmpl_set(t, aalg))
2222 ++ if (aalg_tmpl_set(t, aalg) && aalg->available)
2223 + sz += sizeof(struct sadb_comb);
2224 + }
2225 + }
2226 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
2227 +index 30d29d038d095..42cc703a68e50 100644
2228 +--- a/net/netfilter/nf_tables_api.c
2229 ++++ b/net/netfilter/nf_tables_api.c
2230 +@@ -222,12 +222,18 @@ err_register:
2231 + }
2232 +
2233 + static void nft_netdev_unregister_hooks(struct net *net,
2234 +- struct list_head *hook_list)
2235 ++ struct list_head *hook_list,
2236 ++ bool release_netdev)
2237 + {
2238 +- struct nft_hook *hook;
2239 ++ struct nft_hook *hook, *next;
2240 +
2241 +- list_for_each_entry(hook, hook_list, list)
2242 ++ list_for_each_entry_safe(hook, next, hook_list, list) {
2243 + nf_unregister_net_hook(net, &hook->ops);
2244 ++ if (release_netdev) {
2245 ++ list_del(&hook->list);
2246 ++ kfree_rcu(hook, rcu);
2247 ++ }
2248 ++ }
2249 + }
2250 +
2251 + static int nf_tables_register_hook(struct net *net,
2252 +@@ -253,9 +259,10 @@ static int nf_tables_register_hook(struct net *net,
2253 + return nf_register_net_hook(net, &basechain->ops);
2254 + }
2255 +
2256 +-static void nf_tables_unregister_hook(struct net *net,
2257 +- const struct nft_table *table,
2258 +- struct nft_chain *chain)
2259 ++static void __nf_tables_unregister_hook(struct net *net,
2260 ++ const struct nft_table *table,
2261 ++ struct nft_chain *chain,
2262 ++ bool release_netdev)
2263 + {
2264 + struct nft_base_chain *basechain;
2265 + const struct nf_hook_ops *ops;
2266 +@@ -270,11 +277,19 @@ static void nf_tables_unregister_hook(struct net *net,
2267 + return basechain->type->ops_unregister(net, ops);
2268 +
2269 + if (nft_base_chain_netdev(table->family, basechain->ops.hooknum))
2270 +- nft_netdev_unregister_hooks(net, &basechain->hook_list);
2271 ++ nft_netdev_unregister_hooks(net, &basechain->hook_list,
2272 ++ release_netdev);
2273 + else
2274 + nf_unregister_net_hook(net, &basechain->ops);
2275 + }
2276 +
2277 ++static void nf_tables_unregister_hook(struct net *net,
2278 ++ const struct nft_table *table,
2279 ++ struct nft_chain *chain)
2280 ++{
2281 ++ return __nf_tables_unregister_hook(net, table, chain, false);
2282 ++}
2283 ++
2284 + static void nft_trans_commit_list_add_tail(struct net *net, struct nft_trans *trans)
2285 + {
2286 + struct nftables_pernet *nft_net = nft_pernet(net);
2287 +@@ -2794,27 +2809,31 @@ static struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
2288 +
2289 + err = nf_tables_expr_parse(ctx, nla, &expr_info);
2290 + if (err < 0)
2291 +- goto err1;
2292 ++ goto err_expr_parse;
2293 ++
2294 ++ err = -EOPNOTSUPP;
2295 ++ if (!(expr_info.ops->type->flags & NFT_EXPR_STATEFUL))
2296 ++ goto err_expr_stateful;
2297 +
2298 + err = -ENOMEM;
2299 + expr = kzalloc(expr_info.ops->size, GFP_KERNEL);
2300 + if (expr == NULL)
2301 +- goto err2;
2302 ++ goto err_expr_stateful;
2303 +
2304 + err = nf_tables_newexpr(ctx, &expr_info, expr);
2305 + if (err < 0)
2306 +- goto err3;
2307 ++ goto err_expr_new;
2308 +
2309 + return expr;
2310 +-err3:
2311 ++err_expr_new:
2312 + kfree(expr);
2313 +-err2:
2314 ++err_expr_stateful:
2315 + owner = expr_info.ops->type->owner;
2316 + if (expr_info.ops->type->release_ops)
2317 + expr_info.ops->type->release_ops(expr_info.ops);
2318 +
2319 + module_put(owner);
2320 +-err1:
2321 ++err_expr_parse:
2322 + return ERR_PTR(err);
2323 + }
2324 +
2325 +@@ -4163,6 +4182,9 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr,
2326 + u32 len;
2327 + int err;
2328 +
2329 ++ if (desc->field_count >= ARRAY_SIZE(desc->field_len))
2330 ++ return -E2BIG;
2331 ++
2332 + err = nla_parse_nested_deprecated(tb, NFTA_SET_FIELD_MAX, attr,
2333 + nft_concat_policy, NULL);
2334 + if (err < 0)
2335 +@@ -4172,9 +4194,8 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr,
2336 + return -EINVAL;
2337 +
2338 + len = ntohl(nla_get_be32(tb[NFTA_SET_FIELD_LEN]));
2339 +-
2340 +- if (len * BITS_PER_BYTE / 32 > NFT_REG32_COUNT)
2341 +- return -E2BIG;
2342 ++ if (!len || len > U8_MAX)
2343 ++ return -EINVAL;
2344 +
2345 + desc->field_len[desc->field_count++] = len;
2346 +
2347 +@@ -4185,7 +4206,8 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
2348 + const struct nlattr *nla)
2349 + {
2350 + struct nlattr *attr;
2351 +- int rem, err;
2352 ++ u32 num_regs = 0;
2353 ++ int rem, err, i;
2354 +
2355 + nla_for_each_nested(attr, nla, rem) {
2356 + if (nla_type(attr) != NFTA_LIST_ELEM)
2357 +@@ -4196,6 +4218,12 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
2358 + return err;
2359 + }
2360 +
2361 ++ for (i = 0; i < desc->field_count; i++)
2362 ++ num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32));
2363 ++
2364 ++ if (num_regs > NFT_REG32_COUNT)
2365 ++ return -E2BIG;
2366 ++
2367 + return 0;
2368 + }
2369 +
2370 +@@ -5334,9 +5362,6 @@ struct nft_expr *nft_set_elem_expr_alloc(const struct nft_ctx *ctx,
2371 + return expr;
2372 +
2373 + err = -EOPNOTSUPP;
2374 +- if (!(expr->ops->type->flags & NFT_EXPR_STATEFUL))
2375 +- goto err_set_elem_expr;
2376 +-
2377 + if (expr->ops->type->flags & NFT_EXPR_GC) {
2378 + if (set->flags & NFT_SET_TIMEOUT)
2379 + goto err_set_elem_expr;
2380 +@@ -7212,13 +7237,25 @@ static void nft_unregister_flowtable_hook(struct net *net,
2381 + FLOW_BLOCK_UNBIND);
2382 + }
2383 +
2384 +-static void nft_unregister_flowtable_net_hooks(struct net *net,
2385 +- struct list_head *hook_list)
2386 ++static void __nft_unregister_flowtable_net_hooks(struct net *net,
2387 ++ struct list_head *hook_list,
2388 ++ bool release_netdev)
2389 + {
2390 +- struct nft_hook *hook;
2391 ++ struct nft_hook *hook, *next;
2392 +
2393 +- list_for_each_entry(hook, hook_list, list)
2394 ++ list_for_each_entry_safe(hook, next, hook_list, list) {
2395 + nf_unregister_net_hook(net, &hook->ops);
2396 ++ if (release_netdev) {
2397 ++ list_del(&hook->list);
2398 ++ kfree_rcu(hook);
2399 ++ }
2400 ++ }
2401 ++}
2402 ++
2403 ++static void nft_unregister_flowtable_net_hooks(struct net *net,
2404 ++ struct list_head *hook_list)
2405 ++{
2406 ++ __nft_unregister_flowtable_net_hooks(net, hook_list, false);
2407 + }
2408 +
2409 + static int nft_register_flowtable_net_hooks(struct net *net,
2410 +@@ -9662,9 +9699,10 @@ static void __nft_release_hook(struct net *net, struct nft_table *table)
2411 + struct nft_chain *chain;
2412 +
2413 + list_for_each_entry(chain, &table->chains, list)
2414 +- nf_tables_unregister_hook(net, table, chain);
2415 ++ __nf_tables_unregister_hook(net, table, chain, true);
2416 + list_for_each_entry(flowtable, &table->flowtables, list)
2417 +- nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list);
2418 ++ __nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list,
2419 ++ true);
2420 + }
2421 +
2422 + static void __nft_release_hooks(struct net *net)
2423 +@@ -9803,7 +9841,11 @@ static int __net_init nf_tables_init_net(struct net *net)
2424 +
2425 + static void __net_exit nf_tables_pre_exit_net(struct net *net)
2426 + {
2427 ++ struct nftables_pernet *nft_net = nft_pernet(net);
2428 ++
2429 ++ mutex_lock(&nft_net->commit_mutex);
2430 + __nft_release_hooks(net);
2431 ++ mutex_unlock(&nft_net->commit_mutex);
2432 + }
2433 +
2434 + static void __net_exit nf_tables_exit_net(struct net *net)
2435 +diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
2436 +index a726b623963de..05a17dc1febbd 100644
2437 +--- a/net/netfilter/nft_limit.c
2438 ++++ b/net/netfilter/nft_limit.c
2439 +@@ -213,6 +213,8 @@ static int nft_limit_pkts_clone(struct nft_expr *dst, const struct nft_expr *src
2440 + struct nft_limit_priv_pkts *priv_dst = nft_expr_priv(dst);
2441 + struct nft_limit_priv_pkts *priv_src = nft_expr_priv(src);
2442 +
2443 ++ priv_dst->cost = priv_src->cost;
2444 ++
2445 + return nft_limit_clone(&priv_dst->limit, &priv_src->limit);
2446 + }
2447 +
2448 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2449 +index e38acdbe1a3b5..53d1586b71ec6 100644
2450 +--- a/sound/pci/hda/patch_realtek.c
2451 ++++ b/sound/pci/hda/patch_realtek.c
2452 +@@ -6773,6 +6773,41 @@ static void alc256_fixup_mic_no_presence_and_resume(struct hda_codec *codec,
2453 + }
2454 + }
2455 +
2456 ++static void alc_fixup_dell4_mic_no_presence_quiet(struct hda_codec *codec,
2457 ++ const struct hda_fixup *fix,
2458 ++ int action)
2459 ++{
2460 ++ struct alc_spec *spec = codec->spec;
2461 ++ struct hda_input_mux *imux = &spec->gen.input_mux;
2462 ++ int i;
2463 ++
2464 ++ alc269_fixup_limit_int_mic_boost(codec, fix, action);
2465 ++
2466 ++ switch (action) {
2467 ++ case HDA_FIXUP_ACT_PRE_PROBE:
2468 ++ /**
2469 ++ * Set the vref of pin 0x19 (Headset Mic) and pin 0x1b (Headphone Mic)
2470 ++ * to Hi-Z to avoid pop noises at startup and when plugging and
2471 ++ * unplugging headphones.
2472 ++ */
2473 ++ snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
2474 ++ snd_hda_codec_set_pin_target(codec, 0x1b, PIN_VREFHIZ);
2475 ++ break;
2476 ++ case HDA_FIXUP_ACT_PROBE:
2477 ++ /**
2478 ++ * Make the internal mic (0x12) the default input source to
2479 ++ * prevent pop noises on cold boot.
2480 ++ */
2481 ++ for (i = 0; i < imux->num_items; i++) {
2482 ++ if (spec->gen.imux_pins[i] == 0x12) {
2483 ++ spec->gen.cur_mux[0] = i;
2484 ++ break;
2485 ++ }
2486 ++ }
2487 ++ break;
2488 ++ }
2489 ++}
2490 ++
2491 + enum {
2492 + ALC269_FIXUP_GPIO2,
2493 + ALC269_FIXUP_SONY_VAIO,
2494 +@@ -6814,6 +6849,7 @@ enum {
2495 + ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
2496 + ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
2497 + ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
2498 ++ ALC269_FIXUP_DELL4_MIC_NO_PRESENCE_QUIET,
2499 + ALC269_FIXUP_HEADSET_MODE,
2500 + ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
2501 + ALC269_FIXUP_ASPIRE_HEADSET_MIC,
2502 +@@ -7000,6 +7036,7 @@ enum {
2503 + ALC287_FIXUP_LEGION_16ACHG6,
2504 + ALC287_FIXUP_CS35L41_I2C_2,
2505 + ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED,
2506 ++ ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE,
2507 + };
2508 +
2509 + static const struct hda_fixup alc269_fixups[] = {
2510 +@@ -8770,6 +8807,21 @@ static const struct hda_fixup alc269_fixups[] = {
2511 + .chained = true,
2512 + .chain_id = ALC285_FIXUP_HP_MUTE_LED,
2513 + },
2514 ++ [ALC269_FIXUP_DELL4_MIC_NO_PRESENCE_QUIET] = {
2515 ++ .type = HDA_FIXUP_FUNC,
2516 ++ .v.func = alc_fixup_dell4_mic_no_presence_quiet,
2517 ++ .chained = true,
2518 ++ .chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
2519 ++ },
2520 ++ [ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE] = {
2521 ++ .type = HDA_FIXUP_PINS,
2522 ++ .v.pins = (const struct hda_pintbl[]) {
2523 ++ { 0x19, 0x02a1112c }, /* use as headset mic, without its own jack detect */
2524 ++ { }
2525 ++ },
2526 ++ .chained = true,
2527 ++ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
2528 ++ },
2529 + };
2530 +
2531 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2532 +@@ -8860,6 +8912,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2533 + SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
2534 + SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
2535 + SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
2536 ++ SND_PCI_QUIRK(0x1028, 0x0a38, "Dell Latitude 7520", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE_QUIET),
2537 + SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC),
2538 + SND_PCI_QUIRK(0x1028, 0x0a61, "Dell XPS 15 9510", ALC289_FIXUP_DUAL_SPK),
2539 + SND_PCI_QUIRK(0x1028, 0x0a62, "Dell Precision 5560", ALC289_FIXUP_DUAL_SPK),
2540 +@@ -9249,6 +9302,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2541 + SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
2542 + SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
2543 + SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
2544 ++ SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
2545 +
2546 + #if 0
2547 + /* Below is a quirk table taken from the old code.
2548 +diff --git a/sound/usb/clock.c b/sound/usb/clock.c
2549 +index 4dfe76416794f..33db334e65566 100644
2550 +--- a/sound/usb/clock.c
2551 ++++ b/sound/usb/clock.c
2552 +@@ -572,6 +572,17 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip,
2553 + /* continue processing */
2554 + }
2555 +
2556 ++ /* FIXME - TEAC devices require the immediate interface setup */
2557 ++ if (USB_ID_VENDOR(chip->usb_id) == 0x0644) {
2558 ++ bool cur_base_48k = (rate % 48000 == 0);
2559 ++ bool prev_base_48k = (prev_rate % 48000 == 0);
2560 ++ if (cur_base_48k != prev_base_48k) {
2561 ++ usb_set_interface(chip->dev, fmt->iface, fmt->altsetting);
2562 ++ if (chip->quirk_flags & QUIRK_FLAG_IFACE_DELAY)
2563 ++ msleep(50);
2564 ++ }
2565 ++ }
2566 ++
2567 + validation:
2568 + /* validate clock after rate change */
2569 + if (!uac_clock_source_is_valid(chip, fmt, clock))
2570 +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
2571 +index 6d699065e81a2..b470404a5376c 100644
2572 +--- a/sound/usb/pcm.c
2573 ++++ b/sound/usb/pcm.c
2574 +@@ -439,16 +439,21 @@ static int configure_endpoints(struct snd_usb_audio *chip,
2575 + /* stop any running stream beforehand */
2576 + if (stop_endpoints(subs, false))
2577 + sync_pending_stops(subs);
2578 ++ if (subs->sync_endpoint) {
2579 ++ err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
2580 ++ if (err < 0)
2581 ++ return err;
2582 ++ }
2583 + err = snd_usb_endpoint_configure(chip, subs->data_endpoint);
2584 + if (err < 0)
2585 + return err;
2586 + snd_usb_set_format_quirk(subs, subs->cur_audiofmt);
2587 +- }
2588 +-
2589 +- if (subs->sync_endpoint) {
2590 +- err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
2591 +- if (err < 0)
2592 +- return err;
2593 ++ } else {
2594 ++ if (subs->sync_endpoint) {
2595 ++ err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
2596 ++ if (err < 0)
2597 ++ return err;
2598 ++ }
2599 + }
2600 +
2601 + return 0;
2602 +diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
2603 +index 40a5e3eb4ef26..78eb41b621d63 100644
2604 +--- a/sound/usb/quirks-table.h
2605 ++++ b/sound/usb/quirks-table.h
2606 +@@ -2672,6 +2672,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
2607 + .altset_idx = 1,
2608 + .attributes = 0,
2609 + .endpoint = 0x82,
2610 ++ .ep_idx = 1,
2611 + .ep_attr = USB_ENDPOINT_XFER_ISOC,
2612 + .datainterval = 1,
2613 + .maxpacksize = 0x0126,
2614 +@@ -2875,6 +2876,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
2615 + .altset_idx = 1,
2616 + .attributes = 0x4,
2617 + .endpoint = 0x81,
2618 ++ .ep_idx = 1,
2619 + .ep_attr = USB_ENDPOINT_XFER_ISOC |
2620 + USB_ENDPOINT_SYNC_ASYNC,
2621 + .maxpacksize = 0x130,
2622 +@@ -3391,6 +3393,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
2623 + .altset_idx = 1,
2624 + .attributes = 0,
2625 + .endpoint = 0x03,
2626 ++ .ep_idx = 1,
2627 + .rates = SNDRV_PCM_RATE_96000,
2628 + .ep_attr = USB_ENDPOINT_XFER_ISOC |
2629 + USB_ENDPOINT_SYNC_ASYNC,
2630 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
2631 +index ab9f3da49941f..fbbe59054c3fb 100644
2632 +--- a/sound/usb/quirks.c
2633 ++++ b/sound/usb/quirks.c
2634 +@@ -1822,6 +1822,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
2635 + QUIRK_FLAG_IGNORE_CTL_ERROR),
2636 + DEVICE_FLG(0x06f8, 0xd002, /* Hercules DJ Console (Macintosh Edition) */
2637 + QUIRK_FLAG_IGNORE_CTL_ERROR),
2638 ++ DEVICE_FLG(0x0711, 0x5800, /* MCT Trigger 5 USB-to-HDMI */
2639 ++ QUIRK_FLAG_GET_SAMPLE_RATE),
2640 + DEVICE_FLG(0x074d, 0x3553, /* Outlaw RR2150 (Micronas UAC3553B) */
2641 + QUIRK_FLAG_GET_SAMPLE_RATE),
2642 + DEVICE_FLG(0x08bb, 0x2702, /* LineX FM Transmitter */
2643 +diff --git a/tools/memory-model/README b/tools/memory-model/README
2644 +index 9edd402704c4f..dab38904206a0 100644
2645 +--- a/tools/memory-model/README
2646 ++++ b/tools/memory-model/README
2647 +@@ -54,7 +54,8 @@ klitmus7 Compatibility Table
2648 + -- 4.14 7.48 --
2649 + 4.15 -- 4.19 7.49 --
2650 + 4.20 -- 5.5 7.54 --
2651 +- 5.6 -- 7.56 --
2652 ++ 5.6 -- 5.16 7.56 --
2653 ++ 5.17 -- 7.56.1 --
2654 + ============ ==========
2655 +
2656 +