
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.15 commit in: /
Date: Mon, 06 Jun 2022 11:03:00
Message-Id: 1654513335.02e95987b64a317609e54c721f38764ae7c1c73b.mpagano@gentoo
1 commit: 02e95987b64a317609e54c721f38764ae7c1c73b
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Mon Jun 6 11:02:15 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Mon Jun 6 11:02:15 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=02e95987
7
8 Linux patch 5.15.45
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1044_linux-5.15.45.patch | 2237 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2241 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index d8201ada..3eb87193 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -219,6 +219,10 @@ Patch: 1043_linux-5.15.44.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.15.44
23
24 +Patch: 1044_linux-5.15.45.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.15.45
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1044_linux-5.15.45.patch b/1044_linux-5.15.45.patch
33 new file mode 100644
34 index 00000000..7c13cc7b
35 --- /dev/null
36 +++ b/1044_linux-5.15.45.patch
37 @@ -0,0 +1,2237 @@
38 +diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
39 +index 8ad6b93f91e6d..025272139539c 100644
40 +--- a/Documentation/process/submitting-patches.rst
41 ++++ b/Documentation/process/submitting-patches.rst
42 +@@ -72,7 +72,7 @@ as you intend it to.
43 +
44 + The maintainer will thank you if you write your patch description in a
45 + form which can be easily pulled into Linux's source code management
46 +-system, ``git``, as a "commit log". See :ref:`explicit_in_reply_to`.
47 ++system, ``git``, as a "commit log". See :ref:`the_canonical_patch_format`.
48 +
49 + Solve only one problem per patch. If your description starts to get
50 + long, that's a sign that you probably need to split up your patch.
51 +diff --git a/Makefile b/Makefile
52 +index b8ce2ba174862..e58d682071a89 100644
53 +--- a/Makefile
54 ++++ b/Makefile
55 +@@ -1,7 +1,7 @@
56 + # SPDX-License-Identifier: GPL-2.0
57 + VERSION = 5
58 + PATCHLEVEL = 15
59 +-SUBLEVEL = 44
60 ++SUBLEVEL = 45
61 + EXTRAVERSION =
62 + NAME = Trick or Treat
63 +
64 +diff --git a/arch/arm/boot/dts/s5pv210-aries.dtsi b/arch/arm/boot/dts/s5pv210-aries.dtsi
65 +index 160f8cd9a68da..2f57100a011a3 100644
66 +--- a/arch/arm/boot/dts/s5pv210-aries.dtsi
67 ++++ b/arch/arm/boot/dts/s5pv210-aries.dtsi
68 +@@ -895,7 +895,7 @@
69 + device-wakeup-gpios = <&gpg3 4 GPIO_ACTIVE_HIGH>;
70 + interrupt-parent = <&gph2>;
71 + interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
72 +- interrupt-names = "host-wake";
73 ++ interrupt-names = "host-wakeup";
74 + };
75 + };
76 +
77 +diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
78 +index 0b2f684cd8ca5..a30c036577a32 100644
79 +--- a/arch/arm64/kvm/arm.c
80 ++++ b/arch/arm64/kvm/arm.c
81 +@@ -1458,7 +1458,8 @@ static int kvm_init_vector_slots(void)
82 + base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
83 + kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);
84 +
85 +- if (kvm_system_needs_idmapped_vectors() && !has_vhe()) {
86 ++ if (kvm_system_needs_idmapped_vectors() &&
87 ++ !is_protected_kvm_enabled()) {
88 + err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
89 + __BP_HARDEN_HYP_VECS_SZ, &base);
90 + if (err)
91 +diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
92 +index a7061ee3b1577..3fbe710ff8390 100644
93 +--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
94 ++++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
95 +@@ -360,13 +360,15 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
96 + static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
97 + struct kvm *kvm, unsigned long *gfn)
98 + {
99 +- struct kvmppc_uvmem_slot *p;
100 ++ struct kvmppc_uvmem_slot *p = NULL, *iter;
101 + bool ret = false;
102 + unsigned long i;
103 +
104 +- list_for_each_entry(p, &kvm->arch.uvmem_pfns, list)
105 +- if (*gfn >= p->base_pfn && *gfn < p->base_pfn + p->nr_pfns)
106 ++ list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
107 ++ if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
108 ++ p = iter;
109 + break;
110 ++ }
111 + if (!p)
112 + return ret;
113 + /*
114 +diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
115 +index 7c63a1911fae9..3c24e6124d955 100644
116 +--- a/arch/x86/kernel/cpu/sgx/encl.c
117 ++++ b/arch/x86/kernel/cpu/sgx/encl.c
118 +@@ -12,6 +12,92 @@
119 + #include "encls.h"
120 + #include "sgx.h"
121 +
122 ++#define PCMDS_PER_PAGE (PAGE_SIZE / sizeof(struct sgx_pcmd))
123 ++/*
124 ++ * 32 PCMD entries share a PCMD page. PCMD_FIRST_MASK is used to
125 ++ * determine the page index associated with the first PCMD entry
126 ++ * within a PCMD page.
127 ++ */
128 ++#define PCMD_FIRST_MASK GENMASK(4, 0)
129 ++
130 ++/**
131 ++ * reclaimer_writing_to_pcmd() - Query if any enclave page associated with
132 ++ * a PCMD page is in process of being reclaimed.
133 ++ * @encl: Enclave to which PCMD page belongs
134 ++ * @start_addr: Address of enclave page using first entry within the PCMD page
135 ++ *
136 ++ * When an enclave page is reclaimed some Paging Crypto MetaData (PCMD) is
137 ++ * stored. The PCMD data of a reclaimed enclave page contains enough
138 ++ * information for the processor to verify the page at the time
139 ++ * it is loaded back into the Enclave Page Cache (EPC).
140 ++ *
141 ++ * The backing storage to which enclave pages are reclaimed is laid out as
142 ++ * follows:
143 ++ * Encrypted enclave pages:SECS page:PCMD pages
144 ++ *
145 ++ * Each PCMD page contains the PCMD metadata of
146 ++ * PAGE_SIZE/sizeof(struct sgx_pcmd) enclave pages.
147 ++ *
148 ++ * A PCMD page can only be truncated if it is (a) empty, and (b) not in the
149 ++ * process of getting data (and thus soon being non-empty). (b) is tested with
150 ++ * a check if an enclave page sharing the PCMD page is in the process of being
151 ++ * reclaimed.
152 ++ *
153 ++ * The reclaimer sets the SGX_ENCL_PAGE_BEING_RECLAIMED flag when it
154 ++ * intends to reclaim that enclave page - it means that the PCMD page
155 ++ * associated with that enclave page is about to get some data and thus
156 ++ * even if the PCMD page is empty, it should not be truncated.
157 ++ *
158 ++ * Context: Enclave mutex (&sgx_encl->lock) must be held.
159 ++ * Return: 1 if the reclaimer is about to write to the PCMD page
160 ++ * 0 if the reclaimer has no intention to write to the PCMD page
161 ++ */
162 ++static int reclaimer_writing_to_pcmd(struct sgx_encl *encl,
163 ++ unsigned long start_addr)
164 ++{
165 ++ int reclaimed = 0;
166 ++ int i;
167 ++
168 ++ /*
169 ++ * PCMD_FIRST_MASK is based on number of PCMD entries within
170 ++ * PCMD page being 32.
171 ++ */
172 ++ BUILD_BUG_ON(PCMDS_PER_PAGE != 32);
173 ++
174 ++ for (i = 0; i < PCMDS_PER_PAGE; i++) {
175 ++ struct sgx_encl_page *entry;
176 ++ unsigned long addr;
177 ++
178 ++ addr = start_addr + i * PAGE_SIZE;
179 ++
180 ++ /*
181 ++ * Stop when reaching the SECS page - it does not
182 ++ * have a page_array entry and its reclaim is
183 ++ * started and completed with enclave mutex held so
184 ++ * it does not use the SGX_ENCL_PAGE_BEING_RECLAIMED
185 ++ * flag.
186 ++ */
187 ++ if (addr == encl->base + encl->size)
188 ++ break;
189 ++
190 ++ entry = xa_load(&encl->page_array, PFN_DOWN(addr));
191 ++ if (!entry)
192 ++ continue;
193 ++
194 ++ /*
195 ++ * VA page slot ID uses same bit as the flag so it is important
196 ++ * to ensure that the page is not already in backing store.
197 ++ */
198 ++ if (entry->epc_page &&
199 ++ (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)) {
200 ++ reclaimed = 1;
201 ++ break;
202 ++ }
203 ++ }
204 ++
205 ++ return reclaimed;
206 ++}
207 ++
208 + /*
209 + * Calculate byte offset of a PCMD struct associated with an enclave page. PCMD's
210 + * follow right after the EPC data in the backing storage. In addition to the
211 +@@ -47,6 +133,7 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
212 + unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
213 + struct sgx_encl *encl = encl_page->encl;
214 + pgoff_t page_index, page_pcmd_off;
215 ++ unsigned long pcmd_first_page;
216 + struct sgx_pageinfo pginfo;
217 + struct sgx_backing b;
218 + bool pcmd_page_empty;
219 +@@ -58,6 +145,11 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
220 + else
221 + page_index = PFN_DOWN(encl->size);
222 +
223 ++ /*
224 ++ * Address of enclave page using the first entry within the PCMD page.
225 ++ */
226 ++ pcmd_first_page = PFN_PHYS(page_index & ~PCMD_FIRST_MASK) + encl->base;
227 ++
228 + page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
229 +
230 + ret = sgx_encl_get_backing(encl, page_index, &b);
231 +@@ -84,6 +176,7 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
232 + }
233 +
234 + memset(pcmd_page + b.pcmd_offset, 0, sizeof(struct sgx_pcmd));
235 ++ set_page_dirty(b.pcmd);
236 +
237 + /*
238 + * The area for the PCMD in the page was zeroed above. Check if the
239 +@@ -94,12 +187,20 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
240 + kunmap_atomic(pcmd_page);
241 + kunmap_atomic((void *)(unsigned long)pginfo.contents);
242 +
243 +- sgx_encl_put_backing(&b, false);
244 ++ get_page(b.pcmd);
245 ++ sgx_encl_put_backing(&b);
246 +
247 + sgx_encl_truncate_backing_page(encl, page_index);
248 +
249 +- if (pcmd_page_empty)
250 ++ if (pcmd_page_empty && !reclaimer_writing_to_pcmd(encl, pcmd_first_page)) {
251 + sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off));
252 ++ pcmd_page = kmap_atomic(b.pcmd);
253 ++ if (memchr_inv(pcmd_page, 0, PAGE_SIZE))
254 ++ pr_warn("PCMD page not empty after truncate.\n");
255 ++ kunmap_atomic(pcmd_page);
256 ++ }
257 ++
258 ++ put_page(b.pcmd);
259 +
260 + return ret;
261 + }
262 +@@ -645,15 +746,9 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
263 + /**
264 + * sgx_encl_put_backing() - Unpin the backing storage
265 + * @backing: data for accessing backing storage for the page
266 +- * @do_write: mark pages dirty
267 + */
268 +-void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write)
269 ++void sgx_encl_put_backing(struct sgx_backing *backing)
270 + {
271 +- if (do_write) {
272 +- set_page_dirty(backing->pcmd);
273 +- set_page_dirty(backing->contents);
274 +- }
275 +-
276 + put_page(backing->pcmd);
277 + put_page(backing->contents);
278 + }
279 +diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
280 +index fec43ca65065b..d44e7372151f0 100644
281 +--- a/arch/x86/kernel/cpu/sgx/encl.h
282 ++++ b/arch/x86/kernel/cpu/sgx/encl.h
283 +@@ -107,7 +107,7 @@ void sgx_encl_release(struct kref *ref);
284 + int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm);
285 + int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
286 + struct sgx_backing *backing);
287 +-void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write);
288 ++void sgx_encl_put_backing(struct sgx_backing *backing);
289 + int sgx_encl_test_and_clear_young(struct mm_struct *mm,
290 + struct sgx_encl_page *page);
291 +
292 +diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
293 +index 8471a8b9b48e8..00e09a2b933ac 100644
294 +--- a/arch/x86/kernel/cpu/sgx/main.c
295 ++++ b/arch/x86/kernel/cpu/sgx/main.c
296 +@@ -170,6 +170,8 @@ static int __sgx_encl_ewb(struct sgx_epc_page *epc_page, void *va_slot,
297 + backing->pcmd_offset;
298 +
299 + ret = __ewb(&pginfo, sgx_get_epc_virt_addr(epc_page), va_slot);
300 ++ set_page_dirty(backing->pcmd);
301 ++ set_page_dirty(backing->contents);
302 +
303 + kunmap_atomic((void *)(unsigned long)(pginfo.metadata -
304 + backing->pcmd_offset));
305 +@@ -287,6 +289,7 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
306 + sgx_encl_ewb(epc_page, backing);
307 + encl_page->epc_page = NULL;
308 + encl->secs_child_cnt--;
309 ++ sgx_encl_put_backing(backing);
310 +
311 + if (!encl->secs_child_cnt && test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) {
312 + ret = sgx_encl_get_backing(encl, PFN_DOWN(encl->size),
313 +@@ -299,7 +302,7 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
314 + sgx_encl_free_epc_page(encl->secs.epc_page);
315 + encl->secs.epc_page = NULL;
316 +
317 +- sgx_encl_put_backing(&secs_backing, true);
318 ++ sgx_encl_put_backing(&secs_backing);
319 + }
320 +
321 + out:
322 +@@ -360,11 +363,14 @@ static void sgx_reclaim_pages(void)
323 + goto skip;
324 +
325 + page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);
326 ++
327 ++ mutex_lock(&encl_page->encl->lock);
328 + ret = sgx_encl_get_backing(encl_page->encl, page_index, &backing[i]);
329 +- if (ret)
330 ++ if (ret) {
331 ++ mutex_unlock(&encl_page->encl->lock);
332 + goto skip;
333 ++ }
334 +
335 +- mutex_lock(&encl_page->encl->lock);
336 + encl_page->desc |= SGX_ENCL_PAGE_BEING_RECLAIMED;
337 + mutex_unlock(&encl_page->encl->lock);
338 + continue;
339 +@@ -392,7 +398,6 @@ skip:
340 +
341 + encl_page = epc_page->owner;
342 + sgx_reclaimer_write(epc_page, &backing[i]);
343 +- sgx_encl_put_backing(&backing[i], true);
344 +
345 + kref_put(&encl_page->encl->refcount, sgx_encl_release);
346 + epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
347 +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
348 +index d36b58e705b6c..9e3af56747e8f 100644
349 +--- a/arch/x86/kernel/kvm.c
350 ++++ b/arch/x86/kernel/kvm.c
351 +@@ -188,7 +188,7 @@ void kvm_async_pf_task_wake(u32 token)
352 + {
353 + u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
354 + struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
355 +- struct kvm_task_sleep_node *n;
356 ++ struct kvm_task_sleep_node *n, *dummy = NULL;
357 +
358 + if (token == ~0) {
359 + apf_task_wake_all();
360 +@@ -200,28 +200,41 @@ again:
361 + n = _find_apf_task(b, token);
362 + if (!n) {
363 + /*
364 +- * async PF was not yet handled.
365 +- * Add dummy entry for the token.
366 ++ * Async #PF not yet handled, add a dummy entry for the token.
367 ++ * Allocating the token must be down outside of the raw lock
368 ++ * as the allocator is preemptible on PREEMPT_RT kernels.
369 + */
370 +- n = kzalloc(sizeof(*n), GFP_ATOMIC);
371 +- if (!n) {
372 ++ if (!dummy) {
373 ++ raw_spin_unlock(&b->lock);
374 ++ dummy = kzalloc(sizeof(*dummy), GFP_ATOMIC);
375 ++
376 + /*
377 +- * Allocation failed! Busy wait while other cpu
378 +- * handles async PF.
379 ++ * Continue looping on allocation failure, eventually
380 ++ * the async #PF will be handled and allocating a new
381 ++ * node will be unnecessary.
382 ++ */
383 ++ if (!dummy)
384 ++ cpu_relax();
385 ++
386 ++ /*
387 ++ * Recheck for async #PF completion before enqueueing
388 ++ * the dummy token to avoid duplicate list entries.
389 + */
390 +- raw_spin_unlock(&b->lock);
391 +- cpu_relax();
392 + goto again;
393 + }
394 +- n->token = token;
395 +- n->cpu = smp_processor_id();
396 +- init_swait_queue_head(&n->wq);
397 +- hlist_add_head(&n->link, &b->list);
398 ++ dummy->token = token;
399 ++ dummy->cpu = smp_processor_id();
400 ++ init_swait_queue_head(&dummy->wq);
401 ++ hlist_add_head(&dummy->link, &b->list);
402 ++ dummy = NULL;
403 + } else {
404 + apf_task_wake_one(n);
405 + }
406 + raw_spin_unlock(&b->lock);
407 +- return;
408 ++
409 ++ /* A dummy token might be allocated and ultimately not used. */
410 ++ if (dummy)
411 ++ kfree(dummy);
412 + }
413 + EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
414 +
415 +diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
416 +index 556e7a3f35627..993daa6fb1287 100644
417 +--- a/arch/x86/kvm/svm/nested.c
418 ++++ b/arch/x86/kvm/svm/nested.c
419 +@@ -750,9 +750,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
420 + struct kvm_host_map map;
421 + int rc;
422 +
423 +- /* Triple faults in L2 should never escape. */
424 +- WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));
425 +-
426 + rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
427 + if (rc) {
428 + if (rc == -EINVAL)
429 +diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
430 +index c8c3212250618..4a4dc105552e3 100644
431 +--- a/arch/x86/kvm/svm/sev.c
432 ++++ b/arch/x86/kvm/svm/sev.c
433 +@@ -676,7 +676,7 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
434 + if (params.len > SEV_FW_BLOB_MAX_SIZE)
435 + return -EINVAL;
436 +
437 +- blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
438 ++ blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);
439 + if (!blob)
440 + return -ENOMEM;
441 +
442 +@@ -796,7 +796,7 @@ static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
443 + if (!IS_ALIGNED(dst_paddr, 16) ||
444 + !IS_ALIGNED(paddr, 16) ||
445 + !IS_ALIGNED(size, 16)) {
446 +- tpage = (void *)alloc_page(GFP_KERNEL);
447 ++ tpage = (void *)alloc_page(GFP_KERNEL | __GFP_ZERO);
448 + if (!tpage)
449 + return -ENOMEM;
450 +
451 +@@ -1082,7 +1082,7 @@ static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
452 + if (params.len > SEV_FW_BLOB_MAX_SIZE)
453 + return -EINVAL;
454 +
455 +- blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
456 ++ blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);
457 + if (!blob)
458 + return -ENOMEM;
459 +
460 +@@ -1164,7 +1164,7 @@ static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
461 + return -EINVAL;
462 +
463 + /* allocate the memory to hold the session data blob */
464 +- session_data = kmalloc(params.session_len, GFP_KERNEL_ACCOUNT);
465 ++ session_data = kzalloc(params.session_len, GFP_KERNEL_ACCOUNT);
466 + if (!session_data)
467 + return -ENOMEM;
468 +
469 +@@ -1288,11 +1288,11 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
470 +
471 + /* allocate memory for header and transport buffer */
472 + ret = -ENOMEM;
473 +- hdr = kmalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
474 ++ hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
475 + if (!hdr)
476 + goto e_unpin;
477 +
478 +- trans_data = kmalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
479 ++ trans_data = kzalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
480 + if (!trans_data)
481 + goto e_free_hdr;
482 +
483 +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
484 +index 1546a10ecb564..5eae69c8123b2 100644
485 +--- a/arch/x86/kvm/vmx/nested.c
486 ++++ b/arch/x86/kvm/vmx/nested.c
487 +@@ -4501,9 +4501,6 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
488 + /* trying to cancel vmlaunch/vmresume is a bug */
489 + WARN_ON_ONCE(vmx->nested.nested_run_pending);
490 +
491 +- /* Similarly, triple faults in L2 should never escape. */
492 +- WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));
493 +-
494 + if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
495 + /*
496 + * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
497 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
498 +index 75da9c0d5ae37..23905ba3058ae 100644
499 +--- a/arch/x86/kvm/x86.c
500 ++++ b/arch/x86/kvm/x86.c
501 +@@ -7846,7 +7846,7 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
502 + }
503 + EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
504 +
505 +-static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
506 ++static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, int *r)
507 + {
508 + if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
509 + (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
510 +@@ -7915,25 +7915,23 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
511 + }
512 +
513 + /*
514 +- * Decode to be emulated instruction. Return EMULATION_OK if success.
515 ++ * Decode an instruction for emulation. The caller is responsible for handling
516 ++ * code breakpoints. Note, manually detecting code breakpoints is unnecessary
517 ++ * (and wrong) when emulating on an intercepted fault-like exception[*], as
518 ++ * code breakpoints have higher priority and thus have already been done by
519 ++ * hardware.
520 ++ *
521 ++ * [*] Except #MC, which is higher priority, but KVM should never emulate in
522 ++ * response to a machine check.
523 + */
524 + int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
525 + void *insn, int insn_len)
526 + {
527 +- int r = EMULATION_OK;
528 + struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
529 ++ int r;
530 +
531 + init_emulate_ctxt(vcpu);
532 +
533 +- /*
534 +- * We will reenter on the same instruction since we do not set
535 +- * complete_userspace_io. This does not handle watchpoints yet,
536 +- * those would be handled in the emulate_ops.
537 +- */
538 +- if (!(emulation_type & EMULTYPE_SKIP) &&
539 +- kvm_vcpu_check_breakpoint(vcpu, &r))
540 +- return r;
541 +-
542 + r = x86_decode_insn(ctxt, insn, insn_len, emulation_type);
543 +
544 + trace_kvm_emulate_insn_start(vcpu);
545 +@@ -7966,6 +7964,15 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
546 + if (!(emulation_type & EMULTYPE_NO_DECODE)) {
547 + kvm_clear_exception_queue(vcpu);
548 +
549 ++ /*
550 ++ * Return immediately if RIP hits a code breakpoint, such #DBs
551 ++ * are fault-like and are higher priority than any faults on
552 ++ * the code fetch itself.
553 ++ */
554 ++ if (!(emulation_type & EMULTYPE_SKIP) &&
555 ++ kvm_vcpu_check_code_breakpoint(vcpu, &r))
556 ++ return r;
557 ++
558 + r = x86_decode_emulated_instruction(vcpu, emulation_type,
559 + insn, insn_len);
560 + if (r != EMULATION_OK) {
561 +diff --git a/crypto/Kconfig b/crypto/Kconfig
562 +index 55718de561375..a346b6f74bb39 100644
563 +--- a/crypto/Kconfig
564 ++++ b/crypto/Kconfig
565 +@@ -1924,5 +1924,3 @@ source "crypto/asymmetric_keys/Kconfig"
566 + source "certs/Kconfig"
567 +
568 + endif # if CRYPTO
569 +-
570 +-source "lib/crypto/Kconfig"
571 +diff --git a/crypto/drbg.c b/crypto/drbg.c
572 +index 03c9ef768c227..761104e93d44a 100644
573 +--- a/crypto/drbg.c
574 ++++ b/crypto/drbg.c
575 +@@ -1036,17 +1036,38 @@ static const struct drbg_state_ops drbg_hash_ops = {
576 + ******************************************************************/
577 +
578 + static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed,
579 +- int reseed)
580 ++ int reseed, enum drbg_seed_state new_seed_state)
581 + {
582 + int ret = drbg->d_ops->update(drbg, seed, reseed);
583 +
584 + if (ret)
585 + return ret;
586 +
587 +- drbg->seeded = true;
588 ++ drbg->seeded = new_seed_state;
589 + /* 10.1.1.2 / 10.1.1.3 step 5 */
590 + drbg->reseed_ctr = 1;
591 +
592 ++ switch (drbg->seeded) {
593 ++ case DRBG_SEED_STATE_UNSEEDED:
594 ++ /* Impossible, but handle it to silence compiler warnings. */
595 ++ fallthrough;
596 ++ case DRBG_SEED_STATE_PARTIAL:
597 ++ /*
598 ++ * Require frequent reseeds until the seed source is
599 ++ * fully initialized.
600 ++ */
601 ++ drbg->reseed_threshold = 50;
602 ++ break;
603 ++
604 ++ case DRBG_SEED_STATE_FULL:
605 ++ /*
606 ++ * Seed source has become fully initialized, frequent
607 ++ * reseeds no longer required.
608 ++ */
609 ++ drbg->reseed_threshold = drbg_max_requests(drbg);
610 ++ break;
611 ++ }
612 ++
613 + return ret;
614 + }
615 +
616 +@@ -1066,12 +1087,10 @@ static inline int drbg_get_random_bytes(struct drbg_state *drbg,
617 + return 0;
618 + }
619 +
620 +-static void drbg_async_seed(struct work_struct *work)
621 ++static int drbg_seed_from_random(struct drbg_state *drbg)
622 + {
623 + struct drbg_string data;
624 + LIST_HEAD(seedlist);
625 +- struct drbg_state *drbg = container_of(work, struct drbg_state,
626 +- seed_work);
627 + unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
628 + unsigned char entropy[32];
629 + int ret;
630 +@@ -1082,26 +1101,15 @@ static void drbg_async_seed(struct work_struct *work)
631 + drbg_string_fill(&data, entropy, entropylen);
632 + list_add_tail(&data.list, &seedlist);
633 +
634 +- mutex_lock(&drbg->drbg_mutex);
635 +-
636 + ret = drbg_get_random_bytes(drbg, entropy, entropylen);
637 + if (ret)
638 +- goto unlock;
639 +-
640 +- /* Set seeded to false so that if __drbg_seed fails the
641 +- * next generate call will trigger a reseed.
642 +- */
643 +- drbg->seeded = false;
644 +-
645 +- __drbg_seed(drbg, &seedlist, true);
646 +-
647 +- if (drbg->seeded)
648 +- drbg->reseed_threshold = drbg_max_requests(drbg);
649 ++ goto out;
650 +
651 +-unlock:
652 +- mutex_unlock(&drbg->drbg_mutex);
653 ++ ret = __drbg_seed(drbg, &seedlist, true, DRBG_SEED_STATE_FULL);
654 +
655 ++out:
656 + memzero_explicit(entropy, entropylen);
657 ++ return ret;
658 + }
659 +
660 + /*
661 +@@ -1123,6 +1131,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
662 + unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
663 + struct drbg_string data1;
664 + LIST_HEAD(seedlist);
665 ++ enum drbg_seed_state new_seed_state = DRBG_SEED_STATE_FULL;
666 +
667 + /* 9.1 / 9.2 / 9.3.1 step 3 */
668 + if (pers && pers->len > (drbg_max_addtl(drbg))) {
669 +@@ -1150,6 +1159,9 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
670 + BUG_ON((entropylen * 2) > sizeof(entropy));
671 +
672 + /* Get seed from in-kernel /dev/urandom */
673 ++ if (!rng_is_initialized())
674 ++ new_seed_state = DRBG_SEED_STATE_PARTIAL;
675 ++
676 + ret = drbg_get_random_bytes(drbg, entropy, entropylen);
677 + if (ret)
678 + goto out;
679 +@@ -1206,7 +1218,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
680 + memset(drbg->C, 0, drbg_statelen(drbg));
681 + }
682 +
683 +- ret = __drbg_seed(drbg, &seedlist, reseed);
684 ++ ret = __drbg_seed(drbg, &seedlist, reseed, new_seed_state);
685 +
686 + out:
687 + memzero_explicit(entropy, entropylen * 2);
688 +@@ -1386,19 +1398,25 @@ static int drbg_generate(struct drbg_state *drbg,
689 + * here. The spec is a bit convoluted here, we make it simpler.
690 + */
691 + if (drbg->reseed_threshold < drbg->reseed_ctr)
692 +- drbg->seeded = false;
693 ++ drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
694 +
695 +- if (drbg->pr || !drbg->seeded) {
696 ++ if (drbg->pr || drbg->seeded == DRBG_SEED_STATE_UNSEEDED) {
697 + pr_devel("DRBG: reseeding before generation (prediction "
698 + "resistance: %s, state %s)\n",
699 + drbg->pr ? "true" : "false",
700 +- drbg->seeded ? "seeded" : "unseeded");
701 ++ (drbg->seeded == DRBG_SEED_STATE_FULL ?
702 ++ "seeded" : "unseeded"));
703 + /* 9.3.1 steps 7.1 through 7.3 */
704 + len = drbg_seed(drbg, addtl, true);
705 + if (len)
706 + goto err;
707 + /* 9.3.1 step 7.4 */
708 + addtl = NULL;
709 ++ } else if (rng_is_initialized() &&
710 ++ drbg->seeded == DRBG_SEED_STATE_PARTIAL) {
711 ++ len = drbg_seed_from_random(drbg);
712 ++ if (len)
713 ++ goto err;
714 + }
715 +
716 + if (addtl && 0 < addtl->len)
717 +@@ -1491,50 +1509,15 @@ static int drbg_generate_long(struct drbg_state *drbg,
718 + return 0;
719 + }
720 +
721 +-static int drbg_schedule_async_seed(struct notifier_block *nb, unsigned long action, void *data)
722 +-{
723 +- struct drbg_state *drbg = container_of(nb, struct drbg_state,
724 +- random_ready);
725 +-
726 +- schedule_work(&drbg->seed_work);
727 +- return 0;
728 +-}
729 +-
730 + static int drbg_prepare_hrng(struct drbg_state *drbg)
731 + {
732 +- int err;
733 +-
734 + /* We do not need an HRNG in test mode. */
735 + if (list_empty(&drbg->test_data.list))
736 + return 0;
737 +
738 + drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0);
739 +
740 +- INIT_WORK(&drbg->seed_work, drbg_async_seed);
741 +-
742 +- drbg->random_ready.notifier_call = drbg_schedule_async_seed;
743 +- err = register_random_ready_notifier(&drbg->random_ready);
744 +-
745 +- switch (err) {
746 +- case 0:
747 +- break;
748 +-
749 +- case -EALREADY:
750 +- err = 0;
751 +- fallthrough;
752 +-
753 +- default:
754 +- drbg->random_ready.notifier_call = NULL;
755 +- return err;
756 +- }
757 +-
758 +- /*
759 +- * Require frequent reseeds until the seed source is fully
760 +- * initialized.
761 +- */
762 +- drbg->reseed_threshold = 50;
763 +-
764 +- return err;
765 ++ return 0;
766 + }
767 +
768 + /*
769 +@@ -1577,7 +1560,7 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
770 + if (!drbg->core) {
771 + drbg->core = &drbg_cores[coreref];
772 + drbg->pr = pr;
773 +- drbg->seeded = false;
774 ++ drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
775 + drbg->reseed_threshold = drbg_max_requests(drbg);
776 +
777 + ret = drbg_alloc_state(drbg);
778 +@@ -1628,11 +1611,6 @@ free_everything:
779 + */
780 + static int drbg_uninstantiate(struct drbg_state *drbg)
781 + {
782 +- if (drbg->random_ready.notifier_call) {
783 +- unregister_random_ready_notifier(&drbg->random_ready);
784 +- cancel_work_sync(&drbg->seed_work);
785 +- }
786 +-
787 + if (!IS_ERR_OR_NULL(drbg->jent))
788 + crypto_free_rng(drbg->jent);
789 + drbg->jent = NULL;
790 +diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c
791 +index 6a3fd09057d0c..f7ed430206720 100644
792 +--- a/crypto/ecrdsa.c
793 ++++ b/crypto/ecrdsa.c
794 +@@ -113,15 +113,15 @@ static int ecrdsa_verify(struct akcipher_request *req)
795 +
796 + /* Step 1: verify that 0 < r < q, 0 < s < q */
797 + if (vli_is_zero(r, ndigits) ||
798 +- vli_cmp(r, ctx->curve->n, ndigits) == 1 ||
799 ++ vli_cmp(r, ctx->curve->n, ndigits) >= 0 ||
800 + vli_is_zero(s, ndigits) ||
801 +- vli_cmp(s, ctx->curve->n, ndigits) == 1)
802 ++ vli_cmp(s, ctx->curve->n, ndigits) >= 0)
803 + return -EKEYREJECTED;
804 +
805 + /* Step 2: calculate hash (h) of the message (passed as input) */
806 + /* Step 3: calculate e = h \mod q */
807 + vli_from_le64(e, digest, ndigits);
808 +- if (vli_cmp(e, ctx->curve->n, ndigits) == 1)
809 ++ if (vli_cmp(e, ctx->curve->n, ndigits) >= 0)
810 + vli_sub(e, e, ctx->curve->n, ndigits);
811 + if (vli_is_zero(e, ndigits))
812 + e[0] = 1;
813 +@@ -137,7 +137,7 @@ static int ecrdsa_verify(struct akcipher_request *req)
814 + /* Step 6: calculate point C = z_1P + z_2Q, and R = x_c \mod q */
815 + ecc_point_mult_shamir(&cc, z1, &ctx->curve->g, z2, &ctx->pub_key,
816 + ctx->curve);
817 +- if (vli_cmp(cc.x, ctx->curve->n, ndigits) == 1)
818 ++ if (vli_cmp(cc.x, ctx->curve->n, ndigits) >= 0)
819 + vli_sub(cc.x, cc.x, ctx->curve->n, ndigits);
820 +
821 + /* Step 7: if R == r signature is valid */
822 +diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
823 +index 8eb7fddfb9300..ed91af4319b5b 100644
824 +--- a/drivers/bluetooth/hci_qca.c
825 ++++ b/drivers/bluetooth/hci_qca.c
826 +@@ -696,9 +696,9 @@ static int qca_close(struct hci_uart *hu)
827 + skb_queue_purge(&qca->tx_wait_q);
828 + skb_queue_purge(&qca->txq);
829 + skb_queue_purge(&qca->rx_memdump_q);
830 +- del_timer(&qca->tx_idle_timer);
831 +- del_timer(&qca->wake_retrans_timer);
832 + destroy_workqueue(qca->workqueue);
833 ++ del_timer_sync(&qca->tx_idle_timer);
834 ++ del_timer_sync(&qca->wake_retrans_timer);
835 + qca->hu = NULL;
836 +
837 + kfree_skb(qca->rx_skb);
838 +diff --git a/drivers/char/random.c b/drivers/char/random.c
839 +index ca17a658c2147..e2f1fce8dc977 100644
840 +--- a/drivers/char/random.c
841 ++++ b/drivers/char/random.c
842 +@@ -163,7 +163,6 @@ int __cold register_random_ready_notifier(struct notifier_block *nb)
843 + spin_unlock_irqrestore(&random_ready_chain_lock, flags);
844 + return ret;
845 + }
846 +-EXPORT_SYMBOL(register_random_ready_notifier);
847 +
848 + /*
849 + * Delete a previously registered readiness callback function.
850 +@@ -178,7 +177,6 @@ int __cold unregister_random_ready_notifier(struct notifier_block *nb)
851 + spin_unlock_irqrestore(&random_ready_chain_lock, flags);
852 + return ret;
853 + }
854 +-EXPORT_SYMBOL(unregister_random_ready_notifier);
855 +
856 + static void __cold process_random_ready_list(void)
857 + {
858 +diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
859 +index a25815a6f6253..de92065394be9 100644
860 +--- a/drivers/char/tpm/tpm2-cmd.c
861 ++++ b/drivers/char/tpm/tpm2-cmd.c
862 +@@ -400,7 +400,16 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
863 + if (!rc) {
864 + out = (struct tpm2_get_cap_out *)
865 + &buf.data[TPM_HEADER_SIZE];
866 +- *value = be32_to_cpu(out->value);
867 ++ /*
868 ++ * To prevent failing boot up of some systems, Infineon TPM2.0
869 ++ * returns SUCCESS on TPM2_Startup in field upgrade mode. Also
870 ++ * the TPM2_Getcapability command returns a zero length list
871 ++ * in field upgrade mode.
872 ++ */
873 ++ if (be32_to_cpu(out->property_cnt) > 0)
874 ++ *value = be32_to_cpu(out->value);
875 ++ else
876 ++ rc = -ENODATA;
877 + }
878 + tpm_buf_destroy(&buf);
879 + return rc;
880 +diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
881 +index 3af4c07a9342f..d3989b257f422 100644
882 +--- a/drivers/char/tpm/tpm_ibmvtpm.c
883 ++++ b/drivers/char/tpm/tpm_ibmvtpm.c
884 +@@ -681,6 +681,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
885 + if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
886 + ibmvtpm->rtce_buf != NULL,
887 + HZ)) {
888 ++ rc = -ENODEV;
889 + dev_err(dev, "CRQ response timed out\n");
890 + goto init_irq_cleanup;
891 + }
892 +diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
893 +index ca0361b2dbb07..f87aa2169e5f5 100644
894 +--- a/drivers/crypto/caam/ctrl.c
895 ++++ b/drivers/crypto/caam/ctrl.c
896 +@@ -609,6 +609,13 @@ static bool check_version(struct fsl_mc_version *mc_version, u32 major,
897 + }
898 + #endif
899 +
900 ++static bool needs_entropy_delay_adjustment(void)
901 ++{
902 ++ if (of_machine_is_compatible("fsl,imx6sx"))
903 ++ return true;
904 ++ return false;
905 ++}
906 ++
907 + /* Probe routine for CAAM top (controller) level */
908 + static int caam_probe(struct platform_device *pdev)
909 + {
910 +@@ -855,6 +862,8 @@ static int caam_probe(struct platform_device *pdev)
911 + * Also, if a handle was instantiated, do not change
912 + * the TRNG parameters.
913 + */
914 ++ if (needs_entropy_delay_adjustment())
915 ++ ent_delay = 12000;
916 + if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
917 + dev_info(dev,
918 + "Entropy delay = %u\n",
919 +@@ -871,6 +880,15 @@ static int caam_probe(struct platform_device *pdev)
920 + */
921 + ret = instantiate_rng(dev, inst_handles,
922 + gen_sk);
923 ++ /*
924 ++ * Entropy delay is determined via TRNG characterization.
925 ++ * TRNG characterization is run across different voltages
926 ++ * and temperatures.
927 ++ * If worst case value for ent_dly is identified,
928 ++ * the loop can be skipped for that platform.
929 ++ */
930 ++ if (needs_entropy_delay_adjustment())
931 ++ break;
932 + if (ret == -EAGAIN)
933 + /*
934 + * if here, the loop will rerun,
935 +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
936 +index 57c1dda76b94a..1a27e4833adfa 100644
937 +--- a/drivers/gpu/drm/i915/intel_pm.c
938 ++++ b/drivers/gpu/drm/i915/intel_pm.c
939 +@@ -2863,7 +2863,7 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
940 + }
941 +
942 + static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
943 +- u16 wm[8])
944 ++ u16 wm[])
945 + {
946 + struct intel_uncore *uncore = &dev_priv->uncore;
947 +
948 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
949 +index 645a5f566d233..42b5b050b72d1 100644
950 +--- a/drivers/hid/hid-ids.h
951 ++++ b/drivers/hid/hid-ids.h
952 +@@ -753,6 +753,7 @@
953 + #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
954 + #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
955 + #define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
956 ++#define USB_DEVICE_ID_LENOVO_X12_TAB 0x60fe
957 + #define USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E 0x600e
958 + #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
959 + #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019
960 +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
961 +index e1afddb7b33d8..f382444dc2dba 100644
962 +--- a/drivers/hid/hid-multitouch.c
963 ++++ b/drivers/hid/hid-multitouch.c
964 +@@ -2032,6 +2032,12 @@ static const struct hid_device_id mt_devices[] = {
965 + USB_VENDOR_ID_LENOVO,
966 + USB_DEVICE_ID_LENOVO_X1_TAB3) },
967 +
968 ++ /* Lenovo X12 TAB Gen 1 */
969 ++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
970 ++ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
971 ++ USB_VENDOR_ID_LENOVO,
972 ++ USB_DEVICE_ID_LENOVO_X12_TAB) },
973 ++
974 + /* MosArt panels */
975 + { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
976 + MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
977 +@@ -2176,6 +2182,9 @@ static const struct hid_device_id mt_devices[] = {
978 + { .driver_data = MT_CLS_GOOGLE,
979 + HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_GOOGLE,
980 + USB_DEVICE_ID_GOOGLE_TOUCH_ROSE) },
981 ++ { .driver_data = MT_CLS_GOOGLE,
982 ++ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_GOOGLE,
983 ++ USB_DEVICE_ID_GOOGLE_WHISKERS) },
984 +
985 + /* Generic MT device */
986 + { HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
987 +diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
988 +index a6187cbec2c94..483428c5e30b9 100644
989 +--- a/drivers/i2c/busses/i2c-ismt.c
990 ++++ b/drivers/i2c/busses/i2c-ismt.c
991 +@@ -82,6 +82,7 @@
992 +
993 + #define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */
994 + #define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */
995 ++#define ISMT_LOG_ENTRIES 3 /* number of interrupt cause log entries */
996 +
997 + /* Hardware Descriptor Constants - Control Field */
998 + #define ISMT_DESC_CWRL 0x01 /* Command/Write Length */
999 +@@ -175,6 +176,8 @@ struct ismt_priv {
1000 + u8 head; /* ring buffer head pointer */
1001 + struct completion cmp; /* interrupt completion */
1002 + u8 buffer[I2C_SMBUS_BLOCK_MAX + 16]; /* temp R/W data buffer */
1003 ++ dma_addr_t log_dma;
1004 ++ u32 *log;
1005 + };
1006 +
1007 + static const struct pci_device_id ismt_ids[] = {
1008 +@@ -411,6 +414,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
1009 + memset(desc, 0, sizeof(struct ismt_desc));
1010 + desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
1011 +
1012 ++ /* Always clear the log entries */
1013 ++ memset(priv->log, 0, ISMT_LOG_ENTRIES * sizeof(u32));
1014 ++
1015 + /* Initialize common control bits */
1016 + if (likely(pci_dev_msi_enabled(priv->pci_dev)))
1017 + desc->control = ISMT_DESC_INT | ISMT_DESC_FAIR;
1018 +@@ -522,6 +528,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
1019 +
1020 + case I2C_SMBUS_BLOCK_PROC_CALL:
1021 + dev_dbg(dev, "I2C_SMBUS_BLOCK_PROC_CALL\n");
1022 ++ if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
1023 ++ return -EINVAL;
1024 ++
1025 + dma_size = I2C_SMBUS_BLOCK_MAX;
1026 + desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, 1);
1027 + desc->wr_len_cmd = data->block[0] + 1;
1028 +@@ -708,6 +717,8 @@ static void ismt_hw_init(struct ismt_priv *priv)
1029 + /* initialize the Master Descriptor Base Address (MDBA) */
1030 + writeq(priv->io_rng_dma, priv->smba + ISMT_MSTR_MDBA);
1031 +
1032 ++ writeq(priv->log_dma, priv->smba + ISMT_GR_SMTICL);
1033 ++
1034 + /* initialize the Master Control Register (MCTRL) */
1035 + writel(ISMT_MCTRL_MEIE, priv->smba + ISMT_MSTR_MCTRL);
1036 +
1037 +@@ -795,6 +806,12 @@ static int ismt_dev_init(struct ismt_priv *priv)
1038 + priv->head = 0;
1039 + init_completion(&priv->cmp);
1040 +
1041 ++ priv->log = dmam_alloc_coherent(&priv->pci_dev->dev,
1042 ++ ISMT_LOG_ENTRIES * sizeof(u32),
1043 ++ &priv->log_dma, GFP_KERNEL);
1044 ++ if (!priv->log)
1045 ++ return -ENOMEM;
1046 ++
1047 + return 0;
1048 + }
1049 +
1050 +diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
1051 +index 12c90aa0900e6..a77cd86fe75ed 100644
1052 +--- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c
1053 ++++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
1054 +@@ -213,6 +213,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
1055 + i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info;
1056 + i2c->adap.dev.parent = dev;
1057 + i2c->adap.dev.of_node = pdev->dev.of_node;
1058 ++ i2c->adap.dev.fwnode = dev->fwnode;
1059 + snprintf(i2c->adap.name, sizeof(i2c->adap.name),
1060 + "Cavium ThunderX i2c adapter at %s", dev_name(dev));
1061 + i2c_set_adapdata(&i2c->adap, i2c);
1062 +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
1063 +index 154139bf7d22b..f30fd38c3773b 100644
1064 +--- a/drivers/md/dm-crypt.c
1065 ++++ b/drivers/md/dm-crypt.c
1066 +@@ -3435,6 +3435,11 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
1067 + return DM_MAPIO_SUBMITTED;
1068 + }
1069 +
1070 ++static char hex2asc(unsigned char c)
1071 ++{
1072 ++ return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
1073 ++}
1074 ++
1075 + static void crypt_status(struct dm_target *ti, status_type_t type,
1076 + unsigned status_flags, char *result, unsigned maxlen)
1077 + {
1078 +@@ -3453,9 +3458,12 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
1079 + if (cc->key_size > 0) {
1080 + if (cc->key_string)
1081 + DMEMIT(":%u:%s", cc->key_size, cc->key_string);
1082 +- else
1083 +- for (i = 0; i < cc->key_size; i++)
1084 +- DMEMIT("%02x", cc->key[i]);
1085 ++ else {
1086 ++ for (i = 0; i < cc->key_size; i++) {
1087 ++ DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
1088 ++ hex2asc(cc->key[i] & 0xf));
1089 ++ }
1090 ++ }
1091 + } else
1092 + DMEMIT("-");
1093 +
1094 +diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
1095 +index e2a51c184a254..d5b8270869620 100644
1096 +--- a/drivers/md/dm-integrity.c
1097 ++++ b/drivers/md/dm-integrity.c
1098 +@@ -4478,8 +4478,6 @@ try_smaller_buffer:
1099 + }
1100 +
1101 + if (should_write_sb) {
1102 +- int r;
1103 +-
1104 + init_journal(ic, 0, ic->journal_sections, 0);
1105 + r = dm_integrity_failed(ic);
1106 + if (unlikely(r)) {
1107 +diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
1108 +index 0e039a8c0bf2e..a3f2050b9c9b4 100644
1109 +--- a/drivers/md/dm-stats.c
1110 ++++ b/drivers/md/dm-stats.c
1111 +@@ -225,6 +225,7 @@ void dm_stats_cleanup(struct dm_stats *stats)
1112 + atomic_read(&shared->in_flight[READ]),
1113 + atomic_read(&shared->in_flight[WRITE]));
1114 + }
1115 ++ cond_resched();
1116 + }
1117 + dm_stat_free(&s->rcu_head);
1118 + }
1119 +@@ -330,6 +331,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
1120 + for (ni = 0; ni < n_entries; ni++) {
1121 + atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
1122 + atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
1123 ++ cond_resched();
1124 + }
1125 +
1126 + if (s->n_histogram_entries) {
1127 +@@ -342,6 +344,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
1128 + for (ni = 0; ni < n_entries; ni++) {
1129 + s->stat_shared[ni].tmp.histogram = hi;
1130 + hi += s->n_histogram_entries + 1;
1131 ++ cond_resched();
1132 + }
1133 + }
1134 +
1135 +@@ -362,6 +365,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
1136 + for (ni = 0; ni < n_entries; ni++) {
1137 + p[ni].histogram = hi;
1138 + hi += s->n_histogram_entries + 1;
1139 ++ cond_resched();
1140 + }
1141 + }
1142 + }
1143 +@@ -497,6 +501,7 @@ static int dm_stats_list(struct dm_stats *stats, const char *program,
1144 + }
1145 + DMEMIT("\n");
1146 + }
1147 ++ cond_resched();
1148 + }
1149 + mutex_unlock(&stats->mutex);
1150 +
1151 +@@ -774,6 +779,7 @@ static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
1152 + local_irq_enable();
1153 + }
1154 + }
1155 ++ cond_resched();
1156 + }
1157 + }
1158 +
1159 +@@ -889,6 +895,8 @@ static int dm_stats_print(struct dm_stats *stats, int id,
1160 +
1161 + if (unlikely(sz + 1 >= maxlen))
1162 + goto buffer_overflow;
1163 ++
1164 ++ cond_resched();
1165 + }
1166 +
1167 + if (clear)
1168 +diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
1169 +index 88288c8d6bc8c..426299ceb33d7 100644
1170 +--- a/drivers/md/dm-verity-target.c
1171 ++++ b/drivers/md/dm-verity-target.c
1172 +@@ -1312,6 +1312,7 @@ bad:
1173 +
1174 + static struct target_type verity_target = {
1175 + .name = "verity",
1176 ++ .features = DM_TARGET_IMMUTABLE,
1177 + .version = {1, 8, 0},
1178 + .module = THIS_MODULE,
1179 + .ctr = verity_ctr,
1180 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1181 +index b9d062f0a02b2..e54d802ee0bb8 100644
1182 +--- a/drivers/md/raid5.c
1183 ++++ b/drivers/md/raid5.c
1184 +@@ -686,17 +686,17 @@ int raid5_calc_degraded(struct r5conf *conf)
1185 + return degraded;
1186 + }
1187 +
1188 +-static int has_failed(struct r5conf *conf)
1189 ++static bool has_failed(struct r5conf *conf)
1190 + {
1191 +- int degraded;
1192 ++ int degraded = conf->mddev->degraded;
1193 +
1194 +- if (conf->mddev->reshape_position == MaxSector)
1195 +- return conf->mddev->degraded > conf->max_degraded;
1196 ++ if (test_bit(MD_BROKEN, &conf->mddev->flags))
1197 ++ return true;
1198 +
1199 +- degraded = raid5_calc_degraded(conf);
1200 +- if (degraded > conf->max_degraded)
1201 +- return 1;
1202 +- return 0;
1203 ++ if (conf->mddev->reshape_position != MaxSector)
1204 ++ degraded = raid5_calc_degraded(conf);
1205 ++
1206 ++ return degraded > conf->max_degraded;
1207 + }
1208 +
1209 + struct stripe_head *
1210 +@@ -2877,34 +2877,31 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
1211 + unsigned long flags;
1212 + pr_debug("raid456: error called\n");
1213 +
1214 ++ pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n",
1215 ++ mdname(mddev), bdevname(rdev->bdev, b));
1216 ++
1217 + spin_lock_irqsave(&conf->device_lock, flags);
1218 ++ set_bit(Faulty, &rdev->flags);
1219 ++ clear_bit(In_sync, &rdev->flags);
1220 ++ mddev->degraded = raid5_calc_degraded(conf);
1221 +
1222 +- if (test_bit(In_sync, &rdev->flags) &&
1223 +- mddev->degraded == conf->max_degraded) {
1224 +- /*
1225 +- * Don't allow to achieve failed state
1226 +- * Don't try to recover this device
1227 +- */
1228 ++ if (has_failed(conf)) {
1229 ++ set_bit(MD_BROKEN, &conf->mddev->flags);
1230 + conf->recovery_disabled = mddev->recovery_disabled;
1231 +- spin_unlock_irqrestore(&conf->device_lock, flags);
1232 +- return;
1233 ++
1234 ++ pr_crit("md/raid:%s: Cannot continue operation (%d/%d failed).\n",
1235 ++ mdname(mddev), mddev->degraded, conf->raid_disks);
1236 ++ } else {
1237 ++ pr_crit("md/raid:%s: Operation continuing on %d devices.\n",
1238 ++ mdname(mddev), conf->raid_disks - mddev->degraded);
1239 + }
1240 +
1241 +- set_bit(Faulty, &rdev->flags);
1242 +- clear_bit(In_sync, &rdev->flags);
1243 +- mddev->degraded = raid5_calc_degraded(conf);
1244 + spin_unlock_irqrestore(&conf->device_lock, flags);
1245 + set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1246 +
1247 + set_bit(Blocked, &rdev->flags);
1248 + set_mask_bits(&mddev->sb_flags, 0,
1249 + BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1250 +- pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
1251 +- "md/raid:%s: Operation continuing on %d devices.\n",
1252 +- mdname(mddev),
1253 +- bdevname(rdev->bdev, b),
1254 +- mdname(mddev),
1255 +- conf->raid_disks - mddev->degraded);
1256 + r5c_update_on_rdev_error(mddev, rdev);
1257 + }
1258 +
1259 +diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c
1260 +index be3f6ea555597..84279a6808730 100644
1261 +--- a/drivers/media/i2c/imx412.c
1262 ++++ b/drivers/media/i2c/imx412.c
1263 +@@ -1011,7 +1011,7 @@ static int imx412_power_on(struct device *dev)
1264 + struct imx412 *imx412 = to_imx412(sd);
1265 + int ret;
1266 +
1267 +- gpiod_set_value_cansleep(imx412->reset_gpio, 1);
1268 ++ gpiod_set_value_cansleep(imx412->reset_gpio, 0);
1269 +
1270 + ret = clk_prepare_enable(imx412->inclk);
1271 + if (ret) {
1272 +@@ -1024,7 +1024,7 @@ static int imx412_power_on(struct device *dev)
1273 + return 0;
1274 +
1275 + error_reset:
1276 +- gpiod_set_value_cansleep(imx412->reset_gpio, 0);
1277 ++ gpiod_set_value_cansleep(imx412->reset_gpio, 1);
1278 +
1279 + return ret;
1280 + }
1281 +@@ -1040,10 +1040,10 @@ static int imx412_power_off(struct device *dev)
1282 + struct v4l2_subdev *sd = dev_get_drvdata(dev);
1283 + struct imx412 *imx412 = to_imx412(sd);
1284 +
1285 +- gpiod_set_value_cansleep(imx412->reset_gpio, 0);
1286 +-
1287 + clk_disable_unprepare(imx412->inclk);
1288 +
1289 ++ gpiod_set_value_cansleep(imx412->reset_gpio, 1);
1290 ++
1291 + return 0;
1292 + }
1293 +
1294 +diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
1295 +index e1df2dc810a28..0b833572205f3 100644
1296 +--- a/drivers/net/ethernet/faraday/ftgmac100.c
1297 ++++ b/drivers/net/ethernet/faraday/ftgmac100.c
1298 +@@ -1910,6 +1910,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
1299 + /* AST2400 doesn't have working HW checksum generation */
1300 + if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
1301 + netdev->hw_features &= ~NETIF_F_HW_CSUM;
1302 ++
1303 ++ /* AST2600 tx checksum with NCSI is broken */
1304 ++ if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac"))
1305 ++ netdev->hw_features &= ~NETIF_F_HW_CSUM;
1306 ++
1307 + if (np && of_get_property(np, "no-hw-checksum", NULL))
1308 + netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
1309 + netdev->features |= netdev->hw_features;
1310 +diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
1311 +index 87e42db1b61e6..477eb4051bed7 100644
1312 +--- a/drivers/net/ipa/ipa_endpoint.c
1313 ++++ b/drivers/net/ipa/ipa_endpoint.c
1314 +@@ -722,13 +722,15 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
1315 +
1316 + if (endpoint->data->aggregation) {
1317 + if (!endpoint->toward_ipa) {
1318 ++ u32 buffer_size;
1319 + bool close_eof;
1320 + u32 limit;
1321 +
1322 + val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
1323 + val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
1324 +
1325 +- limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
1326 ++ buffer_size = IPA_RX_BUFFER_SIZE - NET_SKB_PAD;
1327 ++ limit = ipa_aggr_size_kb(buffer_size);
1328 + val |= aggr_byte_limit_encoded(version, limit);
1329 +
1330 + limit = IPA_AGGR_TIME_LIMIT;
1331 +diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
1332 +index d32aec0c334fe..6dc0af63440f4 100644
1333 +--- a/drivers/nfc/pn533/pn533.c
1334 ++++ b/drivers/nfc/pn533/pn533.c
1335 +@@ -2789,13 +2789,14 @@ void pn53x_common_clean(struct pn533 *priv)
1336 + {
1337 + struct pn533_cmd *cmd, *n;
1338 +
1339 ++ /* delete the timer before cleanup the worker */
1340 ++ del_timer_sync(&priv->listen_timer);
1341 ++
1342 + flush_delayed_work(&priv->poll_work);
1343 + destroy_workqueue(priv->wq);
1344 +
1345 + skb_queue_purge(&priv->resp_q);
1346 +
1347 +- del_timer(&priv->listen_timer);
1348 +-
1349 + list_for_each_entry_safe(cmd, n, &priv->cmd_queue, queue) {
1350 + list_del(&cmd->queue);
1351 + kfree(cmd);
1352 +diff --git a/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c b/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
1353 +index 2801ca7062732..68a5b627fb9b2 100644
1354 +--- a/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
1355 ++++ b/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
1356 +@@ -204,7 +204,7 @@ static const struct sunxi_desc_pin suniv_f1c100s_pins[] = {
1357 + SUNXI_FUNCTION(0x0, "gpio_in"),
1358 + SUNXI_FUNCTION(0x1, "gpio_out"),
1359 + SUNXI_FUNCTION(0x2, "lcd"), /* D20 */
1360 +- SUNXI_FUNCTION(0x3, "lvds1"), /* RX */
1361 ++ SUNXI_FUNCTION(0x3, "uart2"), /* RX */
1362 + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)),
1363 + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
1364 + SUNXI_FUNCTION(0x0, "gpio_in"),
1365 +diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
1366 +index cc5cffc4a7691..e2113e0a848c4 100644
1367 +--- a/fs/exfat/balloc.c
1368 ++++ b/fs/exfat/balloc.c
1369 +@@ -148,7 +148,9 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync)
1370 + struct super_block *sb = inode->i_sb;
1371 + struct exfat_sb_info *sbi = EXFAT_SB(sb);
1372 +
1373 +- WARN_ON(clu < EXFAT_FIRST_CLUSTER);
1374 ++ if (!is_valid_cluster(sbi, clu))
1375 ++ return -EINVAL;
1376 ++
1377 + ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
1378 + i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
1379 + b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
1380 +@@ -166,7 +168,9 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
1381 + struct exfat_sb_info *sbi = EXFAT_SB(sb);
1382 + struct exfat_mount_options *opts = &sbi->options;
1383 +
1384 +- WARN_ON(clu < EXFAT_FIRST_CLUSTER);
1385 ++ if (!is_valid_cluster(sbi, clu))
1386 ++ return;
1387 ++
1388 + ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
1389 + i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
1390 + b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
1391 +diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
1392 +index 1d6da61157c93..9f82a8a835eec 100644
1393 +--- a/fs/exfat/exfat_fs.h
1394 ++++ b/fs/exfat/exfat_fs.h
1395 +@@ -381,6 +381,14 @@ static inline int exfat_sector_to_cluster(struct exfat_sb_info *sbi,
1396 + EXFAT_RESERVED_CLUSTERS;
1397 + }
1398 +
1399 ++static inline bool is_valid_cluster(struct exfat_sb_info *sbi,
1400 ++ unsigned int clus)
1401 ++{
1402 ++ if (clus < EXFAT_FIRST_CLUSTER || sbi->num_clusters <= clus)
1403 ++ return false;
1404 ++ return true;
1405 ++}
1406 ++
1407 + /* super.c */
1408 + int exfat_set_volume_dirty(struct super_block *sb);
1409 + int exfat_clear_volume_dirty(struct super_block *sb);
1410 +diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
1411 +index e949e563443c9..421c273531049 100644
1412 +--- a/fs/exfat/fatent.c
1413 ++++ b/fs/exfat/fatent.c
1414 +@@ -81,14 +81,6 @@ int exfat_ent_set(struct super_block *sb, unsigned int loc,
1415 + return 0;
1416 + }
1417 +
1418 +-static inline bool is_valid_cluster(struct exfat_sb_info *sbi,
1419 +- unsigned int clus)
1420 +-{
1421 +- if (clus < EXFAT_FIRST_CLUSTER || sbi->num_clusters <= clus)
1422 +- return false;
1423 +- return true;
1424 +-}
1425 +-
1426 + int exfat_ent_get(struct super_block *sb, unsigned int loc,
1427 + unsigned int *content)
1428 + {
1429 +diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
1430 +index c8845242d4225..fbfe293af72c9 100644
1431 +--- a/fs/nfs/internal.h
1432 ++++ b/fs/nfs/internal.h
1433 +@@ -834,6 +834,7 @@ static inline bool nfs_error_is_fatal_on_server(int err)
1434 + case 0:
1435 + case -ERESTARTSYS:
1436 + case -EINTR:
1437 ++ case -ENOMEM:
1438 + return false;
1439 + }
1440 + return nfs_error_is_fatal(err);
1441 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
1442 +index 181bc3d9f5663..f9e2fa9cfbec5 100644
1443 +--- a/fs/nfsd/nfs4state.c
1444 ++++ b/fs/nfsd/nfs4state.c
1445 +@@ -7299,16 +7299,12 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
1446 + if (sop->so_is_open_owner || !same_owner_str(sop, owner))
1447 + continue;
1448 +
1449 +- /* see if there are still any locks associated with it */
1450 +- lo = lockowner(sop);
1451 +- list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
1452 +- if (check_for_locks(stp->st_stid.sc_file, lo)) {
1453 +- status = nfserr_locks_held;
1454 +- spin_unlock(&clp->cl_lock);
1455 +- return status;
1456 +- }
1457 ++ if (atomic_read(&sop->so_count) != 1) {
1458 ++ spin_unlock(&clp->cl_lock);
1459 ++ return nfserr_locks_held;
1460 + }
1461 +
1462 ++ lo = lockowner(sop);
1463 + nfs4_get_stateowner(sop);
1464 + break;
1465 + }
1466 +diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
1467 +index d41d76979e121..7f85ec83e196f 100644
1468 +--- a/fs/ntfs3/super.c
1469 ++++ b/fs/ntfs3/super.c
1470 +@@ -668,9 +668,11 @@ static u32 format_size_gb(const u64 bytes, u32 *mb)
1471 +
1472 + static u32 true_sectors_per_clst(const struct NTFS_BOOT *boot)
1473 + {
1474 +- return boot->sectors_per_clusters <= 0x80
1475 +- ? boot->sectors_per_clusters
1476 +- : (1u << (0 - boot->sectors_per_clusters));
1477 ++ if (boot->sectors_per_clusters <= 0x80)
1478 ++ return boot->sectors_per_clusters;
1479 ++ if (boot->sectors_per_clusters >= 0xf4) /* limit shift to 2MB max */
1480 ++ return 1U << (0 - boot->sectors_per_clusters);
1481 ++ return -EINVAL;
1482 + }
1483 +
1484 + /*
1485 +@@ -713,6 +715,8 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
1486 +
1487 + /* cluster size: 512, 1K, 2K, 4K, ... 2M */
1488 + sct_per_clst = true_sectors_per_clst(boot);
1489 ++ if ((int)sct_per_clst < 0)
1490 ++ goto out;
1491 + if (!is_power_of_2(sct_per_clst))
1492 + goto out;
1493 +
1494 +diff --git a/fs/pipe.c b/fs/pipe.c
1495 +index 751d5b36c84bb..e08f0fe55584b 100644
1496 +--- a/fs/pipe.c
1497 ++++ b/fs/pipe.c
1498 +@@ -652,7 +652,7 @@ pipe_poll(struct file *filp, poll_table *wait)
1499 + unsigned int head, tail;
1500 +
1501 + /* Epoll has some historical nasty semantics, this enables them */
1502 +- pipe->poll_usage = 1;
1503 ++ WRITE_ONCE(pipe->poll_usage, true);
1504 +
1505 + /*
1506 + * Reading pipe state only -- no need for acquiring the semaphore.
1507 +@@ -1244,30 +1244,33 @@ unsigned int round_pipe_size(unsigned long size)
1508 +
1509 + /*
1510 + * Resize the pipe ring to a number of slots.
1511 ++ *
1512 ++ * Note the pipe can be reduced in capacity, but only if the current
1513 ++ * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
1514 ++ * returned instead.
1515 + */
1516 + int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
1517 + {
1518 + struct pipe_buffer *bufs;
1519 + unsigned int head, tail, mask, n;
1520 +
1521 +- /*
1522 +- * We can shrink the pipe, if arg is greater than the ring occupancy.
1523 +- * Since we don't expect a lot of shrink+grow operations, just free and
1524 +- * allocate again like we would do for growing. If the pipe currently
1525 +- * contains more buffers than arg, then return busy.
1526 +- */
1527 +- mask = pipe->ring_size - 1;
1528 +- head = pipe->head;
1529 +- tail = pipe->tail;
1530 +- n = pipe_occupancy(pipe->head, pipe->tail);
1531 +- if (nr_slots < n)
1532 +- return -EBUSY;
1533 +-
1534 + bufs = kcalloc(nr_slots, sizeof(*bufs),
1535 + GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
1536 + if (unlikely(!bufs))
1537 + return -ENOMEM;
1538 +
1539 ++ spin_lock_irq(&pipe->rd_wait.lock);
1540 ++ mask = pipe->ring_size - 1;
1541 ++ head = pipe->head;
1542 ++ tail = pipe->tail;
1543 ++
1544 ++ n = pipe_occupancy(head, tail);
1545 ++ if (nr_slots < n) {
1546 ++ spin_unlock_irq(&pipe->rd_wait.lock);
1547 ++ kfree(bufs);
1548 ++ return -EBUSY;
1549 ++ }
1550 ++
1551 + /*
1552 + * The pipe array wraps around, so just start the new one at zero
1553 + * and adjust the indices.
1554 +@@ -1299,6 +1302,8 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
1555 + pipe->tail = tail;
1556 + pipe->head = head;
1557 +
1558 ++ spin_unlock_irq(&pipe->rd_wait.lock);
1559 ++
1560 + /* This might have made more room for writers */
1561 + wake_up_interruptible(&pipe->wr_wait);
1562 + return 0;
1563 +diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
1564 +index 88e4d145f7cda..a6c3b8e7deb64 100644
1565 +--- a/include/crypto/drbg.h
1566 ++++ b/include/crypto/drbg.h
1567 +@@ -105,6 +105,12 @@ struct drbg_test_data {
1568 + struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */
1569 + };
1570 +
1571 ++enum drbg_seed_state {
1572 ++ DRBG_SEED_STATE_UNSEEDED,
1573 ++ DRBG_SEED_STATE_PARTIAL, /* Seeded with !rng_is_initialized() */
1574 ++ DRBG_SEED_STATE_FULL,
1575 ++};
1576 ++
1577 + struct drbg_state {
1578 + struct mutex drbg_mutex; /* lock around DRBG */
1579 + unsigned char *V; /* internal state 10.1.1.1 1a) */
1580 +@@ -127,16 +133,14 @@ struct drbg_state {
1581 + struct crypto_wait ctr_wait; /* CTR mode async wait obj */
1582 + struct scatterlist sg_in, sg_out; /* CTR mode SGLs */
1583 +
1584 +- bool seeded; /* DRBG fully seeded? */
1585 ++ enum drbg_seed_state seeded; /* DRBG fully seeded? */
1586 + bool pr; /* Prediction resistance enabled? */
1587 + bool fips_primed; /* Continuous test primed? */
1588 + unsigned char *prev; /* FIPS 140-2 continuous test value */
1589 +- struct work_struct seed_work; /* asynchronous seeding support */
1590 + struct crypto_rng *jent;
1591 + const struct drbg_state_ops *d_ops;
1592 + const struct drbg_core *core;
1593 + struct drbg_string test_data;
1594 +- struct notifier_block random_ready;
1595 + };
1596 +
1597 + static inline __u8 drbg_statelen(struct drbg_state *drbg)
1598 +diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
1599 +index fc5642431b923..c0b6ec6bf65b7 100644
1600 +--- a/include/linux/pipe_fs_i.h
1601 ++++ b/include/linux/pipe_fs_i.h
1602 +@@ -71,7 +71,7 @@ struct pipe_inode_info {
1603 + unsigned int files;
1604 + unsigned int r_counter;
1605 + unsigned int w_counter;
1606 +- unsigned int poll_usage;
1607 ++ bool poll_usage;
1608 + struct page *tmp_page;
1609 + struct fasync_struct *fasync_readers;
1610 + struct fasync_struct *fasync_writers;
1611 +diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
1612 +index 13807ea94cd2b..2d524782f53b7 100644
1613 +--- a/include/net/netfilter/nf_conntrack_core.h
1614 ++++ b/include/net/netfilter/nf_conntrack_core.h
1615 +@@ -58,8 +58,13 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
1616 + int ret = NF_ACCEPT;
1617 +
1618 + if (ct) {
1619 +- if (!nf_ct_is_confirmed(ct))
1620 ++ if (!nf_ct_is_confirmed(ct)) {
1621 + ret = __nf_conntrack_confirm(skb);
1622 ++
1623 ++ if (ret == NF_ACCEPT)
1624 ++ ct = (struct nf_conn *)skb_nfct(skb);
1625 ++ }
1626 ++
1627 + if (likely(ret == NF_ACCEPT))
1628 + nf_ct_deliver_cached_events(ct);
1629 + }
1630 +diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
1631 +index fc0f77f91224b..7efae3af62017 100644
1632 +--- a/kernel/bpf/stackmap.c
1633 ++++ b/kernel/bpf/stackmap.c
1634 +@@ -119,7 +119,6 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
1635 + return ERR_PTR(-E2BIG);
1636 +
1637 + cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
1638 +- cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
1639 + smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
1640 + if (!smap)
1641 + return ERR_PTR(-ENOMEM);
1642 +diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
1643 +index 2660fbced9ad4..4fa75791b45e2 100644
1644 +--- a/kernel/bpf/trampoline.c
1645 ++++ b/kernel/bpf/trampoline.c
1646 +@@ -414,7 +414,7 @@ int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
1647 + {
1648 + enum bpf_tramp_prog_type kind;
1649 + int err = 0;
1650 +- int cnt;
1651 ++ int cnt = 0, i;
1652 +
1653 + kind = bpf_attach_type_to_tramp(prog);
1654 + mutex_lock(&tr->mutex);
1655 +@@ -425,7 +425,10 @@ int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
1656 + err = -EBUSY;
1657 + goto out;
1658 + }
1659 +- cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT];
1660 ++
1661 ++ for (i = 0; i < BPF_TRAMP_MAX; i++)
1662 ++ cnt += tr->progs_cnt[i];
1663 ++
1664 + if (kind == BPF_TRAMP_REPLACE) {
1665 + /* Cannot attach extension if fentry/fexit are in use. */
1666 + if (cnt) {
1667 +@@ -503,16 +506,19 @@ out:
1668 +
1669 + void bpf_trampoline_put(struct bpf_trampoline *tr)
1670 + {
1671 ++ int i;
1672 ++
1673 + if (!tr)
1674 + return;
1675 + mutex_lock(&trampoline_mutex);
1676 + if (!refcount_dec_and_test(&tr->refcnt))
1677 + goto out;
1678 + WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
1679 +- if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
1680 +- goto out;
1681 +- if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
1682 +- goto out;
1683 ++
1684 ++ for (i = 0; i < BPF_TRAMP_MAX; i++)
1685 ++ if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[i])))
1686 ++ goto out;
1687 ++
1688 + /* This code will be executed even when the last bpf_tramp_image
1689 + * is alive. All progs are detached from the trampoline and the
1690 + * trampoline image is patched with jmp into epilogue to skip
1691 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
1692 +index d2b119b4fbe74..25ee8d9572c6f 100644
1693 +--- a/kernel/bpf/verifier.c
1694 ++++ b/kernel/bpf/verifier.c
1695 +@@ -4587,6 +4587,11 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
1696 + return check_packet_access(env, regno, reg->off, access_size,
1697 + zero_size_allowed);
1698 + case PTR_TO_MAP_KEY:
1699 ++ if (meta && meta->raw_mode) {
1700 ++ verbose(env, "R%d cannot write into %s\n", regno,
1701 ++ reg_type_str(env, reg->type));
1702 ++ return -EACCES;
1703 ++ }
1704 + return check_mem_region_access(env, regno, reg->off, access_size,
1705 + reg->map_ptr->key_size, false);
1706 + case PTR_TO_MAP_VALUE:
1707 +@@ -4597,13 +4602,23 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
1708 + return check_map_access(env, regno, reg->off, access_size,
1709 + zero_size_allowed);
1710 + case PTR_TO_MEM:
1711 ++ if (type_is_rdonly_mem(reg->type)) {
1712 ++ if (meta && meta->raw_mode) {
1713 ++ verbose(env, "R%d cannot write into %s\n", regno,
1714 ++ reg_type_str(env, reg->type));
1715 ++ return -EACCES;
1716 ++ }
1717 ++ }
1718 + return check_mem_region_access(env, regno, reg->off,
1719 + access_size, reg->mem_size,
1720 + zero_size_allowed);
1721 + case PTR_TO_BUF:
1722 + if (type_is_rdonly_mem(reg->type)) {
1723 +- if (meta && meta->raw_mode)
1724 ++ if (meta && meta->raw_mode) {
1725 ++ verbose(env, "R%d cannot write into %s\n", regno,
1726 ++ reg_type_str(env, reg->type));
1727 + return -EACCES;
1728 ++ }
1729 +
1730 + buf_info = "rdonly";
1731 + max_access = &env->prog->aux->max_rdonly_access;
1732 +diff --git a/lib/Kconfig b/lib/Kconfig
1733 +index fa4b10322efcd..e052f843afedc 100644
1734 +--- a/lib/Kconfig
1735 ++++ b/lib/Kconfig
1736 +@@ -121,6 +121,8 @@ config INDIRECT_IOMEM_FALLBACK
1737 + mmio accesses when the IO memory address is not a registered
1738 + emulated region.
1739 +
1740 ++source "lib/crypto/Kconfig"
1741 ++
1742 + config CRC_CCITT
1743 + tristate "CRC-CCITT functions"
1744 + help
1745 +diff --git a/lib/assoc_array.c b/lib/assoc_array.c
1746 +index 04c98799c3baf..70304b8f15ace 100644
1747 +--- a/lib/assoc_array.c
1748 ++++ b/lib/assoc_array.c
1749 +@@ -1462,6 +1462,7 @@ int assoc_array_gc(struct assoc_array *array,
1750 + struct assoc_array_ptr *cursor, *ptr;
1751 + struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
1752 + unsigned long nr_leaves_on_tree;
1753 ++ bool retained;
1754 + int keylen, slot, nr_free, next_slot, i;
1755 +
1756 + pr_devel("-->%s()\n", __func__);
1757 +@@ -1538,6 +1539,7 @@ continue_node:
1758 + goto descend;
1759 + }
1760 +
1761 ++retry_compress:
1762 + pr_devel("-- compress node %p --\n", new_n);
1763 +
1764 + /* Count up the number of empty slots in this node and work out the
1765 +@@ -1555,6 +1557,7 @@ continue_node:
1766 + pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
1767 +
1768 + /* See what we can fold in */
1769 ++ retained = false;
1770 + next_slot = 0;
1771 + for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
1772 + struct assoc_array_shortcut *s;
1773 +@@ -1604,9 +1607,14 @@ continue_node:
1774 + pr_devel("[%d] retain node %lu/%d [nx %d]\n",
1775 + slot, child->nr_leaves_on_branch, nr_free + 1,
1776 + next_slot);
1777 ++ retained = true;
1778 + }
1779 + }
1780 +
1781 ++ if (retained && new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
1782 ++ pr_devel("internal nodes remain despite enough space, retrying\n");
1783 ++ goto retry_compress;
1784 ++ }
1785 + pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
1786 +
1787 + nr_leaves_on_tree = new_n->nr_leaves_on_branch;
1788 +diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
1789 +index 8620f38e117c0..e8e525650cf29 100644
1790 +--- a/lib/crypto/Kconfig
1791 ++++ b/lib/crypto/Kconfig
1792 +@@ -1,5 +1,7 @@
1793 + # SPDX-License-Identifier: GPL-2.0
1794 +
1795 ++menu "Crypto library routines"
1796 ++
1797 + config CRYPTO_LIB_AES
1798 + tristate
1799 +
1800 +@@ -31,7 +33,7 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA
1801 +
1802 + config CRYPTO_LIB_CHACHA_GENERIC
1803 + tristate
1804 +- select CRYPTO_ALGAPI
1805 ++ select XOR_BLOCKS
1806 + help
1807 + This symbol can be depended upon by arch implementations of the
1808 + ChaCha library interface that require the generic code as a
1809 +@@ -40,7 +42,8 @@ config CRYPTO_LIB_CHACHA_GENERIC
1810 + of CRYPTO_LIB_CHACHA.
1811 +
1812 + config CRYPTO_LIB_CHACHA
1813 +- tristate
1814 ++ tristate "ChaCha library interface"
1815 ++ depends on CRYPTO
1816 + depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
1817 + select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n
1818 + help
1819 +@@ -65,7 +68,7 @@ config CRYPTO_LIB_CURVE25519_GENERIC
1820 + of CRYPTO_LIB_CURVE25519.
1821 +
1822 + config CRYPTO_LIB_CURVE25519
1823 +- tristate
1824 ++ tristate "Curve25519 scalar multiplication library"
1825 + depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519
1826 + select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n
1827 + help
1828 +@@ -100,7 +103,7 @@ config CRYPTO_LIB_POLY1305_GENERIC
1829 + of CRYPTO_LIB_POLY1305.
1830 +
1831 + config CRYPTO_LIB_POLY1305
1832 +- tristate
1833 ++ tristate "Poly1305 library interface"
1834 + depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
1835 + select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n
1836 + help
1837 +@@ -109,14 +112,18 @@ config CRYPTO_LIB_POLY1305
1838 + is available and enabled.
1839 +
1840 + config CRYPTO_LIB_CHACHA20POLY1305
1841 +- tristate
1842 ++ tristate "ChaCha20-Poly1305 AEAD support (8-byte nonce library version)"
1843 + depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
1844 + depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
1845 ++ depends on CRYPTO
1846 + select CRYPTO_LIB_CHACHA
1847 + select CRYPTO_LIB_POLY1305
1848 ++ select CRYPTO_ALGAPI
1849 +
1850 + config CRYPTO_LIB_SHA256
1851 + tristate
1852 +
1853 + config CRYPTO_LIB_SM4
1854 + tristate
1855 ++
1856 ++endmenu
1857 +diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
1858 +index af9302141bcf6..e5c5315da2741 100644
1859 +--- a/lib/percpu-refcount.c
1860 ++++ b/lib/percpu-refcount.c
1861 +@@ -76,6 +76,7 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
1862 + data = kzalloc(sizeof(*ref->data), gfp);
1863 + if (!data) {
1864 + free_percpu((void __percpu *)ref->percpu_count_ptr);
1865 ++ ref->percpu_count_ptr = 0;
1866 + return -ENOMEM;
1867 + }
1868 +
1869 +diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
1870 +index b897ce3b399a1..439deb8decbcc 100644
1871 +--- a/mm/zsmalloc.c
1872 ++++ b/mm/zsmalloc.c
1873 +@@ -1743,11 +1743,40 @@ static enum fullness_group putback_zspage(struct size_class *class,
1874 + */
1875 + static void lock_zspage(struct zspage *zspage)
1876 + {
1877 +- struct page *page = get_first_page(zspage);
1878 ++ struct page *curr_page, *page;
1879 +
1880 +- do {
1881 +- lock_page(page);
1882 +- } while ((page = get_next_page(page)) != NULL);
1883 ++ /*
1884 ++ * Pages we haven't locked yet can be migrated off the list while we're
1885 ++ * trying to lock them, so we need to be careful and only attempt to
1886 ++ * lock each page under migrate_read_lock(). Otherwise, the page we lock
1887 ++ * may no longer belong to the zspage. This means that we may wait for
1888 ++ * the wrong page to unlock, so we must take a reference to the page
1889 ++ * prior to waiting for it to unlock outside migrate_read_lock().
1890 ++ */
1891 ++ while (1) {
1892 ++ migrate_read_lock(zspage);
1893 ++ page = get_first_page(zspage);
1894 ++ if (trylock_page(page))
1895 ++ break;
1896 ++ get_page(page);
1897 ++ migrate_read_unlock(zspage);
1898 ++ wait_on_page_locked(page);
1899 ++ put_page(page);
1900 ++ }
1901 ++
1902 ++ curr_page = page;
1903 ++ while ((page = get_next_page(curr_page))) {
1904 ++ if (trylock_page(page)) {
1905 ++ curr_page = page;
1906 ++ } else {
1907 ++ get_page(page);
1908 ++ migrate_read_unlock(zspage);
1909 ++ wait_on_page_locked(page);
1910 ++ put_page(page);
1911 ++ migrate_read_lock(zspage);
1912 ++ }
1913 ++ }
1914 ++ migrate_read_unlock(zspage);
1915 + }
1916 +
1917 + static int zs_init_fs_context(struct fs_context *fc)
1918 +diff --git a/net/core/filter.c b/net/core/filter.c
1919 +index 821278b906b71..707e2e48d7691 100644
1920 +--- a/net/core/filter.c
1921 ++++ b/net/core/filter.c
1922 +@@ -1688,7 +1688,7 @@ BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
1923 +
1924 + if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
1925 + return -EINVAL;
1926 +- if (unlikely(offset > 0xffff))
1927 ++ if (unlikely(offset > INT_MAX))
1928 + return -EFAULT;
1929 + if (unlikely(bpf_try_make_writable(skb, offset + len)))
1930 + return -EFAULT;
1931 +@@ -1723,7 +1723,7 @@ BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
1932 + {
1933 + void *ptr;
1934 +
1935 +- if (unlikely(offset > 0xffff))
1936 ++ if (unlikely(offset > INT_MAX))
1937 + goto err_clear;
1938 +
1939 + ptr = skb_header_pointer(skb, offset, len, to);
1940 +diff --git a/net/key/af_key.c b/net/key/af_key.c
1941 +index 92e9d75dba2f4..339d95df19d32 100644
1942 +--- a/net/key/af_key.c
1943 ++++ b/net/key/af_key.c
1944 +@@ -2900,7 +2900,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
1945 + break;
1946 + if (!aalg->pfkey_supported)
1947 + continue;
1948 +- if (aalg_tmpl_set(t, aalg))
1949 ++ if (aalg_tmpl_set(t, aalg) && aalg->available)
1950 + sz += sizeof(struct sadb_comb);
1951 + }
1952 + return sz + sizeof(struct sadb_prop);
1953 +@@ -2918,7 +2918,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
1954 + if (!ealg->pfkey_supported)
1955 + continue;
1956 +
1957 +- if (!(ealg_tmpl_set(t, ealg)))
1958 ++ if (!(ealg_tmpl_set(t, ealg) && ealg->available))
1959 + continue;
1960 +
1961 + for (k = 1; ; k++) {
1962 +@@ -2929,7 +2929,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
1963 + if (!aalg->pfkey_supported)
1964 + continue;
1965 +
1966 +- if (aalg_tmpl_set(t, aalg))
1967 ++ if (aalg_tmpl_set(t, aalg) && aalg->available)
1968 + sz += sizeof(struct sadb_comb);
1969 + }
1970 + }
1971 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
1972 +index 2feb88ffcd81f..79e8fc687fdd4 100644
1973 +--- a/net/netfilter/nf_tables_api.c
1974 ++++ b/net/netfilter/nf_tables_api.c
1975 +@@ -222,12 +222,18 @@ err_register:
1976 + }
1977 +
1978 + static void nft_netdev_unregister_hooks(struct net *net,
1979 +- struct list_head *hook_list)
1980 ++ struct list_head *hook_list,
1981 ++ bool release_netdev)
1982 + {
1983 +- struct nft_hook *hook;
1984 ++ struct nft_hook *hook, *next;
1985 +
1986 +- list_for_each_entry(hook, hook_list, list)
1987 ++ list_for_each_entry_safe(hook, next, hook_list, list) {
1988 + nf_unregister_net_hook(net, &hook->ops);
1989 ++ if (release_netdev) {
1990 ++ list_del(&hook->list);
1991 ++ kfree_rcu(hook, rcu);
1992 ++ }
1993 ++ }
1994 + }
1995 +
1996 + static int nf_tables_register_hook(struct net *net,
1997 +@@ -253,9 +259,10 @@ static int nf_tables_register_hook(struct net *net,
1998 + return nf_register_net_hook(net, &basechain->ops);
1999 + }
2000 +
2001 +-static void nf_tables_unregister_hook(struct net *net,
2002 +- const struct nft_table *table,
2003 +- struct nft_chain *chain)
2004 ++static void __nf_tables_unregister_hook(struct net *net,
2005 ++ const struct nft_table *table,
2006 ++ struct nft_chain *chain,
2007 ++ bool release_netdev)
2008 + {
2009 + struct nft_base_chain *basechain;
2010 + const struct nf_hook_ops *ops;
2011 +@@ -270,11 +277,19 @@ static void nf_tables_unregister_hook(struct net *net,
2012 + return basechain->type->ops_unregister(net, ops);
2013 +
2014 + if (nft_base_chain_netdev(table->family, basechain->ops.hooknum))
2015 +- nft_netdev_unregister_hooks(net, &basechain->hook_list);
2016 ++ nft_netdev_unregister_hooks(net, &basechain->hook_list,
2017 ++ release_netdev);
2018 + else
2019 + nf_unregister_net_hook(net, &basechain->ops);
2020 + }
2021 +
2022 ++static void nf_tables_unregister_hook(struct net *net,
2023 ++ const struct nft_table *table,
2024 ++ struct nft_chain *chain)
2025 ++{
2026 ++ return __nf_tables_unregister_hook(net, table, chain, false);
2027 ++}
2028 ++
2029 + static void nft_trans_commit_list_add_tail(struct net *net, struct nft_trans *trans)
2030 + {
2031 + struct nftables_pernet *nft_net = nft_pernet(net);
2032 +@@ -2778,27 +2793,31 @@ static struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
2033 +
2034 + err = nf_tables_expr_parse(ctx, nla, &expr_info);
2035 + if (err < 0)
2036 +- goto err1;
2037 ++ goto err_expr_parse;
2038 ++
2039 ++ err = -EOPNOTSUPP;
2040 ++ if (!(expr_info.ops->type->flags & NFT_EXPR_STATEFUL))
2041 ++ goto err_expr_stateful;
2042 +
2043 + err = -ENOMEM;
2044 + expr = kzalloc(expr_info.ops->size, GFP_KERNEL);
2045 + if (expr == NULL)
2046 +- goto err2;
2047 ++ goto err_expr_stateful;
2048 +
2049 + err = nf_tables_newexpr(ctx, &expr_info, expr);
2050 + if (err < 0)
2051 +- goto err3;
2052 ++ goto err_expr_new;
2053 +
2054 + return expr;
2055 +-err3:
2056 ++err_expr_new:
2057 + kfree(expr);
2058 +-err2:
2059 ++err_expr_stateful:
2060 + owner = expr_info.ops->type->owner;
2061 + if (expr_info.ops->type->release_ops)
2062 + expr_info.ops->type->release_ops(expr_info.ops);
2063 +
2064 + module_put(owner);
2065 +-err1:
2066 ++err_expr_parse:
2067 + return ERR_PTR(err);
2068 + }
2069 +
2070 +@@ -4147,6 +4166,9 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr,
2071 + u32 len;
2072 + int err;
2073 +
2074 ++ if (desc->field_count >= ARRAY_SIZE(desc->field_len))
2075 ++ return -E2BIG;
2076 ++
2077 + err = nla_parse_nested_deprecated(tb, NFTA_SET_FIELD_MAX, attr,
2078 + nft_concat_policy, NULL);
2079 + if (err < 0)
2080 +@@ -4156,9 +4178,8 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr,
2081 + return -EINVAL;
2082 +
2083 + len = ntohl(nla_get_be32(tb[NFTA_SET_FIELD_LEN]));
2084 +-
2085 +- if (len * BITS_PER_BYTE / 32 > NFT_REG32_COUNT)
2086 +- return -E2BIG;
2087 ++ if (!len || len > U8_MAX)
2088 ++ return -EINVAL;
2089 +
2090 + desc->field_len[desc->field_count++] = len;
2091 +
2092 +@@ -4169,7 +4190,8 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
2093 + const struct nlattr *nla)
2094 + {
2095 + struct nlattr *attr;
2096 +- int rem, err;
2097 ++ u32 num_regs = 0;
2098 ++ int rem, err, i;
2099 +
2100 + nla_for_each_nested(attr, nla, rem) {
2101 + if (nla_type(attr) != NFTA_LIST_ELEM)
2102 +@@ -4180,6 +4202,12 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
2103 + return err;
2104 + }
2105 +
2106 ++ for (i = 0; i < desc->field_count; i++)
2107 ++ num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32));
2108 ++
2109 ++ if (num_regs > NFT_REG32_COUNT)
2110 ++ return -E2BIG;
2111 ++
2112 + return 0;
2113 + }
2114 +
2115 +@@ -5318,9 +5346,6 @@ struct nft_expr *nft_set_elem_expr_alloc(const struct nft_ctx *ctx,
2116 + return expr;
2117 +
2118 + err = -EOPNOTSUPP;
2119 +- if (!(expr->ops->type->flags & NFT_EXPR_STATEFUL))
2120 +- goto err_set_elem_expr;
2121 +-
2122 + if (expr->ops->type->flags & NFT_EXPR_GC) {
2123 + if (set->flags & NFT_SET_TIMEOUT)
2124 + goto err_set_elem_expr;
2125 +@@ -7196,13 +7221,25 @@ static void nft_unregister_flowtable_hook(struct net *net,
2126 + FLOW_BLOCK_UNBIND);
2127 + }
2128 +
2129 +-static void nft_unregister_flowtable_net_hooks(struct net *net,
2130 +- struct list_head *hook_list)
2131 ++static void __nft_unregister_flowtable_net_hooks(struct net *net,
2132 ++ struct list_head *hook_list,
2133 ++ bool release_netdev)
2134 + {
2135 +- struct nft_hook *hook;
2136 ++ struct nft_hook *hook, *next;
2137 +
2138 +- list_for_each_entry(hook, hook_list, list)
2139 ++ list_for_each_entry_safe(hook, next, hook_list, list) {
2140 + nf_unregister_net_hook(net, &hook->ops);
2141 ++ if (release_netdev) {
2142 ++ list_del(&hook->list);
2143 ++ kfree_rcu(hook);
2144 ++ }
2145 ++ }
2146 ++}
2147 ++
2148 ++static void nft_unregister_flowtable_net_hooks(struct net *net,
2149 ++ struct list_head *hook_list)
2150 ++{
2151 ++ __nft_unregister_flowtable_net_hooks(net, hook_list, false);
2152 + }
2153 +
2154 + static int nft_register_flowtable_net_hooks(struct net *net,
2155 +@@ -9595,9 +9632,10 @@ static void __nft_release_hook(struct net *net, struct nft_table *table)
2156 + struct nft_chain *chain;
2157 +
2158 + list_for_each_entry(chain, &table->chains, list)
2159 +- nf_tables_unregister_hook(net, table, chain);
2160 ++ __nf_tables_unregister_hook(net, table, chain, true);
2161 + list_for_each_entry(flowtable, &table->flowtables, list)
2162 +- nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list);
2163 ++ __nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list,
2164 ++ true);
2165 + }
2166 +
2167 + static void __nft_release_hooks(struct net *net)
2168 +@@ -9736,7 +9774,11 @@ static int __net_init nf_tables_init_net(struct net *net)
2169 +
2170 + static void __net_exit nf_tables_pre_exit_net(struct net *net)
2171 + {
2172 ++ struct nftables_pernet *nft_net = nft_pernet(net);
2173 ++
2174 ++ mutex_lock(&nft_net->commit_mutex);
2175 + __nft_release_hooks(net);
2176 ++ mutex_unlock(&nft_net->commit_mutex);
2177 + }
2178 +
2179 + static void __net_exit nf_tables_exit_net(struct net *net)
2180 +diff --git a/sound/usb/clock.c b/sound/usb/clock.c
2181 +index 98345a695dccb..ccca9efa7d33f 100644
2182 +--- a/sound/usb/clock.c
2183 ++++ b/sound/usb/clock.c
2184 +@@ -572,6 +572,17 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip,
2185 + /* continue processing */
2186 + }
2187 +
2188 ++ /* FIXME - TEAC devices require the immediate interface setup */
2189 ++ if (USB_ID_VENDOR(chip->usb_id) == 0x0644) {
2190 ++ bool cur_base_48k = (rate % 48000 == 0);
2191 ++ bool prev_base_48k = (prev_rate % 48000 == 0);
2192 ++ if (cur_base_48k != prev_base_48k) {
2193 ++ usb_set_interface(chip->dev, fmt->iface, fmt->altsetting);
2194 ++ if (chip->quirk_flags & QUIRK_FLAG_IFACE_DELAY)
2195 ++ msleep(50);
2196 ++ }
2197 ++ }
2198 ++
2199 + validation:
2200 + /* validate clock after rate change */
2201 + if (!uac_clock_source_is_valid(chip, fmt, clock))
2202 +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
2203 +index 729e26f5ac4c7..9e65a42cc9b77 100644
2204 +--- a/sound/usb/pcm.c
2205 ++++ b/sound/usb/pcm.c
2206 +@@ -439,16 +439,21 @@ static int configure_endpoints(struct snd_usb_audio *chip,
2207 + /* stop any running stream beforehand */
2208 + if (stop_endpoints(subs, false))
2209 + sync_pending_stops(subs);
2210 ++ if (subs->sync_endpoint) {
2211 ++ err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
2212 ++ if (err < 0)
2213 ++ return err;
2214 ++ }
2215 + err = snd_usb_endpoint_configure(chip, subs->data_endpoint);
2216 + if (err < 0)
2217 + return err;
2218 + snd_usb_set_format_quirk(subs, subs->cur_audiofmt);
2219 +- }
2220 +-
2221 +- if (subs->sync_endpoint) {
2222 +- err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
2223 +- if (err < 0)
2224 +- return err;
2225 ++ } else {
2226 ++ if (subs->sync_endpoint) {
2227 ++ err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
2228 ++ if (err < 0)
2229 ++ return err;
2230 ++ }
2231 + }
2232 +
2233 + return 0;
2234 +diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
2235 +index 40a5e3eb4ef26..78eb41b621d63 100644
2236 +--- a/sound/usb/quirks-table.h
2237 ++++ b/sound/usb/quirks-table.h
2238 +@@ -2672,6 +2672,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
2239 + .altset_idx = 1,
2240 + .attributes = 0,
2241 + .endpoint = 0x82,
2242 ++ .ep_idx = 1,
2243 + .ep_attr = USB_ENDPOINT_XFER_ISOC,
2244 + .datainterval = 1,
2245 + .maxpacksize = 0x0126,
2246 +@@ -2875,6 +2876,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
2247 + .altset_idx = 1,
2248 + .attributes = 0x4,
2249 + .endpoint = 0x81,
2250 ++ .ep_idx = 1,
2251 + .ep_attr = USB_ENDPOINT_XFER_ISOC |
2252 + USB_ENDPOINT_SYNC_ASYNC,
2253 + .maxpacksize = 0x130,
2254 +@@ -3391,6 +3393,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
2255 + .altset_idx = 1,
2256 + .attributes = 0,
2257 + .endpoint = 0x03,
2258 ++ .ep_idx = 1,
2259 + .rates = SNDRV_PCM_RATE_96000,
2260 + .ep_attr = USB_ENDPOINT_XFER_ISOC |
2261 + USB_ENDPOINT_SYNC_ASYNC,
2262 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
2263 +index ab9f3da49941f..fbbe59054c3fb 100644
2264 +--- a/sound/usb/quirks.c
2265 ++++ b/sound/usb/quirks.c
2266 +@@ -1822,6 +1822,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
2267 + QUIRK_FLAG_IGNORE_CTL_ERROR),
2268 + DEVICE_FLG(0x06f8, 0xd002, /* Hercules DJ Console (Macintosh Edition) */
2269 + QUIRK_FLAG_IGNORE_CTL_ERROR),
2270 ++ DEVICE_FLG(0x0711, 0x5800, /* MCT Trigger 5 USB-to-HDMI */
2271 ++ QUIRK_FLAG_GET_SAMPLE_RATE),
2272 + DEVICE_FLG(0x074d, 0x3553, /* Outlaw RR2150 (Micronas UAC3553B) */
2273 + QUIRK_FLAG_GET_SAMPLE_RATE),
2274 + DEVICE_FLG(0x08bb, 0x2702, /* LineX FM Transmitter */