Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Mon, 06 Jun 2022 11:03:54
Message-Id: 1654513418.4b2fca6d71c3e7831becbf4ea5a7ac5e0130e589.mpagano@gentoo
1 commit: 4b2fca6d71c3e7831becbf4ea5a7ac5e0130e589
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Mon Jun 6 11:03:38 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Mon Jun 6 11:03:38 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4b2fca6d
7
8 Linux patch 5.10.120
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1119_linux-5.10.120.patch | 2010 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2014 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 32647390..773deb53 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -519,6 +519,10 @@ Patch: 1118_linux-5.10.119.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.10.119
23
24 +Patch: 1119_linux-5.10.120.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.10.120
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1119_linux-5.10.120.patch b/1119_linux-5.10.120.patch
33 new file mode 100644
34 index 00000000..baad5e2a
35 --- /dev/null
36 +++ b/1119_linux-5.10.120.patch
37 @@ -0,0 +1,2010 @@
38 +diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
39 +index 5a267f5d1a501..edd263e0992dc 100644
40 +--- a/Documentation/process/submitting-patches.rst
41 ++++ b/Documentation/process/submitting-patches.rst
42 +@@ -71,7 +71,7 @@ as you intend it to.
43 +
44 + The maintainer will thank you if you write your patch description in a
45 + form which can be easily pulled into Linux's source code management
46 +-system, ``git``, as a "commit log". See :ref:`explicit_in_reply_to`.
47 ++system, ``git``, as a "commit log". See :ref:`the_canonical_patch_format`.
48 +
49 + Solve only one problem per patch. If your description starts to get
50 + long, that's a sign that you probably need to split up your patch.
51 +diff --git a/Makefile b/Makefile
52 +index b442cc5bbfc30..fdd2ac273f420 100644
53 +--- a/Makefile
54 ++++ b/Makefile
55 +@@ -1,7 +1,7 @@
56 + # SPDX-License-Identifier: GPL-2.0
57 + VERSION = 5
58 + PATCHLEVEL = 10
59 +-SUBLEVEL = 119
60 ++SUBLEVEL = 120
61 + EXTRAVERSION =
62 + NAME = Dare mighty things
63 +
64 +diff --git a/arch/arm/boot/dts/s5pv210-aries.dtsi b/arch/arm/boot/dts/s5pv210-aries.dtsi
65 +index bd4450dbdcb61..986fa0b1a8774 100644
66 +--- a/arch/arm/boot/dts/s5pv210-aries.dtsi
67 ++++ b/arch/arm/boot/dts/s5pv210-aries.dtsi
68 +@@ -896,7 +896,7 @@
69 + device-wakeup-gpios = <&gpg3 4 GPIO_ACTIVE_HIGH>;
70 + interrupt-parent = <&gph2>;
71 + interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
72 +- interrupt-names = "host-wake";
73 ++ interrupt-names = "host-wakeup";
74 + };
75 + };
76 +
77 +diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
78 +index 84e5a2dc8be53..3dd58b4ee33e5 100644
79 +--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
80 ++++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
81 +@@ -359,13 +359,15 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
82 + static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
83 + struct kvm *kvm, unsigned long *gfn)
84 + {
85 +- struct kvmppc_uvmem_slot *p;
86 ++ struct kvmppc_uvmem_slot *p = NULL, *iter;
87 + bool ret = false;
88 + unsigned long i;
89 +
90 +- list_for_each_entry(p, &kvm->arch.uvmem_pfns, list)
91 +- if (*gfn >= p->base_pfn && *gfn < p->base_pfn + p->nr_pfns)
92 ++ list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
93 ++ if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
94 ++ p = iter;
95 + break;
96 ++ }
97 + if (!p)
98 + return ret;
99 + /*
100 +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
101 +index 6c3d38b5a8add..971609fb15c59 100644
102 +--- a/arch/x86/kernel/kvm.c
103 ++++ b/arch/x86/kernel/kvm.c
104 +@@ -188,7 +188,7 @@ void kvm_async_pf_task_wake(u32 token)
105 + {
106 + u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
107 + struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
108 +- struct kvm_task_sleep_node *n;
109 ++ struct kvm_task_sleep_node *n, *dummy = NULL;
110 +
111 + if (token == ~0) {
112 + apf_task_wake_all();
113 +@@ -200,28 +200,41 @@ again:
114 + n = _find_apf_task(b, token);
115 + if (!n) {
116 + /*
117 +- * async PF was not yet handled.
118 +- * Add dummy entry for the token.
119 ++ * Async #PF not yet handled, add a dummy entry for the token.
120 ++ * Allocating the token must be done outside of the raw lock
121 ++ * as the allocator is preemptible on PREEMPT_RT kernels.
122 + */
123 +- n = kzalloc(sizeof(*n), GFP_ATOMIC);
124 +- if (!n) {
125 ++ if (!dummy) {
126 ++ raw_spin_unlock(&b->lock);
127 ++ dummy = kzalloc(sizeof(*dummy), GFP_ATOMIC);
128 ++
129 + /*
130 +- * Allocation failed! Busy wait while other cpu
131 +- * handles async PF.
132 ++ * Continue looping on allocation failure, eventually
133 ++ * the async #PF will be handled and allocating a new
134 ++ * node will be unnecessary.
135 ++ */
136 ++ if (!dummy)
137 ++ cpu_relax();
138 ++
139 ++ /*
140 ++ * Recheck for async #PF completion before enqueueing
141 ++ * the dummy token to avoid duplicate list entries.
142 + */
143 +- raw_spin_unlock(&b->lock);
144 +- cpu_relax();
145 + goto again;
146 + }
147 +- n->token = token;
148 +- n->cpu = smp_processor_id();
149 +- init_swait_queue_head(&n->wq);
150 +- hlist_add_head(&n->link, &b->list);
151 ++ dummy->token = token;
152 ++ dummy->cpu = smp_processor_id();
153 ++ init_swait_queue_head(&dummy->wq);
154 ++ hlist_add_head(&dummy->link, &b->list);
155 ++ dummy = NULL;
156 + } else {
157 + apf_task_wake_one(n);
158 + }
159 + raw_spin_unlock(&b->lock);
160 +- return;
161 ++
162 ++ /* A dummy token might be allocated and ultimately not used. */
163 ++ if (dummy)
164 ++ kfree(dummy);
165 + }
166 + EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
167 +
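As an aside, the kvm.c hunk above applies a common pattern for allocating while a raw spinlock is held on PREEMPT_RT kernels: drop the lock, allocate, re-take the lock, recheck the condition, and free the object if another path got there first. A minimal user-space sketch of that pattern, with hypothetical names and a pthread mutex standing in for the raw spinlock:

#include <pthread.h>
#include <stdlib.h>

struct node { int token; struct node *next; };

static struct node *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Must be called with the lock held. */
static struct node *find_locked(int token)
{
    struct node *n;

    for (n = head; n; n = n->next)
        if (n->token == token)
            return n;
    return NULL;
}

static void add_dummy(int token)
{
    struct node *dummy = NULL;

    pthread_mutex_lock(&lock);
again:
    if (!find_locked(token)) {
        if (!dummy) {
            /* Allocation may sleep, so drop the lock around it. */
            pthread_mutex_unlock(&lock);
            dummy = malloc(sizeof(*dummy));
            pthread_mutex_lock(&lock);
            /*
             * The list may have changed while unlocked; recheck.  On
             * allocation failure this simply retries, much like the
             * cpu_relax() loop in the hunk above.
             */
            goto again;
        }
        dummy->token = token;
        dummy->next = head;
        head = dummy;
        dummy = NULL;           /* now owned by the list */
    }
    pthread_mutex_unlock(&lock);

    /* Unused if another thread inserted the entry first. */
    free(dummy);
}

int main(void)
{
    add_dummy(42);
    add_dummy(42);      /* second call finds the existing entry, adds nothing */
    return 0;
}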
168 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
169 +index ae18062c26a66..d9cec5daa1fff 100644
170 +--- a/arch/x86/kvm/x86.c
171 ++++ b/arch/x86/kvm/x86.c
172 +@@ -7295,7 +7295,7 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
173 + }
174 + EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
175 +
176 +-static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
177 ++static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, int *r)
178 + {
179 + if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
180 + (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
181 +@@ -7364,25 +7364,23 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
182 + }
183 +
184 + /*
185 +- * Decode to be emulated instruction. Return EMULATION_OK if success.
186 ++ * Decode an instruction for emulation. The caller is responsible for handling
187 ++ * code breakpoints. Note, manually detecting code breakpoints is unnecessary
188 ++ * (and wrong) when emulating on an intercepted fault-like exception[*], as
189 ++ * code breakpoints have higher priority and thus have already been done by
190 ++ * hardware.
191 ++ *
192 ++ * [*] Except #MC, which is higher priority, but KVM should never emulate in
193 ++ * response to a machine check.
194 + */
195 + int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
196 + void *insn, int insn_len)
197 + {
198 +- int r = EMULATION_OK;
199 + struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
200 ++ int r;
201 +
202 + init_emulate_ctxt(vcpu);
203 +
204 +- /*
205 +- * We will reenter on the same instruction since we do not set
206 +- * complete_userspace_io. This does not handle watchpoints yet,
207 +- * those would be handled in the emulate_ops.
208 +- */
209 +- if (!(emulation_type & EMULTYPE_SKIP) &&
210 +- kvm_vcpu_check_breakpoint(vcpu, &r))
211 +- return r;
212 +-
213 + ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
214 +
215 + r = x86_decode_insn(ctxt, insn, insn_len);
216 +@@ -7417,6 +7415,15 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
217 + if (!(emulation_type & EMULTYPE_NO_DECODE)) {
218 + kvm_clear_exception_queue(vcpu);
219 +
220 ++ /*
221 ++ * Return immediately if RIP hits a code breakpoint, such #DBs
222 ++ * are fault-like and are higher priority than any faults on
223 ++ * the code fetch itself.
224 ++ */
225 ++ if (!(emulation_type & EMULTYPE_SKIP) &&
226 ++ kvm_vcpu_check_code_breakpoint(vcpu, &r))
227 ++ return r;
228 ++
229 + r = x86_decode_emulated_instruction(vcpu, emulation_type,
230 + insn, insn_len);
231 + if (r != EMULATION_OK) {
232 +diff --git a/crypto/Kconfig b/crypto/Kconfig
233 +index 0dee9242491cb..c15bfc0e3723a 100644
234 +--- a/crypto/Kconfig
235 ++++ b/crypto/Kconfig
236 +@@ -1941,5 +1941,3 @@ source "crypto/asymmetric_keys/Kconfig"
237 + source "certs/Kconfig"
238 +
239 + endif # if CRYPTO
240 +-
241 +-source "lib/crypto/Kconfig"
242 +diff --git a/crypto/drbg.c b/crypto/drbg.c
243 +index 19ea8d6628ffb..a4b5d6dbe99d3 100644
244 +--- a/crypto/drbg.c
245 ++++ b/crypto/drbg.c
246 +@@ -1035,17 +1035,38 @@ static const struct drbg_state_ops drbg_hash_ops = {
247 + ******************************************************************/
248 +
249 + static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed,
250 +- int reseed)
251 ++ int reseed, enum drbg_seed_state new_seed_state)
252 + {
253 + int ret = drbg->d_ops->update(drbg, seed, reseed);
254 +
255 + if (ret)
256 + return ret;
257 +
258 +- drbg->seeded = true;
259 ++ drbg->seeded = new_seed_state;
260 + /* 10.1.1.2 / 10.1.1.3 step 5 */
261 + drbg->reseed_ctr = 1;
262 +
263 ++ switch (drbg->seeded) {
264 ++ case DRBG_SEED_STATE_UNSEEDED:
265 ++ /* Impossible, but handle it to silence compiler warnings. */
266 ++ fallthrough;
267 ++ case DRBG_SEED_STATE_PARTIAL:
268 ++ /*
269 ++ * Require frequent reseeds until the seed source is
270 ++ * fully initialized.
271 ++ */
272 ++ drbg->reseed_threshold = 50;
273 ++ break;
274 ++
275 ++ case DRBG_SEED_STATE_FULL:
276 ++ /*
277 ++ * Seed source has become fully initialized, frequent
278 ++ * reseeds no longer required.
279 ++ */
280 ++ drbg->reseed_threshold = drbg_max_requests(drbg);
281 ++ break;
282 ++ }
283 ++
284 + return ret;
285 + }
286 +
287 +@@ -1065,12 +1086,10 @@ static inline int drbg_get_random_bytes(struct drbg_state *drbg,
288 + return 0;
289 + }
290 +
291 +-static void drbg_async_seed(struct work_struct *work)
292 ++static int drbg_seed_from_random(struct drbg_state *drbg)
293 + {
294 + struct drbg_string data;
295 + LIST_HEAD(seedlist);
296 +- struct drbg_state *drbg = container_of(work, struct drbg_state,
297 +- seed_work);
298 + unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
299 + unsigned char entropy[32];
300 + int ret;
301 +@@ -1081,26 +1100,15 @@ static void drbg_async_seed(struct work_struct *work)
302 + drbg_string_fill(&data, entropy, entropylen);
303 + list_add_tail(&data.list, &seedlist);
304 +
305 +- mutex_lock(&drbg->drbg_mutex);
306 +-
307 + ret = drbg_get_random_bytes(drbg, entropy, entropylen);
308 + if (ret)
309 +- goto unlock;
310 +-
311 +- /* Set seeded to false so that if __drbg_seed fails the
312 +- * next generate call will trigger a reseed.
313 +- */
314 +- drbg->seeded = false;
315 +-
316 +- __drbg_seed(drbg, &seedlist, true);
317 +-
318 +- if (drbg->seeded)
319 +- drbg->reseed_threshold = drbg_max_requests(drbg);
320 ++ goto out;
321 +
322 +-unlock:
323 +- mutex_unlock(&drbg->drbg_mutex);
324 ++ ret = __drbg_seed(drbg, &seedlist, true, DRBG_SEED_STATE_FULL);
325 +
326 ++out:
327 + memzero_explicit(entropy, entropylen);
328 ++ return ret;
329 + }
330 +
331 + /*
332 +@@ -1122,6 +1130,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
333 + unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
334 + struct drbg_string data1;
335 + LIST_HEAD(seedlist);
336 ++ enum drbg_seed_state new_seed_state = DRBG_SEED_STATE_FULL;
337 +
338 + /* 9.1 / 9.2 / 9.3.1 step 3 */
339 + if (pers && pers->len > (drbg_max_addtl(drbg))) {
340 +@@ -1149,6 +1158,9 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
341 + BUG_ON((entropylen * 2) > sizeof(entropy));
342 +
343 + /* Get seed from in-kernel /dev/urandom */
344 ++ if (!rng_is_initialized())
345 ++ new_seed_state = DRBG_SEED_STATE_PARTIAL;
346 ++
347 + ret = drbg_get_random_bytes(drbg, entropy, entropylen);
348 + if (ret)
349 + goto out;
350 +@@ -1205,7 +1217,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
351 + memset(drbg->C, 0, drbg_statelen(drbg));
352 + }
353 +
354 +- ret = __drbg_seed(drbg, &seedlist, reseed);
355 ++ ret = __drbg_seed(drbg, &seedlist, reseed, new_seed_state);
356 +
357 + out:
358 + memzero_explicit(entropy, entropylen * 2);
359 +@@ -1385,19 +1397,25 @@ static int drbg_generate(struct drbg_state *drbg,
360 + * here. The spec is a bit convoluted here, we make it simpler.
361 + */
362 + if (drbg->reseed_threshold < drbg->reseed_ctr)
363 +- drbg->seeded = false;
364 ++ drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
365 +
366 +- if (drbg->pr || !drbg->seeded) {
367 ++ if (drbg->pr || drbg->seeded == DRBG_SEED_STATE_UNSEEDED) {
368 + pr_devel("DRBG: reseeding before generation (prediction "
369 + "resistance: %s, state %s)\n",
370 + drbg->pr ? "true" : "false",
371 +- drbg->seeded ? "seeded" : "unseeded");
372 ++ (drbg->seeded == DRBG_SEED_STATE_FULL ?
373 ++ "seeded" : "unseeded"));
374 + /* 9.3.1 steps 7.1 through 7.3 */
375 + len = drbg_seed(drbg, addtl, true);
376 + if (len)
377 + goto err;
378 + /* 9.3.1 step 7.4 */
379 + addtl = NULL;
380 ++ } else if (rng_is_initialized() &&
381 ++ drbg->seeded == DRBG_SEED_STATE_PARTIAL) {
382 ++ len = drbg_seed_from_random(drbg);
383 ++ if (len)
384 ++ goto err;
385 + }
386 +
387 + if (addtl && 0 < addtl->len)
388 +@@ -1490,50 +1508,15 @@ static int drbg_generate_long(struct drbg_state *drbg,
389 + return 0;
390 + }
391 +
392 +-static int drbg_schedule_async_seed(struct notifier_block *nb, unsigned long action, void *data)
393 +-{
394 +- struct drbg_state *drbg = container_of(nb, struct drbg_state,
395 +- random_ready);
396 +-
397 +- schedule_work(&drbg->seed_work);
398 +- return 0;
399 +-}
400 +-
401 + static int drbg_prepare_hrng(struct drbg_state *drbg)
402 + {
403 +- int err;
404 +-
405 + /* We do not need an HRNG in test mode. */
406 + if (list_empty(&drbg->test_data.list))
407 + return 0;
408 +
409 + drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0);
410 +
411 +- INIT_WORK(&drbg->seed_work, drbg_async_seed);
412 +-
413 +- drbg->random_ready.notifier_call = drbg_schedule_async_seed;
414 +- err = register_random_ready_notifier(&drbg->random_ready);
415 +-
416 +- switch (err) {
417 +- case 0:
418 +- break;
419 +-
420 +- case -EALREADY:
421 +- err = 0;
422 +- fallthrough;
423 +-
424 +- default:
425 +- drbg->random_ready.notifier_call = NULL;
426 +- return err;
427 +- }
428 +-
429 +- /*
430 +- * Require frequent reseeds until the seed source is fully
431 +- * initialized.
432 +- */
433 +- drbg->reseed_threshold = 50;
434 +-
435 +- return err;
436 ++ return 0;
437 + }
438 +
439 + /*
440 +@@ -1576,7 +1559,7 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
441 + if (!drbg->core) {
442 + drbg->core = &drbg_cores[coreref];
443 + drbg->pr = pr;
444 +- drbg->seeded = false;
445 ++ drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
446 + drbg->reseed_threshold = drbg_max_requests(drbg);
447 +
448 + ret = drbg_alloc_state(drbg);
449 +@@ -1627,11 +1610,6 @@ free_everything:
450 + */
451 + static int drbg_uninstantiate(struct drbg_state *drbg)
452 + {
453 +- if (drbg->random_ready.notifier_call) {
454 +- unregister_random_ready_notifier(&drbg->random_ready);
455 +- cancel_work_sync(&drbg->seed_work);
456 +- }
457 +-
458 + if (!IS_ERR_OR_NULL(drbg->jent))
459 + crypto_free_rng(drbg->jent);
460 + drbg->jent = NULL;
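To restate the drbg.c rework above in isolation: the async seeding worker and random-ready notifier are replaced by an explicit seed state, and the reseed threshold now follows that state (frequent reseeds while only partially seeded, the normal maximum once fully seeded). A standalone sketch of that decision logic, using hypothetical types rather than the kernel's drbg_state:

#include <stdbool.h>
#include <stdio.h>

enum seed_state { SEED_UNSEEDED, SEED_PARTIAL, SEED_FULL };

struct drbg {
    enum seed_state seeded;
    unsigned long reseed_threshold;
    unsigned long max_requests;
};

/* Mirrors the switch added to __drbg_seed(): pick the reseed threshold. */
static void apply_seed_state(struct drbg *d, enum seed_state new_state)
{
    d->seeded = new_state;

    switch (d->seeded) {
    case SEED_UNSEEDED:         /* should not happen after seeding */
    case SEED_PARTIAL:
        /* Entropy source not fully ready yet: reseed often. */
        d->reseed_threshold = 50;
        break;
    case SEED_FULL:
        /* Fully seeded: fall back to the normal large threshold. */
        d->reseed_threshold = d->max_requests;
        break;
    }
}

int main(void)
{
    struct drbg d = { .max_requests = 1UL << 20 };
    bool rng_ready = false;     /* stand-in for rng_is_initialized() */

    /* First seeding happens before the kernel RNG is ready: only partial. */
    apply_seed_state(&d, rng_ready ? SEED_FULL : SEED_PARTIAL);
    printf("after first seed: threshold %lu\n", d.reseed_threshold);

    /* A later reseed, once the RNG is ready, upgrades to fully seeded. */
    rng_ready = true;
    apply_seed_state(&d, rng_ready ? SEED_FULL : SEED_PARTIAL);
    printf("after reseed:     threshold %lu\n", d.reseed_threshold);
    return 0;
}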
461 +diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c
462 +index 6a3fd09057d0c..f7ed430206720 100644
463 +--- a/crypto/ecrdsa.c
464 ++++ b/crypto/ecrdsa.c
465 +@@ -113,15 +113,15 @@ static int ecrdsa_verify(struct akcipher_request *req)
466 +
467 + /* Step 1: verify that 0 < r < q, 0 < s < q */
468 + if (vli_is_zero(r, ndigits) ||
469 +- vli_cmp(r, ctx->curve->n, ndigits) == 1 ||
470 ++ vli_cmp(r, ctx->curve->n, ndigits) >= 0 ||
471 + vli_is_zero(s, ndigits) ||
472 +- vli_cmp(s, ctx->curve->n, ndigits) == 1)
473 ++ vli_cmp(s, ctx->curve->n, ndigits) >= 0)
474 + return -EKEYREJECTED;
475 +
476 + /* Step 2: calculate hash (h) of the message (passed as input) */
477 + /* Step 3: calculate e = h \mod q */
478 + vli_from_le64(e, digest, ndigits);
479 +- if (vli_cmp(e, ctx->curve->n, ndigits) == 1)
480 ++ if (vli_cmp(e, ctx->curve->n, ndigits) >= 0)
481 + vli_sub(e, e, ctx->curve->n, ndigits);
482 + if (vli_is_zero(e, ndigits))
483 + e[0] = 1;
484 +@@ -137,7 +137,7 @@ static int ecrdsa_verify(struct akcipher_request *req)
485 + /* Step 6: calculate point C = z_1P + z_2Q, and R = x_c \mod q */
486 + ecc_point_mult_shamir(&cc, z1, &ctx->curve->g, z2, &ctx->pub_key,
487 + ctx->curve);
488 +- if (vli_cmp(cc.x, ctx->curve->n, ndigits) == 1)
489 ++ if (vli_cmp(cc.x, ctx->curve->n, ndigits) >= 0)
490 + vli_sub(cc.x, cc.x, ctx->curve->n, ndigits);
491 +
492 + /* Step 7: if R == r signature is valid */
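Worth spelling out for the ecrdsa fix above: vli_cmp() is a three-way compare returning -1, 0 or 1, so testing "== 1" only rejects values strictly greater than the modulus and lets a value equal to it slip through, while ">= 0" enforces the intended 0 < r < q range check. The same idea over plain integers, with a hypothetical cmp() helper:

#include <stdbool.h>
#include <stdio.h>

/* Three-way compare, like vli_cmp(): returns -1, 0 or 1. */
static int cmp(unsigned long a, unsigned long b)
{
    return (a > b) - (a < b);
}

/* Step 1 of the verification: require 0 < v < q. */
static bool in_range(unsigned long v, unsigned long q)
{
    if (v == 0)
        return false;
    return cmp(v, q) < 0;       /* i.e. reject when cmp(v, q) >= 0 */
}

int main(void)
{
    unsigned long q = 97;

    printf("v=0:  %s\n", in_range(0, q)  ? "ok" : "rejected");
    printf("v=96: %s\n", in_range(96, q) ? "ok" : "rejected");
    printf("v=97: %s\n", in_range(97, q) ? "ok" : "rejected"); /* == q: the old '== 1' test missed this */
    printf("v=98: %s\n", in_range(98, q) ? "ok" : "rejected");
    return 0;
}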
493 +diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
494 +index dc7ee5dd2eeca..eea18aed17f8a 100644
495 +--- a/drivers/bluetooth/hci_qca.c
496 ++++ b/drivers/bluetooth/hci_qca.c
497 +@@ -689,9 +689,9 @@ static int qca_close(struct hci_uart *hu)
498 + skb_queue_purge(&qca->tx_wait_q);
499 + skb_queue_purge(&qca->txq);
500 + skb_queue_purge(&qca->rx_memdump_q);
501 +- del_timer(&qca->tx_idle_timer);
502 +- del_timer(&qca->wake_retrans_timer);
503 + destroy_workqueue(qca->workqueue);
504 ++ del_timer_sync(&qca->tx_idle_timer);
505 ++ del_timer_sync(&qca->wake_retrans_timer);
506 + qca->hu = NULL;
507 +
508 + kfree_skb(qca->rx_skb);
509 +diff --git a/drivers/char/random.c b/drivers/char/random.c
510 +index 00b50ccc9fae6..c206db96f60a1 100644
511 +--- a/drivers/char/random.c
512 ++++ b/drivers/char/random.c
513 +@@ -163,7 +163,6 @@ int __cold register_random_ready_notifier(struct notifier_block *nb)
514 + spin_unlock_irqrestore(&random_ready_chain_lock, flags);
515 + return ret;
516 + }
517 +-EXPORT_SYMBOL(register_random_ready_notifier);
518 +
519 + /*
520 + * Delete a previously registered readiness callback function.
521 +@@ -178,7 +177,6 @@ int __cold unregister_random_ready_notifier(struct notifier_block *nb)
522 + spin_unlock_irqrestore(&random_ready_chain_lock, flags);
523 + return ret;
524 + }
525 +-EXPORT_SYMBOL(unregister_random_ready_notifier);
526 +
527 + static void __cold process_random_ready_list(void)
528 + {
529 +diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
530 +index c84d239512197..d0e11d7a3c08b 100644
531 +--- a/drivers/char/tpm/tpm2-cmd.c
532 ++++ b/drivers/char/tpm/tpm2-cmd.c
533 +@@ -400,7 +400,16 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
534 + if (!rc) {
535 + out = (struct tpm2_get_cap_out *)
536 + &buf.data[TPM_HEADER_SIZE];
537 +- *value = be32_to_cpu(out->value);
538 ++ /*
539 ++ * To prevent failing boot up of some systems, Infineon TPM2.0
540 ++ * returns SUCCESS on TPM2_Startup in field upgrade mode. Also
541 ++ * the TPM2_Getcapability command returns a zero length list
542 ++ * in field upgrade mode.
543 ++ */
544 ++ if (be32_to_cpu(out->property_cnt) > 0)
545 ++ *value = be32_to_cpu(out->value);
546 ++ else
547 ++ rc = -ENODATA;
548 + }
549 + tpm_buf_destroy(&buf);
550 + return rc;
551 +diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
552 +index 3ca7528322f53..a1ec722d62a74 100644
553 +--- a/drivers/char/tpm/tpm_ibmvtpm.c
554 ++++ b/drivers/char/tpm/tpm_ibmvtpm.c
555 +@@ -683,6 +683,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
556 + if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
557 + ibmvtpm->rtce_buf != NULL,
558 + HZ)) {
559 ++ rc = -ENODEV;
560 + dev_err(dev, "CRQ response timed out\n");
561 + goto init_irq_cleanup;
562 + }
563 +diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
564 +index ca0361b2dbb07..f87aa2169e5f5 100644
565 +--- a/drivers/crypto/caam/ctrl.c
566 ++++ b/drivers/crypto/caam/ctrl.c
567 +@@ -609,6 +609,13 @@ static bool check_version(struct fsl_mc_version *mc_version, u32 major,
568 + }
569 + #endif
570 +
571 ++static bool needs_entropy_delay_adjustment(void)
572 ++{
573 ++ if (of_machine_is_compatible("fsl,imx6sx"))
574 ++ return true;
575 ++ return false;
576 ++}
577 ++
578 + /* Probe routine for CAAM top (controller) level */
579 + static int caam_probe(struct platform_device *pdev)
580 + {
581 +@@ -855,6 +862,8 @@ static int caam_probe(struct platform_device *pdev)
582 + * Also, if a handle was instantiated, do not change
583 + * the TRNG parameters.
584 + */
585 ++ if (needs_entropy_delay_adjustment())
586 ++ ent_delay = 12000;
587 + if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
588 + dev_info(dev,
589 + "Entropy delay = %u\n",
590 +@@ -871,6 +880,15 @@ static int caam_probe(struct platform_device *pdev)
591 + */
592 + ret = instantiate_rng(dev, inst_handles,
593 + gen_sk);
594 ++ /*
595 ++ * Entropy delay is determined via TRNG characterization.
596 ++ * TRNG characterization is run across different voltages
597 ++ * and temperatures.
598 ++ * If worst case value for ent_dly is identified,
599 ++ * the loop can be skipped for that platform.
600 ++ */
601 ++ if (needs_entropy_delay_adjustment())
602 ++ break;
603 + if (ret == -EAGAIN)
604 + /*
605 + * if here, the loop will rerun,
606 +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
607 +index 472aaea75ef84..2f2dc029668bc 100644
608 +--- a/drivers/gpu/drm/i915/intel_pm.c
609 ++++ b/drivers/gpu/drm/i915/intel_pm.c
610 +@@ -2846,7 +2846,7 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
611 + }
612 +
613 + static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
614 +- u16 wm[8])
615 ++ u16 wm[])
616 + {
617 + struct intel_uncore *uncore = &dev_priv->uncore;
618 +
619 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
620 +index d2e4f9f5507d5..3744c3db51405 100644
621 +--- a/drivers/hid/hid-ids.h
622 ++++ b/drivers/hid/hid-ids.h
623 +@@ -743,6 +743,7 @@
624 + #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
625 + #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
626 + #define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
627 ++#define USB_DEVICE_ID_LENOVO_X12_TAB 0x60fe
628 + #define USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E 0x600e
629 + #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
630 + #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019
631 +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
632 +index e5a3704b9fe8f..d686917cc3b1f 100644
633 +--- a/drivers/hid/hid-multitouch.c
634 ++++ b/drivers/hid/hid-multitouch.c
635 +@@ -1990,6 +1990,12 @@ static const struct hid_device_id mt_devices[] = {
636 + USB_VENDOR_ID_LENOVO,
637 + USB_DEVICE_ID_LENOVO_X1_TAB3) },
638 +
639 ++ /* Lenovo X12 TAB Gen 1 */
640 ++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
641 ++ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
642 ++ USB_VENDOR_ID_LENOVO,
643 ++ USB_DEVICE_ID_LENOVO_X12_TAB) },
644 ++
645 + /* MosArt panels */
646 + { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
647 + MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
648 +@@ -2129,6 +2135,9 @@ static const struct hid_device_id mt_devices[] = {
649 + { .driver_data = MT_CLS_GOOGLE,
650 + HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_GOOGLE,
651 + USB_DEVICE_ID_GOOGLE_TOUCH_ROSE) },
652 ++ { .driver_data = MT_CLS_GOOGLE,
653 ++ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_GOOGLE,
654 ++ USB_DEVICE_ID_GOOGLE_WHISKERS) },
655 +
656 + /* Generic MT device */
657 + { HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
658 +diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
659 +index a35a27c320e7b..3d2d92640651e 100644
660 +--- a/drivers/i2c/busses/i2c-ismt.c
661 ++++ b/drivers/i2c/busses/i2c-ismt.c
662 +@@ -82,6 +82,7 @@
663 +
664 + #define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */
665 + #define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */
666 ++#define ISMT_LOG_ENTRIES 3 /* number of interrupt cause log entries */
667 +
668 + /* Hardware Descriptor Constants - Control Field */
669 + #define ISMT_DESC_CWRL 0x01 /* Command/Write Length */
670 +@@ -175,6 +176,8 @@ struct ismt_priv {
671 + u8 head; /* ring buffer head pointer */
672 + struct completion cmp; /* interrupt completion */
673 + u8 buffer[I2C_SMBUS_BLOCK_MAX + 16]; /* temp R/W data buffer */
674 ++ dma_addr_t log_dma;
675 ++ u32 *log;
676 + };
677 +
678 + static const struct pci_device_id ismt_ids[] = {
679 +@@ -409,6 +412,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
680 + memset(desc, 0, sizeof(struct ismt_desc));
681 + desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
682 +
683 ++ /* Always clear the log entries */
684 ++ memset(priv->log, 0, ISMT_LOG_ENTRIES * sizeof(u32));
685 ++
686 + /* Initialize common control bits */
687 + if (likely(pci_dev_msi_enabled(priv->pci_dev)))
688 + desc->control = ISMT_DESC_INT | ISMT_DESC_FAIR;
689 +@@ -693,6 +699,8 @@ static void ismt_hw_init(struct ismt_priv *priv)
690 + /* initialize the Master Descriptor Base Address (MDBA) */
691 + writeq(priv->io_rng_dma, priv->smba + ISMT_MSTR_MDBA);
692 +
693 ++ writeq(priv->log_dma, priv->smba + ISMT_GR_SMTICL);
694 ++
695 + /* initialize the Master Control Register (MCTRL) */
696 + writel(ISMT_MCTRL_MEIE, priv->smba + ISMT_MSTR_MCTRL);
697 +
698 +@@ -780,6 +788,12 @@ static int ismt_dev_init(struct ismt_priv *priv)
699 + priv->head = 0;
700 + init_completion(&priv->cmp);
701 +
702 ++ priv->log = dmam_alloc_coherent(&priv->pci_dev->dev,
703 ++ ISMT_LOG_ENTRIES * sizeof(u32),
704 ++ &priv->log_dma, GFP_KERNEL);
705 ++ if (!priv->log)
706 ++ return -ENOMEM;
707 ++
708 + return 0;
709 + }
710 +
711 +diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
712 +index 12c90aa0900e6..a77cd86fe75ed 100644
713 +--- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c
714 ++++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
715 +@@ -213,6 +213,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
716 + i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info;
717 + i2c->adap.dev.parent = dev;
718 + i2c->adap.dev.of_node = pdev->dev.of_node;
719 ++ i2c->adap.dev.fwnode = dev->fwnode;
720 + snprintf(i2c->adap.name, sizeof(i2c->adap.name),
721 + "Cavium ThunderX i2c adapter at %s", dev_name(dev));
722 + i2c_set_adapdata(&i2c->adap, i2c);
723 +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
724 +index b9677f701b6a1..3d975db86434f 100644
725 +--- a/drivers/md/dm-crypt.c
726 ++++ b/drivers/md/dm-crypt.c
727 +@@ -3404,6 +3404,11 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
728 + return DM_MAPIO_SUBMITTED;
729 + }
730 +
731 ++static char hex2asc(unsigned char c)
732 ++{
733 ++ return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
734 ++}
735 ++
736 + static void crypt_status(struct dm_target *ti, status_type_t type,
737 + unsigned status_flags, char *result, unsigned maxlen)
738 + {
739 +@@ -3422,9 +3427,12 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
740 + if (cc->key_size > 0) {
741 + if (cc->key_string)
742 + DMEMIT(":%u:%s", cc->key_size, cc->key_string);
743 +- else
744 +- for (i = 0; i < cc->key_size; i++)
745 +- DMEMIT("%02x", cc->key[i]);
746 ++ else {
747 ++ for (i = 0; i < cc->key_size; i++) {
748 ++ DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
749 ++ hex2asc(cc->key[i] & 0xf));
750 ++ }
751 ++ }
752 + } else
753 + DMEMIT("-");
754 +
755 +diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
756 +index 6f085e96c3f33..835b1f3464d06 100644
757 +--- a/drivers/md/dm-integrity.c
758 ++++ b/drivers/md/dm-integrity.c
759 +@@ -4327,8 +4327,6 @@ try_smaller_buffer:
760 + }
761 +
762 + if (should_write_sb) {
763 +- int r;
764 +-
765 + init_journal(ic, 0, ic->journal_sections, 0);
766 + r = dm_integrity_failed(ic);
767 + if (unlikely(r)) {
768 +diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
769 +index 35d368c418d03..55443a6598fa6 100644
770 +--- a/drivers/md/dm-stats.c
771 ++++ b/drivers/md/dm-stats.c
772 +@@ -224,6 +224,7 @@ void dm_stats_cleanup(struct dm_stats *stats)
773 + atomic_read(&shared->in_flight[READ]),
774 + atomic_read(&shared->in_flight[WRITE]));
775 + }
776 ++ cond_resched();
777 + }
778 + dm_stat_free(&s->rcu_head);
779 + }
780 +@@ -313,6 +314,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
781 + for (ni = 0; ni < n_entries; ni++) {
782 + atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
783 + atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
784 ++ cond_resched();
785 + }
786 +
787 + if (s->n_histogram_entries) {
788 +@@ -325,6 +327,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
789 + for (ni = 0; ni < n_entries; ni++) {
790 + s->stat_shared[ni].tmp.histogram = hi;
791 + hi += s->n_histogram_entries + 1;
792 ++ cond_resched();
793 + }
794 + }
795 +
796 +@@ -345,6 +348,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
797 + for (ni = 0; ni < n_entries; ni++) {
798 + p[ni].histogram = hi;
799 + hi += s->n_histogram_entries + 1;
800 ++ cond_resched();
801 + }
802 + }
803 + }
804 +@@ -474,6 +478,7 @@ static int dm_stats_list(struct dm_stats *stats, const char *program,
805 + }
806 + DMEMIT("\n");
807 + }
808 ++ cond_resched();
809 + }
810 + mutex_unlock(&stats->mutex);
811 +
812 +@@ -750,6 +755,7 @@ static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
813 + local_irq_enable();
814 + }
815 + }
816 ++ cond_resched();
817 + }
818 + }
819 +
820 +@@ -865,6 +871,8 @@ static int dm_stats_print(struct dm_stats *stats, int id,
821 +
822 + if (unlikely(sz + 1 >= maxlen))
823 + goto buffer_overflow;
824 ++
825 ++ cond_resched();
826 + }
827 +
828 + if (clear)
829 +diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
830 +index 808a98ef624c3..c801f6b93b7b4 100644
831 +--- a/drivers/md/dm-verity-target.c
832 ++++ b/drivers/md/dm-verity-target.c
833 +@@ -1242,6 +1242,7 @@ bad:
834 +
835 + static struct target_type verity_target = {
836 + .name = "verity",
837 ++ .features = DM_TARGET_IMMUTABLE,
838 + .version = {1, 7, 0},
839 + .module = THIS_MODULE,
840 + .ctr = verity_ctr,
841 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
842 +index c82953a3299e2..02767866b9ff6 100644
843 +--- a/drivers/md/raid5.c
844 ++++ b/drivers/md/raid5.c
845 +@@ -686,17 +686,17 @@ int raid5_calc_degraded(struct r5conf *conf)
846 + return degraded;
847 + }
848 +
849 +-static int has_failed(struct r5conf *conf)
850 ++static bool has_failed(struct r5conf *conf)
851 + {
852 +- int degraded;
853 ++ int degraded = conf->mddev->degraded;
854 +
855 +- if (conf->mddev->reshape_position == MaxSector)
856 +- return conf->mddev->degraded > conf->max_degraded;
857 ++ if (test_bit(MD_BROKEN, &conf->mddev->flags))
858 ++ return true;
859 +
860 +- degraded = raid5_calc_degraded(conf);
861 +- if (degraded > conf->max_degraded)
862 +- return 1;
863 +- return 0;
864 ++ if (conf->mddev->reshape_position != MaxSector)
865 ++ degraded = raid5_calc_degraded(conf);
866 ++
867 ++ return degraded > conf->max_degraded;
868 + }
869 +
870 + struct stripe_head *
871 +@@ -2877,34 +2877,31 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
872 + unsigned long flags;
873 + pr_debug("raid456: error called\n");
874 +
875 ++ pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n",
876 ++ mdname(mddev), bdevname(rdev->bdev, b));
877 ++
878 + spin_lock_irqsave(&conf->device_lock, flags);
879 ++ set_bit(Faulty, &rdev->flags);
880 ++ clear_bit(In_sync, &rdev->flags);
881 ++ mddev->degraded = raid5_calc_degraded(conf);
882 +
883 +- if (test_bit(In_sync, &rdev->flags) &&
884 +- mddev->degraded == conf->max_degraded) {
885 +- /*
886 +- * Don't allow to achieve failed state
887 +- * Don't try to recover this device
888 +- */
889 ++ if (has_failed(conf)) {
890 ++ set_bit(MD_BROKEN, &conf->mddev->flags);
891 + conf->recovery_disabled = mddev->recovery_disabled;
892 +- spin_unlock_irqrestore(&conf->device_lock, flags);
893 +- return;
894 ++
895 ++ pr_crit("md/raid:%s: Cannot continue operation (%d/%d failed).\n",
896 ++ mdname(mddev), mddev->degraded, conf->raid_disks);
897 ++ } else {
898 ++ pr_crit("md/raid:%s: Operation continuing on %d devices.\n",
899 ++ mdname(mddev), conf->raid_disks - mddev->degraded);
900 + }
901 +
902 +- set_bit(Faulty, &rdev->flags);
903 +- clear_bit(In_sync, &rdev->flags);
904 +- mddev->degraded = raid5_calc_degraded(conf);
905 + spin_unlock_irqrestore(&conf->device_lock, flags);
906 + set_bit(MD_RECOVERY_INTR, &mddev->recovery);
907 +
908 + set_bit(Blocked, &rdev->flags);
909 + set_mask_bits(&mddev->sb_flags, 0,
910 + BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
911 +- pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
912 +- "md/raid:%s: Operation continuing on %d devices.\n",
913 +- mdname(mddev),
914 +- bdevname(rdev->bdev, b),
915 +- mdname(mddev),
916 +- conf->raid_disks - mddev->degraded);
917 + r5c_update_on_rdev_error(mddev, rdev);
918 + }
919 +
920 +diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
921 +index 5bc11d1bb9df8..eea4bd3116e8d 100644
922 +--- a/drivers/net/ethernet/faraday/ftgmac100.c
923 ++++ b/drivers/net/ethernet/faraday/ftgmac100.c
924 +@@ -1893,6 +1893,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
925 + /* AST2400 doesn't have working HW checksum generation */
926 + if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
927 + netdev->hw_features &= ~NETIF_F_HW_CSUM;
928 ++
929 ++ /* AST2600 tx checksum with NCSI is broken */
930 ++ if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac"))
931 ++ netdev->hw_features &= ~NETIF_F_HW_CSUM;
932 ++
933 + if (np && of_get_property(np, "no-hw-checksum", NULL))
934 + netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
935 + netdev->features |= netdev->hw_features;
936 +diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
937 +index 621648ce750b7..eb25a13042ea9 100644
938 +--- a/drivers/net/ipa/ipa_endpoint.c
939 ++++ b/drivers/net/ipa/ipa_endpoint.c
940 +@@ -610,12 +610,14 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
941 +
942 + if (endpoint->data->aggregation) {
943 + if (!endpoint->toward_ipa) {
944 ++ u32 buffer_size;
945 + u32 limit;
946 +
947 + val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
948 + val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
949 +
950 +- limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
951 ++ buffer_size = IPA_RX_BUFFER_SIZE - NET_SKB_PAD;
952 ++ limit = ipa_aggr_size_kb(buffer_size);
953 + val |= u32_encode_bits(limit, AGGR_BYTE_LIMIT_FMASK);
954 +
955 + limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
956 +diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
957 +index d2c0116157759..8d7e29d953b7e 100644
958 +--- a/drivers/nfc/pn533/pn533.c
959 ++++ b/drivers/nfc/pn533/pn533.c
960 +@@ -2844,13 +2844,14 @@ void pn53x_common_clean(struct pn533 *priv)
961 + {
962 + struct pn533_cmd *cmd, *n;
963 +
964 ++ /* delete the timer before cleanup the worker */
965 ++ del_timer_sync(&priv->listen_timer);
966 ++
967 + flush_delayed_work(&priv->poll_work);
968 + destroy_workqueue(priv->wq);
969 +
970 + skb_queue_purge(&priv->resp_q);
971 +
972 +- del_timer(&priv->listen_timer);
973 +-
974 + list_for_each_entry_safe(cmd, n, &priv->cmd_queue, queue) {
975 + list_del(&cmd->queue);
976 + kfree(cmd);
977 +diff --git a/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c b/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
978 +index 2801ca7062732..68a5b627fb9b2 100644
979 +--- a/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
980 ++++ b/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
981 +@@ -204,7 +204,7 @@ static const struct sunxi_desc_pin suniv_f1c100s_pins[] = {
982 + SUNXI_FUNCTION(0x0, "gpio_in"),
983 + SUNXI_FUNCTION(0x1, "gpio_out"),
984 + SUNXI_FUNCTION(0x2, "lcd"), /* D20 */
985 +- SUNXI_FUNCTION(0x3, "lvds1"), /* RX */
986 ++ SUNXI_FUNCTION(0x3, "uart2"), /* RX */
987 + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)),
988 + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
989 + SUNXI_FUNCTION(0x0, "gpio_in"),
990 +diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
991 +index 579c10f57c2b0..258b6bb5762a4 100644
992 +--- a/fs/exfat/balloc.c
993 ++++ b/fs/exfat/balloc.c
994 +@@ -148,7 +148,9 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu)
995 + struct super_block *sb = inode->i_sb;
996 + struct exfat_sb_info *sbi = EXFAT_SB(sb);
997 +
998 +- WARN_ON(clu < EXFAT_FIRST_CLUSTER);
999 ++ if (!is_valid_cluster(sbi, clu))
1000 ++ return -EINVAL;
1001 ++
1002 + ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
1003 + i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
1004 + b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
1005 +@@ -166,7 +168,9 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu)
1006 + struct exfat_sb_info *sbi = EXFAT_SB(sb);
1007 + struct exfat_mount_options *opts = &sbi->options;
1008 +
1009 +- WARN_ON(clu < EXFAT_FIRST_CLUSTER);
1010 ++ if (!is_valid_cluster(sbi, clu))
1011 ++ return;
1012 ++
1013 + ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
1014 + i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
1015 + b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
1016 +diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
1017 +index b8f0e829ecbd2..0d139c7d150d9 100644
1018 +--- a/fs/exfat/exfat_fs.h
1019 ++++ b/fs/exfat/exfat_fs.h
1020 +@@ -380,6 +380,14 @@ static inline int exfat_sector_to_cluster(struct exfat_sb_info *sbi,
1021 + EXFAT_RESERVED_CLUSTERS;
1022 + }
1023 +
1024 ++static inline bool is_valid_cluster(struct exfat_sb_info *sbi,
1025 ++ unsigned int clus)
1026 ++{
1027 ++ if (clus < EXFAT_FIRST_CLUSTER || sbi->num_clusters <= clus)
1028 ++ return false;
1029 ++ return true;
1030 ++}
1031 ++
1032 + /* super.c */
1033 + int exfat_set_volume_dirty(struct super_block *sb);
1034 + int exfat_clear_volume_dirty(struct super_block *sb);
1035 +diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
1036 +index c3c9afee7418f..a1481e47a7616 100644
1037 +--- a/fs/exfat/fatent.c
1038 ++++ b/fs/exfat/fatent.c
1039 +@@ -81,14 +81,6 @@ int exfat_ent_set(struct super_block *sb, unsigned int loc,
1040 + return 0;
1041 + }
1042 +
1043 +-static inline bool is_valid_cluster(struct exfat_sb_info *sbi,
1044 +- unsigned int clus)
1045 +-{
1046 +- if (clus < EXFAT_FIRST_CLUSTER || sbi->num_clusters <= clus)
1047 +- return false;
1048 +- return true;
1049 +-}
1050 +-
1051 + int exfat_ent_get(struct super_block *sb, unsigned int loc,
1052 + unsigned int *content)
1053 + {
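For reference on the exfat changes above: the WARN_ON calls become real range checks, so a cluster number is validated against [EXFAT_FIRST_CLUSTER, num_clusters) before it is turned into a bitmap index. A small sketch of the same guard around an index computation (hypothetical structures and mapping; exFAT numbers data clusters from 2):

#include <stdbool.h>
#include <stdio.h>

#define FIRST_CLUSTER   2U      /* exFAT data clusters start at 2 */

struct sb_info {
    unsigned int num_clusters;
};

static bool is_valid_cluster(const struct sb_info *sbi, unsigned int clu)
{
    return clu >= FIRST_CLUSTER && clu < sbi->num_clusters;
}

/* Set a bit in the allocation bitmap only for in-range clusters. */
static int set_cluster_bit(const struct sb_info *sbi, unsigned char *bitmap,
                           unsigned int clu)
{
    unsigned int ent;

    if (!is_valid_cluster(sbi, clu))
        return -1;              /* the kernel change returns -EINVAL here */

    ent = clu - FIRST_CLUSTER;  /* bitmap bit 0 corresponds to cluster 2 */
    bitmap[ent / 8] |= 1U << (ent % 8);
    return 0;
}

int main(void)
{
    struct sb_info sbi = { .num_clusters = 16 };
    unsigned char bitmap[2] = { 0 };

    printf("clu 5:  %d\n", set_cluster_bit(&sbi, bitmap, 5));   /* 0, in range */
    printf("clu 0:  %d\n", set_cluster_bit(&sbi, bitmap, 0));   /* rejected    */
    printf("clu 16: %d\n", set_cluster_bit(&sbi, bitmap, 16));  /* rejected    */
    return 0;
}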
1054 +diff --git a/fs/io_uring.c b/fs/io_uring.c
1055 +index 3ecf71151fb1f..871475d3fca2c 100644
1056 +--- a/fs/io_uring.c
1057 ++++ b/fs/io_uring.c
1058 +@@ -2579,45 +2579,6 @@ static void io_complete_rw_common(struct kiocb *kiocb, long res,
1059 + #ifdef CONFIG_BLOCK
1060 + static bool io_resubmit_prep(struct io_kiocb *req, int error)
1061 + {
1062 +- struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1063 +- ssize_t ret = -ECANCELED;
1064 +- struct iov_iter iter;
1065 +- int rw;
1066 +-
1067 +- if (error) {
1068 +- ret = error;
1069 +- goto end_req;
1070 +- }
1071 +-
1072 +- switch (req->opcode) {
1073 +- case IORING_OP_READV:
1074 +- case IORING_OP_READ_FIXED:
1075 +- case IORING_OP_READ:
1076 +- rw = READ;
1077 +- break;
1078 +- case IORING_OP_WRITEV:
1079 +- case IORING_OP_WRITE_FIXED:
1080 +- case IORING_OP_WRITE:
1081 +- rw = WRITE;
1082 +- break;
1083 +- default:
1084 +- printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n",
1085 +- req->opcode);
1086 +- goto end_req;
1087 +- }
1088 +-
1089 +- if (!req->async_data) {
1090 +- ret = io_import_iovec(rw, req, &iovec, &iter, false);
1091 +- if (ret < 0)
1092 +- goto end_req;
1093 +- ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
1094 +- if (!ret)
1095 +- return true;
1096 +- kfree(iovec);
1097 +- } else {
1098 +- return true;
1099 +- }
1100 +-end_req:
1101 + req_set_fail_links(req);
1102 + return false;
1103 + }
1104 +@@ -3428,6 +3389,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
1105 + struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1106 + struct kiocb *kiocb = &req->rw.kiocb;
1107 + struct iov_iter __iter, *iter = &__iter;
1108 ++ struct iov_iter iter_cp;
1109 + struct io_async_rw *rw = req->async_data;
1110 + ssize_t io_size, ret, ret2;
1111 + bool no_async;
1112 +@@ -3438,6 +3400,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
1113 + ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
1114 + if (ret < 0)
1115 + return ret;
1116 ++ iter_cp = *iter;
1117 + io_size = iov_iter_count(iter);
1118 + req->result = io_size;
1119 + ret = 0;
1120 +@@ -3473,7 +3436,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
1121 + if (req->file->f_flags & O_NONBLOCK)
1122 + goto done;
1123 + /* some cases will consume bytes even on error returns */
1124 +- iov_iter_revert(iter, io_size - iov_iter_count(iter));
1125 ++ *iter = iter_cp;
1126 + ret = 0;
1127 + goto copy_iov;
1128 + } else if (ret < 0) {
1129 +@@ -3556,6 +3519,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
1130 + struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1131 + struct kiocb *kiocb = &req->rw.kiocb;
1132 + struct iov_iter __iter, *iter = &__iter;
1133 ++ struct iov_iter iter_cp;
1134 + struct io_async_rw *rw = req->async_data;
1135 + ssize_t ret, ret2, io_size;
1136 +
1137 +@@ -3565,6 +3529,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
1138 + ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
1139 + if (ret < 0)
1140 + return ret;
1141 ++ iter_cp = *iter;
1142 + io_size = iov_iter_count(iter);
1143 + req->result = io_size;
1144 +
1145 +@@ -3626,7 +3591,7 @@ done:
1146 + } else {
1147 + copy_iov:
1148 + /* some cases will consume bytes even on error returns */
1149 +- iov_iter_revert(iter, io_size - iov_iter_count(iter));
1150 ++ *iter = iter_cp;
1151 + ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
1152 + if (!ret)
1153 + return -EAGAIN;
1154 +diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
1155 +index 7009a8dddd45b..a7e0970b5bfe1 100644
1156 +--- a/fs/nfs/internal.h
1157 ++++ b/fs/nfs/internal.h
1158 +@@ -832,6 +832,7 @@ static inline bool nfs_error_is_fatal_on_server(int err)
1159 + case 0:
1160 + case -ERESTARTSYS:
1161 + case -EINTR:
1162 ++ case -ENOMEM:
1163 + return false;
1164 + }
1165 + return nfs_error_is_fatal(err);
1166 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
1167 +index 84dd68091f422..f1b503bec2221 100644
1168 +--- a/fs/nfsd/nfs4state.c
1169 ++++ b/fs/nfsd/nfs4state.c
1170 +@@ -7122,16 +7122,12 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
1171 + if (sop->so_is_open_owner || !same_owner_str(sop, owner))
1172 + continue;
1173 +
1174 +- /* see if there are still any locks associated with it */
1175 +- lo = lockowner(sop);
1176 +- list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
1177 +- if (check_for_locks(stp->st_stid.sc_file, lo)) {
1178 +- status = nfserr_locks_held;
1179 +- spin_unlock(&clp->cl_lock);
1180 +- return status;
1181 +- }
1182 ++ if (atomic_read(&sop->so_count) != 1) {
1183 ++ spin_unlock(&clp->cl_lock);
1184 ++ return nfserr_locks_held;
1185 + }
1186 +
1187 ++ lo = lockowner(sop);
1188 + nfs4_get_stateowner(sop);
1189 + break;
1190 + }
1191 +diff --git a/fs/pipe.c b/fs/pipe.c
1192 +index 9f2ca1b1c17ac..dbb090e1b026c 100644
1193 +--- a/fs/pipe.c
1194 ++++ b/fs/pipe.c
1195 +@@ -652,7 +652,7 @@ pipe_poll(struct file *filp, poll_table *wait)
1196 + unsigned int head, tail;
1197 +
1198 + /* Epoll has some historical nasty semantics, this enables them */
1199 +- pipe->poll_usage = 1;
1200 ++ WRITE_ONCE(pipe->poll_usage, true);
1201 +
1202 + /*
1203 + * Reading pipe state only -- no need for acquiring the semaphore.
1204 +@@ -1244,30 +1244,33 @@ unsigned int round_pipe_size(unsigned long size)
1205 +
1206 + /*
1207 + * Resize the pipe ring to a number of slots.
1208 ++ *
1209 ++ * Note the pipe can be reduced in capacity, but only if the current
1210 ++ * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
1211 ++ * returned instead.
1212 + */
1213 + int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
1214 + {
1215 + struct pipe_buffer *bufs;
1216 + unsigned int head, tail, mask, n;
1217 +
1218 +- /*
1219 +- * We can shrink the pipe, if arg is greater than the ring occupancy.
1220 +- * Since we don't expect a lot of shrink+grow operations, just free and
1221 +- * allocate again like we would do for growing. If the pipe currently
1222 +- * contains more buffers than arg, then return busy.
1223 +- */
1224 +- mask = pipe->ring_size - 1;
1225 +- head = pipe->head;
1226 +- tail = pipe->tail;
1227 +- n = pipe_occupancy(pipe->head, pipe->tail);
1228 +- if (nr_slots < n)
1229 +- return -EBUSY;
1230 +-
1231 + bufs = kcalloc(nr_slots, sizeof(*bufs),
1232 + GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
1233 + if (unlikely(!bufs))
1234 + return -ENOMEM;
1235 +
1236 ++ spin_lock_irq(&pipe->rd_wait.lock);
1237 ++ mask = pipe->ring_size - 1;
1238 ++ head = pipe->head;
1239 ++ tail = pipe->tail;
1240 ++
1241 ++ n = pipe_occupancy(head, tail);
1242 ++ if (nr_slots < n) {
1243 ++ spin_unlock_irq(&pipe->rd_wait.lock);
1244 ++ kfree(bufs);
1245 ++ return -EBUSY;
1246 ++ }
1247 ++
1248 + /*
1249 + * The pipe array wraps around, so just start the new one at zero
1250 + * and adjust the indices.
1251 +@@ -1299,6 +1302,8 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
1252 + pipe->tail = tail;
1253 + pipe->head = head;
1254 +
1255 ++ spin_unlock_irq(&pipe->rd_wait.lock);
1256 ++
1257 + /* This might have made more room for writers */
1258 + wake_up_interruptible(&pipe->wr_wait);
1259 + return 0;
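On the pipe.c hunk above: the occupancy check now happens together with the ring swap (under pipe->rd_wait.lock in the kernel), so a concurrent writer cannot fill the ring between the check and the copy. A single-threaded sketch of the resize itself, i.e. the occupancy check followed by unwrapping the old ring into the new buffer; the locking is omitted here and all names are hypothetical:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct ring {
    int *bufs;
    unsigned int size;          /* power of two */
    unsigned int head;          /* next slot to fill (free-running counter) */
    unsigned int tail;          /* next slot to drain (free-running counter) */
};

static unsigned int occupancy(const struct ring *r)
{
    return r->head - r->tail;   /* as in pipe_occupancy() */
}

/*
 * Resize to nr_slots (a power of two).  Shrinking is refused when the
 * current contents would no longer fit, mirroring the -EBUSY case above.
 */
static int ring_resize(struct ring *r, unsigned int nr_slots)
{
    unsigned int n = occupancy(r);
    unsigned int mask = r->size - 1;
    unsigned int i;
    int *bufs;

    if (nr_slots < n)
        return -EBUSY;

    bufs = calloc(nr_slots, sizeof(*bufs));
    if (!bufs)
        return -ENOMEM;

    /* Unwrap: old slots tail..head-1 become new slots 0..n-1. */
    for (i = 0; i < n; i++)
        bufs[i] = r->bufs[(r->tail + i) & mask];

    free(r->bufs);
    r->bufs = bufs;
    r->size = nr_slots;
    r->tail = 0;
    r->head = n;
    return 0;
}

int main(void)
{
    struct ring r = { .bufs = calloc(4, sizeof(int)), .size = 4 };

    r.bufs[r.head++ & 3] = 1;
    r.bufs[r.head++ & 3] = 2;

    printf("shrink to 1 slot: %d\n", ring_resize(&r, 1));   /* rejected, two entries queued */
    printf("grow to 8 slots:  %d\n", ring_resize(&r, 8));   /* 0 */
    free(r.bufs);
    return 0;
}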
1260 +diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
1261 +index d9a692484eaed..de9c27ef68d86 100644
1262 +--- a/fs/xfs/libxfs/xfs_bmap.c
1263 ++++ b/fs/xfs/libxfs/xfs_bmap.c
1264 +@@ -6229,6 +6229,11 @@ xfs_bmap_validate_extent(
1265 + xfs_fsblock_t endfsb;
1266 + bool isrt;
1267 +
1268 ++ if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
1269 ++ return __this_address;
1270 ++ if (irec->br_startoff + irec->br_blockcount <= irec->br_startoff)
1271 ++ return __this_address;
1272 ++
1273 + isrt = XFS_IS_REALTIME_INODE(ip);
1274 + endfsb = irec->br_startblock + irec->br_blockcount - 1;
1275 + if (isrt && whichfork == XFS_DATA_FORK) {
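The two new checks in xfs_bmap_validate_extent() above are standard unsigned-overflow guards: for unsigned a and b, a + b <= a holds exactly when the addition wrapped around or b is zero, so both overflowing and zero-length extents are rejected before the end-of-extent arithmetic that follows. A short standalone demonstration of the identity:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if a + b wrapped around (or b == 0) for 64-bit unsigned values. */
static bool add_wraps(uint64_t a, uint64_t b)
{
    return a + b <= a;
}

int main(void)
{
    printf("%d\n", add_wraps(100, 50));              /* 0: fine        */
    printf("%d\n", add_wraps(UINT64_MAX - 10, 20));  /* 1: wrapped     */
    printf("%d\n", add_wraps(100, 0));               /* 1: zero length */
    return 0;
}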
1276 +diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
1277 +index e55378640b056..d03e6098ded9f 100644
1278 +--- a/fs/xfs/libxfs/xfs_dir2.h
1279 ++++ b/fs/xfs/libxfs/xfs_dir2.h
1280 +@@ -47,8 +47,6 @@ extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp,
1281 + extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp,
1282 + struct xfs_name *name, xfs_ino_t ino,
1283 + xfs_extlen_t tot);
1284 +-extern bool xfs_dir2_sf_replace_needblock(struct xfs_inode *dp,
1285 +- xfs_ino_t inum);
1286 + extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp,
1287 + struct xfs_name *name, xfs_ino_t inum,
1288 + xfs_extlen_t tot);
1289 +diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
1290 +index 2463b5d734472..8c4f76bba88be 100644
1291 +--- a/fs/xfs/libxfs/xfs_dir2_sf.c
1292 ++++ b/fs/xfs/libxfs/xfs_dir2_sf.c
1293 +@@ -1018,7 +1018,7 @@ xfs_dir2_sf_removename(
1294 + /*
1295 + * Check whether the sf dir replace operation need more blocks.
1296 + */
1297 +-bool
1298 ++static bool
1299 + xfs_dir2_sf_replace_needblock(
1300 + struct xfs_inode *dp,
1301 + xfs_ino_t inum)
1302 +diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
1303 +index 0356f2e340a10..8c6e26d62ef28 100644
1304 +--- a/fs/xfs/xfs_buf_item.c
1305 ++++ b/fs/xfs/xfs_buf_item.c
1306 +@@ -56,14 +56,12 @@ xfs_buf_log_format_size(
1307 + }
1308 +
1309 + /*
1310 +- * This returns the number of log iovecs needed to log the
1311 +- * given buf log item.
1312 ++ * Return the number of log iovecs and space needed to log the given buf log
1313 ++ * item segment.
1314 + *
1315 +- * It calculates this as 1 iovec for the buf log format structure
1316 +- * and 1 for each stretch of non-contiguous chunks to be logged.
1317 +- * Contiguous chunks are logged in a single iovec.
1318 +- *
1319 +- * If the XFS_BLI_STALE flag has been set, then log nothing.
1320 ++ * It calculates this as 1 iovec for the buf log format structure and 1 for each
1321 ++ * stretch of non-contiguous chunks to be logged. Contiguous chunks are logged
1322 ++ * in a single iovec.
1323 + */
1324 + STATIC void
1325 + xfs_buf_item_size_segment(
1326 +@@ -119,11 +117,8 @@ xfs_buf_item_size_segment(
1327 + }
1328 +
1329 + /*
1330 +- * This returns the number of log iovecs needed to log the given buf log item.
1331 +- *
1332 +- * It calculates this as 1 iovec for the buf log format structure and 1 for each
1333 +- * stretch of non-contiguous chunks to be logged. Contiguous chunks are logged
1334 +- * in a single iovec.
1335 ++ * Return the number of log iovecs and space needed to log the given buf log
1336 ++ * item.
1337 + *
1338 + * Discontiguous buffers need a format structure per region that is being
1339 + * logged. This makes the changes in the buffer appear to log recovery as though
1340 +@@ -133,7 +128,11 @@ xfs_buf_item_size_segment(
1341 + * what ends up on disk.
1342 + *
1343 + * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
1344 +- * format structures.
1345 ++ * format structures. If the item has previously been logged and has dirty
1346 ++ * regions, we do not relog them in stale buffers. This has the effect of
1347 ++ * reducing the size of the relogged item by the amount of dirty data tracked
1348 ++ * by the log item. This can result in the committing transaction reducing the
1349 ++ * amount of space being consumed by the CIL.
1350 + */
1351 + STATIC void
1352 + xfs_buf_item_size(
1353 +@@ -147,9 +146,9 @@ xfs_buf_item_size(
1354 + ASSERT(atomic_read(&bip->bli_refcount) > 0);
1355 + if (bip->bli_flags & XFS_BLI_STALE) {
1356 + /*
1357 +- * The buffer is stale, so all we need to log
1358 +- * is the buf log format structure with the
1359 +- * cancel flag in it.
1360 ++ * The buffer is stale, so all we need to log is the buf log
1361 ++ * format structure with the cancel flag in it as we are never
1362 ++ * going to replay the changes tracked in the log item.
1363 + */
1364 + trace_xfs_buf_item_size_stale(bip);
1365 + ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
1366 +@@ -164,9 +163,9 @@ xfs_buf_item_size(
1367 +
1368 + if (bip->bli_flags & XFS_BLI_ORDERED) {
1369 + /*
1370 +- * The buffer has been logged just to order it.
1371 +- * It is not being included in the transaction
1372 +- * commit, so no vectors are used at all.
1373 ++ * The buffer has been logged just to order it. It is not being
1374 ++ * included in the transaction commit, so no vectors are used at
1375 ++ * all.
1376 + */
1377 + trace_xfs_buf_item_size_ordered(bip);
1378 + *nvecs = XFS_LOG_VEC_ORDERED;
1379 +diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
1380 +index 2bfbcf28b1bd2..e958b1c745615 100644
1381 +--- a/fs/xfs/xfs_inode.c
1382 ++++ b/fs/xfs/xfs_inode.c
1383 +@@ -3152,7 +3152,7 @@ xfs_rename(
1384 + struct xfs_trans *tp;
1385 + struct xfs_inode *wip = NULL; /* whiteout inode */
1386 + struct xfs_inode *inodes[__XFS_SORT_INODES];
1387 +- struct xfs_buf *agibp;
1388 ++ int i;
1389 + int num_inodes = __XFS_SORT_INODES;
1390 + bool new_parent = (src_dp != target_dp);
1391 + bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
1392 +@@ -3265,6 +3265,30 @@ xfs_rename(
1393 + }
1394 + }
1395 +
1396 ++ /*
1397 ++ * Lock the AGI buffers we need to handle bumping the nlink of the
1398 ++ * whiteout inode off the unlinked list and to handle dropping the
1399 ++ * nlink of the target inode. Per locking order rules, do this in
1400 ++ * increasing AG order and before directory block allocation tries to
1401 ++ * grab AGFs because we grab AGIs before AGFs.
1402 ++ *
1403 ++ * The (vfs) caller must ensure that if src is a directory then
1404 ++ * target_ip is either null or an empty directory.
1405 ++ */
1406 ++ for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
1407 ++ if (inodes[i] == wip ||
1408 ++ (inodes[i] == target_ip &&
1409 ++ (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
1410 ++ struct xfs_buf *bp;
1411 ++ xfs_agnumber_t agno;
1412 ++
1413 ++ agno = XFS_INO_TO_AGNO(mp, inodes[i]->i_ino);
1414 ++ error = xfs_read_agi(mp, tp, agno, &bp);
1415 ++ if (error)
1416 ++ goto out_trans_cancel;
1417 ++ }
1418 ++ }
1419 ++
1420 + /*
1421 + * Directory entry creation below may acquire the AGF. Remove
1422 + * the whiteout from the unlinked list first to preserve correct
1423 +@@ -3317,22 +3341,6 @@ xfs_rename(
1424 + * In case there is already an entry with the same
1425 + * name at the destination directory, remove it first.
1426 + */
1427 +-
1428 +- /*
1429 +- * Check whether the replace operation will need to allocate
1430 +- * blocks. This happens when the shortform directory lacks
1431 +- * space and we have to convert it to a block format directory.
1432 +- * When more blocks are necessary, we must lock the AGI first
1433 +- * to preserve locking order (AGI -> AGF).
1434 +- */
1435 +- if (xfs_dir2_sf_replace_needblock(target_dp, src_ip->i_ino)) {
1436 +- error = xfs_read_agi(mp, tp,
1437 +- XFS_INO_TO_AGNO(mp, target_ip->i_ino),
1438 +- &agibp);
1439 +- if (error)
1440 +- goto out_trans_cancel;
1441 +- }
1442 +-
1443 + error = xfs_dir_replace(tp, target_dp, target_name,
1444 + src_ip->i_ino, spaceres);
1445 + if (error)
1446 +diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
1447 +index 17e20a6d8b4e2..6ff91e5bf3cd7 100644
1448 +--- a/fs/xfs/xfs_inode_item.c
1449 ++++ b/fs/xfs/xfs_inode_item.c
1450 +@@ -28,6 +28,20 @@ static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip)
1451 + return container_of(lip, struct xfs_inode_log_item, ili_item);
1452 + }
1453 +
1454 ++/*
1455 ++ * The logged size of an inode fork is always the current size of the inode
1456 ++ * fork. This means that when an inode fork is relogged, the size of the logged
1457 ++ * region is determined by the current state, not the combination of the
1458 ++ * previously logged state + the current state. This is different relogging
1459 ++ * behaviour to most other log items which will retain the size of the
1460 ++ * previously logged changes when smaller regions are relogged.
1461 ++ *
1462 ++ * Hence operations that remove data from the inode fork (e.g. shortform
1463 ++ * dir/attr remove, extent form extent removal, etc), the size of the relogged
1464 ++ * inode gets -smaller- rather than stays the same size as the previously logged
1465 ++ * size and this can result in the committing transaction reducing the amount of
1466 ++ * space being consumed by the CIL.
1467 ++ */
1468 + STATIC void
1469 + xfs_inode_item_data_fork_size(
1470 + struct xfs_inode_log_item *iip,
1471 +diff --git a/fs/xfs/xfs_iwalk.c b/fs/xfs/xfs_iwalk.c
1472 +index 2a45138831e33..eae3aff9bc976 100644
1473 +--- a/fs/xfs/xfs_iwalk.c
1474 ++++ b/fs/xfs/xfs_iwalk.c
1475 +@@ -363,7 +363,7 @@ xfs_iwalk_run_callbacks(
1476 + /* Delete cursor but remember the last record we cached... */
1477 + xfs_iwalk_del_inobt(tp, curpp, agi_bpp, 0);
1478 + irec = &iwag->recs[iwag->nr_recs - 1];
1479 +- ASSERT(next_agino == irec->ir_startino + XFS_INODES_PER_CHUNK);
1480 ++ ASSERT(next_agino >= irec->ir_startino + XFS_INODES_PER_CHUNK);
1481 +
1482 + error = xfs_iwalk_ag_recs(iwag);
1483 + if (error)
1484 +diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
1485 +index b0ef071b3cb53..cd5c04dabe2e1 100644
1486 +--- a/fs/xfs/xfs_log_cil.c
1487 ++++ b/fs/xfs/xfs_log_cil.c
1488 +@@ -668,9 +668,14 @@ xlog_cil_push_work(
1489 + ASSERT(push_seq <= ctx->sequence);
1490 +
1491 + /*
1492 +- * Wake up any background push waiters now this context is being pushed.
1493 ++ * As we are about to switch to a new, empty CIL context, we no longer
1494 ++ * need to throttle tasks on CIL space overruns. Wake any waiters that
1495 ++ * the hard push throttle may have caught so they can start committing
1496 ++ * to the new context. The ctx->xc_push_lock provides the serialisation
1497 ++ * necessary for safely using the lockless waitqueue_active() check in
1498 ++ * this context.
1499 + */
1500 +- if (ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
1501 ++ if (waitqueue_active(&cil->xc_push_wait))
1502 + wake_up_all(&cil->xc_push_wait);
1503 +
1504 + /*
1505 +@@ -907,7 +912,7 @@ xlog_cil_push_background(
1506 + ASSERT(!list_empty(&cil->xc_cil));
1507 +
1508 + /*
1509 +- * don't do a background push if we haven't used up all the
1510 ++ * Don't do a background push if we haven't used up all the
1511 + * space available yet.
1512 + */
1513 + if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) {
1514 +@@ -931,9 +936,16 @@ xlog_cil_push_background(
1515 +
1516 + /*
1517 + * If we are well over the space limit, throttle the work that is being
1518 +- * done until the push work on this context has begun.
1519 ++ * done until the push work on this context has begun. Enforce the hard
1520 ++ * throttle on all transaction commits once it has been activated, even
1521 ++ * if the committing transactions have resulted in the space usage
1522 ++ * dipping back down under the hard limit.
1523 ++ *
1524 ++ * The ctx->xc_push_lock provides the serialisation necessary for safely
1525 ++ * using the lockless waitqueue_active() check in this context.
1526 + */
1527 +- if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log)) {
1528 ++ if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log) ||
1529 ++ waitqueue_active(&cil->xc_push_wait)) {
1530 + trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
1531 + ASSERT(cil->xc_ctx->space_used < log->l_logsize);
1532 + xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
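
The xfs_log_cil.c hunks above replace the space-usage test in the push worker with a waitqueue_active() check and note that ctx->xc_push_lock is what makes that lockless check safe: committers decide to sleep, and the push worker wakes them, under the same lock. A rough userspace analogue of that pattern, using a pthread mutex/condvar pair as a stand-in for the kernel waitqueue (names and flow invented for illustration, not the kernel API):

/* Illustrative userspace analogue -- not part of the patch. The waker checks
 * "are there waiters?" cheaply, but both the decision to sleep and the
 * wake-up happen under the same mutex, mirroring how xc_push_lock makes the
 * lockless waitqueue_active() check safe.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t push_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  push_wait = PTHREAD_COND_INITIALIZER;
static int nr_waiters;                   /* stand-in for waitqueue_active() */
static bool hard_limit_hit = true;       /* assumed starting condition */

static void *committer(void *arg)
{
        pthread_mutex_lock(&push_lock);
        while (hard_limit_hit) {         /* throttled transaction commit path */
                nr_waiters++;
                pthread_cond_wait(&push_wait, &push_lock);
                nr_waiters--;
        }
        pthread_mutex_unlock(&push_lock);
        printf("committer resumed against the new context\n");
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, committer, NULL);

        /* push worker: switch to a new, empty context and wake throttled tasks */
        pthread_mutex_lock(&push_lock);
        hard_limit_hit = false;
        if (nr_waiters)                  /* cheap check, valid under push_lock */
                pthread_cond_broadcast(&push_wait);
        pthread_mutex_unlock(&push_lock);

        pthread_join(t, NULL);
        return 0;
}
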
1533 +diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
1534 +index e3e229e52512a..5ebd6cdc44a7b 100644
1535 +--- a/fs/xfs/xfs_super.c
1536 ++++ b/fs/xfs/xfs_super.c
1537 +@@ -199,10 +199,12 @@ xfs_fs_show_options(
1538 + seq_printf(m, ",swidth=%d",
1539 + (int)XFS_FSB_TO_BB(mp, mp->m_swidth));
1540 +
1541 +- if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
1542 +- seq_puts(m, ",usrquota");
1543 +- else if (mp->m_qflags & XFS_UQUOTA_ACCT)
1544 +- seq_puts(m, ",uqnoenforce");
1545 ++ if (mp->m_qflags & XFS_UQUOTA_ACCT) {
1546 ++ if (mp->m_qflags & XFS_UQUOTA_ENFD)
1547 ++ seq_puts(m, ",usrquota");
1548 ++ else
1549 ++ seq_puts(m, ",uqnoenforce");
1550 ++ }
1551 +
1552 + if (mp->m_qflags & XFS_PQUOTA_ACCT) {
1553 + if (mp->m_qflags & XFS_PQUOTA_ENFD)
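
The quota-options hunk above fixes a flag test that could never report "uqnoenforce": testing (ACCT|ENFD) with a bitwise AND succeeds when either bit is set, so an accounting-only mount was shown as "usrquota". A small standalone sketch of the before/after behaviour (flag values are arbitrary sample bits, not the kernel's):

/* Illustrative only -- not part of the patch. */
#include <stdio.h>

#define QUOTA_ACCT 0x1
#define QUOTA_ENFD 0x2

static const char *old_style(unsigned int qflags)
{
        if (qflags & (QUOTA_ACCT | QUOTA_ENFD))
                return "usrquota";
        else if (qflags & QUOTA_ACCT)
                return "uqnoenforce";    /* dead code: the first test already matched */
        return "";
}

static const char *new_style(unsigned int qflags)
{
        if (qflags & QUOTA_ACCT)
                return (qflags & QUOTA_ENFD) ? "usrquota" : "uqnoenforce";
        return "";
}

int main(void)
{
        unsigned int qflags = QUOTA_ACCT;  /* accounting only, no enforcement */

        printf("old: %s\nnew: %s\n", old_style(qflags), new_style(qflags));
        return 0;
}
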
1554 +diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
1555 +index 88e4d145f7cda..a6c3b8e7deb64 100644
1556 +--- a/include/crypto/drbg.h
1557 ++++ b/include/crypto/drbg.h
1558 +@@ -105,6 +105,12 @@ struct drbg_test_data {
1559 + struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */
1560 + };
1561 +
1562 ++enum drbg_seed_state {
1563 ++ DRBG_SEED_STATE_UNSEEDED,
1564 ++ DRBG_SEED_STATE_PARTIAL, /* Seeded with !rng_is_initialized() */
1565 ++ DRBG_SEED_STATE_FULL,
1566 ++};
1567 ++
1568 + struct drbg_state {
1569 + struct mutex drbg_mutex; /* lock around DRBG */
1570 + unsigned char *V; /* internal state 10.1.1.1 1a) */
1571 +@@ -127,16 +133,14 @@ struct drbg_state {
1572 + struct crypto_wait ctr_wait; /* CTR mode async wait obj */
1573 + struct scatterlist sg_in, sg_out; /* CTR mode SGLs */
1574 +
1575 +- bool seeded; /* DRBG fully seeded? */
1576 ++ enum drbg_seed_state seeded; /* DRBG fully seeded? */
1577 + bool pr; /* Prediction resistance enabled? */
1578 + bool fips_primed; /* Continuous test primed? */
1579 + unsigned char *prev; /* FIPS 140-2 continuous test value */
1580 +- struct work_struct seed_work; /* asynchronous seeding support */
1581 + struct crypto_rng *jent;
1582 + const struct drbg_state_ops *d_ops;
1583 + const struct drbg_core *core;
1584 + struct drbg_string test_data;
1585 +- struct notifier_block random_ready;
1586 + };
1587 +
1588 + static inline __u8 drbg_statelen(struct drbg_state *drbg)
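
The drbg.h change above turns the boolean seeded flag into a three-state enum and drops the asynchronous seeding machinery, so a DRBG seeded before the input pool is ready can be marked as only partially seeded and upgraded on a later request. A toy model of that progression (rng_ready and do_seed are invented stand-ins for illustration, not kernel functions):

/* Illustrative only -- not part of the patch. */
#include <stdbool.h>
#include <stdio.h>

enum seed_state { SEED_UNSEEDED, SEED_PARTIAL, SEED_FULL };

static bool rng_ready;                   /* pretend input-pool state */

static enum seed_state do_seed(void)
{
        /* seeding before the pool is initialised only gets us a partial seed */
        return rng_ready ? SEED_FULL : SEED_PARTIAL;
}

static void generate(enum seed_state *state)
{
        /* each generate request upgrades a partial seed once entropy is ready */
        if (*state != SEED_FULL)
                *state = do_seed();
        printf("generate with seed state %d\n", *state);
}

int main(void)
{
        enum seed_state state = SEED_UNSEEDED;

        generate(&state);                /* partial: pool not yet initialised */
        rng_ready = true;
        generate(&state);                /* upgraded to full on the next request */
        return 0;
}
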
1589 +diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
1590 +index fc5642431b923..c0b6ec6bf65b7 100644
1591 +--- a/include/linux/pipe_fs_i.h
1592 ++++ b/include/linux/pipe_fs_i.h
1593 +@@ -71,7 +71,7 @@ struct pipe_inode_info {
1594 + unsigned int files;
1595 + unsigned int r_counter;
1596 + unsigned int w_counter;
1597 +- unsigned int poll_usage;
1598 ++ bool poll_usage;
1599 + struct page *tmp_page;
1600 + struct fasync_struct *fasync_readers;
1601 + struct fasync_struct *fasync_writers;
1602 +diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
1603 +index 09f2efea0b970..5805fe4947f3c 100644
1604 +--- a/include/net/netfilter/nf_conntrack_core.h
1605 ++++ b/include/net/netfilter/nf_conntrack_core.h
1606 +@@ -59,8 +59,13 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
1607 + int ret = NF_ACCEPT;
1608 +
1609 + if (ct) {
1610 +- if (!nf_ct_is_confirmed(ct))
1611 ++ if (!nf_ct_is_confirmed(ct)) {
1612 + ret = __nf_conntrack_confirm(skb);
1613 ++
1614 ++ if (ret == NF_ACCEPT)
1615 ++ ct = (struct nf_conn *)skb_nfct(skb);
1616 ++ }
1617 ++
1618 + if (likely(ret == NF_ACCEPT))
1619 + nf_ct_deliver_cached_events(ct);
1620 + }
1621 +diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
1622 +index 986dabc3d11f0..87becf77cc759 100644
1623 +--- a/kernel/bpf/trampoline.c
1624 ++++ b/kernel/bpf/trampoline.c
1625 +@@ -378,7 +378,7 @@ int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
1626 + {
1627 + enum bpf_tramp_prog_type kind;
1628 + int err = 0;
1629 +- int cnt;
1630 ++ int cnt = 0, i;
1631 +
1632 + kind = bpf_attach_type_to_tramp(prog);
1633 + mutex_lock(&tr->mutex);
1634 +@@ -389,7 +389,10 @@ int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
1635 + err = -EBUSY;
1636 + goto out;
1637 + }
1638 +- cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT];
1639 ++
1640 ++ for (i = 0; i < BPF_TRAMP_MAX; i++)
1641 ++ cnt += tr->progs_cnt[i];
1642 ++
1643 + if (kind == BPF_TRAMP_REPLACE) {
1644 + /* Cannot attach extension if fentry/fexit are in use. */
1645 + if (cnt) {
1646 +@@ -467,16 +470,19 @@ out:
1647 +
1648 + void bpf_trampoline_put(struct bpf_trampoline *tr)
1649 + {
1650 ++ int i;
1651 ++
1652 + if (!tr)
1653 + return;
1654 + mutex_lock(&trampoline_mutex);
1655 + if (!refcount_dec_and_test(&tr->refcnt))
1656 + goto out;
1657 + WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
1658 +- if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
1659 +- goto out;
1660 +- if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
1661 +- goto out;
1662 ++
1663 ++ for (i = 0; i < BPF_TRAMP_MAX; i++)
1664 ++ if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[i])))
1665 ++ goto out;
1666 ++
1667 + /* This code will be executed even when the last bpf_tramp_image
1668 + * is alive. All progs are detached from the trampoline and the
1669 + * trampoline image is patched with jmp into epilogue to skip
1670 +diff --git a/lib/Kconfig b/lib/Kconfig
1671 +index 9216e24e51646..258e1ec7d5920 100644
1672 +--- a/lib/Kconfig
1673 ++++ b/lib/Kconfig
1674 +@@ -101,6 +101,8 @@ config INDIRECT_PIO
1675 +
1676 + When in doubt, say N.
1677 +
1678 ++source "lib/crypto/Kconfig"
1679 ++
1680 + config CRC_CCITT
1681 + tristate "CRC-CCITT functions"
1682 + help
1683 +diff --git a/lib/assoc_array.c b/lib/assoc_array.c
1684 +index 6f4bcf5245547..b537a83678e11 100644
1685 +--- a/lib/assoc_array.c
1686 ++++ b/lib/assoc_array.c
1687 +@@ -1462,6 +1462,7 @@ int assoc_array_gc(struct assoc_array *array,
1688 + struct assoc_array_ptr *cursor, *ptr;
1689 + struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
1690 + unsigned long nr_leaves_on_tree;
1691 ++ bool retained;
1692 + int keylen, slot, nr_free, next_slot, i;
1693 +
1694 + pr_devel("-->%s()\n", __func__);
1695 +@@ -1538,6 +1539,7 @@ continue_node:
1696 + goto descend;
1697 + }
1698 +
1699 ++retry_compress:
1700 + pr_devel("-- compress node %p --\n", new_n);
1701 +
1702 + /* Count up the number of empty slots in this node and work out the
1703 +@@ -1555,6 +1557,7 @@ continue_node:
1704 + pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
1705 +
1706 + /* See what we can fold in */
1707 ++ retained = false;
1708 + next_slot = 0;
1709 + for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
1710 + struct assoc_array_shortcut *s;
1711 +@@ -1604,9 +1607,14 @@ continue_node:
1712 + pr_devel("[%d] retain node %lu/%d [nx %d]\n",
1713 + slot, child->nr_leaves_on_branch, nr_free + 1,
1714 + next_slot);
1715 ++ retained = true;
1716 + }
1717 + }
1718 +
1719 ++ if (retained && new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
1720 ++ pr_devel("internal nodes remain despite enough space, retrying\n");
1721 ++ goto retry_compress;
1722 ++ }
1723 + pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
1724 +
1725 + nr_leaves_on_tree = new_n->nr_leaves_on_branch;
1726 +diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
1727 +index af3da5a8bde8d..9856e291f4141 100644
1728 +--- a/lib/crypto/Kconfig
1729 ++++ b/lib/crypto/Kconfig
1730 +@@ -1,5 +1,7 @@
1731 + # SPDX-License-Identifier: GPL-2.0
1732 +
1733 ++menu "Crypto library routines"
1734 ++
1735 + config CRYPTO_LIB_AES
1736 + tristate
1737 +
1738 +@@ -31,7 +33,7 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA
1739 +
1740 + config CRYPTO_LIB_CHACHA_GENERIC
1741 + tristate
1742 +- select CRYPTO_ALGAPI
1743 ++ select XOR_BLOCKS
1744 + help
1745 + This symbol can be depended upon by arch implementations of the
1746 + ChaCha library interface that require the generic code as a
1747 +@@ -40,7 +42,8 @@ config CRYPTO_LIB_CHACHA_GENERIC
1748 + of CRYPTO_LIB_CHACHA.
1749 +
1750 + config CRYPTO_LIB_CHACHA
1751 +- tristate
1752 ++ tristate "ChaCha library interface"
1753 ++ depends on CRYPTO
1754 + depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
1755 + select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n
1756 + help
1757 +@@ -65,7 +68,7 @@ config CRYPTO_LIB_CURVE25519_GENERIC
1758 + of CRYPTO_LIB_CURVE25519.
1759 +
1760 + config CRYPTO_LIB_CURVE25519
1761 +- tristate
1762 ++ tristate "Curve25519 scalar multiplication library"
1763 + depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519
1764 + select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n
1765 + help
1766 +@@ -100,7 +103,7 @@ config CRYPTO_LIB_POLY1305_GENERIC
1767 + of CRYPTO_LIB_POLY1305.
1768 +
1769 + config CRYPTO_LIB_POLY1305
1770 +- tristate
1771 ++ tristate "Poly1305 library interface"
1772 + depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
1773 + select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n
1774 + help
1775 +@@ -109,11 +112,15 @@ config CRYPTO_LIB_POLY1305
1776 + is available and enabled.
1777 +
1778 + config CRYPTO_LIB_CHACHA20POLY1305
1779 +- tristate
1780 ++ tristate "ChaCha20-Poly1305 AEAD support (8-byte nonce library version)"
1781 + depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
1782 + depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
1783 ++ depends on CRYPTO
1784 + select CRYPTO_LIB_CHACHA
1785 + select CRYPTO_LIB_POLY1305
1786 ++ select CRYPTO_ALGAPI
1787 +
1788 + config CRYPTO_LIB_SHA256
1789 + tristate
1790 ++
1791 ++endmenu
1792 +diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
1793 +index e59eda07305e6..493093b97093f 100644
1794 +--- a/lib/percpu-refcount.c
1795 ++++ b/lib/percpu-refcount.c
1796 +@@ -75,6 +75,7 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
1797 + data = kzalloc(sizeof(*ref->data), gfp);
1798 + if (!data) {
1799 + free_percpu((void __percpu *)ref->percpu_count_ptr);
1800 ++ ref->percpu_count_ptr = 0;
1801 + return -ENOMEM;
1802 + }
1803 +
1804 +diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
1805 +index 73cd50735df29..c18dc8e61d352 100644
1806 +--- a/mm/zsmalloc.c
1807 ++++ b/mm/zsmalloc.c
1808 +@@ -1748,11 +1748,40 @@ static enum fullness_group putback_zspage(struct size_class *class,
1809 + */
1810 + static void lock_zspage(struct zspage *zspage)
1811 + {
1812 +- struct page *page = get_first_page(zspage);
1813 ++ struct page *curr_page, *page;
1814 +
1815 +- do {
1816 +- lock_page(page);
1817 +- } while ((page = get_next_page(page)) != NULL);
1818 ++ /*
1819 ++ * Pages we haven't locked yet can be migrated off the list while we're
1820 ++ * trying to lock them, so we need to be careful and only attempt to
1821 ++ * lock each page under migrate_read_lock(). Otherwise, the page we lock
1822 ++ * may no longer belong to the zspage. This means that we may wait for
1823 ++ * the wrong page to unlock, so we must take a reference to the page
1824 ++ * prior to waiting for it to unlock outside migrate_read_lock().
1825 ++ */
1826 ++ while (1) {
1827 ++ migrate_read_lock(zspage);
1828 ++ page = get_first_page(zspage);
1829 ++ if (trylock_page(page))
1830 ++ break;
1831 ++ get_page(page);
1832 ++ migrate_read_unlock(zspage);
1833 ++ wait_on_page_locked(page);
1834 ++ put_page(page);
1835 ++ }
1836 ++
1837 ++ curr_page = page;
1838 ++ while ((page = get_next_page(curr_page))) {
1839 ++ if (trylock_page(page)) {
1840 ++ curr_page = page;
1841 ++ } else {
1842 ++ get_page(page);
1843 ++ migrate_read_unlock(zspage);
1844 ++ wait_on_page_locked(page);
1845 ++ put_page(page);
1846 ++ migrate_read_lock(zspage);
1847 ++ }
1848 ++ }
1849 ++ migrate_read_unlock(zspage);
1850 + }
1851 +
1852 + static int zs_init_fs_context(struct fs_context *fc)
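
The new lock_zspage() comment describes a trylock-under-read-lock pattern: pages can migrate off the zspage while unlocked, so each page is only trylocked while the migrate read lock keeps the list stable, and a contended page is pinned and waited on after dropping that lock. A simplified userspace analogue, with a pthread rwlock standing in for the migrate lock and per-page mutexes standing in for page locks (the kernel's page reference counting is noted in a comment but omitted here):

/* Illustrative userspace analogue -- not part of the patch. */
#include <pthread.h>
#include <stdio.h>

#define NR_PAGES 4

static pthread_rwlock_t zspage_lock = PTHREAD_RWLOCK_INITIALIZER; /* ~ migrate lock */
static pthread_mutex_t  page_lock[NR_PAGES];                      /* ~ page locks */

static void lock_all_pages(void)
{
        int i = 0;

        pthread_rwlock_rdlock(&zspage_lock);
        while (i < NR_PAGES) {
                if (pthread_mutex_trylock(&page_lock[i]) == 0) {
                        i++;             /* locked while the list is stable */
                        continue;
                }
                /* Drop the read lock before sleeping on the contended page.
                 * (The kernel also takes a page reference here so the page
                 * cannot disappear while we wait; omitted in this toy.)
                 */
                pthread_rwlock_unlock(&zspage_lock);
                pthread_mutex_lock(&page_lock[i]);    /* wait for it ...        */
                pthread_mutex_unlock(&page_lock[i]);  /* ... then retry locked  */
                pthread_rwlock_rdlock(&zspage_lock);
        }
        pthread_rwlock_unlock(&zspage_lock);
}

int main(void)
{
        for (int i = 0; i < NR_PAGES; i++)
                pthread_mutex_init(&page_lock[i], NULL);

        lock_all_pages();
        printf("all %d pages locked\n", NR_PAGES);
        return 0;
}
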
1853 +diff --git a/net/core/filter.c b/net/core/filter.c
1854 +index ddf9792c0cb2e..d348f1d3fb8fc 100644
1855 +--- a/net/core/filter.c
1856 ++++ b/net/core/filter.c
1857 +@@ -1687,7 +1687,7 @@ BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
1858 +
1859 + if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
1860 + return -EINVAL;
1861 +- if (unlikely(offset > 0xffff))
1862 ++ if (unlikely(offset > INT_MAX))
1863 + return -EFAULT;
1864 + if (unlikely(bpf_try_make_writable(skb, offset + len)))
1865 + return -EFAULT;
1866 +@@ -1722,7 +1722,7 @@ BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
1867 + {
1868 + void *ptr;
1869 +
1870 +- if (unlikely(offset > 0xffff))
1871 ++ if (unlikely(offset > INT_MAX))
1872 + goto err_clear;
1873 +
1874 + ptr = skb_header_pointer(skb, offset, len, to);
1875 +diff --git a/net/key/af_key.c b/net/key/af_key.c
1876 +index 61505b0df57db..6b7ed5568c090 100644
1877 +--- a/net/key/af_key.c
1878 ++++ b/net/key/af_key.c
1879 +@@ -2904,7 +2904,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
1880 + break;
1881 + if (!aalg->pfkey_supported)
1882 + continue;
1883 +- if (aalg_tmpl_set(t, aalg))
1884 ++ if (aalg_tmpl_set(t, aalg) && aalg->available)
1885 + sz += sizeof(struct sadb_comb);
1886 + }
1887 + return sz + sizeof(struct sadb_prop);
1888 +@@ -2922,7 +2922,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
1889 + if (!ealg->pfkey_supported)
1890 + continue;
1891 +
1892 +- if (!(ealg_tmpl_set(t, ealg)))
1893 ++ if (!(ealg_tmpl_set(t, ealg) && ealg->available))
1894 + continue;
1895 +
1896 + for (k = 1; ; k++) {
1897 +@@ -2933,7 +2933,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
1898 + if (!aalg->pfkey_supported)
1899 + continue;
1900 +
1901 +- if (aalg_tmpl_set(t, aalg))
1902 ++ if (aalg_tmpl_set(t, aalg) && aalg->available)
1903 + sz += sizeof(struct sadb_comb);
1904 + }
1905 + }
1906 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
1907 +index fdd1da9ecea9e..ea162e36e0e4b 100644
1908 +--- a/net/netfilter/nf_tables_api.c
1909 ++++ b/net/netfilter/nf_tables_api.c
1910 +@@ -2679,27 +2679,31 @@ static struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
1911 +
1912 + err = nf_tables_expr_parse(ctx, nla, &info);
1913 + if (err < 0)
1914 +- goto err1;
1915 ++ goto err_expr_parse;
1916 ++
1917 ++ err = -EOPNOTSUPP;
1918 ++ if (!(info.ops->type->flags & NFT_EXPR_STATEFUL))
1919 ++ goto err_expr_stateful;
1920 +
1921 + err = -ENOMEM;
1922 + expr = kzalloc(info.ops->size, GFP_KERNEL);
1923 + if (expr == NULL)
1924 +- goto err2;
1925 ++ goto err_expr_stateful;
1926 +
1927 + err = nf_tables_newexpr(ctx, &info, expr);
1928 + if (err < 0)
1929 +- goto err3;
1930 ++ goto err_expr_new;
1931 +
1932 + return expr;
1933 +-err3:
1934 ++err_expr_new:
1935 + kfree(expr);
1936 +-err2:
1937 ++err_expr_stateful:
1938 + owner = info.ops->type->owner;
1939 + if (info.ops->type->release_ops)
1940 + info.ops->type->release_ops(info.ops);
1941 +
1942 + module_put(owner);
1943 +-err1:
1944 ++err_expr_parse:
1945 + return ERR_PTR(err);
1946 + }
1947 +
1948 +@@ -4047,6 +4051,9 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr,
1949 + u32 len;
1950 + int err;
1951 +
1952 ++ if (desc->field_count >= ARRAY_SIZE(desc->field_len))
1953 ++ return -E2BIG;
1954 ++
1955 + err = nla_parse_nested_deprecated(tb, NFTA_SET_FIELD_MAX, attr,
1956 + nft_concat_policy, NULL);
1957 + if (err < 0)
1958 +@@ -4056,9 +4063,8 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr,
1959 + return -EINVAL;
1960 +
1961 + len = ntohl(nla_get_be32(tb[NFTA_SET_FIELD_LEN]));
1962 +-
1963 +- if (len * BITS_PER_BYTE / 32 > NFT_REG32_COUNT)
1964 +- return -E2BIG;
1965 ++ if (!len || len > U8_MAX)
1966 ++ return -EINVAL;
1967 +
1968 + desc->field_len[desc->field_count++] = len;
1969 +
1970 +@@ -4069,7 +4075,8 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
1971 + const struct nlattr *nla)
1972 + {
1973 + struct nlattr *attr;
1974 +- int rem, err;
1975 ++ u32 num_regs = 0;
1976 ++ int rem, err, i;
1977 +
1978 + nla_for_each_nested(attr, nla, rem) {
1979 + if (nla_type(attr) != NFTA_LIST_ELEM)
1980 +@@ -4080,6 +4087,12 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
1981 + return err;
1982 + }
1983 +
1984 ++ for (i = 0; i < desc->field_count; i++)
1985 ++ num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32));
1986 ++
1987 ++ if (num_regs > NFT_REG32_COUNT)
1988 ++ return -E2BIG;
1989 ++
1990 + return 0;
1991 + }
1992 +
1993 +@@ -5055,9 +5068,6 @@ struct nft_expr *nft_set_elem_expr_alloc(const struct nft_ctx *ctx,
1994 + return expr;
1995 +
1996 + err = -EOPNOTSUPP;
1997 +- if (!(expr->ops->type->flags & NFT_EXPR_STATEFUL))
1998 +- goto err_set_elem_expr;
1999 +-
2000 + if (expr->ops->type->flags & NFT_EXPR_GC) {
2001 + if (set->flags & NFT_SET_TIMEOUT)
2002 + goto err_set_elem_expr;
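
The nf_tables hunks above add bounds checks on set concatenation descriptions: the field count is capped, each field length must be 1..U8_MAX, and the total number of 32-bit registers the fields occupy must fit in the register file. A small standalone check mirroring that arithmetic (field lengths are sample input; the 16-register limit is an assumption matching NFT_REG32_COUNT in current kernels):

/* Illustrative only -- not part of the patch. */
#include <stdio.h>
#include <stdint.h>

#define NFT_REG32_COUNT         16
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

static int check_concat_fields(const uint8_t *field_len, unsigned int count)
{
        unsigned int i, num_regs = 0;

        for (i = 0; i < count; i++)
                num_regs += DIV_ROUND_UP(field_len[i], sizeof(uint32_t));

        return num_regs > NFT_REG32_COUNT ? -1 : 0;   /* -1 ~ -E2BIG */
}

int main(void)
{
        /* e.g. an IPv6 address (16 bytes) and a port (2 bytes): 4 + 1 registers */
        uint8_t ok[] = { 16, 2 };
        /* five 16-byte fields would need 20 registers and must be rejected */
        uint8_t too_big[] = { 16, 16, 16, 16, 16 };

        printf("ok fields : %d\n", check_concat_fields(ok, 2));
        printf("too big   : %d\n", check_concat_fields(too_big, 5));
        return 0;
}
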
2003 +diff --git a/net/wireless/core.c b/net/wireless/core.c
2004 +index 3f4554723761d..3b25b78896a28 100644
2005 +--- a/net/wireless/core.c
2006 ++++ b/net/wireless/core.c
2007 +@@ -5,7 +5,7 @@
2008 + * Copyright 2006-2010 Johannes Berg <johannes@××××××××××××.net>
2009 + * Copyright 2013-2014 Intel Mobile Communications GmbH
2010 + * Copyright 2015-2017 Intel Deutschland GmbH
2011 +- * Copyright (C) 2018-2020 Intel Corporation
2012 ++ * Copyright (C) 2018-2021 Intel Corporation
2013 + */
2014 +
2015 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2016 +@@ -918,9 +918,6 @@ int wiphy_register(struct wiphy *wiphy)
2017 + return res;
2018 + }
2019 +
2020 +- /* set up regulatory info */
2021 +- wiphy_regulatory_register(wiphy);
2022 +-
2023 + list_add_rcu(&rdev->list, &cfg80211_rdev_list);
2024 + cfg80211_rdev_list_generation++;
2025 +
2026 +@@ -931,6 +928,9 @@ int wiphy_register(struct wiphy *wiphy)
2027 + cfg80211_debugfs_rdev_add(rdev);
2028 + nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);
2029 +
2030 ++ /* set up regulatory info */
2031 ++ wiphy_regulatory_register(wiphy);
2032 ++
2033 + if (wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) {
2034 + struct regulatory_request request;
2035 +
2036 +diff --git a/net/wireless/reg.c b/net/wireless/reg.c
2037 +index a04fdfb35f070..6b3386e1d93a5 100644
2038 +--- a/net/wireless/reg.c
2039 ++++ b/net/wireless/reg.c
2040 +@@ -4001,6 +4001,7 @@ void wiphy_regulatory_register(struct wiphy *wiphy)
2041 +
2042 + wiphy_update_regulatory(wiphy, lr->initiator);
2043 + wiphy_all_share_dfs_chan_state(wiphy);
2044 ++ reg_process_self_managed_hints();
2045 + }
2046 +
2047 + void wiphy_regulatory_deregister(struct wiphy *wiphy)