From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Wed, 06 Jan 2021 14:54:07
Message-Id: 1609944830.d2a35690ae40f9b1fa3f2e91ab73c25acff4c71e.mpagano@gentoo
1 commit: d2a35690ae40f9b1fa3f2e91ab73c25acff4c71e
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Jan 6 14:53:50 2021 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Jan 6 14:53:50 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d2a35690
7
8 Linux patch 5.10.5
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README             |    4 +
13 1004_linux-5.10.5.patch | 3149 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 3153 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index ce1d3f7..53642e2 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -59,6 +59,10 @@ Patch: 1003_linux-5.10.4.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.10.4
23
24 +Patch: 1004_linux-5.10.5.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.10.5
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1004_linux-5.10.5.patch b/1004_linux-5.10.5.patch
33 new file mode 100644
34 index 0000000..1af7e50
35 --- /dev/null
36 +++ b/1004_linux-5.10.5.patch
37 @@ -0,0 +1,3149 @@
38 +diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
39 +index b0ea17da8ff63..654649556306f 100644
40 +--- a/Documentation/gpu/todo.rst
41 ++++ b/Documentation/gpu/todo.rst
42 +@@ -273,6 +273,24 @@ Contact: Daniel Vetter, Noralf Tronnes
43 +
44 + Level: Advanced
45 +
46 ++Garbage collect fbdev scrolling acceleration
47 ++--------------------------------------------
48 ++
49 ++Scroll acceleration is disabled in fbcon by hard-wiring p->scrollmode =
50 ++SCROLL_REDRAW. There's a ton of code this will allow us to remove:
51 ++- lots of code in fbcon.c
52 ++- a bunch of the hooks in fbcon_ops, maybe the remaining hooks could be called
53 ++ directly instead of the function table (with a switch on p->rotate)
54 ++- fb_copyarea is unused after this, and can be deleted from all drivers
55 ++
56 ++Note that not all acceleration code can be deleted, since clearing and cursor
57 ++support is still accelerated, which might be good candidates for further
58 ++deletion projects.
59 ++
60 ++Contact: Daniel Vetter
61 ++
62 ++Level: Intermediate
63 ++
64 + idr_init_base()
65 + ---------------
66 +
67 +diff --git a/Makefile b/Makefile
68 +index 1e50d6af932ab..bb431fd473d2c 100644
69 +--- a/Makefile
70 ++++ b/Makefile
71 +@@ -1,7 +1,7 @@
72 + # SPDX-License-Identifier: GPL-2.0
73 + VERSION = 5
74 + PATCHLEVEL = 10
75 +-SUBLEVEL = 4
76 ++SUBLEVEL = 5
77 + EXTRAVERSION =
78 + NAME = Kleptomaniac Octopus
79 +
80 +diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
81 +index ef12e097f3184..27ca549ff47ed 100644
82 +--- a/arch/ia64/mm/init.c
83 ++++ b/arch/ia64/mm/init.c
84 +@@ -536,7 +536,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
85 +
86 + if (map_start < map_end)
87 + memmap_init_zone((unsigned long)(map_end - map_start),
88 +- args->nid, args->zone, page_to_pfn(map_start),
89 ++ args->nid, args->zone, page_to_pfn(map_start), page_to_pfn(map_end),
90 + MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
91 + return 0;
92 + }
93 +@@ -546,7 +546,7 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
94 + unsigned long start_pfn)
95 + {
96 + if (!vmem_map) {
97 +- memmap_init_zone(size, nid, zone, start_pfn,
98 ++ memmap_init_zone(size, nid, zone, start_pfn, start_pfn + size,
99 + MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
100 + } else {
101 + struct page *start;
102 +diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
103 +index 7d0f7682d01df..6b1eca53e36cc 100644
104 +--- a/arch/powerpc/kernel/irq.c
105 ++++ b/arch/powerpc/kernel/irq.c
106 +@@ -102,14 +102,6 @@ static inline notrace unsigned long get_irq_happened(void)
107 + return happened;
108 + }
109 +
110 +-static inline notrace int decrementer_check_overflow(void)
111 +-{
112 +- u64 now = get_tb();
113 +- u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
114 +-
115 +- return now >= *next_tb;
116 +-}
117 +-
118 + #ifdef CONFIG_PPC_BOOK3E
119 +
120 + /* This is called whenever we are re-enabling interrupts
121 +@@ -142,35 +134,6 @@ notrace unsigned int __check_irq_replay(void)
122 + trace_hardirqs_on();
123 + trace_hardirqs_off();
124 +
125 +- /*
126 +- * We are always hard disabled here, but PACA_IRQ_HARD_DIS may
127 +- * not be set, which means interrupts have only just been hard
128 +- * disabled as part of the local_irq_restore or interrupt return
129 +- * code. In that case, skip the decrementr check becaus it's
130 +- * expensive to read the TB.
131 +- *
132 +- * HARD_DIS then gets cleared here, but it's reconciled later.
133 +- * Either local_irq_disable will replay the interrupt and that
134 +- * will reconcile state like other hard interrupts. Or interrupt
135 +- * retur will replay the interrupt and in that case it sets
136 +- * PACA_IRQ_HARD_DIS by hand (see comments in entry_64.S).
137 +- */
138 +- if (happened & PACA_IRQ_HARD_DIS) {
139 +- local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
140 +-
141 +- /*
142 +- * We may have missed a decrementer interrupt if hard disabled.
143 +- * Check the decrementer register in case we had a rollover
144 +- * while hard disabled.
145 +- */
146 +- if (!(happened & PACA_IRQ_DEC)) {
147 +- if (decrementer_check_overflow()) {
148 +- local_paca->irq_happened |= PACA_IRQ_DEC;
149 +- happened |= PACA_IRQ_DEC;
150 +- }
151 +- }
152 +- }
153 +-
154 + if (happened & PACA_IRQ_DEC) {
155 + local_paca->irq_happened &= ~PACA_IRQ_DEC;
156 + return 0x900;
157 +@@ -186,6 +149,9 @@ notrace unsigned int __check_irq_replay(void)
158 + return 0x280;
159 + }
160 +
161 ++ if (happened & PACA_IRQ_HARD_DIS)
162 ++ local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
163 ++
164 + /* There should be nothing left ! */
165 + BUG_ON(local_paca->irq_happened != 0);
166 +
167 +@@ -229,18 +195,6 @@ again:
168 + if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
169 + WARN_ON_ONCE(mfmsr() & MSR_EE);
170 +
171 +- if (happened & PACA_IRQ_HARD_DIS) {
172 +- /*
173 +- * We may have missed a decrementer interrupt if hard disabled.
174 +- * Check the decrementer register in case we had a rollover
175 +- * while hard disabled.
176 +- */
177 +- if (!(happened & PACA_IRQ_DEC)) {
178 +- if (decrementer_check_overflow())
179 +- happened |= PACA_IRQ_DEC;
180 +- }
181 +- }
182 +-
183 + /*
184 + * Force the delivery of pending soft-disabled interrupts on PS3.
185 + * Any HV call will have this side effect.
186 +@@ -345,6 +299,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
187 + if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
188 + WARN_ON_ONCE(!(mfmsr() & MSR_EE));
189 + __hard_irq_disable();
190 ++ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
191 + } else {
192 + /*
193 + * We should already be hard disabled here. We had bugs
194 +diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
195 +index 74efe46f55327..7d372ff3504b2 100644
196 +--- a/arch/powerpc/kernel/time.c
197 ++++ b/arch/powerpc/kernel/time.c
198 +@@ -552,14 +552,11 @@ void timer_interrupt(struct pt_regs *regs)
199 + struct pt_regs *old_regs;
200 + u64 now;
201 +
202 +- /* Some implementations of hotplug will get timer interrupts while
203 +- * offline, just ignore these and we also need to set
204 +- * decrementers_next_tb as MAX to make sure __check_irq_replay
205 +- * don't replay timer interrupt when return, otherwise we'll trap
206 +- * here infinitely :(
207 ++ /*
208 ++ * Some implementations of hotplug will get timer interrupts while
209 ++ * offline, just ignore these.
210 + */
211 + if (unlikely(!cpu_online(smp_processor_id()))) {
212 +- *next_tb = ~(u64)0;
213 + set_dec(decrementer_max);
214 + return;
215 + }
216 +diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
217 +index d95954ad4c0af..c61c3b62c8c62 100644
218 +--- a/arch/powerpc/platforms/powernv/opal.c
219 ++++ b/arch/powerpc/platforms/powernv/opal.c
220 +@@ -731,7 +731,7 @@ int opal_hmi_exception_early2(struct pt_regs *regs)
221 + return 1;
222 + }
223 +
224 +-/* HMI exception handler called in virtual mode during check_irq_replay. */
225 ++/* HMI exception handler called in virtual mode when irqs are next enabled. */
226 + int opal_handle_hmi_exception(struct pt_regs *regs)
227 + {
228 + /*
229 +diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
230 +index f6b253e2be409..36ec0bdd8b63c 100644
231 +--- a/arch/powerpc/sysdev/mpic_msgr.c
232 ++++ b/arch/powerpc/sysdev/mpic_msgr.c
233 +@@ -191,7 +191,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
234 +
235 + /* IO map the message register block. */
236 + of_address_to_resource(np, 0, &rsrc);
237 +- msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
238 ++ msgr_block_addr = devm_ioremap(&dev->dev, rsrc.start, resource_size(&rsrc));
239 + if (!msgr_block_addr) {
240 + dev_err(&dev->dev, "Failed to iomap MPIC message registers");
241 + return -EFAULT;
242 +diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
243 +index 6343dca0dbeb6..71203324ff42b 100644
244 +--- a/arch/s390/kernel/entry.S
245 ++++ b/arch/s390/kernel/entry.S
246 +@@ -406,6 +406,7 @@ ENTRY(system_call)
247 + mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
248 + mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
249 + stg %r14,__PT_FLAGS(%r11)
250 ++ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
251 + ENABLE_INTS
252 + .Lsysc_do_svc:
253 + # clear user controlled register to prevent speculative use
254 +@@ -422,7 +423,6 @@ ENTRY(system_call)
255 + jnl .Lsysc_nr_ok
256 + slag %r8,%r1,3
257 + .Lsysc_nr_ok:
258 +- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
259 + stg %r2,__PT_ORIG_GPR2(%r11)
260 + stg %r7,STACK_FRAME_OVERHEAD(%r15)
261 + lg %r9,0(%r8,%r10) # get system call add.
262 +@@ -712,8 +712,8 @@ ENTRY(pgm_check_handler)
263 + mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
264 + mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
265 + mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
266 +-6: RESTORE_SM_CLEAR_PER
267 +- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
268 ++6: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
269 ++ RESTORE_SM_CLEAR_PER
270 + larl %r1,pgm_check_table
271 + llgh %r10,__PT_INT_CODE+2(%r11)
272 + nill %r10,0x007f
273 +@@ -734,8 +734,8 @@ ENTRY(pgm_check_handler)
274 + # PER event in supervisor state, must be kprobes
275 + #
276 + .Lpgm_kprobe:
277 +- RESTORE_SM_CLEAR_PER
278 + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
279 ++ RESTORE_SM_CLEAR_PER
280 + lgr %r2,%r11 # pass pointer to pt_regs
281 + brasl %r14,do_per_trap
282 + j .Lpgm_return
283 +@@ -777,10 +777,10 @@ ENTRY(io_int_handler)
284 + stmg %r8,%r9,__PT_PSW(%r11)
285 + mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
286 + xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
287 ++ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
288 + TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
289 + jo .Lio_restore
290 + TRACE_IRQS_OFF
291 +- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
292 + .Lio_loop:
293 + lgr %r2,%r11 # pass pointer to pt_regs
294 + lghi %r3,IO_INTERRUPT
295 +@@ -980,10 +980,10 @@ ENTRY(ext_int_handler)
296 + mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
297 + mvc __PT_INT_PARM_LONG(8,%r11),0(%r1)
298 + xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
299 ++ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
300 + TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
301 + jo .Lio_restore
302 + TRACE_IRQS_OFF
303 +- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
304 + lgr %r2,%r11 # pass pointer to pt_regs
305 + lghi %r3,EXT_INTERRUPT
306 + brasl %r14,do_IRQ
307 +diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c
308 +index ce115fce52f02..e4b9b2ce9abf4 100644
309 +--- a/arch/um/drivers/random.c
310 ++++ b/arch/um/drivers/random.c
311 +@@ -11,6 +11,7 @@
312 + #include <linux/fs.h>
313 + #include <linux/interrupt.h>
314 + #include <linux/miscdevice.h>
315 ++#include <linux/hw_random.h>
316 + #include <linux/delay.h>
317 + #include <linux/uaccess.h>
318 + #include <init.h>
319 +@@ -18,9 +19,8 @@
320 + #include <os.h>
321 +
322 + /*
323 +- * core module and version information
324 ++ * core module information
325 + */
326 +-#define RNG_VERSION "1.0.0"
327 + #define RNG_MODULE_NAME "hw_random"
328 +
329 + /* Changed at init time, in the non-modular case, and at module load
330 +@@ -28,88 +28,36 @@
331 + * protects against a module being loaded twice at the same time.
332 + */
333 + static int random_fd = -1;
334 +-static DECLARE_WAIT_QUEUE_HEAD(host_read_wait);
335 ++static struct hwrng hwrng = { 0, };
336 ++static DECLARE_COMPLETION(have_data);
337 +
338 +-static int rng_dev_open (struct inode *inode, struct file *filp)
339 ++static int rng_dev_read(struct hwrng *rng, void *buf, size_t max, bool block)
340 + {
341 +- /* enforce read-only access to this chrdev */
342 +- if ((filp->f_mode & FMODE_READ) == 0)
343 +- return -EINVAL;
344 +- if ((filp->f_mode & FMODE_WRITE) != 0)
345 +- return -EINVAL;
346 ++ int ret;
347 +
348 +- return 0;
349 +-}
350 +-
351 +-static atomic_t host_sleep_count = ATOMIC_INIT(0);
352 +-
353 +-static ssize_t rng_dev_read (struct file *filp, char __user *buf, size_t size,
354 +- loff_t *offp)
355 +-{
356 +- u32 data;
357 +- int n, ret = 0, have_data;
358 +-
359 +- while (size) {
360 +- n = os_read_file(random_fd, &data, sizeof(data));
361 +- if (n > 0) {
362 +- have_data = n;
363 +- while (have_data && size) {
364 +- if (put_user((u8) data, buf++)) {
365 +- ret = ret ? : -EFAULT;
366 +- break;
367 +- }
368 +- size--;
369 +- ret++;
370 +- have_data--;
371 +- data >>= 8;
372 +- }
373 +- }
374 +- else if (n == -EAGAIN) {
375 +- DECLARE_WAITQUEUE(wait, current);
376 +-
377 +- if (filp->f_flags & O_NONBLOCK)
378 +- return ret ? : -EAGAIN;
379 +-
380 +- atomic_inc(&host_sleep_count);
381 ++ for (;;) {
382 ++ ret = os_read_file(random_fd, buf, max);
383 ++ if (block && ret == -EAGAIN) {
384 + add_sigio_fd(random_fd);
385 +
386 +- add_wait_queue(&host_read_wait, &wait);
387 +- set_current_state(TASK_INTERRUPTIBLE);
388 ++ ret = wait_for_completion_killable(&have_data);
389 +
390 +- schedule();
391 +- remove_wait_queue(&host_read_wait, &wait);
392 ++ ignore_sigio_fd(random_fd);
393 ++ deactivate_fd(random_fd, RANDOM_IRQ);
394 +
395 +- if (atomic_dec_and_test(&host_sleep_count)) {
396 +- ignore_sigio_fd(random_fd);
397 +- deactivate_fd(random_fd, RANDOM_IRQ);
398 +- }
399 ++ if (ret < 0)
400 ++ break;
401 ++ } else {
402 ++ break;
403 + }
404 +- else
405 +- return n;
406 +-
407 +- if (signal_pending (current))
408 +- return ret ? : -ERESTARTSYS;
409 + }
410 +- return ret;
411 +-}
412 +
413 +-static const struct file_operations rng_chrdev_ops = {
414 +- .owner = THIS_MODULE,
415 +- .open = rng_dev_open,
416 +- .read = rng_dev_read,
417 +- .llseek = noop_llseek,
418 +-};
419 +-
420 +-/* rng_init shouldn't be called more than once at boot time */
421 +-static struct miscdevice rng_miscdev = {
422 +- HWRNG_MINOR,
423 +- RNG_MODULE_NAME,
424 +- &rng_chrdev_ops,
425 +-};
426 ++ return ret != -EAGAIN ? ret : 0;
427 ++}
428 +
429 + static irqreturn_t random_interrupt(int irq, void *data)
430 + {
431 +- wake_up(&host_read_wait);
432 ++ complete(&have_data);
433 +
434 + return IRQ_HANDLED;
435 + }
436 +@@ -126,18 +74,19 @@ static int __init rng_init (void)
437 + goto out;
438 +
439 + random_fd = err;
440 +-
441 + err = um_request_irq(RANDOM_IRQ, random_fd, IRQ_READ, random_interrupt,
442 + 0, "random", NULL);
443 + if (err)
444 + goto err_out_cleanup_hw;
445 +
446 + sigio_broken(random_fd, 1);
447 ++ hwrng.name = RNG_MODULE_NAME;
448 ++ hwrng.read = rng_dev_read;
449 ++ hwrng.quality = 1024;
450 +
451 +- err = misc_register (&rng_miscdev);
452 ++ err = hwrng_register(&hwrng);
453 + if (err) {
454 +- printk (KERN_ERR RNG_MODULE_NAME ": misc device register "
455 +- "failed\n");
456 ++ pr_err(RNG_MODULE_NAME " registering failed (%d)\n", err);
457 + goto err_out_cleanup_hw;
458 + }
459 + out:
460 +@@ -161,8 +110,8 @@ static void cleanup(void)
461 +
462 + static void __exit rng_cleanup(void)
463 + {
464 ++ hwrng_unregister(&hwrng);
465 + os_close_file(random_fd);
466 +- misc_deregister (&rng_miscdev);
467 + }
468 +
469 + module_init (rng_init);
470 +diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
471 +index eae8c83364f71..b12c1b0d3e1d0 100644
472 +--- a/arch/um/drivers/ubd_kern.c
473 ++++ b/arch/um/drivers/ubd_kern.c
474 +@@ -47,18 +47,25 @@
475 + /* Max request size is determined by sector mask - 32K */
476 + #define UBD_MAX_REQUEST (8 * sizeof(long))
477 +
478 ++struct io_desc {
479 ++ char *buffer;
480 ++ unsigned long length;
481 ++ unsigned long sector_mask;
482 ++ unsigned long long cow_offset;
483 ++ unsigned long bitmap_words[2];
484 ++};
485 ++
486 + struct io_thread_req {
487 + struct request *req;
488 + int fds[2];
489 + unsigned long offsets[2];
490 + unsigned long long offset;
491 +- unsigned long length;
492 +- char *buffer;
493 + int sectorsize;
494 +- unsigned long sector_mask;
495 +- unsigned long long cow_offset;
496 +- unsigned long bitmap_words[2];
497 + int error;
498 ++
499 ++ int desc_cnt;
500 ++ /* io_desc has to be the last element of the struct */
501 ++ struct io_desc io_desc[];
502 + };
503 +
504 +
505 +@@ -525,12 +532,7 @@ static void ubd_handler(void)
506 + blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
507 + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, io_req->req->q);
508 + }
509 +- if ((io_req->error) || (io_req->buffer == NULL))
510 +- blk_mq_end_request(io_req->req, io_req->error);
511 +- else {
512 +- if (!blk_update_request(io_req->req, io_req->error, io_req->length))
513 +- __blk_mq_end_request(io_req->req, io_req->error);
514 +- }
515 ++ blk_mq_end_request(io_req->req, io_req->error);
516 + kfree(io_req);
517 + }
518 + }
519 +@@ -946,6 +948,7 @@ static int ubd_add(int n, char **error_out)
520 + blk_queue_write_cache(ubd_dev->queue, true, false);
521 +
522 + blk_queue_max_segments(ubd_dev->queue, MAX_SG);
523 ++ blk_queue_segment_boundary(ubd_dev->queue, PAGE_SIZE - 1);
524 + err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
525 + if(err){
526 + *error_out = "Failed to register device";
527 +@@ -1289,37 +1292,74 @@ static void cowify_bitmap(__u64 io_offset, int length, unsigned long *cow_mask,
528 + *cow_offset += bitmap_offset;
529 + }
530 +
531 +-static void cowify_req(struct io_thread_req *req, unsigned long *bitmap,
532 ++static void cowify_req(struct io_thread_req *req, struct io_desc *segment,
533 ++ unsigned long offset, unsigned long *bitmap,
534 + __u64 bitmap_offset, __u64 bitmap_len)
535 + {
536 +- __u64 sector = req->offset >> SECTOR_SHIFT;
537 ++ __u64 sector = offset >> SECTOR_SHIFT;
538 + int i;
539 +
540 +- if (req->length > (sizeof(req->sector_mask) * 8) << SECTOR_SHIFT)
541 ++ if (segment->length > (sizeof(segment->sector_mask) * 8) << SECTOR_SHIFT)
542 + panic("Operation too long");
543 +
544 + if (req_op(req->req) == REQ_OP_READ) {
545 +- for (i = 0; i < req->length >> SECTOR_SHIFT; i++) {
546 ++ for (i = 0; i < segment->length >> SECTOR_SHIFT; i++) {
547 + if(ubd_test_bit(sector + i, (unsigned char *) bitmap))
548 + ubd_set_bit(i, (unsigned char *)
549 +- &req->sector_mask);
550 ++ &segment->sector_mask);
551 ++ }
552 ++ } else {
553 ++ cowify_bitmap(offset, segment->length, &segment->sector_mask,
554 ++ &segment->cow_offset, bitmap, bitmap_offset,
555 ++ segment->bitmap_words, bitmap_len);
556 ++ }
557 ++}
558 ++
559 ++static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req,
560 ++ struct request *req)
561 ++{
562 ++ struct bio_vec bvec;
563 ++ struct req_iterator iter;
564 ++ int i = 0;
565 ++ unsigned long byte_offset = io_req->offset;
566 ++ int op = req_op(req);
567 ++
568 ++ if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD) {
569 ++ io_req->io_desc[0].buffer = NULL;
570 ++ io_req->io_desc[0].length = blk_rq_bytes(req);
571 ++ } else {
572 ++ rq_for_each_segment(bvec, req, iter) {
573 ++ BUG_ON(i >= io_req->desc_cnt);
574 ++
575 ++ io_req->io_desc[i].buffer =
576 ++ page_address(bvec.bv_page) + bvec.bv_offset;
577 ++ io_req->io_desc[i].length = bvec.bv_len;
578 ++ i++;
579 ++ }
580 ++ }
581 ++
582 ++ if (dev->cow.file) {
583 ++ for (i = 0; i < io_req->desc_cnt; i++) {
584 ++ cowify_req(io_req, &io_req->io_desc[i], byte_offset,
585 ++ dev->cow.bitmap, dev->cow.bitmap_offset,
586 ++ dev->cow.bitmap_len);
587 ++ byte_offset += io_req->io_desc[i].length;
588 + }
589 ++
590 + }
591 +- else cowify_bitmap(req->offset, req->length, &req->sector_mask,
592 +- &req->cow_offset, bitmap, bitmap_offset,
593 +- req->bitmap_words, bitmap_len);
594 + }
595 +
596 +-static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
597 +- u64 off, struct bio_vec *bvec)
598 ++static struct io_thread_req *ubd_alloc_req(struct ubd *dev, struct request *req,
599 ++ int desc_cnt)
600 + {
601 +- struct ubd *dev = hctx->queue->queuedata;
602 + struct io_thread_req *io_req;
603 +- int ret;
604 ++ int i;
605 +
606 +- io_req = kmalloc(sizeof(struct io_thread_req), GFP_ATOMIC);
607 ++ io_req = kmalloc(sizeof(*io_req) +
608 ++ (desc_cnt * sizeof(struct io_desc)),
609 ++ GFP_ATOMIC);
610 + if (!io_req)
611 +- return -ENOMEM;
612 ++ return NULL;
613 +
614 + io_req->req = req;
615 + if (dev->cow.file)
616 +@@ -1327,26 +1367,41 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
617 + else
618 + io_req->fds[0] = dev->fd;
619 + io_req->error = 0;
620 +-
621 +- if (bvec != NULL) {
622 +- io_req->buffer = page_address(bvec->bv_page) + bvec->bv_offset;
623 +- io_req->length = bvec->bv_len;
624 +- } else {
625 +- io_req->buffer = NULL;
626 +- io_req->length = blk_rq_bytes(req);
627 +- }
628 +-
629 + io_req->sectorsize = SECTOR_SIZE;
630 + io_req->fds[1] = dev->fd;
631 +- io_req->cow_offset = -1;
632 +- io_req->offset = off;
633 +- io_req->sector_mask = 0;
634 ++ io_req->offset = (u64) blk_rq_pos(req) << SECTOR_SHIFT;
635 + io_req->offsets[0] = 0;
636 + io_req->offsets[1] = dev->cow.data_offset;
637 +
638 +- if (dev->cow.file)
639 +- cowify_req(io_req, dev->cow.bitmap,
640 +- dev->cow.bitmap_offset, dev->cow.bitmap_len);
641 ++ for (i = 0 ; i < desc_cnt; i++) {
642 ++ io_req->io_desc[i].sector_mask = 0;
643 ++ io_req->io_desc[i].cow_offset = -1;
644 ++ }
645 ++
646 ++ return io_req;
647 ++}
648 ++
649 ++static int ubd_submit_request(struct ubd *dev, struct request *req)
650 ++{
651 ++ int segs = 0;
652 ++ struct io_thread_req *io_req;
653 ++ int ret;
654 ++ int op = req_op(req);
655 ++
656 ++ if (op == REQ_OP_FLUSH)
657 ++ segs = 0;
658 ++ else if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD)
659 ++ segs = 1;
660 ++ else
661 ++ segs = blk_rq_nr_phys_segments(req);
662 ++
663 ++ io_req = ubd_alloc_req(dev, req, segs);
664 ++ if (!io_req)
665 ++ return -ENOMEM;
666 ++
667 ++ io_req->desc_cnt = segs;
668 ++ if (segs)
669 ++ ubd_map_req(dev, io_req, req);
670 +
671 + ret = os_write_file(thread_fd, &io_req, sizeof(io_req));
672 + if (ret != sizeof(io_req)) {
673 +@@ -1357,22 +1412,6 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
674 + return ret;
675 + }
676 +
677 +-static int queue_rw_req(struct blk_mq_hw_ctx *hctx, struct request *req)
678 +-{
679 +- struct req_iterator iter;
680 +- struct bio_vec bvec;
681 +- int ret;
682 +- u64 off = (u64)blk_rq_pos(req) << SECTOR_SHIFT;
683 +-
684 +- rq_for_each_segment(bvec, req, iter) {
685 +- ret = ubd_queue_one_vec(hctx, req, off, &bvec);
686 +- if (ret < 0)
687 +- return ret;
688 +- off += bvec.bv_len;
689 +- }
690 +- return 0;
691 +-}
692 +-
693 + static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
694 + const struct blk_mq_queue_data *bd)
695 + {
696 +@@ -1385,17 +1424,12 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
697 + spin_lock_irq(&ubd_dev->lock);
698 +
699 + switch (req_op(req)) {
700 +- /* operations with no lentgth/offset arguments */
701 + case REQ_OP_FLUSH:
702 +- ret = ubd_queue_one_vec(hctx, req, 0, NULL);
703 +- break;
704 + case REQ_OP_READ:
705 + case REQ_OP_WRITE:
706 +- ret = queue_rw_req(hctx, req);
707 +- break;
708 + case REQ_OP_DISCARD:
709 + case REQ_OP_WRITE_ZEROES:
710 +- ret = ubd_queue_one_vec(hctx, req, (u64)blk_rq_pos(req) << 9, NULL);
711 ++ ret = ubd_submit_request(ubd_dev, req);
712 + break;
713 + default:
714 + WARN_ON_ONCE(1);
715 +@@ -1483,22 +1517,22 @@ static int map_error(int error_code)
716 + * will result in unpredictable behaviour and/or crashes.
717 + */
718 +
719 +-static int update_bitmap(struct io_thread_req *req)
720 ++static int update_bitmap(struct io_thread_req *req, struct io_desc *segment)
721 + {
722 + int n;
723 +
724 +- if(req->cow_offset == -1)
725 ++ if (segment->cow_offset == -1)
726 + return map_error(0);
727 +
728 +- n = os_pwrite_file(req->fds[1], &req->bitmap_words,
729 +- sizeof(req->bitmap_words), req->cow_offset);
730 +- if (n != sizeof(req->bitmap_words))
731 ++ n = os_pwrite_file(req->fds[1], &segment->bitmap_words,
732 ++ sizeof(segment->bitmap_words), segment->cow_offset);
733 ++ if (n != sizeof(segment->bitmap_words))
734 + return map_error(-n);
735 +
736 + return map_error(0);
737 + }
738 +
739 +-static void do_io(struct io_thread_req *req)
740 ++static void do_io(struct io_thread_req *req, struct io_desc *desc)
741 + {
742 + char *buf = NULL;
743 + unsigned long len;
744 +@@ -1513,21 +1547,20 @@ static void do_io(struct io_thread_req *req)
745 + return;
746 + }
747 +
748 +- nsectors = req->length / req->sectorsize;
749 ++ nsectors = desc->length / req->sectorsize;
750 + start = 0;
751 + do {
752 +- bit = ubd_test_bit(start, (unsigned char *) &req->sector_mask);
753 ++ bit = ubd_test_bit(start, (unsigned char *) &desc->sector_mask);
754 + end = start;
755 + while((end < nsectors) &&
756 +- (ubd_test_bit(end, (unsigned char *)
757 +- &req->sector_mask) == bit))
758 ++ (ubd_test_bit(end, (unsigned char *) &desc->sector_mask) == bit))
759 + end++;
760 +
761 + off = req->offset + req->offsets[bit] +
762 + start * req->sectorsize;
763 + len = (end - start) * req->sectorsize;
764 +- if (req->buffer != NULL)
765 +- buf = &req->buffer[start * req->sectorsize];
766 ++ if (desc->buffer != NULL)
767 ++ buf = &desc->buffer[start * req->sectorsize];
768 +
769 + switch (req_op(req->req)) {
770 + case REQ_OP_READ:
771 +@@ -1567,7 +1600,8 @@ static void do_io(struct io_thread_req *req)
772 + start = end;
773 + } while(start < nsectors);
774 +
775 +- req->error = update_bitmap(req);
776 ++ req->offset += len;
777 ++ req->error = update_bitmap(req, desc);
778 + }
779 +
780 + /* Changed in start_io_thread, which is serialized by being called only
781 +@@ -1600,8 +1634,13 @@ int io_thread(void *arg)
782 + }
783 +
784 + for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
785 ++ struct io_thread_req *req = (*io_req_buffer)[count];
786 ++ int i;
787 ++
788 + io_count++;
789 +- do_io((*io_req_buffer)[count]);
790 ++ for (i = 0; !req->error && i < req->desc_cnt; i++)
791 ++ do_io(req, &(req->io_desc[i]));
792 ++
793 + }
794 +
795 + written = 0;
796 +diff --git a/block/blk-pm.c b/block/blk-pm.c
797 +index b85234d758f7b..17bd020268d42 100644
798 +--- a/block/blk-pm.c
799 ++++ b/block/blk-pm.c
800 +@@ -67,6 +67,10 @@ int blk_pre_runtime_suspend(struct request_queue *q)
801 +
802 + WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);
803 +
804 ++ spin_lock_irq(&q->queue_lock);
805 ++ q->rpm_status = RPM_SUSPENDING;
806 ++ spin_unlock_irq(&q->queue_lock);
807 ++
808 + /*
809 + * Increase the pm_only counter before checking whether any
810 + * non-PM blk_queue_enter() calls are in progress to avoid that any
811 +@@ -89,15 +93,14 @@ int blk_pre_runtime_suspend(struct request_queue *q)
812 + /* Switch q_usage_counter back to per-cpu mode. */
813 + blk_mq_unfreeze_queue(q);
814 +
815 +- spin_lock_irq(&q->queue_lock);
816 +- if (ret < 0)
817 ++ if (ret < 0) {
818 ++ spin_lock_irq(&q->queue_lock);
819 ++ q->rpm_status = RPM_ACTIVE;
820 + pm_runtime_mark_last_busy(q->dev);
821 +- else
822 +- q->rpm_status = RPM_SUSPENDING;
823 +- spin_unlock_irq(&q->queue_lock);
824 ++ spin_unlock_irq(&q->queue_lock);
825 +
826 +- if (ret)
827 + blk_clear_pm_only(q);
828 ++ }
829 +
830 + return ret;
831 + }
832 +diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
833 +index 78d635f1d1567..376164cdf2ea9 100644
834 +--- a/drivers/bluetooth/hci_h5.c
835 ++++ b/drivers/bluetooth/hci_h5.c
836 +@@ -251,8 +251,12 @@ static int h5_close(struct hci_uart *hu)
837 + if (h5->vnd && h5->vnd->close)
838 + h5->vnd->close(h5);
839 +
840 +- if (!hu->serdev)
841 +- kfree(h5);
842 ++ if (hu->serdev)
843 ++ serdev_device_close(hu->serdev);
844 ++
845 ++ kfree_skb(h5->rx_skb);
846 ++ kfree(h5);
847 ++ h5 = NULL;
848 +
849 + return 0;
850 + }
851 +diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
852 +index e92c4d9469d82..5952210526aaa 100644
853 +--- a/drivers/char/hw_random/Kconfig
854 ++++ b/drivers/char/hw_random/Kconfig
855 +@@ -540,15 +540,15 @@ endif # HW_RANDOM
856 +
857 + config UML_RANDOM
858 + depends on UML
859 +- tristate "Hardware random number generator"
860 ++ select HW_RANDOM
861 ++ tristate "UML Random Number Generator support"
862 + help
863 + This option enables UML's "hardware" random number generator. It
864 + attaches itself to the host's /dev/random, supplying as much entropy
865 + as the host has, rather than the small amount the UML gets from its
866 +- own drivers. It registers itself as a standard hardware random number
867 +- generator, major 10, minor 183, and the canonical device name is
868 +- /dev/hwrng.
869 +- The way to make use of this is to install the rng-tools package
870 +- (check your distro, or download from
871 +- http://sourceforge.net/projects/gkernel/). rngd periodically reads
872 +- /dev/hwrng and injects the entropy into /dev/random.
873 ++ own drivers. It registers itself as a rng-core driver thus providing
874 ++ a device which is usually called /dev/hwrng. This hardware random
875 ++ number generator does feed into the kernel's random number generator
876 ++ entropy pool.
877 ++
878 ++ If unsure, say Y.
879 +diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
880 +index 27513d311242e..de7b74505e75e 100644
881 +--- a/drivers/dax/bus.c
882 ++++ b/drivers/dax/bus.c
883 +@@ -367,19 +367,28 @@ void kill_dev_dax(struct dev_dax *dev_dax)
884 + }
885 + EXPORT_SYMBOL_GPL(kill_dev_dax);
886 +
887 +-static void free_dev_dax_ranges(struct dev_dax *dev_dax)
888 ++static void trim_dev_dax_range(struct dev_dax *dev_dax)
889 + {
890 ++ int i = dev_dax->nr_range - 1;
891 ++ struct range *range = &dev_dax->ranges[i].range;
892 + struct dax_region *dax_region = dev_dax->region;
893 +- int i;
894 +
895 + device_lock_assert(dax_region->dev);
896 +- for (i = 0; i < dev_dax->nr_range; i++) {
897 +- struct range *range = &dev_dax->ranges[i].range;
898 +-
899 +- __release_region(&dax_region->res, range->start,
900 +- range_len(range));
901 ++ dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i,
902 ++ (unsigned long long)range->start,
903 ++ (unsigned long long)range->end);
904 ++
905 ++ __release_region(&dax_region->res, range->start, range_len(range));
906 ++ if (--dev_dax->nr_range == 0) {
907 ++ kfree(dev_dax->ranges);
908 ++ dev_dax->ranges = NULL;
909 + }
910 +- dev_dax->nr_range = 0;
911 ++}
912 ++
913 ++static void free_dev_dax_ranges(struct dev_dax *dev_dax)
914 ++{
915 ++ while (dev_dax->nr_range)
916 ++ trim_dev_dax_range(dev_dax);
917 + }
918 +
919 + static void unregister_dev_dax(void *dev)
920 +@@ -804,15 +813,10 @@ static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
921 + return 0;
922 +
923 + rc = devm_register_dax_mapping(dev_dax, dev_dax->nr_range - 1);
924 +- if (rc) {
925 +- dev_dbg(dev, "delete range[%d]: %pa:%pa\n", dev_dax->nr_range - 1,
926 +- &alloc->start, &alloc->end);
927 +- dev_dax->nr_range--;
928 +- __release_region(res, alloc->start, resource_size(alloc));
929 +- return rc;
930 +- }
931 ++ if (rc)
932 ++ trim_dev_dax_range(dev_dax);
933 +
934 +- return 0;
935 ++ return rc;
936 + }
937 +
938 + static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, resource_size_t size)
939 +@@ -885,12 +889,7 @@ static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size)
940 + if (shrink >= range_len(range)) {
941 + devm_release_action(dax_region->dev,
942 + unregister_dax_mapping, &mapping->dev);
943 +- __release_region(&dax_region->res, range->start,
944 +- range_len(range));
945 +- dev_dax->nr_range--;
946 +- dev_dbg(dev, "delete range[%d]: %#llx:%#llx\n", i,
947 +- (unsigned long long) range->start,
948 +- (unsigned long long) range->end);
949 ++ trim_dev_dax_range(dev_dax);
950 + to_shrink -= shrink;
951 + if (!to_shrink)
952 + break;
953 +@@ -1274,7 +1273,6 @@ static void dev_dax_release(struct device *dev)
954 + put_dax(dax_dev);
955 + free_dev_dax_id(dev_dax);
956 + dax_region_put(dax_region);
957 +- kfree(dev_dax->ranges);
958 + kfree(dev_dax->pgmap);
959 + kfree(dev_dax);
960 + }
961 +diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
962 +index 6b431db146cd9..1c6e401dd4cce 100644
963 +--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
964 ++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
965 +@@ -704,24 +704,24 @@ static struct wm_table ddr4_wm_table_rn = {
966 + .wm_inst = WM_B,
967 + .wm_type = WM_TYPE_PSTATE_CHG,
968 + .pstate_latency_us = 11.72,
969 +- .sr_exit_time_us = 10.12,
970 +- .sr_enter_plus_exit_time_us = 11.48,
971 ++ .sr_exit_time_us = 11.12,
972 ++ .sr_enter_plus_exit_time_us = 12.48,
973 + .valid = true,
974 + },
975 + {
976 + .wm_inst = WM_C,
977 + .wm_type = WM_TYPE_PSTATE_CHG,
978 + .pstate_latency_us = 11.72,
979 +- .sr_exit_time_us = 10.12,
980 +- .sr_enter_plus_exit_time_us = 11.48,
981 ++ .sr_exit_time_us = 11.12,
982 ++ .sr_enter_plus_exit_time_us = 12.48,
983 + .valid = true,
984 + },
985 + {
986 + .wm_inst = WM_D,
987 + .wm_type = WM_TYPE_PSTATE_CHG,
988 + .pstate_latency_us = 11.72,
989 +- .sr_exit_time_us = 10.12,
990 +- .sr_enter_plus_exit_time_us = 11.48,
991 ++ .sr_exit_time_us = 11.12,
992 ++ .sr_enter_plus_exit_time_us = 12.48,
993 + .valid = true,
994 + },
995 + }
996 +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
997 +index b409f6b2bfd83..210466b2d8631 100644
998 +--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
999 ++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
1000 +@@ -119,7 +119,8 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
1001 + .disable_hpd = dce110_link_encoder_disable_hpd,
1002 + .is_dig_enabled = dce110_is_dig_enabled,
1003 + .destroy = dce110_link_encoder_destroy,
1004 +- .get_max_link_cap = dce110_link_encoder_get_max_link_cap
1005 ++ .get_max_link_cap = dce110_link_encoder_get_max_link_cap,
1006 ++ .get_dig_frontend = dce110_get_dig_frontend,
1007 + };
1008 +
1009 + static enum bp_result link_transmitter_control(
1010 +@@ -235,6 +236,44 @@ static void set_link_training_complete(
1011 +
1012 + }
1013 +
1014 ++unsigned int dce110_get_dig_frontend(struct link_encoder *enc)
1015 ++{
1016 ++ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
1017 ++ u32 value;
1018 ++ enum engine_id result;
1019 ++
1020 ++ REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &value);
1021 ++
1022 ++ switch (value) {
1023 ++ case DCE110_DIG_FE_SOURCE_SELECT_DIGA:
1024 ++ result = ENGINE_ID_DIGA;
1025 ++ break;
1026 ++ case DCE110_DIG_FE_SOURCE_SELECT_DIGB:
1027 ++ result = ENGINE_ID_DIGB;
1028 ++ break;
1029 ++ case DCE110_DIG_FE_SOURCE_SELECT_DIGC:
1030 ++ result = ENGINE_ID_DIGC;
1031 ++ break;
1032 ++ case DCE110_DIG_FE_SOURCE_SELECT_DIGD:
1033 ++ result = ENGINE_ID_DIGD;
1034 ++ break;
1035 ++ case DCE110_DIG_FE_SOURCE_SELECT_DIGE:
1036 ++ result = ENGINE_ID_DIGE;
1037 ++ break;
1038 ++ case DCE110_DIG_FE_SOURCE_SELECT_DIGF:
1039 ++ result = ENGINE_ID_DIGF;
1040 ++ break;
1041 ++ case DCE110_DIG_FE_SOURCE_SELECT_DIGG:
1042 ++ result = ENGINE_ID_DIGG;
1043 ++ break;
1044 ++ default:
1045 ++ // invalid source select DIG
1046 ++ result = ENGINE_ID_UNKNOWN;
1047 ++ }
1048 ++
1049 ++ return result;
1050 ++}
1051 ++
1052 + void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
1053 + struct link_encoder *enc,
1054 + uint32_t index)
1055 +@@ -1665,7 +1704,8 @@ static const struct link_encoder_funcs dce60_lnk_enc_funcs = {
1056 + .disable_hpd = dce110_link_encoder_disable_hpd,
1057 + .is_dig_enabled = dce110_is_dig_enabled,
1058 + .destroy = dce110_link_encoder_destroy,
1059 +- .get_max_link_cap = dce110_link_encoder_get_max_link_cap
1060 ++ .get_max_link_cap = dce110_link_encoder_get_max_link_cap,
1061 ++ .get_dig_frontend = dce110_get_dig_frontend
1062 + };
1063 +
1064 + void dce60_link_encoder_construct(
1065 +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
1066 +index cb714a48b171c..fc6ade824c231 100644
1067 +--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
1068 ++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
1069 +@@ -295,6 +295,8 @@ void dce110_link_encoder_connect_dig_be_to_fe(
1070 + enum engine_id engine,
1071 + bool connect);
1072 +
1073 ++unsigned int dce110_get_dig_frontend(struct link_encoder *enc);
1074 ++
1075 + void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
1076 + struct link_encoder *enc,
1077 + uint32_t index);
1078 +diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
1079 +index 1c6b78ad5ade4..b61bf53ec07af 100644
1080 +--- a/drivers/i3c/master.c
1081 ++++ b/drivers/i3c/master.c
1082 +@@ -2537,7 +2537,7 @@ int i3c_master_register(struct i3c_master_controller *master,
1083 +
1084 + ret = i3c_master_bus_init(master);
1085 + if (ret)
1086 +- goto err_put_dev;
1087 ++ goto err_destroy_wq;
1088 +
1089 + ret = device_add(&master->dev);
1090 + if (ret)
1091 +@@ -2568,6 +2568,9 @@ err_del_dev:
1092 + err_cleanup_bus:
1093 + i3c_master_bus_cleanup(master);
1094 +
1095 ++err_destroy_wq:
1096 ++ destroy_workqueue(master->wq);
1097 ++
1098 + err_put_dev:
1099 + put_device(&master->dev);
1100 +
1101 +diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
1102 +index f74982dcbea0d..6b8e5bdd8526d 100644
1103 +--- a/drivers/md/dm-verity-target.c
1104 ++++ b/drivers/md/dm-verity-target.c
1105 +@@ -537,6 +537,15 @@ static int verity_verify_io(struct dm_verity_io *io)
1106 + return 0;
1107 + }
1108 +
1109 ++/*
1110 ++ * Skip verity work in response to I/O error when system is shutting down.
1111 ++ */
1112 ++static inline bool verity_is_system_shutting_down(void)
1113 ++{
1114 ++ return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
1115 ++ || system_state == SYSTEM_RESTART;
1116 ++}
1117 ++
1118 + /*
1119 + * End one "io" structure with a given error.
1120 + */
1121 +@@ -564,7 +573,8 @@ static void verity_end_io(struct bio *bio)
1122 + {
1123 + struct dm_verity_io *io = bio->bi_private;
1124 +
1125 +- if (bio->bi_status && !verity_fec_is_enabled(io->v)) {
1126 ++ if (bio->bi_status &&
1127 ++ (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
1128 + verity_finish_io(io, bio->bi_status);
1129 + return;
1130 + }
1131 +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
1132 +index 3b598a3cb462a..9f9d8b67b5dd1 100644
1133 +--- a/drivers/md/raid10.c
1134 ++++ b/drivers/md/raid10.c
1135 +@@ -1128,7 +1128,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
1136 + struct md_rdev *err_rdev = NULL;
1137 + gfp_t gfp = GFP_NOIO;
1138 +
1139 +- if (r10_bio->devs[slot].rdev) {
1140 ++ if (slot >= 0 && r10_bio->devs[slot].rdev) {
1141 + /*
1142 + * This is an error retry, but we cannot
1143 + * safely dereference the rdev in the r10_bio,
1144 +@@ -1493,6 +1493,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
1145 + r10_bio->mddev = mddev;
1146 + r10_bio->sector = bio->bi_iter.bi_sector;
1147 + r10_bio->state = 0;
1148 ++ r10_bio->read_slot = -1;
1149 + memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies);
1150 +
1151 + if (bio_data_dir(bio) == READ)
1152 +diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c
1153 +index c07f46f5176ea..b4f661bb56481 100644
1154 +--- a/drivers/media/usb/dvb-usb/gp8psk.c
1155 ++++ b/drivers/media/usb/dvb-usb/gp8psk.c
1156 +@@ -182,7 +182,7 @@ out_rel_fw:
1157 +
1158 + static int gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff)
1159 + {
1160 +- u8 status, buf;
1161 ++ u8 status = 0, buf;
1162 + int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct);
1163 +
1164 + if (onoff) {
1165 +diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
1166 +index 16695366ec926..26ff49fdf0f7d 100644
1167 +--- a/drivers/misc/vmw_vmci/vmci_context.c
1168 ++++ b/drivers/misc/vmw_vmci/vmci_context.c
1169 +@@ -743,7 +743,7 @@ static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
1170 + return VMCI_ERROR_MORE_DATA;
1171 + }
1172 +
1173 +- dbells = kmalloc(data_size, GFP_ATOMIC);
1174 ++ dbells = kzalloc(data_size, GFP_ATOMIC);
1175 + if (!dbells)
1176 + return VMCI_ERROR_NO_MEM;
1177 +
1178 +diff --git a/drivers/opp/core.c b/drivers/opp/core.c
1179 +index 0e0a5269dc82f..903b465c8568b 100644
1180 +--- a/drivers/opp/core.c
1181 ++++ b/drivers/opp/core.c
1182 +@@ -1102,7 +1102,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
1183 + if (IS_ERR(opp_table->clk)) {
1184 + ret = PTR_ERR(opp_table->clk);
1185 + if (ret == -EPROBE_DEFER)
1186 +- goto err;
1187 ++ goto remove_opp_dev;
1188 +
1189 + dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
1190 + }
1191 +@@ -1111,7 +1111,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
1192 + ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
1193 + if (ret) {
1194 + if (ret == -EPROBE_DEFER)
1195 +- goto err;
1196 ++ goto put_clk;
1197 +
1198 + dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
1199 + __func__, ret);
1200 +@@ -1125,6 +1125,11 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
1201 + list_add(&opp_table->node, &opp_tables);
1202 + return opp_table;
1203 +
1204 ++put_clk:
1205 ++ if (!IS_ERR(opp_table->clk))
1206 ++ clk_put(opp_table->clk);
1207 ++remove_opp_dev:
1208 ++ _remove_opp_dev(opp_dev, opp_table);
1209 + err:
1210 + kfree(opp_table);
1211 + return ERR_PTR(ret);
1212 +diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
1213 +index c6b89273feba8..d4b2ab7861266 100644
1214 +--- a/drivers/rtc/rtc-pl031.c
1215 ++++ b/drivers/rtc/rtc-pl031.c
1216 +@@ -361,8 +361,10 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
1217 +
1218 + device_init_wakeup(&adev->dev, true);
1219 + ldata->rtc = devm_rtc_allocate_device(&adev->dev);
1220 +- if (IS_ERR(ldata->rtc))
1221 +- return PTR_ERR(ldata->rtc);
1222 ++ if (IS_ERR(ldata->rtc)) {
1223 ++ ret = PTR_ERR(ldata->rtc);
1224 ++ goto out;
1225 ++ }
1226 +
1227 + ldata->rtc->ops = ops;
1228 + ldata->rtc->range_min = vendor->range_min;
1229 +diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
1230 +index e2b8b150bcb44..f2818cdd11d82 100644
1231 +--- a/drivers/rtc/rtc-sun6i.c
1232 ++++ b/drivers/rtc/rtc-sun6i.c
1233 +@@ -272,7 +272,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
1234 + 300000000);
1235 + if (IS_ERR(rtc->int_osc)) {
1236 + pr_crit("Couldn't register the internal oscillator\n");
1237 +- return;
1238 ++ goto err;
1239 + }
1240 +
1241 + parents[0] = clk_hw_get_name(rtc->int_osc);
1242 +@@ -290,7 +290,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
1243 + rtc->losc = clk_register(NULL, &rtc->hw);
1244 + if (IS_ERR(rtc->losc)) {
1245 + pr_crit("Couldn't register the LOSC clock\n");
1246 +- return;
1247 ++ goto err_register;
1248 + }
1249 +
1250 + of_property_read_string_index(node, "clock-output-names", 1,
1251 +@@ -301,7 +301,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
1252 + &rtc->lock);
1253 + if (IS_ERR(rtc->ext_losc)) {
1254 + pr_crit("Couldn't register the LOSC external gate\n");
1255 +- return;
1256 ++ goto err_register;
1257 + }
1258 +
1259 + clk_data->num = 2;
1260 +@@ -314,6 +314,8 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
1261 + of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
1262 + return;
1263 +
1264 ++err_register:
1265 ++ clk_hw_unregister_fixed_rate(rtc->int_osc);
1266 + err:
1267 + kfree(clk_data);
1268 + }
1269 +diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig
1270 +index b206e266b4e72..8b0deece9758b 100644
1271 +--- a/drivers/scsi/cxgbi/cxgb4i/Kconfig
1272 ++++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig
1273 +@@ -4,6 +4,7 @@ config SCSI_CXGB4_ISCSI
1274 + depends on PCI && INET && (IPV6 || IPV6=n)
1275 + depends on THERMAL || !THERMAL
1276 + depends on ETHERNET
1277 ++ depends on TLS || TLS=n
1278 + select NET_VENDOR_CHELSIO
1279 + select CHELSIO_T4
1280 + select CHELSIO_LIB
1281 +diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
1282 +index 3fd16b7f61507..aadaea052f51d 100644
1283 +--- a/drivers/spi/Kconfig
1284 ++++ b/drivers/spi/Kconfig
1285 +@@ -256,6 +256,7 @@ config SPI_DW_BT1
1286 + tristate "Baikal-T1 SPI driver for DW SPI core"
1287 + depends on MIPS_BAIKAL_T1 || COMPILE_TEST
1288 + select MULTIPLEXER
1289 ++ select MUX_MMIO
1290 + help
1291 + Baikal-T1 SoC is equipped with three DW APB SSI-based MMIO SPI
1292 + controllers. Two of them are pretty much normal: with IRQ, DMA,
1293 +@@ -269,8 +270,6 @@ config SPI_DW_BT1
1294 + config SPI_DW_BT1_DIRMAP
1295 + bool "Directly mapped Baikal-T1 Boot SPI flash support"
1296 + depends on SPI_DW_BT1
1297 +- select MULTIPLEXER
1298 +- select MUX_MMIO
1299 + help
1300 + Directly mapped SPI flash memory is an interface specific to the
1301 + Baikal-T1 System Boot Controller. It is a 16MB MMIO region, which
1302 +diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
1303 +index cef437817b0dc..8d1ae973041ae 100644
1304 +--- a/drivers/video/fbdev/core/fbcon.c
1305 ++++ b/drivers/video/fbdev/core/fbcon.c
1306 +@@ -1033,7 +1033,7 @@ static void fbcon_init(struct vc_data *vc, int init)
1307 + struct vc_data *svc = *default_mode;
1308 + struct fbcon_display *t, *p = &fb_display[vc->vc_num];
1309 + int logo = 1, new_rows, new_cols, rows, cols, charcnt = 256;
1310 +- int cap, ret;
1311 ++ int ret;
1312 +
1313 + if (WARN_ON(info_idx == -1))
1314 + return;
1315 +@@ -1042,7 +1042,6 @@ static void fbcon_init(struct vc_data *vc, int init)
1316 + con2fb_map[vc->vc_num] = info_idx;
1317 +
1318 + info = registered_fb[con2fb_map[vc->vc_num]];
1319 +- cap = info->flags;
1320 +
1321 + if (logo_shown < 0 && console_loglevel <= CONSOLE_LOGLEVEL_QUIET)
1322 + logo_shown = FBCON_LOGO_DONTSHOW;
1323 +@@ -1147,11 +1146,13 @@ static void fbcon_init(struct vc_data *vc, int init)
1324 +
1325 + ops->graphics = 0;
1326 +
1327 +- if ((cap & FBINFO_HWACCEL_COPYAREA) &&
1328 +- !(cap & FBINFO_HWACCEL_DISABLED))
1329 +- p->scrollmode = SCROLL_MOVE;
1330 +- else /* default to something safe */
1331 +- p->scrollmode = SCROLL_REDRAW;
1332 ++ /*
1333 ++ * No more hw acceleration for fbcon.
1334 ++ *
1335 ++ * FIXME: Garbage collect all the now dead code after sufficient time
1336 ++ * has passed.
1337 ++ */
1338 ++ p->scrollmode = SCROLL_REDRAW;
1339 +
1340 + /*
1341 + * ++guenther: console.c:vc_allocate() relies on initializing
1342 +@@ -1961,45 +1962,15 @@ static void updatescrollmode(struct fbcon_display *p,
1343 + {
1344 + struct fbcon_ops *ops = info->fbcon_par;
1345 + int fh = vc->vc_font.height;
1346 +- int cap = info->flags;
1347 +- u16 t = 0;
1348 +- int ypan = FBCON_SWAP(ops->rotate, info->fix.ypanstep,
1349 +- info->fix.xpanstep);
1350 +- int ywrap = FBCON_SWAP(ops->rotate, info->fix.ywrapstep, t);
1351 + int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
1352 + int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual,
1353 + info->var.xres_virtual);
1354 +- int good_pan = (cap & FBINFO_HWACCEL_YPAN) &&
1355 +- divides(ypan, vc->vc_font.height) && vyres > yres;
1356 +- int good_wrap = (cap & FBINFO_HWACCEL_YWRAP) &&
1357 +- divides(ywrap, vc->vc_font.height) &&
1358 +- divides(vc->vc_font.height, vyres) &&
1359 +- divides(vc->vc_font.height, yres);
1360 +- int reading_fast = cap & FBINFO_READS_FAST;
1361 +- int fast_copyarea = (cap & FBINFO_HWACCEL_COPYAREA) &&
1362 +- !(cap & FBINFO_HWACCEL_DISABLED);
1363 +- int fast_imageblit = (cap & FBINFO_HWACCEL_IMAGEBLIT) &&
1364 +- !(cap & FBINFO_HWACCEL_DISABLED);
1365 +
1366 + p->vrows = vyres/fh;
1367 + if (yres > (fh * (vc->vc_rows + 1)))
1368 + p->vrows -= (yres - (fh * vc->vc_rows)) / fh;
1369 + if ((yres % fh) && (vyres % fh < yres % fh))
1370 + p->vrows--;
1371 +-
1372 +- if (good_wrap || good_pan) {
1373 +- if (reading_fast || fast_copyarea)
1374 +- p->scrollmode = good_wrap ?
1375 +- SCROLL_WRAP_MOVE : SCROLL_PAN_MOVE;
1376 +- else
1377 +- p->scrollmode = good_wrap ? SCROLL_REDRAW :
1378 +- SCROLL_PAN_REDRAW;
1379 +- } else {
1380 +- if (reading_fast || (fast_copyarea && !fast_imageblit))
1381 +- p->scrollmode = SCROLL_MOVE;
1382 +- else
1383 +- p->scrollmode = SCROLL_REDRAW;
1384 +- }
1385 + }
1386 +
1387 + #define PITCH(w) (((w) + 7) >> 3)
1388 +diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
1389 +index 836319cbaca9d..359302f71f7ef 100644
1390 +--- a/drivers/watchdog/rti_wdt.c
1391 ++++ b/drivers/watchdog/rti_wdt.c
1392 +@@ -227,8 +227,10 @@ static int rti_wdt_probe(struct platform_device *pdev)
1393 +
1394 + pm_runtime_enable(dev);
1395 + ret = pm_runtime_get_sync(dev);
1396 +- if (ret)
1397 ++ if (ret) {
1398 ++ pm_runtime_put_noidle(dev);
1399 + return dev_err_probe(dev, ret, "runtime pm failed\n");
1400 ++ }
1401 +
1402 + platform_set_drvdata(pdev, wdt);
1403 +
1404 +diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
1405 +index 3ac7611ef7ce2..fd691e4815c56 100644
1406 +--- a/fs/bfs/inode.c
1407 ++++ b/fs/bfs/inode.c
1408 +@@ -350,7 +350,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
1409 +
1410 + info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE) / sizeof(struct bfs_inode) + BFS_ROOT_INO - 1;
1411 + if (info->si_lasti == BFS_MAX_LASTI)
1412 +- printf("WARNING: filesystem %s was created with 512 inodes, the real maximum is 511, mounting anyway\n", s->s_id);
1413 ++ printf("NOTE: filesystem %s was created with 512 inodes, the real maximum is 511, mounting anyway\n", s->s_id);
1414 + else if (info->si_lasti > BFS_MAX_LASTI) {
1415 + printf("Impossible last inode number %lu > %d on %s\n", info->si_lasti, BFS_MAX_LASTI, s->s_id);
1416 + goto out1;
1417 +diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
1418 +index 526faf4778ce4..2462a9a84b956 100644
1419 +--- a/fs/ceph/inode.c
1420 ++++ b/fs/ceph/inode.c
1421 +@@ -1335,6 +1335,8 @@ retry_lookup:
1422 + in, ceph_vinop(in));
1423 + if (in->i_state & I_NEW)
1424 + discard_new_inode(in);
1425 ++ else
1426 ++ iput(in);
1427 + goto done;
1428 + }
1429 + req->r_target_inode = in;
1430 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
1431 +index 37a619bf1ac7c..e67d5de6f28ca 100644
1432 +--- a/fs/ext4/mballoc.c
1433 ++++ b/fs/ext4/mballoc.c
1434 +@@ -2395,9 +2395,9 @@ repeat:
1435 +
1436 + nr = sbi->s_mb_prefetch;
1437 + if (ext4_has_feature_flex_bg(sb)) {
1438 +- nr = (group / sbi->s_mb_prefetch) *
1439 +- sbi->s_mb_prefetch;
1440 +- nr = nr + sbi->s_mb_prefetch - group;
1441 ++ nr = 1 << sbi->s_log_groups_per_flex;
1442 ++ nr -= group & (nr - 1);
1443 ++ nr = min(nr, sbi->s_mb_prefetch);
1444 + }
1445 + prefetch_grp = ext4_mb_prefetch(sb, group,
1446 + nr, &prefetch_ios);
1447 +@@ -2733,7 +2733,8 @@ static int ext4_mb_init_backend(struct super_block *sb)
1448 +
1449 + if (ext4_has_feature_flex_bg(sb)) {
1450 + /* a single flex group is supposed to be read by a single IO */
1451 +- sbi->s_mb_prefetch = 1 << sbi->s_es->s_log_groups_per_flex;
1452 ++ sbi->s_mb_prefetch = min(1 << sbi->s_es->s_log_groups_per_flex,
1453 ++ BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
1454 + sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
1455 + } else {
1456 + sbi->s_mb_prefetch = 32;
1457 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1458 +index 2b08b162075c3..ea5aefa23a20a 100644
1459 +--- a/fs/ext4/super.c
1460 ++++ b/fs/ext4/super.c
1461 +@@ -4186,18 +4186,25 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1462 + */
1463 + sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
1464 +
1465 +- blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
1466 +-
1467 +- if (blocksize == PAGE_SIZE)
1468 +- set_opt(sb, DIOREAD_NOLOCK);
1469 +-
1470 +- if (blocksize < EXT4_MIN_BLOCK_SIZE ||
1471 +- blocksize > EXT4_MAX_BLOCK_SIZE) {
1472 ++ if (le32_to_cpu(es->s_log_block_size) >
1473 ++ (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
1474 + ext4_msg(sb, KERN_ERR,
1475 +- "Unsupported filesystem blocksize %d (%d log_block_size)",
1476 +- blocksize, le32_to_cpu(es->s_log_block_size));
1477 ++ "Invalid log block size: %u",
1478 ++ le32_to_cpu(es->s_log_block_size));
1479 + goto failed_mount;
1480 + }
1481 ++ if (le32_to_cpu(es->s_log_cluster_size) >
1482 ++ (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
1483 ++ ext4_msg(sb, KERN_ERR,
1484 ++ "Invalid log cluster size: %u",
1485 ++ le32_to_cpu(es->s_log_cluster_size));
1486 ++ goto failed_mount;
1487 ++ }
1488 ++
1489 ++ blocksize = EXT4_MIN_BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
1490 ++
1491 ++ if (blocksize == PAGE_SIZE)
1492 ++ set_opt(sb, DIOREAD_NOLOCK);
1493 +
1494 + if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
1495 + sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
1496 +@@ -4416,21 +4423,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1497 + if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
1498 + goto failed_mount;
1499 +
1500 +- if (le32_to_cpu(es->s_log_block_size) >
1501 +- (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
1502 +- ext4_msg(sb, KERN_ERR,
1503 +- "Invalid log block size: %u",
1504 +- le32_to_cpu(es->s_log_block_size));
1505 +- goto failed_mount;
1506 +- }
1507 +- if (le32_to_cpu(es->s_log_cluster_size) >
1508 +- (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
1509 +- ext4_msg(sb, KERN_ERR,
1510 +- "Invalid log cluster size: %u",
1511 +- le32_to_cpu(es->s_log_cluster_size));
1512 +- goto failed_mount;
1513 +- }
1514 +-
1515 + if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
1516 + ext4_msg(sb, KERN_ERR,
1517 + "Number of reserved GDT blocks insanely large: %d",
1518 +diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
1519 +index 023462e80e58d..b39bf416d5114 100644
1520 +--- a/fs/f2fs/checkpoint.c
1521 ++++ b/fs/f2fs/checkpoint.c
1522 +@@ -1600,7 +1600,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1523 + goto out;
1524 + }
1525 +
1526 +- if (NM_I(sbi)->dirty_nat_cnt == 0 &&
1527 ++ if (NM_I(sbi)->nat_cnt[DIRTY_NAT] == 0 &&
1528 + SIT_I(sbi)->dirty_sentries == 0 &&
1529 + prefree_segments(sbi) == 0) {
1530 + f2fs_flush_sit_entries(sbi, cpc);
1531 +diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
1532 +index 14262e0f1cd60..c5fee4d7ea72f 100644
1533 +--- a/fs/f2fs/compress.c
1534 ++++ b/fs/f2fs/compress.c
1535 +@@ -798,8 +798,6 @@ destroy_decompress_ctx:
1536 + if (cops->destroy_decompress_ctx)
1537 + cops->destroy_decompress_ctx(dic);
1538 + out_free_dic:
1539 +- if (verity)
1540 +- atomic_set(&dic->pending_pages, dic->nr_cpages);
1541 + if (!verity)
1542 + f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
1543 + ret, false);
1544 +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
1545 +index be4da52604edc..b29243ee1c3e5 100644
1546 +--- a/fs/f2fs/data.c
1547 ++++ b/fs/f2fs/data.c
1548 +@@ -202,7 +202,7 @@ static void f2fs_verify_bio(struct bio *bio)
1549 + dic = (struct decompress_io_ctx *)page_private(page);
1550 +
1551 + if (dic) {
1552 +- if (atomic_dec_return(&dic->pending_pages))
1553 ++ if (atomic_dec_return(&dic->verity_pages))
1554 + continue;
1555 + f2fs_verify_pages(dic->rpages,
1556 + dic->cluster_size);
1557 +@@ -1027,7 +1027,8 @@ static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
1558 +
1559 + static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
1560 + unsigned nr_pages, unsigned op_flag,
1561 +- pgoff_t first_idx, bool for_write)
1562 ++ pgoff_t first_idx, bool for_write,
1563 ++ bool for_verity)
1564 + {
1565 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1566 + struct bio *bio;
1567 +@@ -1049,7 +1050,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
1568 + post_read_steps |= 1 << STEP_DECRYPT;
1569 + if (f2fs_compressed_file(inode))
1570 + post_read_steps |= 1 << STEP_DECOMPRESS_NOWQ;
1571 +- if (f2fs_need_verity(inode, first_idx))
1572 ++ if (for_verity && f2fs_need_verity(inode, first_idx))
1573 + post_read_steps |= 1 << STEP_VERITY;
1574 +
1575 + if (post_read_steps) {
1576 +@@ -1079,7 +1080,7 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
1577 + struct bio *bio;
1578 +
1579 + bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
1580 +- page->index, for_write);
1581 ++ page->index, for_write, true);
1582 + if (IS_ERR(bio))
1583 + return PTR_ERR(bio);
1584 +
1585 +@@ -2133,7 +2134,7 @@ submit_and_realloc:
1586 + if (bio == NULL) {
1587 + bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
1588 + is_readahead ? REQ_RAHEAD : 0, page->index,
1589 +- false);
1590 ++ false, true);
1591 + if (IS_ERR(bio)) {
1592 + ret = PTR_ERR(bio);
1593 + bio = NULL;
1594 +@@ -2180,6 +2181,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
1595 + const unsigned blkbits = inode->i_blkbits;
1596 + const unsigned blocksize = 1 << blkbits;
1597 + struct decompress_io_ctx *dic = NULL;
1598 ++ struct bio_post_read_ctx *ctx;
1599 ++ bool for_verity = false;
1600 + int i;
1601 + int ret = 0;
1602 +
1603 +@@ -2245,10 +2248,29 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
1604 + goto out_put_dnode;
1605 + }
1606 +
1607 ++ /*
1608 ++ * It's possible to enable fsverity on the fly when handling a cluster,
1609 ++ * which requires complicated error handling. Instead of adding more
1610 ++ * complexity, let's give a rule where end_io post-processes fsverity
1611 ++ * per cluster. In order to do that, we need to submit bio, if previous
1612 ++ * bio sets a different post-process policy.
1613 ++ */
1614 ++ if (fsverity_active(cc->inode)) {
1615 ++ atomic_set(&dic->verity_pages, cc->nr_cpages);
1616 ++ for_verity = true;
1617 ++
1618 ++ if (bio) {
1619 ++ ctx = bio->bi_private;
1620 ++ if (!(ctx->enabled_steps & (1 << STEP_VERITY))) {
1621 ++ __submit_bio(sbi, bio, DATA);
1622 ++ bio = NULL;
1623 ++ }
1624 ++ }
1625 ++ }
1626 ++
1627 + for (i = 0; i < dic->nr_cpages; i++) {
1628 + struct page *page = dic->cpages[i];
1629 + block_t blkaddr;
1630 +- struct bio_post_read_ctx *ctx;
1631 +
1632 + blkaddr = data_blkaddr(dn.inode, dn.node_page,
1633 + dn.ofs_in_node + i + 1);
1634 +@@ -2264,17 +2286,31 @@ submit_and_realloc:
1635 + if (!bio) {
1636 + bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
1637 + is_readahead ? REQ_RAHEAD : 0,
1638 +- page->index, for_write);
1639 ++ page->index, for_write, for_verity);
1640 + if (IS_ERR(bio)) {
1641 ++ unsigned int remained = dic->nr_cpages - i;
1642 ++ bool release = false;
1643 ++
1644 + ret = PTR_ERR(bio);
1645 + dic->failed = true;
1646 +- if (!atomic_sub_return(dic->nr_cpages - i,
1647 +- &dic->pending_pages)) {
1648 ++
1649 ++ if (for_verity) {
1650 ++ if (!atomic_sub_return(remained,
1651 ++ &dic->verity_pages))
1652 ++ release = true;
1653 ++ } else {
1654 ++ if (!atomic_sub_return(remained,
1655 ++ &dic->pending_pages))
1656 ++ release = true;
1657 ++ }
1658 ++
1659 ++ if (release) {
1660 + f2fs_decompress_end_io(dic->rpages,
1661 +- cc->cluster_size, true,
1662 +- false);
1663 ++ cc->cluster_size, true,
1664 ++ false);
1665 + f2fs_free_dic(dic);
1666 + }
1667 ++
1668 + f2fs_put_dnode(&dn);
1669 + *bio_ret = NULL;
1670 + return ret;
1671 +diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
1672 +index a8357fd4f5fab..197c914119da8 100644
1673 +--- a/fs/f2fs/debug.c
1674 ++++ b/fs/f2fs/debug.c
1675 +@@ -145,8 +145,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
1676 + si->node_pages = NODE_MAPPING(sbi)->nrpages;
1677 + if (sbi->meta_inode)
1678 + si->meta_pages = META_MAPPING(sbi)->nrpages;
1679 +- si->nats = NM_I(sbi)->nat_cnt;
1680 +- si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
1681 ++ si->nats = NM_I(sbi)->nat_cnt[TOTAL_NAT];
1682 ++ si->dirty_nats = NM_I(sbi)->nat_cnt[DIRTY_NAT];
1683 + si->sits = MAIN_SEGS(sbi);
1684 + si->dirty_sits = SIT_I(sbi)->dirty_sentries;
1685 + si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID];
1686 +@@ -278,9 +278,10 @@ get_cache:
1687 + si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID] +
1688 + NM_I(sbi)->nid_cnt[PREALLOC_NID]) *
1689 + sizeof(struct free_nid);
1690 +- si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry);
1691 +- si->cache_mem += NM_I(sbi)->dirty_nat_cnt *
1692 +- sizeof(struct nat_entry_set);
1693 ++ si->cache_mem += NM_I(sbi)->nat_cnt[TOTAL_NAT] *
1694 ++ sizeof(struct nat_entry);
1695 ++ si->cache_mem += NM_I(sbi)->nat_cnt[DIRTY_NAT] *
1696 ++ sizeof(struct nat_entry_set);
1697 + si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
1698 + for (i = 0; i < MAX_INO_ENTRY; i++)
1699 + si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
1700 +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
1701 +index 9a321c52facec..06e5a6053f3f9 100644
1702 +--- a/fs/f2fs/f2fs.h
1703 ++++ b/fs/f2fs/f2fs.h
1704 +@@ -894,6 +894,13 @@ enum nid_state {
1705 + MAX_NID_STATE,
1706 + };
1707 +
1708 ++enum nat_state {
1709 ++ TOTAL_NAT,
1710 ++ DIRTY_NAT,
1711 ++ RECLAIMABLE_NAT,
1712 ++ MAX_NAT_STATE,
1713 ++};
1714 ++
1715 + struct f2fs_nm_info {
1716 + block_t nat_blkaddr; /* base disk address of NAT */
1717 + nid_t max_nid; /* maximum possible node ids */
1718 +@@ -909,8 +916,7 @@ struct f2fs_nm_info {
1719 + struct rw_semaphore nat_tree_lock; /* protect nat_tree_lock */
1720 + struct list_head nat_entries; /* cached nat entry list (clean) */
1721 + spinlock_t nat_list_lock; /* protect clean nat entry list */
1722 +- unsigned int nat_cnt; /* the # of cached nat entries */
1723 +- unsigned int dirty_nat_cnt; /* total num of nat entries in set */
1724 ++ unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
1725 + unsigned int nat_blocks; /* # of nat blocks */
1726 +
1727 + /* free node ids management */
1728 +@@ -1404,6 +1410,7 @@ struct decompress_io_ctx {
1729 + size_t rlen; /* valid data length in rbuf */
1730 + size_t clen; /* valid data length in cbuf */
1731 + atomic_t pending_pages; /* in-flight compressed page count */
1732 ++ atomic_t verity_pages; /* in-flight page count for verity */
1733 + bool failed; /* indicate IO error during decompression */
1734 + void *private; /* payload buffer for specified decompression algorithm */
1735 + void *private2; /* extra payload buffer */
1736 +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
1737 +index 42394de6c7eb1..e65d73293a3f6 100644
1738 +--- a/fs/f2fs/node.c
1739 ++++ b/fs/f2fs/node.c
1740 +@@ -62,8 +62,8 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
1741 + sizeof(struct free_nid)) >> PAGE_SHIFT;
1742 + res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
1743 + } else if (type == NAT_ENTRIES) {
1744 +- mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
1745 +- PAGE_SHIFT;
1746 ++ mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
1747 ++ sizeof(struct nat_entry)) >> PAGE_SHIFT;
1748 + res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
1749 + if (excess_cached_nats(sbi))
1750 + res = false;
1751 +@@ -177,7 +177,8 @@ static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
1752 + list_add_tail(&ne->list, &nm_i->nat_entries);
1753 + spin_unlock(&nm_i->nat_list_lock);
1754 +
1755 +- nm_i->nat_cnt++;
1756 ++ nm_i->nat_cnt[TOTAL_NAT]++;
1757 ++ nm_i->nat_cnt[RECLAIMABLE_NAT]++;
1758 + return ne;
1759 + }
1760 +
1761 +@@ -207,7 +208,8 @@ static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
1762 + static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
1763 + {
1764 + radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
1765 +- nm_i->nat_cnt--;
1766 ++ nm_i->nat_cnt[TOTAL_NAT]--;
1767 ++ nm_i->nat_cnt[RECLAIMABLE_NAT]--;
1768 + __free_nat_entry(e);
1769 + }
1770 +
1771 +@@ -253,7 +255,8 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
1772 + if (get_nat_flag(ne, IS_DIRTY))
1773 + goto refresh_list;
1774 +
1775 +- nm_i->dirty_nat_cnt++;
1776 ++ nm_i->nat_cnt[DIRTY_NAT]++;
1777 ++ nm_i->nat_cnt[RECLAIMABLE_NAT]--;
1778 + set_nat_flag(ne, IS_DIRTY, true);
1779 + refresh_list:
1780 + spin_lock(&nm_i->nat_list_lock);
1781 +@@ -273,7 +276,8 @@ static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
1782 +
1783 + set_nat_flag(ne, IS_DIRTY, false);
1784 + set->entry_cnt--;
1785 +- nm_i->dirty_nat_cnt--;
1786 ++ nm_i->nat_cnt[DIRTY_NAT]--;
1787 ++ nm_i->nat_cnt[RECLAIMABLE_NAT]++;
1788 + }
1789 +
1790 + static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
1791 +@@ -2944,14 +2948,17 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1792 + LIST_HEAD(sets);
1793 + int err = 0;
1794 +
1795 +- /* during unmount, let's flush nat_bits before checking dirty_nat_cnt */
1796 ++ /*
1797 ++ * during unmount, let's flush nat_bits before checking
1798 ++ * nat_cnt[DIRTY_NAT].
1799 ++ */
1800 + if (enabled_nat_bits(sbi, cpc)) {
1801 + down_write(&nm_i->nat_tree_lock);
1802 + remove_nats_in_journal(sbi);
1803 + up_write(&nm_i->nat_tree_lock);
1804 + }
1805 +
1806 +- if (!nm_i->dirty_nat_cnt)
1807 ++ if (!nm_i->nat_cnt[DIRTY_NAT])
1808 + return 0;
1809 +
1810 + down_write(&nm_i->nat_tree_lock);
1811 +@@ -2962,7 +2969,8 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1812 + * into nat entry set.
1813 + */
1814 + if (enabled_nat_bits(sbi, cpc) ||
1815 +- !__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
1816 ++ !__has_cursum_space(journal,
1817 ++ nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
1818 + remove_nats_in_journal(sbi);
1819 +
1820 + while ((found = __gang_lookup_nat_set(nm_i,
1821 +@@ -3086,7 +3094,6 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
1822 + F2FS_RESERVED_NODE_NUM;
1823 + nm_i->nid_cnt[FREE_NID] = 0;
1824 + nm_i->nid_cnt[PREALLOC_NID] = 0;
1825 +- nm_i->nat_cnt = 0;
1826 + nm_i->ram_thresh = DEF_RAM_THRESHOLD;
1827 + nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
1828 + nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
1829 +@@ -3220,7 +3227,7 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
1830 + __del_from_nat_cache(nm_i, natvec[idx]);
1831 + }
1832 + }
1833 +- f2fs_bug_on(sbi, nm_i->nat_cnt);
1834 ++ f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);
1835 +
1836 + /* destroy nat set cache */
1837 + nid = 0;
1838 +diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
1839 +index 69e5859e993cf..f84541b57acbb 100644
1840 +--- a/fs/f2fs/node.h
1841 ++++ b/fs/f2fs/node.h
1842 +@@ -126,13 +126,13 @@ static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
1843 +
1844 + static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
1845 + {
1846 +- return NM_I(sbi)->dirty_nat_cnt >= NM_I(sbi)->max_nid *
1847 ++ return NM_I(sbi)->nat_cnt[DIRTY_NAT] >= NM_I(sbi)->max_nid *
1848 + NM_I(sbi)->dirty_nats_ratio / 100;
1849 + }
1850 +
1851 + static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
1852 + {
1853 +- return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
1854 ++ return NM_I(sbi)->nat_cnt[TOTAL_NAT] >= DEF_NAT_CACHE_THRESHOLD;
1855 + }
1856 +
1857 + static inline bool excess_dirty_nodes(struct f2fs_sb_info *sbi)
1858 +diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
1859 +index d66de5999a26d..dd3c3c7a90ec8 100644
1860 +--- a/fs/f2fs/shrinker.c
1861 ++++ b/fs/f2fs/shrinker.c
1862 +@@ -18,9 +18,7 @@ static unsigned int shrinker_run_no;
1863 +
1864 + static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
1865 + {
1866 +- long count = NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
1867 +-
1868 +- return count > 0 ? count : 0;
1869 ++ return NM_I(sbi)->nat_cnt[RECLAIMABLE_NAT];
1870 + }
1871 +
1872 + static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
1873 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
1874 +index fef22e476c526..aa284ce7ec00d 100644
1875 +--- a/fs/f2fs/super.c
1876 ++++ b/fs/f2fs/super.c
1877 +@@ -2744,7 +2744,6 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
1878 + block_t total_sections, blocks_per_seg;
1879 + struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
1880 + (bh->b_data + F2FS_SUPER_OFFSET);
1881 +- unsigned int blocksize;
1882 + size_t crc_offset = 0;
1883 + __u32 crc = 0;
1884 +
1885 +@@ -2778,10 +2777,10 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
1886 + }
1887 +
1888 + /* Currently, support only 4KB block size */
1889 +- blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
1890 +- if (blocksize != F2FS_BLKSIZE) {
1891 +- f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB",
1892 +- blocksize);
1893 ++ if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
1894 ++ f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
1895 ++ le32_to_cpu(raw_super->log_blocksize),
1896 ++ F2FS_BLKSIZE_BITS);
1897 + return -EFSCORRUPTED;
1898 + }
1899 +
1900 +diff --git a/fs/fcntl.c b/fs/fcntl.c
1901 +index 19ac5baad50fd..05b36b28f2e87 100644
1902 +--- a/fs/fcntl.c
1903 ++++ b/fs/fcntl.c
1904 +@@ -781,9 +781,10 @@ void send_sigio(struct fown_struct *fown, int fd, int band)
1905 + {
1906 + struct task_struct *p;
1907 + enum pid_type type;
1908 ++ unsigned long flags;
1909 + struct pid *pid;
1910 +
1911 +- read_lock(&fown->lock);
1912 ++ read_lock_irqsave(&fown->lock, flags);
1913 +
1914 + type = fown->pid_type;
1915 + pid = fown->pid;
1916 +@@ -804,7 +805,7 @@ void send_sigio(struct fown_struct *fown, int fd, int band)
1917 + read_unlock(&tasklist_lock);
1918 + }
1919 + out_unlock_fown:
1920 +- read_unlock(&fown->lock);
1921 ++ read_unlock_irqrestore(&fown->lock, flags);
1922 + }
1923 +
1924 + static void send_sigurg_to_task(struct task_struct *p,
1925 +@@ -819,9 +820,10 @@ int send_sigurg(struct fown_struct *fown)
1926 + struct task_struct *p;
1927 + enum pid_type type;
1928 + struct pid *pid;
1929 ++ unsigned long flags;
1930 + int ret = 0;
1931 +
1932 +- read_lock(&fown->lock);
1933 ++ read_lock_irqsave(&fown->lock, flags);
1934 +
1935 + type = fown->pid_type;
1936 + pid = fown->pid;
1937 +@@ -844,7 +846,7 @@ int send_sigurg(struct fown_struct *fown)
1938 + read_unlock(&tasklist_lock);
1939 + }
1940 + out_unlock_fown:
1941 +- read_unlock(&fown->lock);
1942 ++ read_unlock_irqrestore(&fown->lock, flags);
1943 + return ret;
1944 + }
1945 +
1946 +diff --git a/fs/io_uring.c b/fs/io_uring.c
1947 +index 0fcd065baa760..1f798c5c4213e 100644
1948 +--- a/fs/io_uring.c
1949 ++++ b/fs/io_uring.c
1950 +@@ -941,6 +941,10 @@ enum io_mem_account {
1951 + ACCT_PINNED,
1952 + };
1953 +
1954 ++static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node);
1955 ++static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
1956 ++ struct io_ring_ctx *ctx);
1957 ++
1958 + static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
1959 + struct io_comp_state *cs);
1960 + static void io_cqring_fill_event(struct io_kiocb *req, long res);
1961 +@@ -1369,6 +1373,13 @@ static bool io_grab_identity(struct io_kiocb *req)
1962 + spin_unlock_irq(&ctx->inflight_lock);
1963 + req->work.flags |= IO_WQ_WORK_FILES;
1964 + }
1965 ++ if (!(req->work.flags & IO_WQ_WORK_MM) &&
1966 ++ (def->work_flags & IO_WQ_WORK_MM)) {
1967 ++ if (id->mm != current->mm)
1968 ++ return false;
1969 ++ mmgrab(id->mm);
1970 ++ req->work.flags |= IO_WQ_WORK_MM;
1971 ++ }
1972 +
1973 + return true;
1974 + }
1975 +@@ -1393,13 +1404,6 @@ static void io_prep_async_work(struct io_kiocb *req)
1976 + req->work.flags |= IO_WQ_WORK_UNBOUND;
1977 + }
1978 +
1979 +- /* ->mm can never change on us */
1980 +- if (!(req->work.flags & IO_WQ_WORK_MM) &&
1981 +- (def->work_flags & IO_WQ_WORK_MM)) {
1982 +- mmgrab(id->mm);
1983 +- req->work.flags |= IO_WQ_WORK_MM;
1984 +- }
1985 +-
1986 + /* if we fail grabbing identity, we must COW, regrab, and retry */
1987 + if (io_grab_identity(req))
1988 + return;
1989 +@@ -1632,8 +1636,6 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
1990 + LIST_HEAD(list);
1991 +
1992 + if (!force) {
1993 +- if (list_empty_careful(&ctx->cq_overflow_list))
1994 +- return true;
1995 + if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
1996 + rings->cq_ring_entries))
1997 + return false;
1998 +@@ -5861,15 +5863,15 @@ static void io_req_drop_files(struct io_kiocb *req)
1999 + struct io_ring_ctx *ctx = req->ctx;
2000 + unsigned long flags;
2001 +
2002 ++ put_files_struct(req->work.identity->files);
2003 ++ put_nsproxy(req->work.identity->nsproxy);
2004 + spin_lock_irqsave(&ctx->inflight_lock, flags);
2005 + list_del(&req->inflight_entry);
2006 +- if (waitqueue_active(&ctx->inflight_wait))
2007 +- wake_up(&ctx->inflight_wait);
2008 + spin_unlock_irqrestore(&ctx->inflight_lock, flags);
2009 + req->flags &= ~REQ_F_INFLIGHT;
2010 +- put_files_struct(req->work.identity->files);
2011 +- put_nsproxy(req->work.identity->nsproxy);
2012 + req->work.flags &= ~IO_WQ_WORK_FILES;
2013 ++ if (waitqueue_active(&ctx->inflight_wait))
2014 ++ wake_up(&ctx->inflight_wait);
2015 + }
2016 +
2017 + static void __io_clean_op(struct io_kiocb *req)
2018 +@@ -6575,8 +6577,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
2019 +
2020 + /* if we have a backlog and couldn't flush it all, return BUSY */
2021 + if (test_bit(0, &ctx->sq_check_overflow)) {
2022 +- if (!list_empty(&ctx->cq_overflow_list) &&
2023 +- !io_cqring_overflow_flush(ctx, false, NULL, NULL))
2024 ++ if (!io_cqring_overflow_flush(ctx, false, NULL, NULL))
2025 + return -EBUSY;
2026 + }
2027 +
2028 +@@ -6798,8 +6799,16 @@ static int io_sq_thread(void *data)
2029 + * kthread parking. This synchronizes the thread vs users,
2030 + * the users are synchronized on the sqd->ctx_lock.
2031 + */
2032 +- if (kthread_should_park())
2033 ++ if (kthread_should_park()) {
2034 + kthread_parkme();
2035 ++ /*
2036 ++ * When sq thread is unparked, in case the previous park operation
2037 ++ * comes from io_put_sq_data(), which means that sq thread is going
2038 ++ * to be stopped, so here needs to have a check.
2039 ++ */
2040 ++ if (kthread_should_stop())
2041 ++ break;
2042 ++ }
2043 +
2044 + if (unlikely(!list_empty(&sqd->ctx_new_list)))
2045 + io_sqd_init_new(sqd);
2046 +@@ -6991,18 +7000,32 @@ static void io_file_ref_kill(struct percpu_ref *ref)
2047 + complete(&data->done);
2048 + }
2049 +
2050 ++static void io_sqe_files_set_node(struct fixed_file_data *file_data,
2051 ++ struct fixed_file_ref_node *ref_node)
2052 ++{
2053 ++ spin_lock_bh(&file_data->lock);
2054 ++ file_data->node = ref_node;
2055 ++ list_add_tail(&ref_node->node, &file_data->ref_list);
2056 ++ spin_unlock_bh(&file_data->lock);
2057 ++ percpu_ref_get(&file_data->refs);
2058 ++}
2059 ++
2060 + static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
2061 + {
2062 + struct fixed_file_data *data = ctx->file_data;
2063 +- struct fixed_file_ref_node *ref_node = NULL;
2064 ++ struct fixed_file_ref_node *backup_node, *ref_node = NULL;
2065 + unsigned nr_tables, i;
2066 ++ int ret;
2067 +
2068 + if (!data)
2069 + return -ENXIO;
2070 ++ backup_node = alloc_fixed_file_ref_node(ctx);
2071 ++ if (!backup_node)
2072 ++ return -ENOMEM;
2073 +
2074 +- spin_lock(&data->lock);
2075 ++ spin_lock_bh(&data->lock);
2076 + ref_node = data->node;
2077 +- spin_unlock(&data->lock);
2078 ++ spin_unlock_bh(&data->lock);
2079 + if (ref_node)
2080 + percpu_ref_kill(&ref_node->refs);
2081 +
2082 +@@ -7010,7 +7033,18 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
2083 +
2084 + /* wait for all refs nodes to complete */
2085 + flush_delayed_work(&ctx->file_put_work);
2086 +- wait_for_completion(&data->done);
2087 ++ do {
2088 ++ ret = wait_for_completion_interruptible(&data->done);
2089 ++ if (!ret)
2090 ++ break;
2091 ++ ret = io_run_task_work_sig();
2092 ++ if (ret < 0) {
2093 ++ percpu_ref_resurrect(&data->refs);
2094 ++ reinit_completion(&data->done);
2095 ++ io_sqe_files_set_node(data, backup_node);
2096 ++ return ret;
2097 ++ }
2098 ++ } while (1);
2099 +
2100 + __io_sqe_files_unregister(ctx);
2101 + nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
2102 +@@ -7021,6 +7055,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
2103 + kfree(data);
2104 + ctx->file_data = NULL;
2105 + ctx->nr_user_files = 0;
2106 ++ destroy_fixed_file_ref_node(backup_node);
2107 + return 0;
2108 + }
2109 +
2110 +@@ -7385,7 +7420,7 @@ static void io_file_data_ref_zero(struct percpu_ref *ref)
2111 + data = ref_node->file_data;
2112 + ctx = data->ctx;
2113 +
2114 +- spin_lock(&data->lock);
2115 ++ spin_lock_bh(&data->lock);
2116 + ref_node->done = true;
2117 +
2118 + while (!list_empty(&data->ref_list)) {
2119 +@@ -7397,7 +7432,7 @@ static void io_file_data_ref_zero(struct percpu_ref *ref)
2120 + list_del(&ref_node->node);
2121 + first_add |= llist_add(&ref_node->llist, &ctx->file_put_llist);
2122 + }
2123 +- spin_unlock(&data->lock);
2124 ++ spin_unlock_bh(&data->lock);
2125 +
2126 + if (percpu_ref_is_dying(&data->refs))
2127 + delay = 0;
2128 +@@ -7519,11 +7554,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
2129 + return PTR_ERR(ref_node);
2130 + }
2131 +
2132 +- file_data->node = ref_node;
2133 +- spin_lock(&file_data->lock);
2134 +- list_add_tail(&ref_node->node, &file_data->ref_list);
2135 +- spin_unlock(&file_data->lock);
2136 +- percpu_ref_get(&file_data->refs);
2137 ++ io_sqe_files_set_node(file_data, ref_node);
2138 + return ret;
2139 + out_fput:
2140 + for (i = 0; i < ctx->nr_user_files; i++) {
2141 +@@ -7679,11 +7710,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
2142 +
2143 + if (needs_switch) {
2144 + percpu_ref_kill(&data->node->refs);
2145 +- spin_lock(&data->lock);
2146 +- list_add_tail(&ref_node->node, &data->ref_list);
2147 +- data->node = ref_node;
2148 +- spin_unlock(&data->lock);
2149 +- percpu_ref_get(&ctx->file_data->refs);
2150 ++ io_sqe_files_set_node(data, ref_node);
2151 + } else
2152 + destroy_fixed_file_ref_node(ref_node);
2153 +
2154 +diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
2155 +index 778275f48a879..5a7091746f68b 100644
2156 +--- a/fs/jffs2/jffs2_fs_sb.h
2157 ++++ b/fs/jffs2/jffs2_fs_sb.h
2158 +@@ -38,6 +38,7 @@ struct jffs2_mount_opts {
2159 + * users. This is implemented simply by means of not allowing the
2160 + * latter users to write to the file system if the amount if the
2161 + * available space is less then 'rp_size'. */
2162 ++ bool set_rp_size;
2163 + unsigned int rp_size;
2164 + };
2165 +
2166 +diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
2167 +index 4fd297bdf0f3f..81ca58c10b728 100644
2168 +--- a/fs/jffs2/super.c
2169 ++++ b/fs/jffs2/super.c
2170 +@@ -88,7 +88,7 @@ static int jffs2_show_options(struct seq_file *s, struct dentry *root)
2171 +
2172 + if (opts->override_compr)
2173 + seq_printf(s, ",compr=%s", jffs2_compr_name(opts->compr));
2174 +- if (opts->rp_size)
2175 ++ if (opts->set_rp_size)
2176 + seq_printf(s, ",rp_size=%u", opts->rp_size / 1024);
2177 +
2178 + return 0;
2179 +@@ -202,11 +202,8 @@ static int jffs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
2180 + case Opt_rp_size:
2181 + if (result.uint_32 > UINT_MAX / 1024)
2182 + return invalf(fc, "jffs2: rp_size unrepresentable");
2183 +- opt = result.uint_32 * 1024;
2184 +- if (opt > c->mtd->size)
2185 +- return invalf(fc, "jffs2: Too large reserve pool specified, max is %llu KB",
2186 +- c->mtd->size / 1024);
2187 +- c->mount_opts.rp_size = opt;
2188 ++ c->mount_opts.rp_size = result.uint_32 * 1024;
2189 ++ c->mount_opts.set_rp_size = true;
2190 + break;
2191 + default:
2192 + return -EINVAL;
2193 +@@ -225,8 +222,10 @@ static inline void jffs2_update_mount_opts(struct fs_context *fc)
2194 + c->mount_opts.override_compr = new_c->mount_opts.override_compr;
2195 + c->mount_opts.compr = new_c->mount_opts.compr;
2196 + }
2197 +- if (new_c->mount_opts.rp_size)
2198 ++ if (new_c->mount_opts.set_rp_size) {
2199 ++ c->mount_opts.set_rp_size = new_c->mount_opts.set_rp_size;
2200 + c->mount_opts.rp_size = new_c->mount_opts.rp_size;
2201 ++ }
2202 + mutex_unlock(&c->alloc_sem);
2203 + }
2204 +
2205 +@@ -266,6 +265,10 @@ static int jffs2_fill_super(struct super_block *sb, struct fs_context *fc)
2206 + c->mtd = sb->s_mtd;
2207 + c->os_priv = sb;
2208 +
2209 ++ if (c->mount_opts.rp_size > c->mtd->size)
2210 ++ return invalf(fc, "jffs2: Too large reserve pool specified, max is %llu KB",
2211 ++ c->mtd->size / 1024);
2212 ++
2213 + /* Initialize JFFS2 superblock locks, the further initialization will
2214 + * be done later */
2215 + mutex_init(&c->alloc_sem);
2216 +diff --git a/fs/namespace.c b/fs/namespace.c
2217 +index cebaa3e817940..93006abe7946a 100644
2218 +--- a/fs/namespace.c
2219 ++++ b/fs/namespace.c
2220 +@@ -156,10 +156,10 @@ static inline void mnt_add_count(struct mount *mnt, int n)
2221 + /*
2222 + * vfsmount lock must be held for write
2223 + */
2224 +-unsigned int mnt_get_count(struct mount *mnt)
2225 ++int mnt_get_count(struct mount *mnt)
2226 + {
2227 + #ifdef CONFIG_SMP
2228 +- unsigned int count = 0;
2229 ++ int count = 0;
2230 + int cpu;
2231 +
2232 + for_each_possible_cpu(cpu) {
2233 +@@ -1139,6 +1139,7 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
2234 + static void mntput_no_expire(struct mount *mnt)
2235 + {
2236 + LIST_HEAD(list);
2237 ++ int count;
2238 +
2239 + rcu_read_lock();
2240 + if (likely(READ_ONCE(mnt->mnt_ns))) {
2241 +@@ -1162,7 +1163,9 @@ static void mntput_no_expire(struct mount *mnt)
2242 + */
2243 + smp_mb();
2244 + mnt_add_count(mnt, -1);
2245 +- if (mnt_get_count(mnt)) {
2246 ++ count = mnt_get_count(mnt);
2247 ++ if (count != 0) {
2248 ++ WARN_ON(count < 0);
2249 + rcu_read_unlock();
2250 + unlock_mount_hash();
2251 + return;
2252 +diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
2253 +index 8432bd6b95f08..c078f88552695 100644
2254 +--- a/fs/nfs/nfs42xdr.c
2255 ++++ b/fs/nfs/nfs42xdr.c
2256 +@@ -1019,29 +1019,24 @@ static int decode_deallocate(struct xdr_stream *xdr, struct nfs42_falloc_res *re
2257 + return decode_op_hdr(xdr, OP_DEALLOCATE);
2258 + }
2259 +
2260 +-static int decode_read_plus_data(struct xdr_stream *xdr, struct nfs_pgio_res *res,
2261 +- uint32_t *eof)
2262 ++static int decode_read_plus_data(struct xdr_stream *xdr,
2263 ++ struct nfs_pgio_res *res)
2264 + {
2265 + uint32_t count, recvd;
2266 + uint64_t offset;
2267 + __be32 *p;
2268 +
2269 + p = xdr_inline_decode(xdr, 8 + 4);
2270 +- if (unlikely(!p))
2271 +- return -EIO;
2272 ++ if (!p)
2273 ++ return 1;
2274 +
2275 + p = xdr_decode_hyper(p, &offset);
2276 + count = be32_to_cpup(p);
2277 + recvd = xdr_align_data(xdr, res->count, count);
2278 + res->count += recvd;
2279 +
2280 +- if (count > recvd) {
2281 +- dprintk("NFS: server cheating in read reply: "
2282 +- "count %u > recvd %u\n", count, recvd);
2283 +- *eof = 0;
2284 ++ if (count > recvd)
2285 + return 1;
2286 +- }
2287 +-
2288 + return 0;
2289 + }
2290 +
2291 +@@ -1052,18 +1047,16 @@ static int decode_read_plus_hole(struct xdr_stream *xdr, struct nfs_pgio_res *re
2292 + __be32 *p;
2293 +
2294 + p = xdr_inline_decode(xdr, 8 + 8);
2295 +- if (unlikely(!p))
2296 +- return -EIO;
2297 ++ if (!p)
2298 ++ return 1;
2299 +
2300 + p = xdr_decode_hyper(p, &offset);
2301 + p = xdr_decode_hyper(p, &length);
2302 + recvd = xdr_expand_hole(xdr, res->count, length);
2303 + res->count += recvd;
2304 +
2305 +- if (recvd < length) {
2306 +- *eof = 0;
2307 ++ if (recvd < length)
2308 + return 1;
2309 +- }
2310 + return 0;
2311 + }
2312 +
2313 +@@ -1088,12 +1081,12 @@ static int decode_read_plus(struct xdr_stream *xdr, struct nfs_pgio_res *res)
2314 +
2315 + for (i = 0; i < segments; i++) {
2316 + p = xdr_inline_decode(xdr, 4);
2317 +- if (unlikely(!p))
2318 +- return -EIO;
2319 ++ if (!p)
2320 ++ goto early_out;
2321 +
2322 + type = be32_to_cpup(p++);
2323 + if (type == NFS4_CONTENT_DATA)
2324 +- status = decode_read_plus_data(xdr, res, &eof);
2325 ++ status = decode_read_plus_data(xdr, res);
2326 + else if (type == NFS4_CONTENT_HOLE)
2327 + status = decode_read_plus_hole(xdr, res, &eof);
2328 + else
2329 +@@ -1102,12 +1095,17 @@ static int decode_read_plus(struct xdr_stream *xdr, struct nfs_pgio_res *res)
2330 + if (status < 0)
2331 + return status;
2332 + if (status > 0)
2333 +- break;
2334 ++ goto early_out;
2335 + }
2336 +
2337 + out:
2338 + res->eof = eof;
2339 + return 0;
2340 ++early_out:
2341 ++ if (unlikely(!i))
2342 ++ return -EIO;
2343 ++ res->eof = 0;
2344 ++ return 0;
2345 + }
2346 +
2347 + static int decode_seek(struct xdr_stream *xdr, struct nfs42_seek_res *res)
2348 +diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
2349 +index 93f5c1678ec29..984cc42ee54d8 100644
2350 +--- a/fs/nfs/nfs4super.c
2351 ++++ b/fs/nfs/nfs4super.c
2352 +@@ -67,7 +67,7 @@ static void nfs4_evict_inode(struct inode *inode)
2353 + nfs_inode_evict_delegation(inode);
2354 + /* Note that above delegreturn would trigger pnfs return-on-close */
2355 + pnfs_return_layout(inode);
2356 +- pnfs_destroy_layout(NFS_I(inode));
2357 ++ pnfs_destroy_layout_final(NFS_I(inode));
2358 + /* First call standard NFS clear_inode() code */
2359 + nfs_clear_inode(inode);
2360 + nfs4_xattr_cache_zap(inode);
2361 +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
2362 +index 0e50b9d45c320..07f59dc8cb2e7 100644
2363 +--- a/fs/nfs/pnfs.c
2364 ++++ b/fs/nfs/pnfs.c
2365 +@@ -294,6 +294,7 @@ void
2366 + pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
2367 + {
2368 + struct inode *inode;
2369 ++ unsigned long i_state;
2370 +
2371 + if (!lo)
2372 + return;
2373 +@@ -304,8 +305,12 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
2374 + if (!list_empty(&lo->plh_segs))
2375 + WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
2376 + pnfs_detach_layout_hdr(lo);
2377 ++ i_state = inode->i_state;
2378 + spin_unlock(&inode->i_lock);
2379 + pnfs_free_layout_hdr(lo);
2380 ++ /* Notify pnfs_destroy_layout_final() that we're done */
2381 ++ if (i_state & (I_FREEING | I_CLEAR))
2382 ++ wake_up_var(lo);
2383 + }
2384 + }
2385 +
2386 +@@ -734,8 +739,7 @@ pnfs_free_lseg_list(struct list_head *free_me)
2387 + }
2388 + }
2389 +
2390 +-void
2391 +-pnfs_destroy_layout(struct nfs_inode *nfsi)
2392 ++static struct pnfs_layout_hdr *__pnfs_destroy_layout(struct nfs_inode *nfsi)
2393 + {
2394 + struct pnfs_layout_hdr *lo;
2395 + LIST_HEAD(tmp_list);
2396 +@@ -753,9 +757,34 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
2397 + pnfs_put_layout_hdr(lo);
2398 + } else
2399 + spin_unlock(&nfsi->vfs_inode.i_lock);
2400 ++ return lo;
2401 ++}
2402 ++
2403 ++void pnfs_destroy_layout(struct nfs_inode *nfsi)
2404 ++{
2405 ++ __pnfs_destroy_layout(nfsi);
2406 + }
2407 + EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
2408 +
2409 ++static bool pnfs_layout_removed(struct nfs_inode *nfsi,
2410 ++ struct pnfs_layout_hdr *lo)
2411 ++{
2412 ++ bool ret;
2413 ++
2414 ++ spin_lock(&nfsi->vfs_inode.i_lock);
2415 ++ ret = nfsi->layout != lo;
2416 ++ spin_unlock(&nfsi->vfs_inode.i_lock);
2417 ++ return ret;
2418 ++}
2419 ++
2420 ++void pnfs_destroy_layout_final(struct nfs_inode *nfsi)
2421 ++{
2422 ++ struct pnfs_layout_hdr *lo = __pnfs_destroy_layout(nfsi);
2423 ++
2424 ++ if (lo)
2425 ++ wait_var_event(lo, pnfs_layout_removed(nfsi, lo));
2426 ++}
2427 ++
2428 + static bool
2429 + pnfs_layout_add_bulk_destroy_list(struct inode *inode,
2430 + struct list_head *layout_list)
2431 +diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
2432 +index 2661c44c62db4..78c3893918486 100644
2433 +--- a/fs/nfs/pnfs.h
2434 ++++ b/fs/nfs/pnfs.h
2435 +@@ -266,6 +266,7 @@ struct pnfs_layout_segment *pnfs_layout_process(struct nfs4_layoutget *lgp);
2436 + void pnfs_layoutget_free(struct nfs4_layoutget *lgp);
2437 + void pnfs_free_lseg_list(struct list_head *tmp_list);
2438 + void pnfs_destroy_layout(struct nfs_inode *);
2439 ++void pnfs_destroy_layout_final(struct nfs_inode *);
2440 + void pnfs_destroy_all_layouts(struct nfs_client *);
2441 + int pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
2442 + struct nfs_fsid *fsid,
2443 +@@ -710,6 +711,10 @@ static inline void pnfs_destroy_layout(struct nfs_inode *nfsi)
2444 + {
2445 + }
2446 +
2447 ++static inline void pnfs_destroy_layout_final(struct nfs_inode *nfsi)
2448 ++{
2449 ++}
2450 ++
2451 + static inline struct pnfs_layout_segment *
2452 + pnfs_get_lseg(struct pnfs_layout_segment *lseg)
2453 + {
2454 +diff --git a/fs/pnode.h b/fs/pnode.h
2455 +index 49a058c73e4c7..26f74e092bd98 100644
2456 +--- a/fs/pnode.h
2457 ++++ b/fs/pnode.h
2458 +@@ -44,7 +44,7 @@ int propagate_mount_busy(struct mount *, int);
2459 + void propagate_mount_unlock(struct mount *);
2460 + void mnt_release_group_id(struct mount *);
2461 + int get_dominating_id(struct mount *mnt, const struct path *root);
2462 +-unsigned int mnt_get_count(struct mount *mnt);
2463 ++int mnt_get_count(struct mount *mnt);
2464 + void mnt_set_mountpoint(struct mount *, struct mountpoint *,
2465 + struct mount *);
2466 + void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp,
2467 +diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
2468 +index a6f856f341dc7..c5562c871c8be 100644
2469 +--- a/fs/quota/quota_tree.c
2470 ++++ b/fs/quota/quota_tree.c
2471 +@@ -62,7 +62,7 @@ static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
2472 +
2473 + memset(buf, 0, info->dqi_usable_bs);
2474 + return sb->s_op->quota_read(sb, info->dqi_type, buf,
2475 +- info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
2476 ++ info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
2477 + }
2478 +
2479 + static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
2480 +@@ -71,7 +71,7 @@ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
2481 + ssize_t ret;
2482 +
2483 + ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
2484 +- info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
2485 ++ info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
2486 + if (ret != info->dqi_usable_bs) {
2487 + quota_error(sb, "dquota write failed");
2488 + if (ret >= 0)
2489 +@@ -284,7 +284,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
2490 + blk);
2491 + goto out_buf;
2492 + }
2493 +- dquot->dq_off = (blk << info->dqi_blocksize_bits) +
2494 ++ dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) +
2495 + sizeof(struct qt_disk_dqdbheader) +
2496 + i * info->dqi_entry_size;
2497 + kfree(buf);
2498 +@@ -559,7 +559,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
2499 + ret = -EIO;
2500 + goto out_buf;
2501 + } else {
2502 +- ret = (blk << info->dqi_blocksize_bits) + sizeof(struct
2503 ++ ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct
2504 + qt_disk_dqdbheader) + i * info->dqi_entry_size;
2505 + }
2506 + out_buf:
2507 +diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
2508 +index 8bf88d690729e..476a7ff494822 100644
2509 +--- a/fs/reiserfs/stree.c
2510 ++++ b/fs/reiserfs/stree.c
2511 +@@ -454,6 +454,12 @@ static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
2512 + "(second one): %h", ih);
2513 + return 0;
2514 + }
2515 ++ if (is_direntry_le_ih(ih) && (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE))) {
2516 ++ reiserfs_warning(NULL, "reiserfs-5093",
2517 ++ "item entry count seems wrong %h",
2518 ++ ih);
2519 ++ return 0;
2520 ++ }
2521 + prev_location = ih_location(ih);
2522 + }
2523 +
2524 +diff --git a/include/linux/mm.h b/include/linux/mm.h
2525 +index db6ae4d3fb4ed..cd5c313729ea1 100644
2526 +--- a/include/linux/mm.h
2527 ++++ b/include/linux/mm.h
2528 +@@ -2439,8 +2439,9 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
2529 + #endif
2530 +
2531 + extern void set_dma_reserve(unsigned long new_dma_reserve);
2532 +-extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
2533 +- enum meminit_context, struct vmem_altmap *, int migratetype);
2534 ++extern void memmap_init_zone(unsigned long, int, unsigned long,
2535 ++ unsigned long, unsigned long, enum meminit_context,
2536 ++ struct vmem_altmap *, int migratetype);
2537 + extern void setup_per_zone_wmarks(void);
2538 + extern int __meminit init_per_zone_wmark_min(void);
2539 + extern void mem_init(void);
2540 +diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h
2541 +index 5ed721ad5b198..af2a44c08683d 100644
2542 +--- a/include/uapi/linux/const.h
2543 ++++ b/include/uapi/linux/const.h
2544 +@@ -28,4 +28,9 @@
2545 + #define _BITUL(x) (_UL(1) << (x))
2546 + #define _BITULL(x) (_ULL(1) << (x))
2547 +
2548 ++#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
2549 ++#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
2550 ++
2551 ++#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
2552 ++
2553 + #endif /* _UAPI_LINUX_CONST_H */
2554 +diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
2555 +index 9ca87bc73c447..cde753bb20935 100644
2556 +--- a/include/uapi/linux/ethtool.h
2557 ++++ b/include/uapi/linux/ethtool.h
2558 +@@ -14,7 +14,7 @@
2559 + #ifndef _UAPI_LINUX_ETHTOOL_H
2560 + #define _UAPI_LINUX_ETHTOOL_H
2561 +
2562 +-#include <linux/kernel.h>
2563 ++#include <linux/const.h>
2564 + #include <linux/types.h>
2565 + #include <linux/if_ether.h>
2566 +
2567 +diff --git a/include/uapi/linux/kernel.h b/include/uapi/linux/kernel.h
2568 +index 0ff8f7477847c..fadf2db71fe8a 100644
2569 +--- a/include/uapi/linux/kernel.h
2570 ++++ b/include/uapi/linux/kernel.h
2571 +@@ -3,13 +3,6 @@
2572 + #define _UAPI_LINUX_KERNEL_H
2573 +
2574 + #include <linux/sysinfo.h>
2575 +-
2576 +-/*
2577 +- * 'kernel.h' contains some often-used function prototypes etc
2578 +- */
2579 +-#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
2580 +-#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
2581 +-
2582 +-#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
2583 ++#include <linux/const.h>
2584 +
2585 + #endif /* _UAPI_LINUX_KERNEL_H */
2586 +diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
2587 +index f9a1be7fc6962..ead2e72e5c88e 100644
2588 +--- a/include/uapi/linux/lightnvm.h
2589 ++++ b/include/uapi/linux/lightnvm.h
2590 +@@ -21,7 +21,7 @@
2591 + #define _UAPI_LINUX_LIGHTNVM_H
2592 +
2593 + #ifdef __KERNEL__
2594 +-#include <linux/kernel.h>
2595 ++#include <linux/const.h>
2596 + #include <linux/ioctl.h>
2597 + #else /* __KERNEL__ */
2598 + #include <stdio.h>
2599 +diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h
2600 +index c36177a86516e..a1fd6173e2dbe 100644
2601 +--- a/include/uapi/linux/mroute6.h
2602 ++++ b/include/uapi/linux/mroute6.h
2603 +@@ -2,7 +2,7 @@
2604 + #ifndef _UAPI__LINUX_MROUTE6_H
2605 + #define _UAPI__LINUX_MROUTE6_H
2606 +
2607 +-#include <linux/kernel.h>
2608 ++#include <linux/const.h>
2609 + #include <linux/types.h>
2610 + #include <linux/sockios.h>
2611 + #include <linux/in6.h> /* For struct sockaddr_in6. */
2612 +diff --git a/include/uapi/linux/netfilter/x_tables.h b/include/uapi/linux/netfilter/x_tables.h
2613 +index a8283f7dbc519..b8c6bb233ac1c 100644
2614 +--- a/include/uapi/linux/netfilter/x_tables.h
2615 ++++ b/include/uapi/linux/netfilter/x_tables.h
2616 +@@ -1,7 +1,7 @@
2617 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2618 + #ifndef _UAPI_X_TABLES_H
2619 + #define _UAPI_X_TABLES_H
2620 +-#include <linux/kernel.h>
2621 ++#include <linux/const.h>
2622 + #include <linux/types.h>
2623 +
2624 + #define XT_FUNCTION_MAXNAMELEN 30
2625 +diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
2626 +index c3816ff7bfc32..3d94269bbfa87 100644
2627 +--- a/include/uapi/linux/netlink.h
2628 ++++ b/include/uapi/linux/netlink.h
2629 +@@ -2,7 +2,7 @@
2630 + #ifndef _UAPI__LINUX_NETLINK_H
2631 + #define _UAPI__LINUX_NETLINK_H
2632 +
2633 +-#include <linux/kernel.h>
2634 ++#include <linux/const.h>
2635 + #include <linux/socket.h> /* for __kernel_sa_family_t */
2636 + #include <linux/types.h>
2637 +
2638 +diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
2639 +index 27c1ed2822e69..458179df9b271 100644
2640 +--- a/include/uapi/linux/sysctl.h
2641 ++++ b/include/uapi/linux/sysctl.h
2642 +@@ -23,7 +23,7 @@
2643 + #ifndef _UAPI_LINUX_SYSCTL_H
2644 + #define _UAPI_LINUX_SYSCTL_H
2645 +
2646 +-#include <linux/kernel.h>
2647 ++#include <linux/const.h>
2648 + #include <linux/types.h>
2649 + #include <linux/compiler.h>
2650 +
2651 +diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
2652 +index 191c329e482ad..32596fdbcd5b8 100644
2653 +--- a/kernel/cgroup/cgroup-v1.c
2654 ++++ b/kernel/cgroup/cgroup-v1.c
2655 +@@ -908,6 +908,8 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
2656 + opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
2657 + if (opt == -ENOPARAM) {
2658 + if (strcmp(param->key, "source") == 0) {
2659 ++ if (fc->source)
2660 ++ return invalf(fc, "Multiple sources not supported");
2661 + fc->source = param->string;
2662 + param->string = NULL;
2663 + return 0;
2664 +diff --git a/kernel/module.c b/kernel/module.c
2665 +index a4fa44a652a75..e20499309b2af 100644
2666 +--- a/kernel/module.c
2667 ++++ b/kernel/module.c
2668 +@@ -1895,7 +1895,6 @@ static int mod_sysfs_init(struct module *mod)
2669 + if (err)
2670 + mod_kobject_put(mod);
2671 +
2672 +- /* delay uevent until full sysfs population */
2673 + out:
2674 + return err;
2675 + }
2676 +@@ -1932,7 +1931,6 @@ static int mod_sysfs_setup(struct module *mod,
2677 + add_sect_attrs(mod, info);
2678 + add_notes_attrs(mod, info);
2679 +
2680 +- kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
2681 + return 0;
2682 +
2683 + out_unreg_modinfo_attrs:
2684 +@@ -3639,6 +3637,9 @@ static noinline int do_init_module(struct module *mod)
2685 + blocking_notifier_call_chain(&module_notify_list,
2686 + MODULE_STATE_LIVE, mod);
2687 +
2688 ++ /* Delay uevent until module has finished its init routine */
2689 ++ kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
2690 ++
2691 + /*
2692 + * We need to finish all async code before the module init sequence
2693 + * is done. This has potential to deadlock. For example, a newly
2694 +@@ -3991,6 +3992,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
2695 + MODULE_STATE_GOING, mod);
2696 + klp_module_going(mod);
2697 + bug_cleanup:
2698 ++ mod->state = MODULE_STATE_GOING;
2699 + /* module_bug_cleanup needs module_mutex protection */
2700 + mutex_lock(&module_mutex);
2701 + module_bug_cleanup(mod);
2702 +diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
2703 +index 81632cd5e3b72..e8d351b7f9b03 100644
2704 +--- a/kernel/time/tick-sched.c
2705 ++++ b/kernel/time/tick-sched.c
2706 +@@ -941,13 +941,6 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
2707 + */
2708 + if (tick_do_timer_cpu == cpu)
2709 + return false;
2710 +- /*
2711 +- * Boot safety: make sure the timekeeping duty has been
2712 +- * assigned before entering dyntick-idle mode,
2713 +- * tick_do_timer_cpu is TICK_DO_TIMER_BOOT
2714 +- */
2715 +- if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_BOOT))
2716 +- return false;
2717 +
2718 + /* Should not happen for nohz-full */
2719 + if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
2720 +diff --git a/lib/zlib_dfltcc/Makefile b/lib/zlib_dfltcc/Makefile
2721 +index 8e4d5afbbb109..66e1c96387c40 100644
2722 +--- a/lib/zlib_dfltcc/Makefile
2723 ++++ b/lib/zlib_dfltcc/Makefile
2724 +@@ -8,4 +8,4 @@
2725 +
2726 + obj-$(CONFIG_ZLIB_DFLTCC) += zlib_dfltcc.o
2727 +
2728 +-zlib_dfltcc-objs := dfltcc.o dfltcc_deflate.o dfltcc_inflate.o dfltcc_syms.o
2729 ++zlib_dfltcc-objs := dfltcc.o dfltcc_deflate.o dfltcc_inflate.o
2730 +diff --git a/lib/zlib_dfltcc/dfltcc.c b/lib/zlib_dfltcc/dfltcc.c
2731 +index c30de430b30ca..782f76e9d4dab 100644
2732 +--- a/lib/zlib_dfltcc/dfltcc.c
2733 ++++ b/lib/zlib_dfltcc/dfltcc.c
2734 +@@ -1,7 +1,8 @@
2735 + // SPDX-License-Identifier: Zlib
2736 + /* dfltcc.c - SystemZ DEFLATE CONVERSION CALL support. */
2737 +
2738 +-#include <linux/zutil.h>
2739 ++#include <linux/export.h>
2740 ++#include <linux/module.h>
2741 + #include "dfltcc_util.h"
2742 + #include "dfltcc.h"
2743 +
2744 +@@ -53,3 +54,6 @@ void dfltcc_reset(
2745 + dfltcc_state->dht_threshold = DFLTCC_DHT_MIN_SAMPLE_SIZE;
2746 + dfltcc_state->param.ribm = DFLTCC_RIBM;
2747 + }
2748 ++EXPORT_SYMBOL(dfltcc_reset);
2749 ++
2750 ++MODULE_LICENSE("GPL");
2751 +diff --git a/lib/zlib_dfltcc/dfltcc_deflate.c b/lib/zlib_dfltcc/dfltcc_deflate.c
2752 +index 00c185101c6d1..6c946e8532eec 100644
2753 +--- a/lib/zlib_dfltcc/dfltcc_deflate.c
2754 ++++ b/lib/zlib_dfltcc/dfltcc_deflate.c
2755 +@@ -4,6 +4,7 @@
2756 + #include "dfltcc_util.h"
2757 + #include "dfltcc.h"
2758 + #include <asm/setup.h>
2759 ++#include <linux/export.h>
2760 + #include <linux/zutil.h>
2761 +
2762 + /*
2763 +@@ -34,6 +35,7 @@ int dfltcc_can_deflate(
2764 +
2765 + return 1;
2766 + }
2767 ++EXPORT_SYMBOL(dfltcc_can_deflate);
2768 +
2769 + static void dfltcc_gdht(
2770 + z_streamp strm
2771 +@@ -277,3 +279,4 @@ again:
2772 + goto again; /* deflate() must use all input or all output */
2773 + return 1;
2774 + }
2775 ++EXPORT_SYMBOL(dfltcc_deflate);
2776 +diff --git a/lib/zlib_dfltcc/dfltcc_inflate.c b/lib/zlib_dfltcc/dfltcc_inflate.c
2777 +index db107016d29b3..fb60b5a6a1cb6 100644
2778 +--- a/lib/zlib_dfltcc/dfltcc_inflate.c
2779 ++++ b/lib/zlib_dfltcc/dfltcc_inflate.c
2780 +@@ -125,7 +125,7 @@ dfltcc_inflate_action dfltcc_inflate(
2781 + param->ho = (state->write - state->whave) & ((1 << HB_BITS) - 1);
2782 + if (param->hl)
2783 + param->nt = 0; /* Honor history for the first block */
2784 +- param->cv = state->flags ? REVERSE(state->check) : state->check;
2785 ++ param->cv = state->check;
2786 +
2787 + /* Inflate */
2788 + do {
2789 +@@ -138,7 +138,7 @@ dfltcc_inflate_action dfltcc_inflate(
2790 + state->bits = param->sbb;
2791 + state->whave = param->hl;
2792 + state->write = (param->ho + param->hl) & ((1 << HB_BITS) - 1);
2793 +- state->check = state->flags ? REVERSE(param->cv) : param->cv;
2794 ++ state->check = param->cv;
2795 + if (cc == DFLTCC_CC_OP2_CORRUPT && param->oesc != 0) {
2796 + /* Report an error if stream is corrupted */
2797 + state->mode = BAD;
2798 +diff --git a/lib/zlib_dfltcc/dfltcc_syms.c b/lib/zlib_dfltcc/dfltcc_syms.c
2799 +deleted file mode 100644
2800 +index 6f23481804c1d..0000000000000
2801 +--- a/lib/zlib_dfltcc/dfltcc_syms.c
2802 ++++ /dev/null
2803 +@@ -1,17 +0,0 @@
2804 +-// SPDX-License-Identifier: GPL-2.0-only
2805 +-/*
2806 +- * linux/lib/zlib_dfltcc/dfltcc_syms.c
2807 +- *
2808 +- * Exported symbols for the s390 zlib dfltcc support.
2809 +- *
2810 +- */
2811 +-
2812 +-#include <linux/init.h>
2813 +-#include <linux/module.h>
2814 +-#include <linux/zlib.h>
2815 +-#include "dfltcc.h"
2816 +-
2817 +-EXPORT_SYMBOL(dfltcc_can_deflate);
2818 +-EXPORT_SYMBOL(dfltcc_deflate);
2819 +-EXPORT_SYMBOL(dfltcc_reset);
2820 +-MODULE_LICENSE("GPL");
2821 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2822 +index 3b38ea958e954..1fd11f96a707a 100644
2823 +--- a/mm/hugetlb.c
2824 ++++ b/mm/hugetlb.c
2825 +@@ -4106,10 +4106,30 @@ retry_avoidcopy:
2826 + * may get SIGKILLed if it later faults.
2827 + */
2828 + if (outside_reserve) {
2829 ++ struct address_space *mapping = vma->vm_file->f_mapping;
2830 ++ pgoff_t idx;
2831 ++ u32 hash;
2832 ++
2833 + put_page(old_page);
2834 + BUG_ON(huge_pte_none(pte));
2835 ++ /*
2836 ++ * Drop hugetlb_fault_mutex and i_mmap_rwsem before
2837 ++ * unmapping. unmapping needs to hold i_mmap_rwsem
2838 ++ * in write mode. Dropping i_mmap_rwsem in read mode
2839 ++ * here is OK as COW mappings do not interact with
2840 ++ * PMD sharing.
2841 ++ *
2842 ++ * Reacquire both after unmap operation.
2843 ++ */
2844 ++ idx = vma_hugecache_offset(h, vma, haddr);
2845 ++ hash = hugetlb_fault_mutex_hash(mapping, idx);
2846 ++ mutex_unlock(&hugetlb_fault_mutex_table[hash]);
2847 ++ i_mmap_unlock_read(mapping);
2848 ++
2849 + unmap_ref_private(mm, vma, old_page, haddr);
2850 +- BUG_ON(huge_pte_none(pte));
2851 ++
2852 ++ i_mmap_lock_read(mapping);
2853 ++ mutex_lock(&hugetlb_fault_mutex_table[hash]);
2854 + spin_lock(ptl);
2855 + ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
2856 + if (likely(ptep &&
2857 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
2858 +index 0f855deea4b2d..aa453a4331437 100644
2859 +--- a/mm/memory_hotplug.c
2860 ++++ b/mm/memory_hotplug.c
2861 +@@ -714,7 +714,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
2862 + * expects the zone spans the pfn range. All the pages in the range
2863 + * are reserved so nobody should be touching them so we should be safe
2864 + */
2865 +- memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
2866 ++ memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, 0,
2867 + MEMINIT_HOTPLUG, altmap, migratetype);
2868 +
2869 + set_zone_contiguous(zone);
2870 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2871 +index 32f783ddb5c3a..14b9e83ff9da2 100644
2872 +--- a/mm/page_alloc.c
2873 ++++ b/mm/page_alloc.c
2874 +@@ -448,6 +448,8 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
2875 + if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
2876 + return false;
2877 +
2878 ++ if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
2879 ++ return true;
2880 + /*
2881 + * We start only with one section of pages, more pages are added as
2882 + * needed until the rest of deferred pages are initialized.
2883 +@@ -6050,7 +6052,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
2884 + * zone stats (e.g., nr_isolate_pageblock) are touched.
2885 + */
2886 + void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2887 +- unsigned long start_pfn,
2888 ++ unsigned long start_pfn, unsigned long zone_end_pfn,
2889 + enum meminit_context context,
2890 + struct vmem_altmap *altmap, int migratetype)
2891 + {
2892 +@@ -6086,7 +6088,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2893 + if (context == MEMINIT_EARLY) {
2894 + if (overlap_memmap_init(zone, &pfn))
2895 + continue;
2896 +- if (defer_init(nid, pfn, end_pfn))
2897 ++ if (defer_init(nid, pfn, zone_end_pfn))
2898 + break;
2899 + }
2900 +
2901 +@@ -6200,7 +6202,7 @@ void __meminit __weak memmap_init(unsigned long size, int nid,
2902 +
2903 + if (end_pfn > start_pfn) {
2904 + size = end_pfn - start_pfn;
2905 +- memmap_init_zone(size, nid, zone, start_pfn,
2906 ++ memmap_init_zone(size, nid, zone, start_pfn, range_end_pfn,
2907 + MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
2908 + }
2909 + }
2910 +diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c
2911 +index 5635604cb9ba1..25a9e566ef5cd 100644
2912 +--- a/net/ethtool/channels.c
2913 ++++ b/net/ethtool/channels.c
2914 +@@ -194,8 +194,9 @@ int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
2915 + if (netif_is_rxfh_configured(dev) &&
2916 + !ethtool_get_max_rxfh_channel(dev, &max_rx_in_use) &&
2917 + (channels.combined_count + channels.rx_count) <= max_rx_in_use) {
2918 ++ ret = -EINVAL;
2919 + GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing indirection table settings");
2920 +- return -EINVAL;
2921 ++ goto out_ops;
2922 + }
2923 +
2924 + /* Disabling channels, query zero-copy AF_XDP sockets */
2925 +@@ -203,8 +204,9 @@ int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
2926 + min(channels.rx_count, channels.tx_count);
2927 + for (i = from_channel; i < old_total; i++)
2928 + if (xsk_get_pool_from_qid(dev, i)) {
2929 ++ ret = -EINVAL;
2930 + GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing zerocopy AF_XDP sockets");
2931 +- return -EINVAL;
2932 ++ goto out_ops;
2933 + }
2934 +
2935 + ret = dev->ethtool_ops->set_channels(dev, &channels);
2936 +diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c
2937 +index 0baad0ce18328..c3a5489964cde 100644
2938 +--- a/net/ethtool/strset.c
2939 ++++ b/net/ethtool/strset.c
2940 +@@ -182,7 +182,7 @@ static int strset_parse_request(struct ethnl_req_info *req_base,
2941 + ret = strset_get_id(attr, &id, extack);
2942 + if (ret < 0)
2943 + return ret;
2944 +- if (ret >= ETH_SS_COUNT) {
2945 ++ if (id >= ETH_SS_COUNT) {
2946 + NL_SET_ERR_MSG_ATTR(extack, attr,
2947 + "unknown string set id");
2948 + return -EOPNOTSUPP;
2949 +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
2950 +index 88f2a7a0ccb86..967ce9ccfc0da 100644
2951 +--- a/net/mptcp/protocol.c
2952 ++++ b/net/mptcp/protocol.c
2953 +@@ -2081,6 +2081,8 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
2954 + sock_reset_flag(nsk, SOCK_RCU_FREE);
2955 + /* will be fully established after successful MPC subflow creation */
2956 + inet_sk_state_store(nsk, TCP_SYN_RECV);
2957 ++
2958 ++ security_inet_csk_clone(nsk, req);
2959 + bh_unlock_sock(nsk);
2960 +
2961 + /* keep a single reference */
2962 +diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
2963 +index b0ad7687ee2c8..c6653ee7f701b 100644
2964 +--- a/net/sched/sch_taprio.c
2965 ++++ b/net/sched/sch_taprio.c
2966 +@@ -1596,6 +1596,21 @@ free_sched:
2967 + return err;
2968 + }
2969 +
2970 ++static void taprio_reset(struct Qdisc *sch)
2971 ++{
2972 ++ struct taprio_sched *q = qdisc_priv(sch);
2973 ++ struct net_device *dev = qdisc_dev(sch);
2974 ++ int i;
2975 ++
2976 ++ hrtimer_cancel(&q->advance_timer);
2977 ++ if (q->qdiscs) {
2978 ++ for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
2979 ++ qdisc_reset(q->qdiscs[i]);
2980 ++ }
2981 ++ sch->qstats.backlog = 0;
2982 ++ sch->q.qlen = 0;
2983 ++}
2984 ++
2985 + static void taprio_destroy(struct Qdisc *sch)
2986 + {
2987 + struct taprio_sched *q = qdisc_priv(sch);
2988 +@@ -1606,7 +1621,6 @@ static void taprio_destroy(struct Qdisc *sch)
2989 + list_del(&q->taprio_list);
2990 + spin_unlock(&taprio_list_lock);
2991 +
2992 +- hrtimer_cancel(&q->advance_timer);
2993 +
2994 + taprio_disable_offload(dev, q, NULL);
2995 +
2996 +@@ -1953,6 +1967,7 @@ static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
2997 + .init = taprio_init,
2998 + .change = taprio_change,
2999 + .destroy = taprio_destroy,
3000 ++ .reset = taprio_reset,
3001 + .peek = taprio_peek,
3002 + .dequeue = taprio_dequeue,
3003 + .enqueue = taprio_enqueue,
3004 +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
3005 +index 47b155a49226f..9f3f8e953ff04 100644
3006 +--- a/sound/core/pcm_native.c
3007 ++++ b/sound/core/pcm_native.c
3008 +@@ -755,8 +755,13 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
3009 + runtime->boundary *= 2;
3010 +
3011 + /* clear the buffer for avoiding possible kernel info leaks */
3012 +- if (runtime->dma_area && !substream->ops->copy_user)
3013 +- memset(runtime->dma_area, 0, runtime->dma_bytes);
3014 ++ if (runtime->dma_area && !substream->ops->copy_user) {
3015 ++ size_t size = runtime->dma_bytes;
3016 ++
3017 ++ if (runtime->info & SNDRV_PCM_INFO_MMAP)
3018 ++ size = PAGE_ALIGN(size);
3019 ++ memset(runtime->dma_area, 0, size);
3020 ++ }
3021 +
3022 + snd_pcm_timer_resolution_change(substream);
3023 + snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
3024 +diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
3025 +index c78720a3299c4..257ad5206240f 100644
3026 +--- a/sound/core/rawmidi.c
3027 ++++ b/sound/core/rawmidi.c
3028 +@@ -95,11 +95,21 @@ static inline unsigned short snd_rawmidi_file_flags(struct file *file)
3029 + }
3030 + }
3031 +
3032 +-static inline int snd_rawmidi_ready(struct snd_rawmidi_substream *substream)
3033 ++static inline bool __snd_rawmidi_ready(struct snd_rawmidi_runtime *runtime)
3034 ++{
3035 ++ return runtime->avail >= runtime->avail_min;
3036 ++}
3037 ++
3038 ++static bool snd_rawmidi_ready(struct snd_rawmidi_substream *substream)
3039 + {
3040 + struct snd_rawmidi_runtime *runtime = substream->runtime;
3041 ++ unsigned long flags;
3042 ++ bool ready;
3043 +
3044 +- return runtime->avail >= runtime->avail_min;
3045 ++ spin_lock_irqsave(&runtime->lock, flags);
3046 ++ ready = __snd_rawmidi_ready(runtime);
3047 ++ spin_unlock_irqrestore(&runtime->lock, flags);
3048 ++ return ready;
3049 + }
3050 +
3051 + static inline int snd_rawmidi_ready_append(struct snd_rawmidi_substream *substream,
3052 +@@ -1019,7 +1029,7 @@ int snd_rawmidi_receive(struct snd_rawmidi_substream *substream,
3053 + if (result > 0) {
3054 + if (runtime->event)
3055 + schedule_work(&runtime->event_work);
3056 +- else if (snd_rawmidi_ready(substream))
3057 ++ else if (__snd_rawmidi_ready(runtime))
3058 + wake_up(&runtime->sleep);
3059 + }
3060 + spin_unlock_irqrestore(&runtime->lock, flags);
3061 +@@ -1098,7 +1108,7 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun
3062 + result = 0;
3063 + while (count > 0) {
3064 + spin_lock_irq(&runtime->lock);
3065 +- while (!snd_rawmidi_ready(substream)) {
3066 ++ while (!__snd_rawmidi_ready(runtime)) {
3067 + wait_queue_entry_t wait;
3068 +
3069 + if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
3070 +@@ -1115,9 +1125,11 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun
3071 + return -ENODEV;
3072 + if (signal_pending(current))
3073 + return result > 0 ? result : -ERESTARTSYS;
3074 +- if (!runtime->avail)
3075 +- return result > 0 ? result : -EIO;
3076 + spin_lock_irq(&runtime->lock);
3077 ++ if (!runtime->avail) {
3078 ++ spin_unlock_irq(&runtime->lock);
3079 ++ return result > 0 ? result : -EIO;
3080 ++ }
3081 + }
3082 + spin_unlock_irq(&runtime->lock);
3083 + count1 = snd_rawmidi_kernel_read1(substream,
3084 +@@ -1255,7 +1267,7 @@ int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int coun
3085 + runtime->avail += count;
3086 + substream->bytes += count;
3087 + if (count > 0) {
3088 +- if (runtime->drain || snd_rawmidi_ready(substream))
3089 ++ if (runtime->drain || __snd_rawmidi_ready(runtime))
3090 + wake_up(&runtime->sleep);
3091 + }
3092 + return count;
3093 +@@ -1444,9 +1456,11 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
3094 + return -ENODEV;
3095 + if (signal_pending(current))
3096 + return result > 0 ? result : -ERESTARTSYS;
3097 +- if (!runtime->avail && !timeout)
3098 +- return result > 0 ? result : -EIO;
3099 + spin_lock_irq(&runtime->lock);
3100 ++ if (!runtime->avail && !timeout) {
3101 ++ spin_unlock_irq(&runtime->lock);
3102 ++ return result > 0 ? result : -EIO;
3103 ++ }
3104 + }
3105 + spin_unlock_irq(&runtime->lock);
3106 + count1 = snd_rawmidi_kernel_write1(substream, buf, NULL, count);
3107 +@@ -1526,6 +1540,7 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
3108 + struct snd_rawmidi *rmidi;
3109 + struct snd_rawmidi_substream *substream;
3110 + struct snd_rawmidi_runtime *runtime;
3111 ++ unsigned long buffer_size, avail, xruns;
3112 +
3113 + rmidi = entry->private_data;
3114 + snd_iprintf(buffer, "%s\n\n", rmidi->name);
3115 +@@ -1544,13 +1559,16 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
3116 + " Owner PID : %d\n",
3117 + pid_vnr(substream->pid));
3118 + runtime = substream->runtime;
3119 ++ spin_lock_irq(&runtime->lock);
3120 ++ buffer_size = runtime->buffer_size;
3121 ++ avail = runtime->avail;
3122 ++ spin_unlock_irq(&runtime->lock);
3123 + snd_iprintf(buffer,
3124 + " Mode : %s\n"
3125 + " Buffer size : %lu\n"
3126 + " Avail : %lu\n",
3127 + runtime->oss ? "OSS compatible" : "native",
3128 +- (unsigned long) runtime->buffer_size,
3129 +- (unsigned long) runtime->avail);
3130 ++ buffer_size, avail);
3131 + }
3132 + }
3133 + }
3134 +@@ -1568,13 +1586,16 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
3135 + " Owner PID : %d\n",
3136 + pid_vnr(substream->pid));
3137 + runtime = substream->runtime;
3138 ++ spin_lock_irq(&runtime->lock);
3139 ++ buffer_size = runtime->buffer_size;
3140 ++ avail = runtime->avail;
3141 ++ xruns = runtime->xruns;
3142 ++ spin_unlock_irq(&runtime->lock);
3143 + snd_iprintf(buffer,
3144 + " Buffer size : %lu\n"
3145 + " Avail : %lu\n"
3146 + " Overruns : %lu\n",
3147 +- (unsigned long) runtime->buffer_size,
3148 +- (unsigned long) runtime->avail,
3149 +- (unsigned long) runtime->xruns);
3150 ++ buffer_size, avail, xruns);
3151 + }
3152 + }
3153 + }
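
Note on the rawmidi.c changes above: they follow the usual locking split where __snd_rawmidi_ready() assumes runtime->lock is already held and is called from paths that hold it, while snd_rawmidi_ready() takes the lock itself for outside callers; the read/write paths additionally re-check runtime->avail under the lock before returning -EIO, closing a race with concurrent updates. A generic sketch of the locked/unlocked helper pattern, using hypothetical example_ names:

struct example_runtime {
	spinlock_t lock;
	unsigned long avail;
	unsigned long avail_min;
};

/* Caller already holds rt->lock. */
static bool __example_ready(struct example_runtime *rt)
{
	return rt->avail >= rt->avail_min;
}

/* Caller does not hold the lock; take it around the check. */
static bool example_ready(struct example_runtime *rt)
{
	unsigned long flags;
	bool ready;

	spin_lock_irqsave(&rt->lock, flags);
	ready = __example_ready(rt);
	spin_unlock_irqrestore(&rt->lock, flags);
	return ready;
}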
3154 +diff --git a/sound/core/seq/seq_queue.h b/sound/core/seq/seq_queue.h
3155 +index 9254c8dbe5e37..25d2d6b610079 100644
3156 +--- a/sound/core/seq/seq_queue.h
3157 ++++ b/sound/core/seq/seq_queue.h
3158 +@@ -26,10 +26,10 @@ struct snd_seq_queue {
3159 +
3160 + struct snd_seq_timer *timer; /* time keeper for this queue */
3161 + int owner; /* client that 'owns' the timer */
3162 +- unsigned int locked:1, /* timer is only accesibble by owner if set */
3163 +- klocked:1, /* kernel lock (after START) */
3164 +- check_again:1,
3165 +- check_blocked:1;
3166 ++ bool locked; /* timer is only accesibble by owner if set */
3167 ++ bool klocked; /* kernel lock (after START) */
3168 ++ bool check_again; /* concurrent access happened during check */
3169 ++ bool check_blocked; /* queue being checked */
3170 +
3171 + unsigned int flags; /* status flags */
3172 + unsigned int info_flags; /* info for sync */
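
Note on the seq_queue.h hunk: four single-bit bitfields are replaced with plain bool members. Adjacent bitfields share one storage unit, so two contexts setting different flags concurrently race on the same read-modify-write; separate bool members each occupy their own byte, so independent stores do not clobber neighbouring flags. A minimal illustration of the two layouts (struct names illustrative):

/* All four flags packed into one shared word: concurrent writers race. */
struct flags_as_bitfields {
	unsigned int locked:1, klocked:1, check_again:1, check_blocked:1;
};

/* Each flag in its own byte: stores to one flag leave the others intact. */
struct flags_as_bools {
	bool locked, klocked, check_again, check_blocked;
};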
3173 +diff --git a/tools/include/uapi/linux/const.h b/tools/include/uapi/linux/const.h
3174 +index 5ed721ad5b198..af2a44c08683d 100644
3175 +--- a/tools/include/uapi/linux/const.h
3176 ++++ b/tools/include/uapi/linux/const.h
3177 +@@ -28,4 +28,9 @@
3178 + #define _BITUL(x) (_UL(1) << (x))
3179 + #define _BITULL(x) (_ULL(1) << (x))
3180 +
3181 ++#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
3182 ++#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
3183 ++
3184 ++#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
3185 ++
3186 + #endif /* _UAPI_LINUX_CONST_H */
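
Note on the final hunk: it mirrors the kernel's alignment and rounding macros into the copy of const.h kept under tools/include, so in-tree tooling can use the same definitions. A small stand-alone example of what they evaluate to, using arbitrary demonstration values:

#include <stdio.h>

/* Same definitions as added above (typeof is a GNU C extension). */
#define __ALIGN_KERNEL(x, a)		__ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask)	(((x) + (mask)) & ~(mask))
#define __KERNEL_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	printf("%lu\n", __ALIGN_KERNEL(1000UL, 64UL));		/* prints 1024 */
	printf("%lu\n", __KERNEL_DIV_ROUND_UP(1000UL, 64UL));	/* prints 16   */
	return 0;
}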