Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Sun, 09 Sep 2018 23:29:00 +0000
Message-Id: 1536535727.47c28eec857bf59e5ffd9d3fc0d1f6daa471aa9b.mpagano@gentoo
commit: 47c28eec857bf59e5ffd9d3fc0d1f6daa471aa9b
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Sep 9 23:28:47 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Sep 9 23:28:47 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=47c28eec

Linux patch 4.14.69

 0000_README              |    4 +
 1068_linux-4.14.69.patch | 3362 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3366 insertions(+)

diff --git a/0000_README b/0000_README
index 4fd9ed9..2a8e1bb 100644
--- a/0000_README
+++ b/0000_README
@@ -315,6 +315,10 @@ Patch: 1067_linux-4.14.68.patch
 From: http://www.kernel.org
 Desc: Linux 4.14.68

+Patch: 1068_linux-4.14.69.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.69
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1068_linux-4.14.69.patch b/1068_linux-4.14.69.patch
new file mode 100644
index 0000000..461b50e
--- /dev/null
+++ b/1068_linux-4.14.69.patch
@@ -0,0 +1,3362 @@
+diff --git a/Makefile b/Makefile
+index 3da579058926..3ecda1d2e23a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 68
++SUBLEVEL = 69
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
+index a48976dc9bcd..918c3938ef66 100644
+--- a/arch/alpha/kernel/osf_sys.c
++++ b/arch/alpha/kernel/osf_sys.c
+@@ -530,24 +530,19 @@ SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path,
+ SYSCALL_DEFINE1(osf_utsname, char __user *, name)
+ {
+ int error;
++ char tmp[5 * 32];
+
+ down_read(&uts_sem);
+- error = -EFAULT;
+- if (copy_to_user(name + 0, utsname()->sysname, 32))
+- goto out;
+- if (copy_to_user(name + 32, utsname()->nodename, 32))
+- goto out;
+- if (copy_to_user(name + 64, utsname()->release, 32))
+- goto out;
+- if (copy_to_user(name + 96, utsname()->version, 32))
+- goto out;
+- if (copy_to_user(name + 128, utsname()->machine, 32))
+- goto out;
++ memcpy(tmp + 0 * 32, utsname()->sysname, 32);
++ memcpy(tmp + 1 * 32, utsname()->nodename, 32);
++ memcpy(tmp + 2 * 32, utsname()->release, 32);
++ memcpy(tmp + 3 * 32, utsname()->version, 32);
++ memcpy(tmp + 4 * 32, utsname()->machine, 32);
++ up_read(&uts_sem);
+
+- error = 0;
+- out:
+- up_read(&uts_sem);
+- return error;
++ if (copy_to_user(name, tmp, sizeof(tmp)))
++ return -EFAULT;
++ return 0;
+ }
+
+ SYSCALL_DEFINE0(getpagesize)
+@@ -567,18 +562,21 @@ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
+ {
+ int len, err = 0;
+ char *kname;
++ char tmp[32];
+
+- if (namelen > 32)
++ if (namelen < 0 || namelen > 32)
+ namelen = 32;
+
+ down_read(&uts_sem);
+ kname = utsname()->domainname;
+ len = strnlen(kname, namelen);
+- if (copy_to_user(name, kname, min(len + 1, namelen)))
+- err = -EFAULT;
++ len = min(len + 1, namelen);
++ memcpy(tmp, kname, len);
+ up_read(&uts_sem);
+
+- return err;
++ if (copy_to_user(name, tmp, len))
++ return -EFAULT;
++ return 0;
+ }
+
+ /*
+@@ -739,13 +737,14 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
+ };
+ unsigned long offset;
+ const char *res;
+- long len, err = -EINVAL;
++ long len;
++ char tmp[__NEW_UTS_LEN + 1];
+
+ offset = command-1;
+ if (offset >= ARRAY_SIZE(sysinfo_table)) {
+ /* Digital UNIX has a few unpublished interfaces here */
+ printk("sysinfo(%d)", command);
+- goto out;
++ return -EINVAL;
+ }
+
+ down_read(&uts_sem);
+@@ -753,13 +752,11 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
+ len = strlen(res)+1;
+ if ((unsigned long)len > (unsigned long)count)
+ len = count;
+- if (copy_to_user(buf, res, len))
+- err = -EFAULT;
+- else
+- err = 0;
++ memcpy(tmp, res, len);
+ up_read(&uts_sem);
+- out:
+- return err;
++ if (copy_to_user(buf, tmp, len))
++ return -EFAULT;
++ return 0;
+ }
+
+ SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
+diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
+index 92a9740c533f..3b1db7b9ec50 100644
+--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
++++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
+@@ -206,6 +206,7 @@
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x70>;
++ reset-gpio = <&gpio TEGRA_GPIO(BB, 0) GPIO_ACTIVE_LOW>;
+ };
+ };
+
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 1bbb89d37f57..c30cd78b6918 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -693,7 +693,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK
+
+ config HOLES_IN_ZONE
+ def_bool y
+- depends on NUMA
+
+ source kernel/Kconfig.preempt
+ source kernel/Kconfig.hz
+diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
+index 5a23010af600..1e7a33592e29 100644
+--- a/arch/powerpc/include/asm/fadump.h
++++ b/arch/powerpc/include/asm/fadump.h
+@@ -195,9 +195,6 @@ struct fadump_crash_info_header {
+ struct cpumask online_mask;
+ };
+
+-/* Crash memory ranges */
+-#define INIT_CRASHMEM_RANGES (INIT_MEMBLOCK_REGIONS + 2)
+-
+ struct fad_crash_memory_ranges {
+ unsigned long long base;
+ unsigned long long size;
+diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
+index d0020bc1f209..5a6470383ca3 100644
+--- a/arch/powerpc/kernel/fadump.c
++++ b/arch/powerpc/kernel/fadump.c
+@@ -47,8 +47,10 @@ static struct fadump_mem_struct fdm;
+ static const struct fadump_mem_struct *fdm_active;
+
+ static DEFINE_MUTEX(fadump_mutex);
+-struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES];
++struct fad_crash_memory_ranges *crash_memory_ranges;
++int crash_memory_ranges_size;
+ int crash_mem_ranges;
++int max_crash_mem_ranges;
+
+ /* Scan the Firmware Assisted dump configuration details. */
+ int __init early_init_dt_scan_fw_dump(unsigned long node,
+@@ -843,38 +845,88 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
+ return 0;
+ }
+
+-static inline void fadump_add_crash_memory(unsigned long long base,
+- unsigned long long end)
++static void free_crash_memory_ranges(void)
++{
++ kfree(crash_memory_ranges);
++ crash_memory_ranges = NULL;
++ crash_memory_ranges_size = 0;
++ max_crash_mem_ranges = 0;
++}
++
++/*
++ * Allocate or reallocate crash memory ranges array in incremental units
++ * of PAGE_SIZE.
++ */
++static int allocate_crash_memory_ranges(void)
++{
++ struct fad_crash_memory_ranges *new_array;
++ u64 new_size;
++
++ new_size = crash_memory_ranges_size + PAGE_SIZE;
++ pr_debug("Allocating %llu bytes of memory for crash memory ranges\n",
++ new_size);
++
++ new_array = krealloc(crash_memory_ranges, new_size, GFP_KERNEL);
++ if (new_array == NULL) {
++ pr_err("Insufficient memory for setting up crash memory ranges\n");
++ free_crash_memory_ranges();
++ return -ENOMEM;
++ }
++
++ crash_memory_ranges = new_array;
++ crash_memory_ranges_size = new_size;
++ max_crash_mem_ranges = (new_size /
++ sizeof(struct fad_crash_memory_ranges));
++ return 0;
++}
++
++static inline int fadump_add_crash_memory(unsigned long long base,
++ unsigned long long end)
+ {
+ if (base == end)
+- return;
++ return 0;
++
++ if (crash_mem_ranges == max_crash_mem_ranges) {
++ int ret;
++
++ ret = allocate_crash_memory_ranges();
++ if (ret)
++ return ret;
++ }
+
+ pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
+ crash_mem_ranges, base, end - 1, (end - base));
+ crash_memory_ranges[crash_mem_ranges].base = base;
+ crash_memory_ranges[crash_mem_ranges].size = end - base;
+ crash_mem_ranges++;
++ return 0;
+ }
+
+-static void fadump_exclude_reserved_area(unsigned long long start,
++static int fadump_exclude_reserved_area(unsigned long long start,
+ unsigned long long end)
+ {
+ unsigned long long ra_start, ra_end;
++ int ret = 0;
+
+ ra_start = fw_dump.reserve_dump_area_start;
+ ra_end = ra_start + fw_dump.reserve_dump_area_size;
+
+ if ((ra_start < end) && (ra_end > start)) {
+ if ((start < ra_start) && (end > ra_end)) {
+- fadump_add_crash_memory(start, ra_start);
+- fadump_add_crash_memory(ra_end, end);
++ ret = fadump_add_crash_memory(start, ra_start);
++ if (ret)
++ return ret;
++
++ ret = fadump_add_crash_memory(ra_end, end);
+ } else if (start < ra_start) {
+- fadump_add_crash_memory(start, ra_start);
++ ret = fadump_add_crash_memory(start, ra_start);
+ } else if (ra_end < end) {
+- fadump_add_crash_memory(ra_end, end);
++ ret = fadump_add_crash_memory(ra_end, end);
+ }
+ } else
+- fadump_add_crash_memory(start, end);
++ ret = fadump_add_crash_memory(start, end);
++
++ return ret;
+ }
+
+ static int fadump_init_elfcore_header(char *bufp)
+@@ -914,10 +966,11 @@ static int fadump_init_elfcore_header(char *bufp)
+ * Traverse through memblock structure and setup crash memory ranges. These
+ * ranges will be used create PT_LOAD program headers in elfcore header.
+ */
+-static void fadump_setup_crash_memory_ranges(void)
++static int fadump_setup_crash_memory_ranges(void)
+ {
+ struct memblock_region *reg;
+ unsigned long long start, end;
++ int ret;
+
+ pr_debug("Setup crash memory ranges.\n");
+ crash_mem_ranges = 0;
+@@ -928,7 +981,9 @@ static void fadump_setup_crash_memory_ranges(void)
+ * specified during fadump registration. We need to create a separate
+ * program header for this chunk with the correct offset.
+ */
+- fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
++ ret = fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
++ if (ret)
++ return ret;
+
+ for_each_memblock(memory, reg) {
+ start = (unsigned long long)reg->base;
+@@ -948,8 +1003,12 @@ static void fadump_setup_crash_memory_ranges(void)
+ }
+
+ /* add this range excluding the reserved dump area. */
+- fadump_exclude_reserved_area(start, end);
++ ret = fadump_exclude_reserved_area(start, end);
++ if (ret)
++ return ret;
+ }
++
++ return 0;
+ }
+
+ /*
+@@ -1072,6 +1131,7 @@ static int register_fadump(void)
+ {
+ unsigned long addr;
+ void *vaddr;
++ int ret;
+
+ /*
+ * If no memory is reserved then we can not register for firmware-
+@@ -1080,7 +1140,9 @@ static int register_fadump(void)
+ if (!fw_dump.reserve_dump_area_size)
+ return -ENODEV;
+
+- fadump_setup_crash_memory_ranges();
++ ret = fadump_setup_crash_memory_ranges();
++ if (ret)
++ return ret;
+
+ addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
+ /* Initialize fadump crash info header. */
+@@ -1158,6 +1220,7 @@ void fadump_cleanup(void)
+ } else if (fw_dump.dump_registered) {
+ /* Un-register Firmware-assisted dump if it was registered. */
+ fadump_unregister_dump(&fdm);
++ free_crash_memory_ranges();
+ }
+ }
+
+diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
+index 816055927ee4..d735937d975c 100644
+--- a/arch/powerpc/mm/mmu_context_iommu.c
++++ b/arch/powerpc/mm/mmu_context_iommu.c
+@@ -130,6 +130,7 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+ long i, j, ret = 0, locked_entries = 0;
+ unsigned int pageshift;
+ unsigned long flags;
++ unsigned long cur_ua;
+ struct page *page = NULL;
+
+ mutex_lock(&mem_list_mutex);
+@@ -178,7 +179,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+ }
+
+ for (i = 0; i < entries; ++i) {
+- if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
++ cur_ua = ua + (i << PAGE_SHIFT);
++ if (1 != get_user_pages_fast(cur_ua,
+ 1/* pages */, 1/* iswrite */, &page)) {
+ ret = -EFAULT;
+ for (j = 0; j < i; ++j)
+@@ -197,7 +199,7 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+ if (is_migrate_cma_page(page)) {
+ if (mm_iommu_move_page_from_cma(page))
+ goto populate;
+- if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
++ if (1 != get_user_pages_fast(cur_ua,
+ 1/* pages */, 1/* iswrite */,
+ &page)) {
+ ret = -EFAULT;
+@@ -211,20 +213,21 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+ }
+ populate:
+ pageshift = PAGE_SHIFT;
+- if (PageCompound(page)) {
++ if (mem->pageshift > PAGE_SHIFT && PageCompound(page)) {
+ pte_t *pte;
+ struct page *head = compound_head(page);
+ unsigned int compshift = compound_order(head);
++ unsigned int pteshift;
+
+ local_irq_save(flags); /* disables as well */
+- pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift);
+- local_irq_restore(flags);
++ pte = find_linux_pte(mm->pgd, cur_ua, NULL, &pteshift);
+
+ /* Double check it is still the same pinned page */
+ if (pte && pte_page(*pte) == head &&
+- pageshift == compshift)
+- pageshift = max_t(unsigned int, pageshift,
++ pteshift == compshift + PAGE_SHIFT)
++ pageshift = max_t(unsigned int, pteshift,
+ PAGE_SHIFT);
++ local_irq_restore(flags);
+ }
+ mem->pageshift = min(mem->pageshift, pageshift);
+ mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
+index 677b29ef4532..e919696c7137 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -3286,12 +3286,49 @@ static void pnv_pci_ioda_create_dbgfs(void)
+ #endif /* CONFIG_DEBUG_FS */
+ }
+
++static void pnv_pci_enable_bridge(struct pci_bus *bus)
++{
++ struct pci_dev *dev = bus->self;
++ struct pci_bus *child;
++
++ /* Empty bus ? bail */
++ if (list_empty(&bus->devices))
++ return;
++
++ /*
++ * If there's a bridge associated with that bus enable it. This works
++ * around races in the generic code if the enabling is done during
++ * parallel probing. This can be removed once those races have been
++ * fixed.
++ */
++ if (dev) {
++ int rc = pci_enable_device(dev);
++ if (rc)
++ pci_err(dev, "Error enabling bridge (%d)\n", rc);
++ pci_set_master(dev);
++ }
++
++ /* Perform the same to child busses */
++ list_for_each_entry(child, &bus->children, node)
++ pnv_pci_enable_bridge(child);
++}
++
++static void pnv_pci_enable_bridges(void)
++{
++ struct pci_controller *hose;
++
++ list_for_each_entry(hose, &hose_list, list_node)
++ pnv_pci_enable_bridge(hose->bus);
++}
++
+ static void pnv_pci_ioda_fixup(void)
+ {
+ pnv_pci_ioda_setup_PEs();
+ pnv_pci_ioda_setup_iommu_api();
+ pnv_pci_ioda_create_dbgfs();
+
++ pnv_pci_enable_bridges();
++
+ #ifdef CONFIG_EEH
+ eeh_init();
+ eeh_addr_cache_build();
+diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
+index 5e1ef9150182..2edc673be137 100644
+--- a/arch/powerpc/platforms/pseries/ras.c
++++ b/arch/powerpc/platforms/pseries/ras.c
+@@ -360,7 +360,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
+ }
+
+ savep = __va(regs->gpr[3]);
+- regs->gpr[3] = savep[0]; /* restore original r3 */
++ regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
+
+ /* If it isn't an extended log we can use the per cpu 64bit buffer */
+ h = (struct rtas_error_log *)&savep[1];
+diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
+index 990703b7cf4d..4b7719b2a73c 100644
+--- a/arch/sparc/kernel/sys_sparc_32.c
++++ b/arch/sparc/kernel/sys_sparc_32.c
+@@ -204,23 +204,27 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig,
+
+ asmlinkage long sys_getdomainname(char __user *name, int len)
+ {
+- int nlen, err;
+-
++ int nlen, err;
++ char tmp[__NEW_UTS_LEN + 1];
++
+ if (len < 0)
+ return -EINVAL;
+
+- down_read(&uts_sem);
+-
++ down_read(&uts_sem);
++
+ nlen = strlen(utsname()->domainname) + 1;
+ err = -EINVAL;
+ if (nlen > len)
+- goto out;
++ goto out_unlock;
++ memcpy(tmp, utsname()->domainname, nlen);
+
+- err = -EFAULT;
+- if (!copy_to_user(name, utsname()->domainname, nlen))
+- err = 0;
++ up_read(&uts_sem);
+
+-out:
++ if (copy_to_user(name, tmp, nlen))
++ return -EFAULT;
++ return 0;
++
++out_unlock:
+ up_read(&uts_sem);
+ return err;
+ }
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index 55416db482ad..d79c1c74873c 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -527,23 +527,27 @@ extern void check_pending(int signum);
+
+ SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
+ {
+- int nlen, err;
++ int nlen, err;
++ char tmp[__NEW_UTS_LEN + 1];
+
+ if (len < 0)
+ return -EINVAL;
+
+- down_read(&uts_sem);
+-
++ down_read(&uts_sem);
++
+ nlen = strlen(utsname()->domainname) + 1;
+ err = -EINVAL;
+ if (nlen > len)
+- goto out;
++ goto out_unlock;
++ memcpy(tmp, utsname()->domainname, nlen);
++
++ up_read(&uts_sem);
+
+- err = -EFAULT;
+- if (!copy_to_user(name, utsname()->domainname, nlen))
+- err = 0;
++ if (copy_to_user(name, tmp, nlen))
++ return -EFAULT;
++ return 0;
+
+-out:
++out_unlock:
+ up_read(&uts_sem);
+ return err;
+ }
+diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
+index f24cd9f1799a..928b0c6083c9 100644
+--- a/arch/x86/kernel/kexec-bzimage64.c
++++ b/arch/x86/kernel/kexec-bzimage64.c
+@@ -532,7 +532,7 @@ static int bzImage64_cleanup(void *loader_data)
+ static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
+ {
+ return verify_pefile_signature(kernel, kernel_len,
+- NULL,
++ VERIFY_USE_SECONDARY_KEYRING,
+ VERIFYING_KEXEC_PE_SIGNATURE);
+ }
+ #endif
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 8958b35f6008..a466ee14ad41 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -200,12 +200,14 @@ static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_
+
+ static const struct {
+ const char *option;
+- enum vmx_l1d_flush_state cmd;
++ bool for_parse;
+ } vmentry_l1d_param[] = {
+- {"auto", VMENTER_L1D_FLUSH_AUTO},
+- {"never", VMENTER_L1D_FLUSH_NEVER},
+- {"cond", VMENTER_L1D_FLUSH_COND},
+- {"always", VMENTER_L1D_FLUSH_ALWAYS},
++ [VMENTER_L1D_FLUSH_AUTO] = {"auto", true},
++ [VMENTER_L1D_FLUSH_NEVER] = {"never", true},
++ [VMENTER_L1D_FLUSH_COND] = {"cond", true},
++ [VMENTER_L1D_FLUSH_ALWAYS] = {"always", true},
++ [VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
++ [VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
+ };
+
+ #define L1D_CACHE_ORDER 4
+@@ -289,8 +291,9 @@ static int vmentry_l1d_flush_parse(const char *s)
+
+ if (s) {
+ for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
+- if (sysfs_streq(s, vmentry_l1d_param[i].option))
+- return vmentry_l1d_param[i].cmd;
++ if (vmentry_l1d_param[i].for_parse &&
++ sysfs_streq(s, vmentry_l1d_param[i].option))
++ return i;
+ }
+ }
+ return -EINVAL;
+@@ -300,13 +303,13 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
+ {
+ int l1tf, ret;
+
+- if (!boot_cpu_has(X86_BUG_L1TF))
+- return 0;
+-
+ l1tf = vmentry_l1d_flush_parse(s);
+ if (l1tf < 0)
+ return l1tf;
+
++ if (!boot_cpu_has(X86_BUG_L1TF))
++ return 0;
++
+ /*
+ * Has vmx_init() run already? If not then this is the pre init
+ * parameter parsing. In that case just store the value and let
+@@ -326,6 +329,9 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
+
+ static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
+ {
++ if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
++ return sprintf(s, "???\n");
++
+ return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
+ }
+
+diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h
+index 2041abb10a23..34545ecfdd6b 100644
+--- a/arch/xtensa/include/asm/cacheasm.h
++++ b/arch/xtensa/include/asm/cacheasm.h
+@@ -31,16 +31,32 @@
+ *
+ */
+
+- .macro __loop_cache_all ar at insn size line_width
+
+- movi \ar, 0
++ .macro __loop_cache_unroll ar at insn size line_width max_immed
++
++ .if (1 << (\line_width)) > (\max_immed)
++ .set _reps, 1
++ .elseif (2 << (\line_width)) > (\max_immed)
++ .set _reps, 2
++ .else
++ .set _reps, 4
++ .endif
++
++ __loopi \ar, \at, \size, (_reps << (\line_width))
++ .set _index, 0
++ .rep _reps
++ \insn \ar, _index << (\line_width)
++ .set _index, _index + 1
++ .endr
++ __endla \ar, \at, _reps << (\line_width)
++
++ .endm
++
+
+- __loopi \ar, \at, \size, (4 << (\line_width))
+- \insn \ar, 0 << (\line_width)
+- \insn \ar, 1 << (\line_width)
+- \insn \ar, 2 << (\line_width)
+- \insn \ar, 3 << (\line_width)
+- __endla \ar, \at, 4 << (\line_width)
++ .macro __loop_cache_all ar at insn size line_width max_immed
++
++ movi \ar, 0
++ __loop_cache_unroll \ar, \at, \insn, \size, \line_width, \max_immed
+
+ .endm
+
+@@ -57,14 +73,9 @@
+ .endm
+
+
+- .macro __loop_cache_page ar at insn line_width
++ .macro __loop_cache_page ar at insn line_width max_immed
+
+- __loopi \ar, \at, PAGE_SIZE, 4 << (\line_width)
+- \insn \ar, 0 << (\line_width)
+- \insn \ar, 1 << (\line_width)
+- \insn \ar, 2 << (\line_width)
+- \insn \ar, 3 << (\line_width)
+- __endla \ar, \at, 4 << (\line_width)
++ __loop_cache_unroll \ar, \at, \insn, PAGE_SIZE, \line_width, \max_immed
+
+ .endm
+
+@@ -72,7 +83,8 @@
+ .macro ___unlock_dcache_all ar at
+
+ #if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
+- __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
++ __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE \
++ XCHAL_DCACHE_LINEWIDTH 240
+ #endif
+
+ .endm
+@@ -81,7 +93,8 @@
+ .macro ___unlock_icache_all ar at
+
+ #if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE
+- __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH
++ __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE \
++ XCHAL_ICACHE_LINEWIDTH 240
+ #endif
+
+ .endm
+@@ -90,7 +103,8 @@
+ .macro ___flush_invalidate_dcache_all ar at
+
+ #if XCHAL_DCACHE_SIZE
+- __loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
++ __loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE \
++ XCHAL_DCACHE_LINEWIDTH 240
+ #endif
+
+ .endm
+@@ -99,7 +113,8 @@
+ .macro ___flush_dcache_all ar at
+
+ #if XCHAL_DCACHE_SIZE
+- __loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
++ __loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE \
++ XCHAL_DCACHE_LINEWIDTH 240
+ #endif
+
+ .endm
+@@ -108,8 +123,8 @@
+ .macro ___invalidate_dcache_all ar at
+
+ #if XCHAL_DCACHE_SIZE
+- __loop_cache_all \ar \at dii __stringify(DCACHE_WAY_SIZE) \
+- XCHAL_DCACHE_LINEWIDTH
++ __loop_cache_all \ar \at dii XCHAL_DCACHE_SIZE \
++ XCHAL_DCACHE_LINEWIDTH 1020
+ #endif
+
+ .endm
+@@ -118,8 +133,8 @@
+ .macro ___invalidate_icache_all ar at
+
+ #if XCHAL_ICACHE_SIZE
+- __loop_cache_all \ar \at iii __stringify(ICACHE_WAY_SIZE) \
+- XCHAL_ICACHE_LINEWIDTH
++ __loop_cache_all \ar \at iii XCHAL_ICACHE_SIZE \
++ XCHAL_ICACHE_LINEWIDTH 1020
+ #endif
+
+ .endm
+@@ -166,7 +181,7 @@
+ .macro ___flush_invalidate_dcache_page ar as
+
+ #if XCHAL_DCACHE_SIZE
+- __loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH
++ __loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH 1020
+ #endif
+
+ .endm
+@@ -175,7 +190,7 @@
+ .macro ___flush_dcache_page ar as
+
+ #if XCHAL_DCACHE_SIZE
+- __loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH
++ __loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH 1020
+ #endif
+
+ .endm
+@@ -184,7 +199,7 @@
+ .macro ___invalidate_dcache_page ar as
+
+ #if XCHAL_DCACHE_SIZE
+- __loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH
++ __loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH 1020
+ #endif
+
+ .endm
+@@ -193,7 +208,7 @@
+ .macro ___invalidate_icache_page ar as
+
+ #if XCHAL_ICACHE_SIZE
+- __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
++ __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH 1020
+ #endif
+
+ .endm
+diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
+index 5d53e504acae..4b571f3ea009 100644
+--- a/block/bfq-cgroup.c
++++ b/block/bfq-cgroup.c
+@@ -887,7 +887,8 @@ static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
+ if (ret)
+ return ret;
+
+- return bfq_io_set_weight_legacy(of_css(of), NULL, weight);
++ ret = bfq_io_set_weight_legacy(of_css(of), NULL, weight);
++ return ret ?: nbytes;
+ }
+
+ static int bfqg_print_stat(struct seq_file *sf, void *v)
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 68bae6338ad4..1d27e2a152e0 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -1025,6 +1025,7 @@ out_exit_flush_rq:
+ q->exit_rq_fn(q, q->fq->flush_rq);
+ out_free_flush_queue:
+ blk_free_flush_queue(q->fq);
++ q->fq = NULL;
+ return -ENOMEM;
+ }
+ EXPORT_SYMBOL(blk_init_allocated_queue);
+@@ -3458,9 +3459,11 @@ EXPORT_SYMBOL(blk_finish_plug);
+ */
+ void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
+ {
+- /* not support for RQF_PM and ->rpm_status in blk-mq yet */
+- if (q->mq_ops)
++ /* Don't enable runtime PM for blk-mq until it is ready */
++ if (q->mq_ops) {
++ pm_runtime_disable(dev);
+ return;
++ }
+
+ q->dev = dev;
+ q->rpm_status = RPM_ACTIVE;
+diff --git a/certs/system_keyring.c b/certs/system_keyring.c
+index 6251d1b27f0c..81728717523d 100644
+--- a/certs/system_keyring.c
++++ b/certs/system_keyring.c
+@@ -15,6 +15,7 @@
+ #include <linux/cred.h>
+ #include <linux/err.h>
+ #include <linux/slab.h>
++#include <linux/verification.h>
+ #include <keys/asymmetric-type.h>
+ #include <keys/system_keyring.h>
+ #include <crypto/pkcs7.h>
+@@ -230,7 +231,7 @@ int verify_pkcs7_signature(const void *data, size_t len,
+
+ if (!trusted_keys) {
+ trusted_keys = builtin_trusted_keys;
+- } else if (trusted_keys == (void *)1UL) {
++ } else if (trusted_keys == VERIFY_USE_SECONDARY_KEYRING) {
+ #ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
+ trusted_keys = secondary_trusted_keys;
+ #else
+diff --git a/crypto/asymmetric_keys/pkcs7_key_type.c b/crypto/asymmetric_keys/pkcs7_key_type.c
+index 1063b644efcd..b2aa925a84bc 100644
+--- a/crypto/asymmetric_keys/pkcs7_key_type.c
++++ b/crypto/asymmetric_keys/pkcs7_key_type.c
+@@ -62,7 +62,7 @@ static int pkcs7_preparse(struct key_preparsed_payload *prep)
+
+ return verify_pkcs7_signature(NULL, 0,
+ prep->data, prep->datalen,
+- (void *)1UL, usage,
++ VERIFY_USE_SECONDARY_KEYRING, usage,
+ pkcs7_view_content, prep);
+ }
+
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index f149d3e61234..1e2648e4c286 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -321,6 +321,7 @@ static ssize_t backing_dev_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+ {
+ char *file_name;
++ size_t sz;
+ struct file *backing_dev = NULL;
+ struct inode *inode;
+ struct address_space *mapping;
+@@ -341,7 +342,11 @@ static ssize_t backing_dev_store(struct device *dev,
+ goto out;
+ }
+
+- strlcpy(file_name, buf, len);
++ strlcpy(file_name, buf, PATH_MAX);
++ /* ignore trailing newline */
++ sz = strlen(file_name);
++ if (sz > 0 && file_name[sz - 1] == '\n')
++ file_name[sz - 1] = 0x00;
+
+ backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
+ if (IS_ERR(backing_dev)) {
+diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
+index 43e14bb512c8..6a16d22bc604 100644
+--- a/drivers/cpufreq/cpufreq_governor.c
++++ b/drivers/cpufreq/cpufreq_governor.c
+@@ -555,12 +555,20 @@ EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);
+
+ void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
+ {
+- struct policy_dbs_info *policy_dbs = policy->governor_data;
++ struct policy_dbs_info *policy_dbs;
++
++ /* Protect gov->gdbs_data against cpufreq_dbs_governor_exit() */
++ mutex_lock(&gov_dbs_data_mutex);
++ policy_dbs = policy->governor_data;
++ if (!policy_dbs)
++ goto out;
+
+ mutex_lock(&policy_dbs->update_mutex);
+ cpufreq_policy_apply_limits(policy);
+ gov_update_sample_delay(policy_dbs, 0);
+-
+ mutex_unlock(&policy_dbs->update_mutex);
++
++out:
++ mutex_unlock(&gov_dbs_data_mutex);
+ }
+ EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
+diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
+index e7966e37a5aa..ecc6d755d3c1 100644
+--- a/drivers/crypto/caam/caamalg_qi.c
++++ b/drivers/crypto/caam/caamalg_qi.c
+@@ -350,10 +350,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ int ret = 0;
+
+ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+- crypto_ablkcipher_set_flags(ablkcipher,
+- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ dev_err(jrdev, "key size mismatch\n");
+- return -EINVAL;
++ goto badkey;
+ }
+
+ memcpy(ctx->key, key, keylen);
+@@ -388,7 +386,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ return ret;
+ badkey:
+ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+- return 0;
++ return -EINVAL;
+ }
+
+ /*
+diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
+index 7ff4a25440ac..6f3f81bb880b 100644
+--- a/drivers/crypto/caam/caampkc.c
++++ b/drivers/crypto/caam/caampkc.c
+@@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
+ dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
+- dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+- dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
++ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
++ dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
+ }
+
+ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
+@@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
+ dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
+- dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+- dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
++ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
++ dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
+ }
+
+ /* RSA Job Completion handler */
+@@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
+ goto unmap_p;
+ }
+
+- pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
++ pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, pdb->tmp1_dma)) {
+ dev_err(dev, "Unable to map RSA tmp1 memory\n");
+ goto unmap_q;
+ }
+
+- pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
++ pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, pdb->tmp2_dma)) {
+ dev_err(dev, "Unable to map RSA tmp2 memory\n");
+ goto unmap_tmp1;
+@@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
+ return 0;
+
+ unmap_tmp1:
+- dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
++ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+ unmap_q:
+ dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
+ unmap_p:
+@@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
+ goto unmap_dq;
+ }
+
+- pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
++ pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, pdb->tmp1_dma)) {
+ dev_err(dev, "Unable to map RSA tmp1 memory\n");
+ goto unmap_qinv;
+ }
+
+- pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
++ pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, pdb->tmp2_dma)) {
+ dev_err(dev, "Unable to map RSA tmp2 memory\n");
+ goto unmap_tmp1;
+@@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
+ return 0;
+
+ unmap_tmp1:
+- dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
++ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+ unmap_qinv:
+ dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
+ unmap_dq:
+diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
+index d258953ff488..7fa1be184553 100644
+--- a/drivers/crypto/caam/jr.c
++++ b/drivers/crypto/caam/jr.c
+@@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg)
+ BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
+
+ /* Unmap just-run descriptor so we can post-process */
+- dma_unmap_single(dev, jrp->outring[hw_idx].desc,
++ dma_unmap_single(dev,
++ caam_dma_to_cpu(jrp->outring[hw_idx].desc),
+ jrp->entinfo[sw_idx].desc_size,
+ DMA_TO_DEVICE);
+
+diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
+index 5285ece4f33a..b71895871be3 100644
+--- a/drivers/crypto/vmx/aes_cbc.c
++++ b/drivers/crypto/vmx/aes_cbc.c
+@@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
+ ret = crypto_skcipher_encrypt(req);
+ skcipher_request_zero(req);
+ } else {
+- preempt_disable();
+- pagefault_disable();
+- enable_kernel_vsx();
+-
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ ret = blkcipher_walk_virt(desc, &walk);
+ while ((nbytes = walk.nbytes)) {
++ preempt_disable();
++ pagefault_disable();
++ enable_kernel_vsx();
+ aes_p8_cbc_encrypt(walk.src.virt.addr,
+ walk.dst.virt.addr,
+ nbytes & AES_BLOCK_MASK,
+ &ctx->enc_key, walk.iv, 1);
++ disable_kernel_vsx();
++ pagefault_enable();
++ preempt_enable();
++
+ nbytes &= AES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+-
+- disable_kernel_vsx();
+- pagefault_enable();
+- preempt_enable();
+ }
+
+ return ret;
+@@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
+ ret = crypto_skcipher_decrypt(req);
+ skcipher_request_zero(req);
+ } else {
+- preempt_disable();
+- pagefault_disable();
+- enable_kernel_vsx();
+-
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ ret = blkcipher_walk_virt(desc, &walk);
+ while ((nbytes = walk.nbytes)) {
++ preempt_disable();
++ pagefault_disable();
++ enable_kernel_vsx();
+ aes_p8_cbc_encrypt(walk.src.virt.addr,
+ walk.dst.virt.addr,
+ nbytes & AES_BLOCK_MASK,
+ &ctx->dec_key, walk.iv, 0);
++ disable_kernel_vsx();
++ pagefault_enable();
++ preempt_enable();
++
+ nbytes &= AES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+-
+- disable_kernel_vsx();
+- pagefault_enable();
+- preempt_enable();
+ }
+
+ return ret;
+diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
+index 8bd9aff0f55f..e9954a7d4694 100644
+--- a/drivers/crypto/vmx/aes_xts.c
++++ b/drivers/crypto/vmx/aes_xts.c
+@@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
+ ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
+ skcipher_request_zero(req);
+ } else {
++ blkcipher_walk_init(&walk, dst, src, nbytes);
++
++ ret = blkcipher_walk_virt(desc, &walk);
++
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+
+- blkcipher_walk_init(&walk, dst, src, nbytes);
+-
+- ret = blkcipher_walk_virt(desc, &walk);
+ iv = walk.iv;
+ memset(tweak, 0, AES_BLOCK_SIZE);
+ aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
+
++ disable_kernel_vsx();
++ pagefault_enable();
++ preempt_enable();
++
+ while ((nbytes = walk.nbytes)) {
++ preempt_disable();
++ pagefault_disable();
++ enable_kernel_vsx();
+ if (enc)
+ aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+ nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
+ else
+ aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
+ nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
++ disable_kernel_vsx();
++ pagefault_enable();
++ preempt_enable();
+
+ nbytes &= AES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+-
+- disable_kernel_vsx();
+- pagefault_enable();
+- preempt_enable();
+ }
+ return ret;
+ }
+diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
+index 35e9fb885486..95e96f04bf6f 100644
+--- a/drivers/extcon/extcon.c
++++ b/drivers/extcon/extcon.c
+@@ -433,8 +433,8 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
+ return index;
+
+ spin_lock_irqsave(&edev->lock, flags);
+-
+ state = !!(edev->state & BIT(index));
++ spin_unlock_irqrestore(&edev->lock, flags);
+
+ /*
+ * Call functions in a raw notifier chain for the specific one
+@@ -448,6 +448,7 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
+ */
+ raw_notifier_call_chain(&edev->nh_all, state, edev);
+
++ spin_lock_irqsave(&edev->lock, flags);
+ /* This could be in interrupt handler */
+ prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
+ if (!prop_buf) {
+diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
+index 709efe2357ea..05ae8c4a8a1b 100644
+--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
++++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
+@@ -782,6 +782,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
+ I915_USERPTR_UNSYNCHRONIZED))
+ return -EINVAL;
+
++ if (!args->user_size)
++ return -EINVAL;
++
+ if (offset_in_page(args->user_ptr | args->user_size))
+ return -EINVAL;
+
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index 05964347008d..d96b09fea835 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -541,11 +541,8 @@ static void reset_channel_cb(void *arg)
+ channel->onchannel_callback = NULL;
+ }
+
+-static int vmbus_close_internal(struct vmbus_channel *channel)
++void vmbus_reset_channel_cb(struct vmbus_channel *channel)
+ {
+- struct vmbus_channel_close_channel *msg;
+- int ret;
+-
+ /*
+ * vmbus_on_event(), running in the per-channel tasklet, can race
+ * with vmbus_close_internal() in the case of SMP guest, e.g., when
+@@ -555,6 +552,29 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
+ */
+ tasklet_disable(&channel->callback_event);
+
++ channel->sc_creation_callback = NULL;
++
++ /* Stop the callback asap */
++ if (channel->target_cpu != get_cpu()) {
++ put_cpu();
++ smp_call_function_single(channel->target_cpu, reset_channel_cb,
++ channel, true);
++ } else {
++ reset_channel_cb(channel);
++ put_cpu();
++ }
++
++ /* Re-enable tasklet for use on re-open */
++ tasklet_enable(&channel->callback_event);
++}
++
++static int vmbus_close_internal(struct vmbus_channel *channel)
++{
++ struct vmbus_channel_close_channel *msg;
++ int ret;
++
++ vmbus_reset_channel_cb(channel);
++
+ /*
+ * In case a device driver's probe() fails (e.g.,
+ * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
+@@ -568,16 +588,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
+ }
+
+ channel->state = CHANNEL_OPEN_STATE;
+- channel->sc_creation_callback = NULL;
+- /* Stop callback and cancel the timer asap */
+- if (channel->target_cpu != get_cpu()) {
+- put_cpu();
+- smp_call_function_single(channel->target_cpu, reset_channel_cb,
+- channel, true);
+- } else {
+- reset_channel_cb(channel);
+- put_cpu();
+- }
+
+ /* Send a closing message */
+
+@@ -620,8 +630,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
+ get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
+
+ out:
+- /* re-enable tasklet for use on re-open */
+- tasklet_enable(&channel->callback_event);
+ return ret;
+ }
+
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
+index 1939c0ca3741..1700b4e7758d 100644
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -881,6 +881,12 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
+ return;
+ }
+
++ /*
++ * Before setting channel->rescind in vmbus_rescind_cleanup(), we
++ * should make sure the channel callback is not running any more.
++ */
++ vmbus_reset_channel_cb(channel);
++
+ /*
+ * Now wait for offer handling to complete.
+ */
+diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
+index 565f7d8d3304..f2761b385541 100644
+--- a/drivers/iio/accel/sca3000.c
++++ b/drivers/iio/accel/sca3000.c
+@@ -797,6 +797,7 @@ static int sca3000_write_raw(struct iio_dev *indio_dev,
+ mutex_lock(&st->lock);
+ ret = sca3000_write_3db_freq(st, val);
+ mutex_unlock(&st->lock);
++ return ret;
+ default:
+ return -EINVAL;
+ }
+diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
+index 99eba524f6dd..1642b55f70da 100644
+--- a/drivers/iio/frequency/ad9523.c
++++ b/drivers/iio/frequency/ad9523.c
+@@ -508,7 +508,7 @@ static ssize_t ad9523_store(struct device *dev,
+ return ret;
+
+ if (!state)
+- return 0;
++ return len;
+
+ mutex_lock(&indio_dev->mlock);
+ switch ((u32)this_attr->address) {
+@@ -642,7 +642,7 @@ static int ad9523_read_raw(struct iio_dev *indio_dev,
+ code = (AD9523_CLK_DIST_DIV_PHASE_REV(ret) * 3141592) /
+ AD9523_CLK_DIST_DIV_REV(ret);
+ *val = code / 1000000;
+- *val2 = (code % 1000000) * 10;
++ *val2 = code % 1000000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
+index 9eb12c2e3c74..83cfe44f070e 100644
+--- a/drivers/infiniband/sw/rxe/rxe_comp.c
++++ b/drivers/infiniband/sw/rxe/rxe_comp.c
+@@ -276,6 +276,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
+ case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
+ if (wqe->wr.opcode != IB_WR_RDMA_READ &&
+ wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
++ wqe->status = IB_WC_FATAL_ERR;
+ return COMPST_ERROR;
+ }
+ reset_retry_counters(qp);
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 97c2225829ea..60105ba77889 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -1713,8 +1713,7 @@ static bool srpt_close_ch(struct srpt_rdma_ch *ch)
+ int ret;
+
+ if (!srpt_set_ch_state(ch, CH_DRAINING)) {
+- pr_debug("%s-%d: already closed\n", ch->sess_name,
+- ch->qp->qp_num);
++ pr_debug("%s: already closed\n", ch->sess_name);
+ return false;
+ }
+
+diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
+index e3dbb6101b4a..c0d1c4db5794 100644
+--- a/drivers/iommu/dmar.c
++++ b/drivers/iommu/dmar.c
+@@ -1336,8 +1336,8 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ qi_submit_sync(&desc, iommu);
+ }
+
+-void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
+- u64 addr, unsigned mask)
++void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
++ u16 qdep, u64 addr, unsigned mask)
+ {
+ struct qi_desc desc;
+
+@@ -1352,7 +1352,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
+ qdep = 0;
+
+ desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
+- QI_DIOTLB_TYPE;
++ QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
+
+ qi_submit_sync(&desc, iommu);
+ }
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index e8414bcf8390..aaf3fed97477 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -422,6 +422,7 @@ struct device_domain_info {
+ struct list_head global; /* link to global list */
+ u8 bus; /* PCI bus number */
+ u8 devfn; /* PCI devfn number */
++ u16 pfsid; /* SRIOV physical function source ID */
+ u8 pasid_supported:3;
+ u8 pasid_enabled:1;
+ u8 pri_supported:1;
+@@ -1502,6 +1503,20 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
+ return;
+
+ pdev = to_pci_dev(info->dev);
++ /* For IOMMU that supports device IOTLB throttling (DIT), we assign
++ * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
++ * queue depth at PF level. If DIT is not set, PFSID will be treated as
++ * reserved, which should be set to 0.
++ */
++ if (!ecap_dit(info->iommu->ecap))
++ info->pfsid = 0;
++ else {
++ struct pci_dev *pf_pdev;
++
++ /* pdev will be returned if device is not a vf */
++ pf_pdev = pci_physfn(pdev);
++ info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
++ }
+
+ #ifdef CONFIG_INTEL_IOMMU_SVM
+ /* The PCIe spec, in its wisdom, declares that the behaviour of
+@@ -1567,7 +1582,8 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
+
+ sid = info->bus << 8 | info->devfn;
+ qdep = info->ats_qdep;
+- qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
++ qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
++ qdep, addr, mask);
+ }
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ }
+diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
+index a7040163dd43..b8b2b3533f46 100644
+--- a/drivers/mailbox/mailbox-xgene-slimpro.c
++++ b/drivers/mailbox/mailbox-xgene-slimpro.c
+@@ -195,9 +195,9 @@ static int slimpro_mbox_probe(struct platform_device *pdev)
+ platform_set_drvdata(pdev, ctx);
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- mb_base = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
+- if (!mb_base)
+- return -ENOMEM;
++ mb_base = devm_ioremap_resource(&pdev->dev, regs);
++ if (IS_ERR(mb_base))
++ return PTR_ERR(mb_base);
+
+ /* Setup mailbox links */
+ for (i = 0; i < MBOX_CNT; i++) {
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index 930b00f6a3a2..5adb0c850b6c 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -456,8 +456,10 @@ static int bch_writeback_thread(void *arg)
+ * data on cache. BCACHE_DEV_DETACHING flag is set in
+ * bch_cached_dev_detach().
+ */
+- if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
++ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
++ up_write(&dc->writeback_lock);
+ break;
++ }
+ }
+
+ up_write(&dc->writeback_lock);
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index 4a4e9c75fc4c..0a5a45f3ec5f 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -362,7 +362,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
+ disk_super->version = cpu_to_le32(cmd->version);
+ memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
+ memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
+- disk_super->policy_hint_size = 0;
++ disk_super->policy_hint_size = cpu_to_le32(0);
+
+ __copy_sm_root(cmd, disk_super);
+
+@@ -700,6 +700,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
+ disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
+ disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
+ disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
++ disk_super->policy_hint_size = cpu_to_le32(cmd->policy_hint_size);
+
+ disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
+ disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
+@@ -1321,6 +1322,7 @@ static int __load_mapping_v1(struct dm_cache_metadata *cmd,
+
+ dm_oblock_t oblock;
+ unsigned flags;
++ bool dirty = true;
+
+ dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
+ memcpy(&mapping, mapping_value_le, sizeof(mapping));
+@@ -1331,8 +1333,10 @@ static int __load_mapping_v1(struct dm_cache_metadata *cmd,
+ dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
+ memcpy(&hint, hint_value_le, sizeof(hint));
+ }
++ if (cmd->clean_when_opened)
++ dirty = flags & M_DIRTY;
+
+- r = fn(context, oblock, to_cblock(cb), flags & M_DIRTY,
++ r = fn(context, oblock, to_cblock(cb), dirty,
+ le32_to_cpu(hint), hints_valid);
+ if (r) {
+ DMERR("policy couldn't load cache block %llu",
+@@ -1360,7 +1364,7 @@ static int __load_mapping_v2(struct dm_cache_metadata *cmd,
+
+ dm_oblock_t oblock;
+ unsigned flags;
+- bool dirty;
++ bool dirty = true;
+
+ dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
+ memcpy(&mapping, mapping_value_le, sizeof(mapping));
+@@ -1371,8 +1375,9 @@ static int __load_mapping_v2(struct dm_cache_metadata *cmd,
+ dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
+ memcpy(&hint, hint_value_le, sizeof(hint));
+ }
++ if (cmd->clean_when_opened)
++ dirty = dm_bitset_cursor_get_value(dirty_cursor);
+
+- dirty = dm_bitset_cursor_get_value(dirty_cursor);
+ r = fn(context, oblock, to_cblock(cb), dirty,
+ le32_to_cpu(hint), hints_valid);
+ if (r) {
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index f575110454b6..c60d29d09687 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -3072,11 +3072,11 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
+ */
+ limits->max_segment_size = PAGE_SIZE;
+
+- if (cc->sector_size != (1 << SECTOR_SHIFT)) {
+- limits->logical_block_size = cc->sector_size;
+- limits->physical_block_size = cc->sector_size;
+- blk_limits_io_min(limits, cc->sector_size);
+- }
++ limits->logical_block_size =
++ max_t(unsigned short, limits->logical_block_size, cc->sector_size);
++ limits->physical_block_size =
++ max_t(unsigned, limits->physical_block_size, cc->sector_size);
++ limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
+ }
+
+ static struct target_type crypt_target = {
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index cbc56372ff97..898286ed47a1 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -177,7 +177,7 @@ struct dm_integrity_c {
+ __u8 sectors_per_block;
+
+ unsigned char mode;
+- bool suspending;
++ int suspending;
+
+ int failed;
+
+@@ -2209,7 +2209,7 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
+
+ del_timer_sync(&ic->autocommit_timer);
+
+- ic->suspending = true;
++ WRITE_ONCE(ic->suspending, 1);
+
+ queue_work(ic->commit_wq, &ic->commit_work);
+ drain_workqueue(ic->commit_wq);
+@@ -2219,7 +2219,7 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
+ dm_integrity_flush_buffers(ic);
+ }
+
+- ic->suspending = false;
++ WRITE_ONCE(ic->suspending, 0);
+
+ BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
+
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 72ae5dc50532..6cf9ad4e4e16 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2514,6 +2514,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
+ case PM_WRITE:
+ if (old_mode != new_mode)
+ notify_of_pool_mode_change(pool, "write");
++ if (old_mode == PM_OUT_OF_DATA_SPACE)
++ cancel_delayed_work_sync(&pool->no_space_timeout);
+ pool->out_of_data_space = false;
+ pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
+ dm_pool_metadata_read_write(pool->pmd);
+diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
+index 698fa764999c..59b0c1fce9be 100644
+--- a/drivers/media/i2c/tvp5150.c
++++ b/drivers/media/i2c/tvp5150.c
+@@ -871,7 +871,7 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
+ f = &format->format;
+
+ f->width = decoder->rect.width;
+- f->height = decoder->rect.height;
++ f->height = decoder->rect.height / 2;
+
+ f->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ f->field = V4L2_FIELD_ALTERNATE;
+diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
+index c37ccbfd52f2..96c07fa1802a 100644
+--- a/drivers/mfd/hi655x-pmic.c
++++ b/drivers/mfd/hi655x-pmic.c
+@@ -49,7 +49,7 @@ static struct regmap_config hi655x_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = HI655X_STRIDE,
+ .val_bits = 8,
+- .max_register = HI655X_BUS_ADDR(0xFFF),
++ .max_register = HI655X_BUS_ADDR(0x400) - HI655X_STRIDE,
+ };
+
+ static struct resource pwrkey_resources[] = {
+diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
+index c1ba0d42cbc8..e0f29b8a872d 100644
+--- a/drivers/misc/cxl/main.c
++++ b/drivers/misc/cxl/main.c
+@@ -287,7 +287,7 @@ int cxl_adapter_context_get(struct cxl *adapter)
+ int rc;
+
+ rc = atomic_inc_unless_negative(&adapter->contexts_num);
+- return rc >= 0 ? 0 : -EBUSY;
++ return rc ? 0 : -EBUSY;
+ }
+
+ void cxl_adapter_context_put(struct cxl *adapter)
+diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
+index 56c6f79a5c5a..5f8b583c6e41 100644
+--- a/drivers/misc/vmw_balloon.c
++++ b/drivers/misc/vmw_balloon.c
+@@ -341,7 +341,13 @@ static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
+ success = false;
+ }
+
+- if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS)
++ /*
++ * 2MB pages are only supported with batching. If batching is for some
++ * reason disabled, do not use 2MB pages, since otherwise the legacy
++ * mechanism is used with 2MB pages, causing a failure.
++ */
++ if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
++ (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
+ b->supported_page_sizes = 2;
+ else
+ b->supported_page_sizes = 1;
+@@ -450,7 +456,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
+
+ pfn32 = (u32)pfn;
+ if (pfn32 != pfn)
+- return -1;
++ return -EINVAL;
+
+ STATS_INC(b->stats.lock[false]);
+
+@@ -460,7 +466,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
+
+ pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
+ STATS_INC(b->stats.lock_fail[false]);
+- return 1;
++ return -EIO;
+ }
+
+ static int vmballoon_send_batched_lock(struct vmballoon *b,
+@@ -597,11 +603,12 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
+
+ locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
+ target);
+- if (locked > 0) {
++ if (locked) {
+ STATS_INC(b->stats.refused_alloc[false]);
+
+- if (hv_status == VMW_BALLOON_ERROR_RESET ||
+- hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
++ if (locked == -EIO &&
++ (hv_status == VMW_BALLOON_ERROR_RESET ||
++ hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
1669 + vmballoon_free_page(page, false);
1670 + return -EIO;
1671 + }
1672 +@@ -617,7 +624,7 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
1673 + } else {
1674 + vmballoon_free_page(page, false);
1675 + }
1676 +- return -EIO;
1677 ++ return locked;
1678 + }
1679 +
1680 + /* track allocated page */
1681 +@@ -1029,29 +1036,30 @@ static void vmballoon_vmci_cleanup(struct vmballoon *b)
1682 + */
1683 + static int vmballoon_vmci_init(struct vmballoon *b)
1684 + {
1685 +- int error = 0;
1686 ++ unsigned long error, dummy;
1687 +
1688 +- if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) {
1689 +- error = vmci_doorbell_create(&b->vmci_doorbell,
1690 +- VMCI_FLAG_DELAYED_CB,
1691 +- VMCI_PRIVILEGE_FLAG_RESTRICTED,
1692 +- vmballoon_doorbell, b);
1693 +-
1694 +- if (error == VMCI_SUCCESS) {
1695 +- VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET,
1696 +- b->vmci_doorbell.context,
1697 +- b->vmci_doorbell.resource, error);
1698 +- STATS_INC(b->stats.doorbell_set);
1699 +- }
1700 +- }
1701 ++ if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
1702 ++ return 0;
1703 +
1704 +- if (error != 0) {
1705 +- vmballoon_vmci_cleanup(b);
1706 ++ error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
1707 ++ VMCI_PRIVILEGE_FLAG_RESTRICTED,
1708 ++ vmballoon_doorbell, b);
1709 +
1710 +- return -EIO;
1711 +- }
1712 ++ if (error != VMCI_SUCCESS)
1713 ++ goto fail;
1714 ++
1715 ++ error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
1716 ++ b->vmci_doorbell.resource, dummy);
1717 ++
1718 ++ STATS_INC(b->stats.doorbell_set);
1719 ++
1720 ++ if (error != VMW_BALLOON_SUCCESS)
1721 ++ goto fail;
1722 +
1723 + return 0;
1724 ++fail:
1725 ++ vmballoon_vmci_cleanup(b);
1726 ++ return -EIO;
1727 + }
1728 +
1729 + /*
1730 +@@ -1289,7 +1297,14 @@ static int __init vmballoon_init(void)
1731 +
1732 + return 0;
1733 + }
1734 +-module_init(vmballoon_init);
1735 ++
1736 ++/*
1737 ++ * Using late_initcall() instead of module_init() allows the balloon to use the
1738 ++ * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
1739 ++ * VMCI is probed only after the balloon is initialized. If the balloon is used
1740 ++ * as a module, late_initcall() is equivalent to module_init().
1741 ++ */
1742 ++late_initcall(vmballoon_init);
1743 +
1744 + static void __exit vmballoon_exit(void)
1745 + {
1746 +diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
1747 +index 8bae88a150fd..713658be6661 100644
1748 +--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
1749 ++++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
1750 +@@ -44,7 +44,7 @@
1751 + /* DM_CM_RST */
1752 + #define RST_DTRANRST1 BIT(9)
1753 + #define RST_DTRANRST0 BIT(8)
1754 +-#define RST_RESERVED_BITS GENMASK_ULL(32, 0)
1755 ++#define RST_RESERVED_BITS GENMASK_ULL(31, 0)
1756 +
1757 + /* DM_CM_INFO1 and DM_CM_INFO1_MASK */
1758 + #define INFO1_CLEAR 0
1759 +diff --git a/drivers/net/wireless/marvell/libertas/dev.h b/drivers/net/wireless/marvell/libertas/dev.h
1760 +index dd1ee1f0af48..469134930026 100644
1761 +--- a/drivers/net/wireless/marvell/libertas/dev.h
1762 ++++ b/drivers/net/wireless/marvell/libertas/dev.h
1763 +@@ -104,6 +104,7 @@ struct lbs_private {
1764 + u8 fw_ready;
1765 + u8 surpriseremoved;
1766 + u8 setup_fw_on_resume;
1767 ++ u8 power_up_on_resume;
1768 + int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
1769 + void (*reset_card) (struct lbs_private *priv);
1770 + int (*power_save) (struct lbs_private *priv);
1771 +diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
1772 +index 2300e796c6ab..43743c26c071 100644
1773 +--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
1774 ++++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
1775 +@@ -1290,15 +1290,23 @@ static void if_sdio_remove(struct sdio_func *func)
1776 + static int if_sdio_suspend(struct device *dev)
1777 + {
1778 + struct sdio_func *func = dev_to_sdio_func(dev);
1779 +- int ret;
1780 + struct if_sdio_card *card = sdio_get_drvdata(func);
1781 ++ struct lbs_private *priv = card->priv;
1782 ++ int ret;
1783 +
1784 + mmc_pm_flag_t flags = sdio_get_host_pm_caps(func);
1785 ++ priv->power_up_on_resume = false;
1786 +
1787 + /* If we're powered off anyway, just let the mmc layer remove the
1788 + * card. */
1789 +- if (!lbs_iface_active(card->priv))
1790 +- return -ENOSYS;
1791 ++ if (!lbs_iface_active(priv)) {
1792 ++ if (priv->fw_ready) {
1793 ++ priv->power_up_on_resume = true;
1794 ++ if_sdio_power_off(card);
1795 ++ }
1796 ++
1797 ++ return 0;
1798 ++ }
1799 +
1800 + dev_info(dev, "%s: suspend: PM flags = 0x%x\n",
1801 + sdio_func_id(func), flags);
1802 +@@ -1306,9 +1314,14 @@ static int if_sdio_suspend(struct device *dev)
1803 + /* If we aren't being asked to wake on anything, we should bail out
1804 + * and let the SD stack power down the card.
1805 + */
1806 +- if (card->priv->wol_criteria == EHS_REMOVE_WAKEUP) {
1807 ++ if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
1808 + dev_info(dev, "Suspend without wake params -- powering down card\n");
1809 +- return -ENOSYS;
1810 ++ if (priv->fw_ready) {
1811 ++ priv->power_up_on_resume = true;
1812 ++ if_sdio_power_off(card);
1813 ++ }
1814 ++
1815 ++ return 0;
1816 + }
1817 +
1818 + if (!(flags & MMC_PM_KEEP_POWER)) {
1819 +@@ -1321,7 +1334,7 @@ static int if_sdio_suspend(struct device *dev)
1820 + if (ret)
1821 + return ret;
1822 +
1823 +- ret = lbs_suspend(card->priv);
1824 ++ ret = lbs_suspend(priv);
1825 + if (ret)
1826 + return ret;
1827 +
1828 +@@ -1336,6 +1349,11 @@ static int if_sdio_resume(struct device *dev)
1829 +
1830 + dev_info(dev, "%s: resume: we're back\n", sdio_func_id(func));
1831 +
1832 ++ if (card->priv->power_up_on_resume) {
1833 ++ if_sdio_power_on(card);
1834 ++ wait_event(card->pwron_waitq, card->priv->fw_ready);
1835 ++ }
1836 ++
1837 + ret = lbs_resume(card->priv);
1838 +
1839 + return ret;
1840 +diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
1841 +index 2fffd42767c7..fb5ab5812a22 100644
1842 +--- a/drivers/nvdimm/bus.c
1843 ++++ b/drivers/nvdimm/bus.c
1844 +@@ -808,9 +808,9 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
1845 + * overshoots the remainder by 4 bytes, assume it was
1846 + * including 'status'.
1847 + */
1848 +- if (out_field[1] - 8 == remainder)
1849 ++ if (out_field[1] - 4 == remainder)
1850 + return remainder;
1851 +- return out_field[1] - 4;
1852 ++ return out_field[1] - 8;
1853 + } else if (cmd == ND_CMD_CALL) {
1854 + struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field;
1855 +
1856 +diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
1857 +index 4c22cb395040..f7b8a86fa5c5 100644
1858 +--- a/drivers/pwm/pwm-tiehrpwm.c
1859 ++++ b/drivers/pwm/pwm-tiehrpwm.c
1860 +@@ -33,10 +33,6 @@
1861 + #define TBCTL 0x00
1862 + #define TBPRD 0x0A
1863 +
1864 +-#define TBCTL_RUN_MASK (BIT(15) | BIT(14))
1865 +-#define TBCTL_STOP_NEXT 0
1866 +-#define TBCTL_STOP_ON_CYCLE BIT(14)
1867 +-#define TBCTL_FREE_RUN (BIT(15) | BIT(14))
1868 + #define TBCTL_PRDLD_MASK BIT(3)
1869 + #define TBCTL_PRDLD_SHDW 0
1870 + #define TBCTL_PRDLD_IMDT BIT(3)
1871 +@@ -360,7 +356,7 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
1872 + /* Channels polarity can be configured from action qualifier module */
1873 + configure_polarity(pc, pwm->hwpwm);
1874 +
1875 +- /* Enable TBCLK before enabling PWM device */
1876 ++ /* Enable TBCLK */
1877 + ret = clk_enable(pc->tbclk);
1878 + if (ret) {
1879 + dev_err(chip->dev, "Failed to enable TBCLK for %s: %d\n",
1880 +@@ -368,9 +364,6 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
1881 + return ret;
1882 + }
1883 +
1884 +- /* Enable time counter for free_run */
1885 +- ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_FREE_RUN);
1886 +-
1887 + return 0;
1888 + }
1889 +
1890 +@@ -388,6 +381,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
1891 + aqcsfrc_mask = AQCSFRC_CSFA_MASK;
1892 + }
1893 +
1894 ++ /* Update shadow register first before modifying active register */
1895 ++ ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
1896 + /*
1897 + * Changes to immediate action on Action Qualifier. This puts
1898 + * Action Qualifier control on PWM output from next TBCLK
1899 +@@ -400,9 +395,6 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
1900 + /* Disabling TBCLK on PWM disable */
1901 + clk_disable(pc->tbclk);
1902 +
1903 +- /* Stop Time base counter */
1904 +- ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_STOP_NEXT);
1905 +-
1906 + /* Disable clock on PWM disable */
1907 + pm_runtime_put_sync(chip->dev);
1908 + }
1909 +diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
1910 +index 13f7cd11c07e..ac6e6a6a194c 100644
1911 +--- a/drivers/rtc/rtc-omap.c
1912 ++++ b/drivers/rtc/rtc-omap.c
1913 +@@ -817,13 +817,6 @@ static int omap_rtc_probe(struct platform_device *pdev)
1914 + goto err;
1915 + }
1916 +
1917 +- if (rtc->is_pmic_controller) {
1918 +- if (!pm_power_off) {
1919 +- omap_rtc_power_off_rtc = rtc;
1920 +- pm_power_off = omap_rtc_power_off;
1921 +- }
1922 +- }
1923 +-
1924 + /* Support ext_wakeup pinconf */
1925 + rtc_pinctrl_desc.name = dev_name(&pdev->dev);
1926 +
1927 +@@ -833,6 +826,13 @@ static int omap_rtc_probe(struct platform_device *pdev)
1928 + return PTR_ERR(rtc->pctldev);
1929 + }
1930 +
1931 ++ if (rtc->is_pmic_controller) {
1932 ++ if (!pm_power_off) {
1933 ++ omap_rtc_power_off_rtc = rtc;
1934 ++ pm_power_off = omap_rtc_power_off;
1935 ++ }
1936 ++ }
1937 ++
1938 + return 0;
1939 +
1940 + err:
1941 +diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
1942 +index 4a001634023e..02bd1eba045b 100644
1943 +--- a/drivers/spi/spi-cadence.c
1944 ++++ b/drivers/spi/spi-cadence.c
1945 +@@ -319,7 +319,7 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
1946 + */
1947 + if (cdns_spi_read(xspi, CDNS_SPI_ISR) &
1948 + CDNS_SPI_IXR_TXFULL)
1949 +- usleep_range(10, 20);
1950 ++ udelay(10);
1951 +
1952 + if (xspi->txbuf)
1953 + cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
1954 +diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
1955 +index 6ddb6ef1fda4..c5bbe08771a4 100644
1956 +--- a/drivers/spi/spi-davinci.c
1957 ++++ b/drivers/spi/spi-davinci.c
1958 +@@ -217,7 +217,7 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
1959 + pdata = &dspi->pdata;
1960 +
1961 + /* program delay transfers if tx_delay is non zero */
1962 +- if (spicfg->wdelay)
1963 ++ if (spicfg && spicfg->wdelay)
1964 + spidat1 |= SPIDAT1_WDEL;
1965 +
1966 + /*
1967 +diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
1968 +index d89127f4a46d..ca013dd4ff6b 100644
1969 +--- a/drivers/spi/spi-fsl-dspi.c
1970 ++++ b/drivers/spi/spi-fsl-dspi.c
1971 +@@ -1006,30 +1006,30 @@ static int dspi_probe(struct platform_device *pdev)
1972 + goto out_master_put;
1973 + }
1974 +
1975 ++ dspi->clk = devm_clk_get(&pdev->dev, "dspi");
1976 ++ if (IS_ERR(dspi->clk)) {
1977 ++ ret = PTR_ERR(dspi->clk);
1978 ++ dev_err(&pdev->dev, "unable to get clock\n");
1979 ++ goto out_master_put;
1980 ++ }
1981 ++ ret = clk_prepare_enable(dspi->clk);
1982 ++ if (ret)
1983 ++ goto out_master_put;
1984 ++
1985 + dspi_init(dspi);
1986 + dspi->irq = platform_get_irq(pdev, 0);
1987 + if (dspi->irq < 0) {
1988 + dev_err(&pdev->dev, "can't get platform irq\n");
1989 + ret = dspi->irq;
1990 +- goto out_master_put;
1991 ++ goto out_clk_put;
1992 + }
1993 +
1994 + ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
1995 + pdev->name, dspi);
1996 + if (ret < 0) {
1997 + dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
1998 +- goto out_master_put;
1999 +- }
2000 +-
2001 +- dspi->clk = devm_clk_get(&pdev->dev, "dspi");
2002 +- if (IS_ERR(dspi->clk)) {
2003 +- ret = PTR_ERR(dspi->clk);
2004 +- dev_err(&pdev->dev, "unable to get clock\n");
2005 +- goto out_master_put;
2006 ++ goto out_clk_put;
2007 + }
2008 +- ret = clk_prepare_enable(dspi->clk);
2009 +- if (ret)
2010 +- goto out_master_put;
2011 +
2012 + if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
2013 + ret = dspi_request_dma(dspi, res->start);
2014 +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
2015 +index 4cb515a3104c..3a2e46e49405 100644
2016 +--- a/drivers/spi/spi-pxa2xx.c
2017 ++++ b/drivers/spi/spi-pxa2xx.c
2018 +@@ -1480,6 +1480,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
2019 + { PCI_VDEVICE(INTEL, 0x31c2), LPSS_BXT_SSP },
2020 + { PCI_VDEVICE(INTEL, 0x31c4), LPSS_BXT_SSP },
2021 + { PCI_VDEVICE(INTEL, 0x31c6), LPSS_BXT_SSP },
2022 ++ /* ICL-LP */
2023 ++ { PCI_VDEVICE(INTEL, 0x34aa), LPSS_CNL_SSP },
2024 ++ { PCI_VDEVICE(INTEL, 0x34ab), LPSS_CNL_SSP },
2025 ++ { PCI_VDEVICE(INTEL, 0x34fb), LPSS_CNL_SSP },
2026 + /* APL */
2027 + { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
2028 + { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
2029 +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
2030 +index c8cb0b398cb1..6db8844ef3ec 100644
2031 +--- a/drivers/tty/serial/serial_core.c
2032 ++++ b/drivers/tty/serial/serial_core.c
2033 +@@ -195,6 +195,7 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
2034 + {
2035 + struct uart_port *uport = uart_port_check(state);
2036 + unsigned long page;
2037 ++ unsigned long flags = 0;
2038 + int retval = 0;
2039 +
2040 + if (uport->type == PORT_UNKNOWN)
2041 +@@ -209,15 +210,18 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
2042 + * Initialise and allocate the transmit and temporary
2043 + * buffer.
2044 + */
2045 +- if (!state->xmit.buf) {
2046 +- /* This is protected by the per port mutex */
2047 +- page = get_zeroed_page(GFP_KERNEL);
2048 +- if (!page)
2049 +- return -ENOMEM;
2050 ++ page = get_zeroed_page(GFP_KERNEL);
2051 ++ if (!page)
2052 ++ return -ENOMEM;
2053 +
2054 ++ uart_port_lock(state, flags);
2055 ++ if (!state->xmit.buf) {
2056 + state->xmit.buf = (unsigned char *) page;
2057 + uart_circ_clear(&state->xmit);
2058 ++ } else {
2059 ++ free_page(page);
2060 + }
2061 ++ uart_port_unlock(uport, flags);
2062 +
2063 + retval = uport->ops->startup(uport);
2064 + if (retval == 0) {
2065 +@@ -276,6 +280,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
2066 + {
2067 + struct uart_port *uport = uart_port_check(state);
2068 + struct tty_port *port = &state->port;
2069 ++ unsigned long flags = 0;
2070 +
2071 + /*
2072 + * Set the TTY IO error marker
2073 +@@ -308,10 +313,12 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
2074 + /*
2075 + * Free the transmit buffer page.
2076 + */
2077 ++ uart_port_lock(state, flags);
2078 + if (state->xmit.buf) {
2079 + free_page((unsigned long)state->xmit.buf);
2080 + state->xmit.buf = NULL;
2081 + }
2082 ++ uart_port_unlock(uport, flags);
2083 + }
2084 +
2085 + /**
2086 +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
2087 +index f741ba8df01b..11d73b5fc885 100644
2088 +--- a/drivers/video/fbdev/core/fbmem.c
2089 ++++ b/drivers/video/fbdev/core/fbmem.c
2090 +@@ -1716,12 +1716,12 @@ static int do_register_framebuffer(struct fb_info *fb_info)
2091 + return 0;
2092 + }
2093 +
2094 +-static int do_unregister_framebuffer(struct fb_info *fb_info)
2095 ++static int unbind_console(struct fb_info *fb_info)
2096 + {
2097 + struct fb_event event;
2098 +- int i, ret = 0;
2099 ++ int ret;
2100 ++ int i = fb_info->node;
2101 +
2102 +- i = fb_info->node;
2103 + if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
2104 + return -EINVAL;
2105 +
2106 +@@ -1736,17 +1736,29 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
2107 + unlock_fb_info(fb_info);
2108 + console_unlock();
2109 +
2110 ++ return ret;
2111 ++}
2112 ++
2113 ++static int __unlink_framebuffer(struct fb_info *fb_info);
2114 ++
2115 ++static int do_unregister_framebuffer(struct fb_info *fb_info)
2116 ++{
2117 ++ struct fb_event event;
2118 ++ int ret;
2119 ++
2120 ++ ret = unbind_console(fb_info);
2121 ++
2122 + if (ret)
2123 + return -EINVAL;
2124 +
2125 + pm_vt_switch_unregister(fb_info->dev);
2126 +
2127 +- unlink_framebuffer(fb_info);
2128 ++ __unlink_framebuffer(fb_info);
2129 + if (fb_info->pixmap.addr &&
2130 + (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
2131 + kfree(fb_info->pixmap.addr);
2132 + fb_destroy_modelist(&fb_info->modelist);
2133 +- registered_fb[i] = NULL;
2134 ++ registered_fb[fb_info->node] = NULL;
2135 + num_registered_fb--;
2136 + fb_cleanup_device(fb_info);
2137 + event.info = fb_info;
2138 +@@ -1759,7 +1771,7 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
2139 + return 0;
2140 + }
2141 +
2142 +-int unlink_framebuffer(struct fb_info *fb_info)
2143 ++static int __unlink_framebuffer(struct fb_info *fb_info)
2144 + {
2145 + int i;
2146 +
2147 +@@ -1771,6 +1783,20 @@ int unlink_framebuffer(struct fb_info *fb_info)
2148 + device_destroy(fb_class, MKDEV(FB_MAJOR, i));
2149 + fb_info->dev = NULL;
2150 + }
2151 ++
2152 ++ return 0;
2153 ++}
2154 ++
2155 ++int unlink_framebuffer(struct fb_info *fb_info)
2156 ++{
2157 ++ int ret;
2158 ++
2159 ++ ret = __unlink_framebuffer(fb_info);
2160 ++ if (ret)
2161 ++ return ret;
2162 ++
2163 ++ unbind_console(fb_info);
2164 ++
2165 + return 0;
2166 + }
2167 + EXPORT_SYMBOL(unlink_framebuffer);
2168 +diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
2169 +index f329eee6dc93..352abc39e891 100644
2170 +--- a/fs/9p/xattr.c
2171 ++++ b/fs/9p/xattr.c
2172 +@@ -105,7 +105,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
2173 + {
2174 + struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len};
2175 + struct iov_iter from;
2176 +- int retval;
2177 ++ int retval, err;
2178 +
2179 + iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len);
2180 +
2181 +@@ -126,7 +126,9 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
2182 + retval);
2183 + else
2184 + p9_client_write(fid, 0, &from, &retval);
2185 +- p9_client_clunk(fid);
2186 ++ err = p9_client_clunk(fid);
2187 ++ if (!retval && err)
2188 ++ retval = err;
2189 + return retval;
2190 + }
2191 +
2192 +diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
2193 +index 95f74bd2c067..70c4165d2d74 100644
2194 +--- a/fs/nfs/blocklayout/dev.c
2195 ++++ b/fs/nfs/blocklayout/dev.c
2196 +@@ -204,7 +204,7 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset,
2197 + chunk = div_u64(offset, dev->chunk_size);
2198 + div_u64_rem(chunk, dev->nr_children, &chunk_idx);
2199 +
2200 +- if (chunk_idx > dev->nr_children) {
2201 ++ if (chunk_idx >= dev->nr_children) {
2202 + dprintk("%s: invalid chunk idx %d (%lld/%lld)\n",
2203 + __func__, chunk_idx, offset, dev->chunk_size);
2204 + /* error, should not happen */
2205 +diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
2206 +index 516b2248cafe..2c3f398995f6 100644
2207 +--- a/fs/nfs/callback_proc.c
2208 ++++ b/fs/nfs/callback_proc.c
2209 +@@ -433,11 +433,14 @@ validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
2210 + * a match. If the slot is in use and the sequence numbers match, the
2211 + * client is still waiting for a response to the original request.
2212 + */
2213 +-static bool referring_call_exists(struct nfs_client *clp,
2214 ++static int referring_call_exists(struct nfs_client *clp,
2215 + uint32_t nrclists,
2216 +- struct referring_call_list *rclists)
2217 ++ struct referring_call_list *rclists,
2218 ++ spinlock_t *lock)
2219 ++ __releases(lock)
2220 ++ __acquires(lock)
2221 + {
2222 +- bool status = 0;
2223 ++ int status = 0;
2224 + int i, j;
2225 + struct nfs4_session *session;
2226 + struct nfs4_slot_table *tbl;
2227 +@@ -460,8 +463,10 @@ static bool referring_call_exists(struct nfs_client *clp,
2228 +
2229 + for (j = 0; j < rclist->rcl_nrefcalls; j++) {
2230 + ref = &rclist->rcl_refcalls[j];
2231 ++ spin_unlock(lock);
2232 + status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
2233 + ref->rc_sequenceid, HZ >> 1) < 0;
2234 ++ spin_lock(lock);
2235 + if (status)
2236 + goto out;
2237 + }
2238 +@@ -538,7 +543,8 @@ __be32 nfs4_callback_sequence(void *argp, void *resp,
2239 + * related callback was received before the response to the original
2240 + * call.
2241 + */
2242 +- if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
2243 ++ if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
2244 ++ &tbl->slot_tbl_lock) < 0) {
2245 + status = htonl(NFS4ERR_DELAY);
2246 + goto out_unlock;
2247 + }
2248 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2249 +index 51deff8e1f86..dda4a3a3ef6e 100644
2250 +--- a/fs/nfs/nfs4proc.c
2251 ++++ b/fs/nfs/nfs4proc.c
2252 +@@ -547,8 +547,15 @@ nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
2253 + ret = -EIO;
2254 + return ret;
2255 + out_retry:
2256 +- if (ret == 0)
2257 ++ if (ret == 0) {
2258 + exception->retry = 1;
2259 ++ /*
2260 ++ * For NFS4ERR_MOVED, the client transport will need to
2261 ++ * be recomputed after migration recovery has completed.
2262 ++ */
2263 ++ if (errorcode == -NFS4ERR_MOVED)
2264 ++ rpc_task_release_transport(task);
2265 ++ }
2266 + return ret;
2267 + }
2268 +
2269 +diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
2270 +index 60da59be83b6..4a3dd66175fe 100644
2271 +--- a/fs/nfs/pnfs_nfs.c
2272 ++++ b/fs/nfs/pnfs_nfs.c
2273 +@@ -61,7 +61,7 @@ EXPORT_SYMBOL_GPL(pnfs_generic_commit_release);
2274 +
2275 + /* The generic layer is about to remove the req from the commit list.
2276 + * If this will make the bucket empty, it will need to put the lseg reference.
2277 +- * Note this must be called holding i_lock
2278 ++ * Note this must be called holding nfsi->commit_mutex
2279 + */
2280 + void
2281 + pnfs_generic_clear_request_commit(struct nfs_page *req,
2282 +@@ -149,9 +149,7 @@ restart:
2283 + if (list_empty(&b->written)) {
2284 + freeme = b->wlseg;
2285 + b->wlseg = NULL;
2286 +- spin_unlock(&cinfo->inode->i_lock);
2287 + pnfs_put_lseg(freeme);
2288 +- spin_lock(&cinfo->inode->i_lock);
2289 + goto restart;
2290 + }
2291 + }
2292 +@@ -167,7 +165,7 @@ static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
2293 + LIST_HEAD(pages);
2294 + int i;
2295 +
2296 +- spin_lock(&cinfo->inode->i_lock);
2297 ++ mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
2298 + for (i = idx; i < fl_cinfo->nbuckets; i++) {
2299 + bucket = &fl_cinfo->buckets[i];
2300 + if (list_empty(&bucket->committing))
2301 +@@ -177,12 +175,12 @@ static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
2302 + list_for_each(pos, &bucket->committing)
2303 + cinfo->ds->ncommitting--;
2304 + list_splice_init(&bucket->committing, &pages);
2305 +- spin_unlock(&cinfo->inode->i_lock);
2306 ++ mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
2307 + nfs_retry_commit(&pages, freeme, cinfo, i);
2308 + pnfs_put_lseg(freeme);
2309 +- spin_lock(&cinfo->inode->i_lock);
2310 ++ mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
2311 + }
2312 +- spin_unlock(&cinfo->inode->i_lock);
2313 ++ mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
2314 + }
2315 +
2316 + static unsigned int
2317 +@@ -222,13 +220,13 @@ void pnfs_fetch_commit_bucket_list(struct list_head *pages,
2318 + struct list_head *pos;
2319 +
2320 + bucket = &cinfo->ds->buckets[data->ds_commit_index];
2321 +- spin_lock(&cinfo->inode->i_lock);
2322 ++ mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
2323 + list_for_each(pos, &bucket->committing)
2324 + cinfo->ds->ncommitting--;
2325 + list_splice_init(&bucket->committing, pages);
2326 + data->lseg = bucket->clseg;
2327 + bucket->clseg = NULL;
2328 +- spin_unlock(&cinfo->inode->i_lock);
2329 ++ mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
2330 +
2331 + }
2332 +
2333 +diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
2334 +index 7fa7d68baa6d..1d4f9997236f 100644
2335 +--- a/fs/overlayfs/readdir.c
2336 ++++ b/fs/overlayfs/readdir.c
2337 +@@ -623,6 +623,21 @@ static int ovl_fill_real(struct dir_context *ctx, const char *name,
2338 + return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type);
2339 + }
2340 +
2341 ++static bool ovl_is_impure_dir(struct file *file)
2342 ++{
2343 ++ struct ovl_dir_file *od = file->private_data;
2344 ++ struct inode *dir = d_inode(file->f_path.dentry);
2345 ++
2346 ++ /*
2347 ++ * Only upper dir can be impure, but if we are in the middle of
2348 ++ * iterating a lower real dir, dir could be copied up and marked
2349 ++ * impure. We only want the impure cache if we started iterating
2350 ++ * a real upper dir to begin with.
2351 ++ */
2352 ++ return od->is_upper && ovl_test_flag(OVL_IMPURE, dir);
2353 ++
2354 ++}
2355 ++
2356 + static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
2357 + {
2358 + int err;
2359 +@@ -646,7 +661,7 @@ static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
2360 + rdt.parent_ino = stat.ino;
2361 + }
2362 +
2363 +- if (ovl_test_flag(OVL_IMPURE, d_inode(dir))) {
2364 ++ if (ovl_is_impure_dir(file)) {
2365 + rdt.cache = ovl_cache_get_impure(&file->f_path);
2366 + if (IS_ERR(rdt.cache))
2367 + return PTR_ERR(rdt.cache);
2368 +@@ -676,7 +691,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
2369 + * entries.
2370 + */
2371 + if (ovl_same_sb(dentry->d_sb) &&
2372 +- (ovl_test_flag(OVL_IMPURE, d_inode(dentry)) ||
2373 ++ (ovl_is_impure_dir(file) ||
2374 + OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent)))) {
2375 + return ovl_iterate_real(file, ctx);
2376 + }
2377 +diff --git a/fs/quota/quota.c b/fs/quota/quota.c
2378 +index 43612e2a73af..3f02bab0db4e 100644
2379 +--- a/fs/quota/quota.c
2380 ++++ b/fs/quota/quota.c
2381 +@@ -18,6 +18,7 @@
2382 + #include <linux/quotaops.h>
2383 + #include <linux/types.h>
2384 + #include <linux/writeback.h>
2385 ++#include <linux/nospec.h>
2386 +
2387 + static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
2388 + qid_t id)
2389 +@@ -703,6 +704,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
2390 +
2391 + if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
2392 + return -EINVAL;
2393 ++ type = array_index_nospec(type, MAXQUOTAS);
2394 + /*
2395 + * Quota not supported on this fs? Check this before s_quota_types
2396 + * since they needn't be set if quota is not supported at all.
2397 +diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
2398 +index 8ae1cd8611cc..69051f7a9606 100644
2399 +--- a/fs/ubifs/journal.c
2400 ++++ b/fs/ubifs/journal.c
2401 +@@ -665,6 +665,11 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
2402 + spin_lock(&ui->ui_lock);
2403 + ui->synced_i_size = ui->ui_size;
2404 + spin_unlock(&ui->ui_lock);
2405 ++ if (xent) {
2406 ++ spin_lock(&host_ui->ui_lock);
2407 ++ host_ui->synced_i_size = host_ui->ui_size;
2408 ++ spin_unlock(&host_ui->ui_lock);
2409 ++ }
2410 + mark_inode_clean(c, ui);
2411 + mark_inode_clean(c, host_ui);
2412 + return 0;
2413 +@@ -1283,11 +1288,10 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
2414 + int *new_len)
2415 + {
2416 + void *buf;
2417 +- int err, compr_type;
2418 +- u32 dlen, out_len, old_dlen;
2419 ++ int err, dlen, compr_type, out_len, old_dlen;
2420 +
2421 + out_len = le32_to_cpu(dn->size);
2422 +- buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
2423 ++ buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
2424 + if (!buf)
2425 + return -ENOMEM;
2426 +
2427 +@@ -1389,7 +1393,16 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
2428 + else if (err)
2429 + goto out_free;
2430 + else {
2431 +- if (le32_to_cpu(dn->size) <= dlen)
2432 ++ int dn_len = le32_to_cpu(dn->size);
2433 ++
2434 ++ if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) {
2435 ++ ubifs_err(c, "bad data node (block %u, inode %lu)",
2436 ++ blk, inode->i_ino);
2437 ++ ubifs_dump_node(c, dn);
2438 ++ goto out_free;
2439 ++ }
2440 ++
2441 ++ if (dn_len <= dlen)
2442 + dlen = 0; /* Nothing to do */
2443 + else {
2444 + err = truncate_data_node(c, inode, blk, dn, &dlen);
2445 +diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
2446 +index 6c3a1abd0e22..780a436d8c45 100644
2447 +--- a/fs/ubifs/lprops.c
2448 ++++ b/fs/ubifs/lprops.c
2449 +@@ -1091,10 +1091,6 @@ static int scan_check_cb(struct ubifs_info *c,
2450 + }
2451 + }
2452 +
2453 +- buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
2454 +- if (!buf)
2455 +- return -ENOMEM;
2456 +-
2457 + /*
2458 + * After an unclean unmount, empty and freeable LEBs
2459 + * may contain garbage - do not scan them.
2460 +@@ -1113,6 +1109,10 @@ static int scan_check_cb(struct ubifs_info *c,
2461 + return LPT_SCAN_CONTINUE;
2462 + }
2463 +
2464 ++ buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
2465 ++ if (!buf)
2466 ++ return -ENOMEM;
2467 ++
2468 + sleb = ubifs_scan(c, lnum, 0, buf, 0);
2469 + if (IS_ERR(sleb)) {
2470 + ret = PTR_ERR(sleb);
2471 +diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
2472 +index c13eae819cbc..d47f16c0d582 100644
2473 +--- a/fs/ubifs/xattr.c
2474 ++++ b/fs/ubifs/xattr.c
2475 +@@ -152,6 +152,12 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
2476 + ui->data_len = size;
2477 +
2478 + mutex_lock(&host_ui->ui_mutex);
2479 ++
2480 ++ if (!host->i_nlink) {
2481 ++ err = -ENOENT;
2482 ++ goto out_noent;
2483 ++ }
2484 ++
2485 + host->i_ctime = current_time(host);
2486 + host_ui->xattr_cnt += 1;
2487 + host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
2488 +@@ -183,6 +189,7 @@ out_cancel:
2489 + host_ui->xattr_size -= CALC_XATTR_BYTES(size);
2490 + host_ui->xattr_names -= fname_len(nm);
2491 + host_ui->flags &= ~UBIFS_CRYPT_FL;
2492 ++out_noent:
2493 + mutex_unlock(&host_ui->ui_mutex);
2494 + out_free:
2495 + make_bad_inode(inode);
2496 +@@ -234,6 +241,12 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
2497 + mutex_unlock(&ui->ui_mutex);
2498 +
2499 + mutex_lock(&host_ui->ui_mutex);
2500 ++
2501 ++ if (!host->i_nlink) {
2502 ++ err = -ENOENT;
2503 ++ goto out_noent;
2504 ++ }
2505 ++
2506 + host->i_ctime = current_time(host);
2507 + host_ui->xattr_size -= CALC_XATTR_BYTES(old_size);
2508 + host_ui->xattr_size += CALC_XATTR_BYTES(size);
2509 +@@ -255,6 +268,7 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
2510 + out_cancel:
2511 + host_ui->xattr_size -= CALC_XATTR_BYTES(size);
2512 + host_ui->xattr_size += CALC_XATTR_BYTES(old_size);
2513 ++out_noent:
2514 + mutex_unlock(&host_ui->ui_mutex);
2515 + make_bad_inode(inode);
2516 + out_free:
2517 +@@ -483,6 +497,12 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host,
2518 + return err;
2519 +
2520 + mutex_lock(&host_ui->ui_mutex);
2521 ++
2522 ++ if (!host->i_nlink) {
2523 ++ err = -ENOENT;
2524 ++ goto out_noent;
2525 ++ }
2526 ++
2527 + host->i_ctime = current_time(host);
2528 + host_ui->xattr_cnt -= 1;
2529 + host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm));
2530 +@@ -502,6 +522,7 @@ out_cancel:
2531 + host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
2532 + host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
2533 + host_ui->xattr_names += fname_len(nm);
2534 ++out_noent:
2535 + mutex_unlock(&host_ui->ui_mutex);
2536 + ubifs_release_budget(c, &req);
2537 + make_bad_inode(inode);
2538 +@@ -541,6 +562,9 @@ static int ubifs_xattr_remove(struct inode *host, const char *name)
2539 +
2540 + ubifs_assert(inode_is_locked(host));
2541 +
2542 ++ if (!host->i_nlink)
2543 ++ return -ENOENT;
2544 ++
2545 + if (fname_len(&nm) > UBIFS_MAX_NLEN)
2546 + return -ENAMETOOLONG;
2547 +
2548 +diff --git a/fs/xattr.c b/fs/xattr.c
2549 +index 61cd28ba25f3..be2ce57cd6ad 100644
2550 +--- a/fs/xattr.c
2551 ++++ b/fs/xattr.c
2552 +@@ -541,7 +541,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
2553 + if (error > 0) {
2554 + if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
2555 + (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
2556 +- posix_acl_fix_xattr_to_user(kvalue, size);
2557 ++ posix_acl_fix_xattr_to_user(kvalue, error);
2558 + if (size && copy_to_user(value, kvalue, error))
2559 + error = -EFAULT;
2560 + } else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {
2561 +diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
2562 +index ba74eaa8eadf..0c51f753652d 100644
2563 +--- a/include/linux/hyperv.h
2564 ++++ b/include/linux/hyperv.h
2565 +@@ -1026,6 +1026,8 @@ extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
2566 + extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
2567 + u32 gpadl_handle);
2568 +
2569 ++void vmbus_reset_channel_cb(struct vmbus_channel *channel);
2570 ++
2571 + extern int vmbus_recvpacket(struct vmbus_channel *channel,
2572 + void *buffer,
2573 + u32 bufferlen,
2574 +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
2575 +index 485a5b48f038..a6ab2f51f703 100644
2576 +--- a/include/linux/intel-iommu.h
2577 ++++ b/include/linux/intel-iommu.h
2578 +@@ -112,6 +112,7 @@
2579 + * Extended Capability Register
2580 + */
2581 +
2582 ++#define ecap_dit(e) ((e >> 41) & 0x1)
2583 + #define ecap_pasid(e) ((e >> 40) & 0x1)
2584 + #define ecap_pss(e) ((e >> 35) & 0x1f)
2585 + #define ecap_eafs(e) ((e >> 34) & 0x1)
2586 +@@ -281,6 +282,7 @@ enum {
2587 + #define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
2588 + #define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
2589 + #define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
2590 ++#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
2591 + #define QI_DEV_IOTLB_SIZE 1
2592 + #define QI_DEV_IOTLB_MAX_INVS 32
2593 +
2594 +@@ -305,6 +307,7 @@ enum {
2595 + #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
2596 + #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
2597 + #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
2598 ++#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
2599 + #define QI_DEV_EIOTLB_MAX_INVS 32
2600 +
2601 + #define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
2602 +@@ -450,9 +453,8 @@ extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
2603 + u8 fm, u64 type);
2604 + extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
2605 + unsigned int size_order, u64 type);
2606 +-extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
2607 +- u64 addr, unsigned mask);
2608 +-
2609 ++extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
2610 ++ u16 qdep, u64 addr, unsigned mask);
2611 + extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
2612 +
2613 + extern int dmar_ir_support(void);
2614 +diff --git a/include/linux/pci.h b/include/linux/pci.h
2615 +index 9d6fae809c09..b1abbcc614cf 100644
2616 +--- a/include/linux/pci.h
2617 ++++ b/include/linux/pci.h
2618 +@@ -2292,4 +2292,16 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
2619 + /* provide the legacy pci_dma_* API */
2620 + #include <linux/pci-dma-compat.h>
2621 +
2622 ++#define pci_printk(level, pdev, fmt, arg...) \
2623 ++ dev_printk(level, &(pdev)->dev, fmt, ##arg)
2624 ++
2625 ++#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg)
2626 ++#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg)
2627 ++#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
2628 ++#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
2629 ++#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
2630 ++#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
2631 ++#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
2632 ++#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
2633 ++
2634 + #endif /* LINUX_PCI_H */
2635 +diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
2636 +index 71c237e8240e..166fc4e76df6 100644
2637 +--- a/include/linux/sunrpc/clnt.h
2638 ++++ b/include/linux/sunrpc/clnt.h
2639 +@@ -156,6 +156,7 @@ int rpc_switch_client_transport(struct rpc_clnt *,
2640 +
2641 + void rpc_shutdown_client(struct rpc_clnt *);
2642 + void rpc_release_client(struct rpc_clnt *);
2643 ++void rpc_task_release_transport(struct rpc_task *);
2644 + void rpc_task_release_client(struct rpc_task *);
2645 +
2646 + int rpcb_create_local(struct net *);
2647 +diff --git a/include/linux/verification.h b/include/linux/verification.h
2648 +index a10549a6c7cd..cfa4730d607a 100644
2649 +--- a/include/linux/verification.h
2650 ++++ b/include/linux/verification.h
2651 +@@ -12,6 +12,12 @@
2652 + #ifndef _LINUX_VERIFICATION_H
2653 + #define _LINUX_VERIFICATION_H
2654 +
2655 ++/*
2656 ++ * Indicate that both builtin trusted keys and secondary trusted keys
2657 ++ * should be used.
2658 ++ */
2659 ++#define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL)
2660 ++
2661 + /*
2662 + * The use to which an asymmetric key is being put.
2663 + */
2664 +diff --git a/include/video/udlfb.h b/include/video/udlfb.h
2665 +index 1252a7a89bc0..85e32ee739fc 100644
2666 +--- a/include/video/udlfb.h
2667 ++++ b/include/video/udlfb.h
2668 +@@ -88,7 +88,7 @@ struct dlfb_data {
2669 + #define MIN_RAW_PIX_BYTES 2
2670 + #define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)
2671 +
2672 +-#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */
2673 ++#define DL_DEFIO_WRITE_DELAY msecs_to_jiffies(HZ <= 300 ? 4 : 10) /* optimal value for 720p video */
2674 + #define DL_DEFIO_WRITE_DISABLE (HZ*60) /* "disable" with long delay */
2675 +
2676 + /* remove these once align.h patch is taken into kernel */
2677 +diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
2678 +index bf8c8fd72589..7c51f065b212 100644
2679 +--- a/kernel/livepatch/core.c
2680 ++++ b/kernel/livepatch/core.c
2681 +@@ -605,6 +605,9 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
2682 + if (!func->old_name || !func->new_func)
2683 + return -EINVAL;
2684 +
2685 ++ if (strlen(func->old_name) >= KSYM_NAME_LEN)
2686 ++ return -EINVAL;
2687 ++
2688 + INIT_LIST_HEAD(&func->stack_node);
2689 + func->patched = false;
2690 + func->transition = false;
2691 +@@ -678,6 +681,9 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
2692 + if (!obj->funcs)
2693 + return -EINVAL;
2694 +
2695 ++ if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
2696 ++ return -EINVAL;
2697 ++
2698 + obj->patched = false;
2699 + obj->mod = NULL;
2700 +
2701 +diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
2702 +index e8517b63eb37..dd2b5a4d89a5 100644
2703 +--- a/kernel/power/Kconfig
2704 ++++ b/kernel/power/Kconfig
2705 +@@ -105,6 +105,7 @@ config PM_SLEEP
2706 + def_bool y
2707 + depends on SUSPEND || HIBERNATE_CALLBACKS
2708 + select PM
2709 ++ select SRCU
2710 +
2711 + config PM_SLEEP_SMP
2712 + def_bool y
2713 +diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
2714 +index d482fd61ac67..64f8046586b6 100644
2715 +--- a/kernel/printk/printk_safe.c
2716 ++++ b/kernel/printk/printk_safe.c
2717 +@@ -309,12 +309,12 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
2718 + return printk_safe_log_store(s, fmt, args);
2719 + }
2720 +
2721 +-void printk_nmi_enter(void)
2722 ++void notrace printk_nmi_enter(void)
2723 + {
2724 + this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
2725 + }
2726 +
2727 +-void printk_nmi_exit(void)
2728 ++void notrace printk_nmi_exit(void)
2729 + {
2730 + this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
2731 + }
2732 +diff --git a/kernel/sys.c b/kernel/sys.c
2733 +index de4ed027dfd7..e25ec93aea22 100644
2734 +--- a/kernel/sys.c
2735 ++++ b/kernel/sys.c
2736 +@@ -1176,18 +1176,19 @@ static int override_release(char __user *release, size_t len)
2737 +
2738 + SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
2739 + {
2740 +- int errno = 0;
2741 ++ struct new_utsname tmp;
2742 +
2743 + down_read(&uts_sem);
2744 +- if (copy_to_user(name, utsname(), sizeof *name))
2745 +- errno = -EFAULT;
2746 ++ memcpy(&tmp, utsname(), sizeof(tmp));
2747 + up_read(&uts_sem);
2748 ++ if (copy_to_user(name, &tmp, sizeof(tmp)))
2749 ++ return -EFAULT;
2750 +
2751 +- if (!errno && override_release(name->release, sizeof(name->release)))
2752 +- errno = -EFAULT;
2753 +- if (!errno && override_architecture(name))
2754 +- errno = -EFAULT;
2755 +- return errno;
2756 ++ if (override_release(name->release, sizeof(name->release)))
2757 ++ return -EFAULT;
2758 ++ if (override_architecture(name))
2759 ++ return -EFAULT;
2760 ++ return 0;
2761 + }
2762 +
2763 + #ifdef __ARCH_WANT_SYS_OLD_UNAME
2764 +@@ -1196,55 +1197,46 @@ SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
2765 + */
2766 + SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
2767 + {
2768 +- int error = 0;
2769 ++ struct old_utsname tmp;
2770 +
2771 + if (!name)
2772 + return -EFAULT;
2773 +
2774 + down_read(&uts_sem);
2775 +- if (copy_to_user(name, utsname(), sizeof(*name)))
2776 +- error = -EFAULT;
2777 ++ memcpy(&tmp, utsname(), sizeof(tmp));
2778 + up_read(&uts_sem);
2779 ++ if (copy_to_user(name, &tmp, sizeof(tmp)))
2780 ++ return -EFAULT;
2781 +
2782 +- if (!error && override_release(name->release, sizeof(name->release)))
2783 +- error = -EFAULT;
2784 +- if (!error && override_architecture(name))
2785 +- error = -EFAULT;
2786 +- return error;
2787 ++ if (override_release(name->release, sizeof(name->release)))
2788 ++ return -EFAULT;
2789 ++ if (override_architecture(name))
2790 ++ return -EFAULT;
2791 ++ return 0;
2792 + }
2793 +
2794 + SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
2795 + {
2796 +- int error;
2797 ++ struct oldold_utsname tmp = {};
2798 +
2799 + if (!name)
2800 + return -EFAULT;
2801 +- if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
2802 +- return -EFAULT;
2803 +
2804 + down_read(&uts_sem);
2805 +- error = __copy_to_user(&name->sysname, &utsname()->sysname,
2806 +- __OLD_UTS_LEN);
2807 +- error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
2808 +- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
2809 +- __OLD_UTS_LEN);
2810 +- error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
2811 +- error |= __copy_to_user(&name->release, &utsname()->release,
2812 +- __OLD_UTS_LEN);
2813 +- error |= __put_user(0, name->release + __OLD_UTS_LEN);
2814 +- error |= __copy_to_user(&name->version, &utsname()->version,
2815 +- __OLD_UTS_LEN);
2816 +- error |= __put_user(0, name->version + __OLD_UTS_LEN);
2817 +- error |= __copy_to_user(&name->machine, &utsname()->machine,
2818 +- __OLD_UTS_LEN);
2819 +- error |= __put_user(0, name->machine + __OLD_UTS_LEN);
2820 ++ memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
2821 ++ memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
2822 ++ memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
2823 ++ memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
2824 ++ memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
2825 + up_read(&uts_sem);
2826 ++ if (copy_to_user(name, &tmp, sizeof(tmp)))
2827 ++ return -EFAULT;
2828 +
2829 +- if (!error && override_architecture(name))
2830 +- error = -EFAULT;
2831 +- if (!error && override_release(name->release, sizeof(name->release)))
2832 +- error = -EFAULT;
2833 +- return error ? -EFAULT : 0;
2834 ++ if (override_architecture(name))
2835 ++ return -EFAULT;
2836 ++ if (override_release(name->release, sizeof(name->release)))
2837 ++ return -EFAULT;
2838 ++ return 0;
2839 + }
2840 + #endif
2841 +
2842 +@@ -1258,17 +1250,18 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
2843 +
2844 + if (len < 0 || len > __NEW_UTS_LEN)
2845 + return -EINVAL;
2846 +- down_write(&uts_sem);
2847 + errno = -EFAULT;
2848 + if (!copy_from_user(tmp, name, len)) {
2849 +- struct new_utsname *u = utsname();
2850 ++ struct new_utsname *u;
2851 +
2852 ++ down_write(&uts_sem);
2853 ++ u = utsname();
2854 + memcpy(u->nodename, tmp, len);
2855 + memset(u->nodename + len, 0, sizeof(u->nodename) - len);
2856 + errno = 0;
2857 + uts_proc_notify(UTS_PROC_HOSTNAME);
2858 ++ up_write(&uts_sem);
2859 + }
2860 +- up_write(&uts_sem);
2861 + return errno;
2862 + }
2863 +
2864 +@@ -1276,8 +1269,9 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
2865 +
2866 + SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
2867 + {
2868 +- int i, errno;
2869 ++ int i;
2870 + struct new_utsname *u;
2871 ++ char tmp[__NEW_UTS_LEN + 1];
2872 +
2873 + if (len < 0)
2874 + return -EINVAL;
2875 +@@ -1286,11 +1280,11 @@ SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
2876 + i = 1 + strlen(u->nodename);
2877 + if (i > len)
2878 + i = len;
2879 +- errno = 0;
2880 +- if (copy_to_user(name, u->nodename, i))
2881 +- errno = -EFAULT;
2882 ++ memcpy(tmp, u->nodename, i);
2883 + up_read(&uts_sem);
2884 +- return errno;
2885 ++ if (copy_to_user(name, tmp, i))
2886 ++ return -EFAULT;
2887 ++ return 0;
2888 + }
2889 +
2890 + #endif
2891 +@@ -1309,17 +1303,18 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
2892 + if (len < 0 || len > __NEW_UTS_LEN)
2893 + return -EINVAL;
2894 +
2895 +- down_write(&uts_sem);
2896 + errno = -EFAULT;
2897 + if (!copy_from_user(tmp, name, len)) {
2898 +- struct new_utsname *u = utsname();
2899 ++ struct new_utsname *u;
2900 +
2901 ++ down_write(&uts_sem);
2902 ++ u = utsname();
2903 + memcpy(u->domainname, tmp, len);
2904 + memset(u->domainname + len, 0, sizeof(u->domainname) - len);
2905 + errno = 0;
2906 + uts_proc_notify(UTS_PROC_DOMAINNAME);
2907 ++ up_write(&uts_sem);
2908 + }
2909 +- up_write(&uts_sem);
2910 + return errno;
2911 + }
2912 +
2913 +diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
2914 +index e73dcab8e9f0..71a8ee6e60dc 100644
2915 +--- a/kernel/trace/blktrace.c
2916 ++++ b/kernel/trace/blktrace.c
2917 +@@ -1809,6 +1809,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
2918 + mutex_lock(&q->blk_trace_mutex);
2919 +
2920 + if (attr == &dev_attr_enable) {
2921 ++ if (!!value == !!q->blk_trace) {
2922 ++ ret = 0;
2923 ++ goto out_unlock_bdev;
2924 ++ }
2925 + if (value)
2926 + ret = blk_trace_setup_queue(q, bdev);
2927 + else
2928 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2929 +index b7302c37c064..e9cbb96cd99e 100644
2930 +--- a/kernel/trace/trace.c
2931 ++++ b/kernel/trace/trace.c
2932 +@@ -7545,7 +7545,9 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
2933 +
2934 + if (buffer) {
2935 + mutex_lock(&trace_types_lock);
2936 +- if (val) {
2937 ++ if (!!val == tracer_tracing_is_on(tr)) {
2938 ++ val = 0; /* do nothing */
2939 ++ } else if (val) {
2940 + tracer_tracing_on(tr);
2941 + if (tr->current_trace->start)
2942 + tr->current_trace->start(tr);
2943 +diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
2944 +index 7197ff9f0bbd..ea0d90a31fc9 100644
2945 +--- a/kernel/trace/trace_uprobe.c
2946 ++++ b/kernel/trace/trace_uprobe.c
2947 +@@ -967,7 +967,7 @@ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
2948 +
2949 + list_del_rcu(&link->list);
2950 + /* synchronize with u{,ret}probe_trace_func */
2951 +- synchronize_sched();
2952 ++ synchronize_rcu();
2953 + kfree(link);
2954 +
2955 + if (!list_empty(&tu->tp.files))
2956 +diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
2957 +index c490f1e4313b..ed80a88980f0 100644
2958 +--- a/kernel/user_namespace.c
2959 ++++ b/kernel/user_namespace.c
2960 +@@ -650,7 +650,16 @@ static ssize_t map_write(struct file *file, const char __user *buf,
2961 + unsigned idx;
2962 + struct uid_gid_extent *extent = NULL;
2963 + char *kbuf = NULL, *pos, *next_line;
2964 +- ssize_t ret = -EINVAL;
2965 ++ ssize_t ret;
2966 ++
2967 ++ /* Only allow < page size writes at the beginning of the file */
2968 ++ if ((*ppos != 0) || (count >= PAGE_SIZE))
2969 ++ return -EINVAL;
2970 ++
2971 ++ /* Slurp in the user data */
2972 ++ kbuf = memdup_user_nul(buf, count);
2973 ++ if (IS_ERR(kbuf))
2974 ++ return PTR_ERR(kbuf);
2975 +
2976 + /*
2977 + * The userns_state_mutex serializes all writes to any given map.
2978 +@@ -684,19 +693,6 @@ static ssize_t map_write(struct file *file, const char __user *buf,
2979 + if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
2980 + goto out;
2981 +
2982 +- /* Only allow < page size writes at the beginning of the file */
2983 +- ret = -EINVAL;
2984 +- if ((*ppos != 0) || (count >= PAGE_SIZE))
2985 +- goto out;
2986 +-
2987 +- /* Slurp in the user data */
2988 +- kbuf = memdup_user_nul(buf, count);
2989 +- if (IS_ERR(kbuf)) {
2990 +- ret = PTR_ERR(kbuf);
2991 +- kbuf = NULL;
2992 +- goto out;
2993 +- }
2994 +-
2995 + /* Parse the user data */
2996 + ret = -EINVAL;
2997 + pos = kbuf;
2998 +diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
2999 +index 233cd8fc6910..258033d62cb3 100644
3000 +--- a/kernel/utsname_sysctl.c
3001 ++++ b/kernel/utsname_sysctl.c
3002 +@@ -18,7 +18,7 @@
3003 +
3004 + #ifdef CONFIG_PROC_SYSCTL
3005 +
3006 +-static void *get_uts(struct ctl_table *table, int write)
3007 ++static void *get_uts(struct ctl_table *table)
3008 + {
3009 + char *which = table->data;
3010 + struct uts_namespace *uts_ns;
3011 +@@ -26,21 +26,9 @@ static void *get_uts(struct ctl_table *table, int write)
3012 + uts_ns = current->nsproxy->uts_ns;
3013 + which = (which - (char *)&init_uts_ns) + (char *)uts_ns;
3014 +
3015 +- if (!write)
3016 +- down_read(&uts_sem);
3017 +- else
3018 +- down_write(&uts_sem);
3019 + return which;
3020 + }
3021 +
3022 +-static void put_uts(struct ctl_table *table, int write, void *which)
3023 +-{
3024 +- if (!write)
3025 +- up_read(&uts_sem);
3026 +- else
3027 +- up_write(&uts_sem);
3028 +-}
3029 +-
3030 + /*
3031 + * Special case of dostring for the UTS structure. This has locks
3032 + * to observe. Should this be in kernel/sys.c ????
3033 +@@ -50,13 +38,34 @@ static int proc_do_uts_string(struct ctl_table *table, int write,
3034 + {
3035 + struct ctl_table uts_table;
3036 + int r;
3037 ++ char tmp_data[__NEW_UTS_LEN + 1];
3038 ++
3039 + memcpy(&uts_table, table, sizeof(uts_table));
3040 +- uts_table.data = get_uts(table, write);
3041 ++ uts_table.data = tmp_data;
3042 ++
3043 ++ /*
3044 ++ * Buffer the value in tmp_data so that proc_dostring() can be called
3045 ++ * without holding any locks.
3046 ++ * We also need to read the original value in the write==1 case to
3047 ++ * support partial writes.
3048 ++ */
3049 ++ down_read(&uts_sem);
3050 ++ memcpy(tmp_data, get_uts(table), sizeof(tmp_data));
3051 ++ up_read(&uts_sem);
3052 + r = proc_dostring(&uts_table, write, buffer, lenp, ppos);
3053 +- put_uts(table, write, uts_table.data);
3054 +
3055 +- if (write)
3056 ++ if (write) {
3057 ++ /*
3058 ++ * Write back the new value.
3059 ++ * Note that, since we dropped uts_sem, the result can
3060 ++ * theoretically be incorrect if there are two parallel writes
3061 ++ * at non-zero offsets to the same sysctl.
3062 ++ */
3063 ++ down_write(&uts_sem);
3064 ++ memcpy(get_uts(table), tmp_data, sizeof(tmp_data));
3065 ++ up_write(&uts_sem);
3066 + proc_sys_poll_notify(table->poll);
3067 ++ }
3068 +
3069 + return r;
3070 + }
3071 +diff --git a/mm/memory.c b/mm/memory.c
3072 +index c9657f013a4d..93d5d324904b 100644
3073 +--- a/mm/memory.c
3074 ++++ b/mm/memory.c
3075 +@@ -392,15 +392,6 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
3076 + {
3077 + struct mmu_table_batch **batch = &tlb->batch;
3078 +
3079 +- /*
3080 +- * When there's less then two users of this mm there cannot be a
3081 +- * concurrent page-table walk.
3082 +- */
3083 +- if (atomic_read(&tlb->mm->mm_users) < 2) {
3084 +- __tlb_remove_table(table);
3085 +- return;
3086 +- }
3087 +-
3088 + if (*batch == NULL) {
3089 + *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
3090 + if (*batch == NULL) {
3091 +diff --git a/mm/readahead.c b/mm/readahead.c
3092 +index c4ca70239233..59aa0d06f254 100644
3093 +--- a/mm/readahead.c
3094 ++++ b/mm/readahead.c
3095 +@@ -380,6 +380,7 @@ ondemand_readahead(struct address_space *mapping,
3096 + {
3097 + struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
3098 + unsigned long max_pages = ra->ra_pages;
3099 ++ unsigned long add_pages;
3100 + pgoff_t prev_offset;
3101 +
3102 + /*
3103 +@@ -469,10 +470,17 @@ readit:
3104 + * Will this read hit the readahead marker made by itself?
3105 + * If so, trigger the readahead marker hit now, and merge
3106 + * the resulted next readahead window into the current one.
3107 ++ * Take care of maximum IO pages as above.
3108 + */
3109 + if (offset == ra->start && ra->size == ra->async_size) {
3110 +- ra->async_size = get_next_ra_size(ra, max_pages);
3111 +- ra->size += ra->async_size;
3112 ++ add_pages = get_next_ra_size(ra, max_pages);
3113 ++ if (ra->size + add_pages <= max_pages) {
3114 ++ ra->async_size = add_pages;
3115 ++ ra->size += add_pages;
3116 ++ } else {
3117 ++ ra->size = max_pages;
3118 ++ ra->async_size = max_pages >> 1;
3119 ++ }
3120 + }
3121 +
3122 + return ra_submit(ra, mapping, filp);
3123 +diff --git a/net/9p/client.c b/net/9p/client.c
3124 +index b433aff5ff13..3ec5a82929b2 100644
3125 +--- a/net/9p/client.c
3126 ++++ b/net/9p/client.c
3127 +@@ -955,7 +955,7 @@ static int p9_client_version(struct p9_client *c)
3128 + {
3129 + int err = 0;
3130 + struct p9_req_t *req;
3131 +- char *version;
3132 ++ char *version = NULL;
3133 + int msize;
3134 +
3135 + p9_debug(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n",
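
Initialising version to NULL looks cosmetic but matters on the error path: if parsing the TVERSION reply fails before version is assigned, the function's common cleanup still kfree()s it, and kfree(NULL) is a defined no-op whereas freeing an uninitialised pointer is undefined behaviour. The same shape in plain C, all names invented:

#include <stdlib.h>

int parse_reply(int fail_early)
{
    char *version = NULL;   /* must start NULL for the error path */
    int err = 0;

    if (fail_early) {
        err = -1;
        goto out;           /* "version" was never assigned */
    }
    version = malloc(16);
out:
    free(version);          /* safe: free(NULL) does nothing */
    return err;
}

int main(void)
{
    return parse_reply(1) == -1 ? 0 : 1;
}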
3136 +diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
3137 +index 985046ae4231..38e21a1e97bc 100644
3138 +--- a/net/9p/trans_fd.c
3139 ++++ b/net/9p/trans_fd.c
3140 +@@ -185,6 +185,8 @@ static void p9_mux_poll_stop(struct p9_conn *m)
3141 + spin_lock_irqsave(&p9_poll_lock, flags);
3142 + list_del_init(&m->poll_pending_link);
3143 + spin_unlock_irqrestore(&p9_poll_lock, flags);
3144 ++
3145 ++ flush_work(&p9_poll_work);
3146 + }
3147 +
3148 + /**
3149 +@@ -951,7 +953,7 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
3150 + if (err < 0)
3151 + return err;
3152 +
3153 +- if (valid_ipaddr4(addr) < 0)
3154 ++ if (addr == NULL || valid_ipaddr4(addr) < 0)
3155 + return -EINVAL;
3156 +
3157 + csocket = NULL;
3158 +@@ -1001,6 +1003,9 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
3159 +
3160 + csocket = NULL;
3161 +
3162 ++ if (addr == NULL)
3163 ++ return -EINVAL;
3164 ++
3165 + if (strlen(addr) >= UNIX_PATH_MAX) {
3166 + pr_err("%s (%d): address too long: %s\n",
3167 + __func__, task_pid_nr(current), addr);
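
Two fixes land in trans_fd.c: p9_mux_poll_stop() now waits for any queued poll work to finish before the connection is torn down, and the TCP create path rejects a NULL address (the missing-address guard common to all the transports is sketched after the trans_xen hunk below). The flush is a textbook use-after-free ordering fix; a pthread sketch with a joined thread standing in for the kernel work item, every name invented:

#include <pthread.h>
#include <stdlib.h>

struct conn { int fd; };

static void *poll_worker(void *arg)
{
    struct conn *c = arg;

    (void)c->fd;            /* the worker dereferences the connection */
    return NULL;
}

int main(void)
{
    struct conn *c = malloc(sizeof(*c));
    pthread_t worker;

    c->fd = 0;
    pthread_create(&worker, NULL, poll_worker, c);
    pthread_join(worker, NULL);     /* the flush_work() analogue */
    free(c);                        /* now safe: the worker is done */
    return 0;
}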
3168 +diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
3169 +index 6d8e3031978f..f58467a49090 100644
3170 +--- a/net/9p/trans_rdma.c
3171 ++++ b/net/9p/trans_rdma.c
3172 +@@ -646,6 +646,9 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
3173 + struct rdma_conn_param conn_param;
3174 + struct ib_qp_init_attr qp_attr;
3175 +
3176 ++ if (addr == NULL)
3177 ++ return -EINVAL;
3178 ++
3179 + /* Parse the transport specific mount options */
3180 + err = parse_opts(args, &opts);
3181 + if (err < 0)
3182 +diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
3183 +index 3aa5a93ad107..da0d3b257459 100644
3184 +--- a/net/9p/trans_virtio.c
3185 ++++ b/net/9p/trans_virtio.c
3186 +@@ -189,7 +189,7 @@ static int pack_sg_list(struct scatterlist *sg, int start,
3187 + s = rest_of_page(data);
3188 + if (s > count)
3189 + s = count;
3190 +- BUG_ON(index > limit);
3191 ++ BUG_ON(index >= limit);
3192 + /* Make sure we don't terminate early. */
3193 + sg_unmark_end(&sg[index]);
3194 + sg_set_buf(&sg[index++], data, s);
3195 +@@ -234,6 +234,7 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit,
3196 + s = PAGE_SIZE - data_off;
3197 + if (s > count)
3198 + s = count;
3199 ++ BUG_ON(index >= limit);
3200 + /* Make sure we don't terminate early. */
3201 + sg_unmark_end(&sg[index]);
3202 + sg_set_page(&sg[index++], pdata[i++], s, data_off);
3203 +@@ -406,6 +407,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
3204 + p9_debug(P9_DEBUG_TRANS, "virtio request\n");
3205 +
3206 + if (uodata) {
3207 ++ __le32 sz;
3208 + int n = p9_get_mapped_pages(chan, &out_pages, uodata,
3209 + outlen, &offs, &need_drop);
3210 + if (n < 0)
3211 +@@ -416,6 +418,12 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
3212 + memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
3213 + outlen = n;
3214 + }
3215 ++ /* The size field of the message must include the length of the
3216 ++ * header and the length of the data. We didn't actually know
3217 ++ * the length of the data until this point so add it in now.
3218 ++ */
3219 ++ sz = cpu_to_le32(req->tc->size + outlen);
3220 ++ memcpy(&req->tc->sdata[0], &sz, sizeof(sz));
3221 + } else if (uidata) {
3222 + int n = p9_get_mapped_pages(chan, &in_pages, uidata,
3223 + inlen, &offs, &need_drop);
3224 +@@ -643,6 +651,9 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
3225 + int ret = -ENOENT;
3226 + int found = 0;
3227 +
3228 ++ if (devname == NULL)
3229 ++ return -EINVAL;
3230 ++
3231 + mutex_lock(&virtio_9p_lock);
3232 + list_for_each_entry(chan, &virtio_chan_list, chan_list) {
3233 + if (!strncmp(devname, chan->tag, chan->tag_len) &&
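
Three changes land in trans_virtio.c: both scatter-gather packing loops now BUG on index >= limit (the old > test in pack_sg_list() allowed writing one entry past the array), the create path rejects a NULL device name, and zero-copy writes patch the 9p size field once the payload length is finally known. The last one leans on the wire format: every 9p message opens with a little-endian 32-bit size covering header plus payload. A byte-level sketch of the patch-up, not using the kernel API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void patch_msg_size(uint8_t *msg, uint32_t header_len,
                           uint32_t payload_len)
{
    uint32_t sz = header_len + payload_len;
    uint8_t le[4] = {                   /* cpu_to_le32() by hand */
        sz & 0xff, (sz >> 8) & 0xff,
        (sz >> 16) & 0xff, (sz >> 24) & 0xff,
    };

    memcpy(msg, le, sizeof(le));        /* size field is bytes 0..3 */
}

int main(void)
{
    uint8_t msg[64] = { 0 };

    patch_msg_size(msg, 11, 4096);      /* header + late-known payload */
    printf("%02x %02x %02x %02x\n", msg[0], msg[1], msg[2], msg[3]);
    return 0;
}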
3234 +diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
3235 +index 325c56043007..c10bdf63eae7 100644
3236 +--- a/net/9p/trans_xen.c
3237 ++++ b/net/9p/trans_xen.c
3238 +@@ -95,6 +95,9 @@ static int p9_xen_create(struct p9_client *client, const char *addr, char *args)
3239 + {
3240 + struct xen_9pfs_front_priv *priv;
3241 +
3242 ++ if (addr == NULL)
3243 ++ return -EINVAL;
3244 ++
3245 + read_lock(&xen_9pfs_lock);
3246 + list_for_each_entry(priv, &xen_9pfs_devs, list) {
3247 + if (!strcmp(priv->tag, addr)) {
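
The same one-line guard recurs across the fd, rdma, virtio and xen transports: mounting 9p without an address (or, for virtio, a device name) used to feed NULL into strlen()/strcmp() inside the transport and oops the kernel, so each create function now fails the mount with -EINVAL up front. The shape of the check, with invented names:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define UNIX_PATH_MAX 108

static int create_transport(const char *addr)
{
    if (addr == NULL)                   /* reject a missing address early */
        return -EINVAL;
    if (strlen(addr) >= UNIX_PATH_MAX)  /* only safe after the NULL check */
        return -EINVAL;
    return 0;
}

int main(void)
{
    printf("%d %d\n", create_transport(NULL), create_transport("/tmp/s"));
    return 0;
}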
3248 +diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
3249 +index e6ff5128e61a..ca53efa17be1 100644
3250 +--- a/net/ieee802154/6lowpan/tx.c
3251 ++++ b/net/ieee802154/6lowpan/tx.c
3252 +@@ -265,9 +265,24 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
3253 + /* We must take a copy of the skb before we modify/replace the ipv6
3254 + * header as the header could be used elsewhere
3255 + */
3256 +- skb = skb_unshare(skb, GFP_ATOMIC);
3257 +- if (!skb)
3258 +- return NET_XMIT_DROP;
3259 ++ if (unlikely(skb_headroom(skb) < ldev->needed_headroom ||
3260 ++ skb_tailroom(skb) < ldev->needed_tailroom)) {
3261 ++ struct sk_buff *nskb;
3262 ++
3263 ++ nskb = skb_copy_expand(skb, ldev->needed_headroom,
3264 ++ ldev->needed_tailroom, GFP_ATOMIC);
3265 ++ if (likely(nskb)) {
3266 ++ consume_skb(skb);
3267 ++ skb = nskb;
3268 ++ } else {
3269 ++ kfree_skb(skb);
3270 ++ return NET_XMIT_DROP;
3271 ++ }
3272 ++ } else {
3273 ++ skb = skb_unshare(skb, GFP_ATOMIC);
3274 ++ if (!skb)
3275 ++ return NET_XMIT_DROP;
3276 ++ }
3277 +
3278 + ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
3279 + if (ret < 0) {
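
Rather than always unsharing, the 6LoWPAN transmit path now checks whether the skb carries the head- and tailroom the device declared it needs; if not, it copies into an expanded buffer, consuming the old skb on success and dropping it on allocation failure. Roughly the following, with a made-up buffer type in place of struct sk_buff:

#include <stdlib.h>
#include <string.h>

struct buf {
    unsigned char *data;
    size_t head, len, tail;     /* spare front, used bytes, spare back */
};

/* Return a buffer with at least the requested room, or NULL; on failure
 * the original is freed, mirroring the kfree_skb() branch above. */
static struct buf *expand_if_needed(struct buf *b, size_t need_head,
                                    size_t need_tail)
{
    struct buf *nb;

    if (b->head >= need_head && b->tail >= need_tail)
        return b;                       /* enough room in place */

    nb = malloc(sizeof(*nb));
    if (nb)
        nb->data = malloc(need_head + b->len + need_tail);
    if (!nb || !nb->data) {
        free(nb);
        free(b->data);
        free(b);
        return NULL;
    }
    nb->head = need_head;
    nb->tail = need_tail;
    nb->len = b->len;
    memcpy(nb->data + need_head, b->data + b->head, b->len);
    free(b->data);                      /* consume_skb() analogue */
    free(b);
    return nb;
}

int main(void)
{
    struct buf *b = malloc(sizeof(*b));

    b->data = malloc(8);
    b->head = 0; b->len = 8; b->tail = 0;
    b = expand_if_needed(b, 16, 2);
    return b ? 0 : 1;
}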
3280 +diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
3281 +index 7e253455f9dd..bcd1a5e6ebf4 100644
3282 +--- a/net/mac802154/tx.c
3283 ++++ b/net/mac802154/tx.c
3284 +@@ -63,8 +63,21 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
3285 + int ret;
3286 +
3287 + if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
3288 +- u16 crc = crc_ccitt(0, skb->data, skb->len);
3289 ++ struct sk_buff *nskb;
3290 ++ u16 crc;
3291 ++
3292 ++ if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
3293 ++ nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
3294 ++ GFP_ATOMIC);
3295 ++ if (likely(nskb)) {
3296 ++ consume_skb(skb);
3297 ++ skb = nskb;
3298 ++ } else {
3299 ++ goto err_tx;
3300 ++ }
3301 ++ }
3302 +
3303 ++ crc = crc_ccitt(0, skb->data, skb->len);
3304 + put_unaligned_le16(crc, skb_put(skb, 2));
3305 + }
3306 +
3307 +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
3308 +index 2ad827db2704..6d118357d9dc 100644
3309 +--- a/net/sunrpc/clnt.c
3310 ++++ b/net/sunrpc/clnt.c
3311 +@@ -965,10 +965,20 @@ out:
3312 + }
3313 + EXPORT_SYMBOL_GPL(rpc_bind_new_program);
3314 +
3315 ++void rpc_task_release_transport(struct rpc_task *task)
3316 ++{
3317 ++ struct rpc_xprt *xprt = task->tk_xprt;
3318 ++
3319 ++ if (xprt) {
3320 ++ task->tk_xprt = NULL;
3321 ++ xprt_put(xprt);
3322 ++ }
3323 ++}
3324 ++EXPORT_SYMBOL_GPL(rpc_task_release_transport);
3325 ++
3326 + void rpc_task_release_client(struct rpc_task *task)
3327 + {
3328 + struct rpc_clnt *clnt = task->tk_client;
3329 +- struct rpc_xprt *xprt = task->tk_xprt;
3330 +
3331 + if (clnt != NULL) {
3332 + /* Remove from client task list */
3333 +@@ -979,12 +989,14 @@ void rpc_task_release_client(struct rpc_task *task)
3334 +
3335 + rpc_release_client(clnt);
3336 + }
3337 ++ rpc_task_release_transport(task);
3338 ++}
3339 +
3340 +- if (xprt != NULL) {
3341 +- task->tk_xprt = NULL;
3342 +-
3343 +- xprt_put(xprt);
3344 +- }
3345 ++static
3346 ++void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
3347 ++{
3348 ++ if (!task->tk_xprt)
3349 ++ task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi);
3350 + }
3351 +
3352 + static
3353 +@@ -992,8 +1004,7 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
3354 + {
3355 +
3356 + if (clnt != NULL) {
3357 +- if (task->tk_xprt == NULL)
3358 +- task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi);
3359 ++ rpc_task_set_transport(task, clnt);
3360 + task->tk_client = clnt;
3361 + atomic_inc(&clnt->cl_count);
3362 + if (clnt->cl_softrtry)
3363 +@@ -1529,6 +1540,7 @@ call_start(struct rpc_task *task)
3364 + clnt->cl_program->version[clnt->cl_vers]->counts[idx]++;
3365 + clnt->cl_stats->rpccnt++;
3366 + task->tk_action = call_reserve;
3367 ++ rpc_task_set_transport(task, clnt);
3368 + }
3369 +
3370 + /*
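
The sunrpc refactor pulls the transport drop out of rpc_task_release_client() into an exported rpc_task_release_transport(), adds a matching rpc_task_set_transport(), and re-pins the transport in call_start() so a task re-run after releasing its client picks up a transport again. Reduced to the ownership rules, the helper pair looks roughly like this (plain refcount stand-ins, not the RPC API; the real setter takes the next transport from the client's iterator, which already returns a counted reference):

#include <stdlib.h>

struct xprt { int refcount; };
struct task { struct xprt *tk_xprt; };

static struct xprt *xprt_get(struct xprt *x) { x->refcount++; return x; }

static void xprt_put(struct xprt *x)
{
    if (--x->refcount == 0)
        free(x);
}

/* Mirrors rpc_task_release_transport(): clear and drop in one place. */
static void task_release_transport(struct task *t)
{
    if (t->tk_xprt) {
        xprt_put(t->tk_xprt);
        t->tk_xprt = NULL;
    }
}

/* Mirrors rpc_task_set_transport(): pin one only if none is held yet. */
static void task_set_transport(struct task *t, struct xprt *next)
{
    if (!t->tk_xprt)
        t->tk_xprt = xprt_get(next);
}

int main(void)
{
    struct xprt *x = malloc(sizeof(*x));
    struct task t = { 0 };

    x->refcount = 1;                /* the owner's own reference */
    task_set_transport(&t, x);      /* the task takes a second reference */
    task_release_transport(&t);     /* drops it, tk_xprt back to NULL */
    xprt_put(x);                    /* owner drops the last reference */
    return 0;
}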
3371 +diff --git a/security/commoncap.c b/security/commoncap.c
3372 +index 1c1f64582bb5..ae26ef006988 100644
3373 +--- a/security/commoncap.c
3374 ++++ b/security/commoncap.c
3375 +@@ -388,7 +388,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
3376 + if (strcmp(name, "capability") != 0)
3377 + return -EOPNOTSUPP;
3378 +
3379 +- dentry = d_find_alias(inode);
3380 ++ dentry = d_find_any_alias(inode);
3381 + if (!dentry)
3382 + return -EINVAL;
3383 +
3384 +diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
3385 +index 5547457566a7..bbb9823e93b9 100644
3386 +--- a/tools/perf/util/auxtrace.c
3387 ++++ b/tools/perf/util/auxtrace.c
3388 +@@ -197,6 +197,9 @@ static int auxtrace_queues__grow(struct auxtrace_queues *queues,
3389 + for (i = 0; i < queues->nr_queues; i++) {
3390 + list_splice_tail(&queues->queue_array[i].head,
3391 + &queue_array[i].head);
3392 ++ queue_array[i].tid = queues->queue_array[i].tid;
3393 ++ queue_array[i].cpu = queues->queue_array[i].cpu;
3394 ++ queue_array[i].set = queues->queue_array[i].set;
3395 + queue_array[i].priv = queues->queue_array[i].priv;
3396 + }
3397 +
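
The auxtrace fix is a classic grow-the-array bug: the old code spliced each queue's buffer list into the new array but carried over only the priv pointer, silently losing tid, cpu and set. The kernel has to copy field by field because the embedded list head must be spliced rather than assigned; in a simplified sketch without the list, a plain struct copy preserves everything:

#include <stdbool.h>
#include <stdlib.h>

struct queue { int tid, cpu; bool set; void *priv; };

static struct queue *grow_queues(struct queue *old, unsigned nr,
                                 unsigned new_nr)
{
    struct queue *q = calloc(new_nr, sizeof(*q));

    if (!q)
        return NULL;
    for (unsigned i = 0; i < nr; i++)
        q[i] = old[i];          /* struct copy keeps every field */
    free(old);
    return q;
}

int main(void)
{
    struct queue *q = calloc(2, sizeof(*q));

    q[0].tid = 42;
    q[0].set = true;
    q = grow_queues(q, 2, 8);
    return q && q[0].tid == 42 && q[0].set ? 0 : 1;
}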