Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Wed, 21 Nov 2018 15:02:39
Message-Id: 1542812488.86bc6e325e814eac0e799c3cd77384a5a6469ecd.mpagano@gentoo
1 commit: 86bc6e325e814eac0e799c3cd77384a5a6469ecd
2 Author: Mike Pagano <mpagano@gentoo.org>
3 AuthorDate: Sun Sep 9 23:25:58 2018 +0000
4 Commit: Mike Pagano <mpagano@gentoo.org>
5 CommitDate: Wed Nov 21 15:01:28 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=86bc6e32
7
8 Linux patch 4.4.155
9
10 Signed-off-by: Mike Pagano <mpagano@gentoo.org>
11
12 0000_README | 4 +
13 1154_linux-4.4.155.patch | 1862 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1866 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 5a367b5..6b63ef8 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -659,6 +659,10 @@ Patch: 1153_linux-4.4.154.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.4.154
23
24 +Patch: 1154_linux-4.4.155.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.4.155
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1154_linux-4.4.155.patch b/1154_linux-4.4.155.patch
33 new file mode 100644
34 index 0000000..0e4fe23
35 --- /dev/null
36 +++ b/1154_linux-4.4.155.patch
37 @@ -0,0 +1,1862 @@
38 +diff --git a/Makefile b/Makefile
39 +index b184286cf7e6..2d9f89ec8397 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,6 +1,6 @@
43 + VERSION = 4
44 + PATCHLEVEL = 4
45 +-SUBLEVEL = 154
46 ++SUBLEVEL = 155
47 + EXTRAVERSION =
48 + NAME = Blurry Fish Butt
49 +
50 +diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
51 +index 63f06a2b1f7f..bbc7cb9faa01 100644
52 +--- a/arch/alpha/kernel/osf_sys.c
53 ++++ b/arch/alpha/kernel/osf_sys.c
54 +@@ -526,24 +526,19 @@ SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path,
55 + SYSCALL_DEFINE1(osf_utsname, char __user *, name)
56 + {
57 + int error;
58 ++ char tmp[5 * 32];
59 +
60 + down_read(&uts_sem);
61 +- error = -EFAULT;
62 +- if (copy_to_user(name + 0, utsname()->sysname, 32))
63 +- goto out;
64 +- if (copy_to_user(name + 32, utsname()->nodename, 32))
65 +- goto out;
66 +- if (copy_to_user(name + 64, utsname()->release, 32))
67 +- goto out;
68 +- if (copy_to_user(name + 96, utsname()->version, 32))
69 +- goto out;
70 +- if (copy_to_user(name + 128, utsname()->machine, 32))
71 +- goto out;
72 ++ memcpy(tmp + 0 * 32, utsname()->sysname, 32);
73 ++ memcpy(tmp + 1 * 32, utsname()->nodename, 32);
74 ++ memcpy(tmp + 2 * 32, utsname()->release, 32);
75 ++ memcpy(tmp + 3 * 32, utsname()->version, 32);
76 ++ memcpy(tmp + 4 * 32, utsname()->machine, 32);
77 ++ up_read(&uts_sem);
78 +
79 +- error = 0;
80 +- out:
81 +- up_read(&uts_sem);
82 +- return error;
83 ++ if (copy_to_user(name, tmp, sizeof(tmp)))
84 ++ return -EFAULT;
85 ++ return 0;
86 + }
87 +
88 + SYSCALL_DEFINE0(getpagesize)
89 +@@ -561,24 +556,22 @@ SYSCALL_DEFINE0(getdtablesize)
90 + */
91 + SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
92 + {
93 +- unsigned len;
94 +- int i;
95 ++ int len, err = 0;
96 ++ char *kname;
97 ++ char tmp[32];
98 +
99 +- if (!access_ok(VERIFY_WRITE, name, namelen))
100 +- return -EFAULT;
101 +-
102 +- len = namelen;
103 +- if (len > 32)
104 +- len = 32;
105 ++ if (namelen < 0 || namelen > 32)
106 ++ namelen = 32;
107 +
108 + down_read(&uts_sem);
109 +- for (i = 0; i < len; ++i) {
110 +- __put_user(utsname()->domainname[i], name + i);
111 +- if (utsname()->domainname[i] == '\0')
112 +- break;
113 +- }
114 ++ kname = utsname()->domainname;
115 ++ len = strnlen(kname, namelen);
116 ++ len = min(len + 1, namelen);
117 ++ memcpy(tmp, kname, len);
118 + up_read(&uts_sem);
119 +
120 ++ if (copy_to_user(name, tmp, len))
121 ++ return -EFAULT;
122 + return 0;
123 + }
124 +
125 +@@ -741,13 +734,14 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
126 + };
127 + unsigned long offset;
128 + const char *res;
129 +- long len, err = -EINVAL;
130 ++ long len;
131 ++ char tmp[__NEW_UTS_LEN + 1];
132 +
133 + offset = command-1;
134 + if (offset >= ARRAY_SIZE(sysinfo_table)) {
135 + /* Digital UNIX has a few unpublished interfaces here */
136 + printk("sysinfo(%d)", command);
137 +- goto out;
138 ++ return -EINVAL;
139 + }
140 +
141 + down_read(&uts_sem);
142 +@@ -755,13 +749,11 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
143 + len = strlen(res)+1;
144 + if ((unsigned long)len > (unsigned long)count)
145 + len = count;
146 +- if (copy_to_user(buf, res, len))
147 +- err = -EFAULT;
148 +- else
149 +- err = 0;
150 ++ memcpy(tmp, res, len);
151 + up_read(&uts_sem);
152 +- out:
153 +- return err;
154 ++ if (copy_to_user(buf, tmp, len))
155 ++ return -EFAULT;
156 ++ return 0;
157 + }
158 +
159 + SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
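
The alpha osf_sys.c hunks above follow the pattern used throughout this patch for the uts_sem fixes: copy the data into a kernel stack buffer while holding the semaphore, drop the lock, and only then call copy_to_user(), which may fault and sleep. A minimal userspace sketch of the same discipline, with a pthread rwlock standing in for uts_sem and write() standing in for copy_to_user() (all names here are invented for the example, not from the patch):

    /* snapshot.c - illustrative userspace analogue of the "copy under
     * the lock, fault outside the lock" pattern in the hunks above.
     * Build: cc -pthread snapshot.c -o snapshot
     */
    #include <pthread.h>
    #include <string.h>
    #include <unistd.h>

    static pthread_rwlock_t uts_lock = PTHREAD_RWLOCK_INITIALIZER;
    static char domainname[65] = "example.invalid";   /* shared state */

    /* Copy the shared string to fd without holding the lock across the
     * potentially blocking write(), mirroring how the kernel now avoids
     * holding uts_sem across a faulting copy_to_user(). */
    static int read_domainname(int fd)
    {
            char tmp[sizeof(domainname)];
            size_t len;

            pthread_rwlock_rdlock(&uts_lock);
            len = strnlen(domainname, sizeof(tmp) - 1);
            memcpy(tmp, domainname, len);
            tmp[len] = '\0';
            pthread_rwlock_unlock(&uts_lock);     /* drop the lock first */

            return write(fd, tmp, len + 1) < 0 ? -1 : 0;
    }

    int main(void)
    {
            return read_domainname(STDOUT_FILENO);
    }
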
160 +diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
161 +index bb1ca158273c..1922e7a93e40 100644
162 +--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
163 ++++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
164 +@@ -201,6 +201,7 @@
165 + #address-cells = <1>;
166 + #size-cells = <0>;
167 + reg = <0x70>;
168 ++ reset-gpio = <&gpio TEGRA_GPIO(BB, 0) GPIO_ACTIVE_LOW>;
169 + };
170 + };
171 +
172 +diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
173 +index 493e72f64b35..5768ec3c1781 100644
174 +--- a/arch/powerpc/include/asm/fadump.h
175 ++++ b/arch/powerpc/include/asm/fadump.h
176 +@@ -194,9 +194,6 @@ struct fadump_crash_info_header {
177 + struct cpumask cpu_online_mask;
178 + };
179 +
180 +-/* Crash memory ranges */
181 +-#define INIT_CRASHMEM_RANGES (INIT_MEMBLOCK_REGIONS + 2)
182 +-
183 + struct fad_crash_memory_ranges {
184 + unsigned long long base;
185 + unsigned long long size;
186 +diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
187 +index 791d4c3329c3..c3c835290131 100644
188 +--- a/arch/powerpc/kernel/fadump.c
189 ++++ b/arch/powerpc/kernel/fadump.c
190 +@@ -35,6 +35,7 @@
191 + #include <linux/crash_dump.h>
192 + #include <linux/kobject.h>
193 + #include <linux/sysfs.h>
194 ++#include <linux/slab.h>
195 +
196 + #include <asm/page.h>
197 + #include <asm/prom.h>
198 +@@ -48,8 +49,10 @@ static struct fadump_mem_struct fdm;
199 + static const struct fadump_mem_struct *fdm_active;
200 +
201 + static DEFINE_MUTEX(fadump_mutex);
202 +-struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES];
203 ++struct fad_crash_memory_ranges *crash_memory_ranges;
204 ++int crash_memory_ranges_size;
205 + int crash_mem_ranges;
206 ++int max_crash_mem_ranges;
207 +
208 + /* Scan the Firmware Assisted dump configuration details. */
209 + int __init early_init_dt_scan_fw_dump(unsigned long node,
210 +@@ -726,38 +729,88 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
211 + return 0;
212 + }
213 +
214 +-static inline void fadump_add_crash_memory(unsigned long long base,
215 +- unsigned long long end)
216 ++static void free_crash_memory_ranges(void)
217 ++{
218 ++ kfree(crash_memory_ranges);
219 ++ crash_memory_ranges = NULL;
220 ++ crash_memory_ranges_size = 0;
221 ++ max_crash_mem_ranges = 0;
222 ++}
223 ++
224 ++/*
225 ++ * Allocate or reallocate crash memory ranges array in incremental units
226 ++ * of PAGE_SIZE.
227 ++ */
228 ++static int allocate_crash_memory_ranges(void)
229 ++{
230 ++ struct fad_crash_memory_ranges *new_array;
231 ++ u64 new_size;
232 ++
233 ++ new_size = crash_memory_ranges_size + PAGE_SIZE;
234 ++ pr_debug("Allocating %llu bytes of memory for crash memory ranges\n",
235 ++ new_size);
236 ++
237 ++ new_array = krealloc(crash_memory_ranges, new_size, GFP_KERNEL);
238 ++ if (new_array == NULL) {
239 ++ pr_err("Insufficient memory for setting up crash memory ranges\n");
240 ++ free_crash_memory_ranges();
241 ++ return -ENOMEM;
242 ++ }
243 ++
244 ++ crash_memory_ranges = new_array;
245 ++ crash_memory_ranges_size = new_size;
246 ++ max_crash_mem_ranges = (new_size /
247 ++ sizeof(struct fad_crash_memory_ranges));
248 ++ return 0;
249 ++}
250 ++
251 ++static inline int fadump_add_crash_memory(unsigned long long base,
252 ++ unsigned long long end)
253 + {
254 + if (base == end)
255 +- return;
256 ++ return 0;
257 ++
258 ++ if (crash_mem_ranges == max_crash_mem_ranges) {
259 ++ int ret;
260 ++
261 ++ ret = allocate_crash_memory_ranges();
262 ++ if (ret)
263 ++ return ret;
264 ++ }
265 +
266 + pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
267 + crash_mem_ranges, base, end - 1, (end - base));
268 + crash_memory_ranges[crash_mem_ranges].base = base;
269 + crash_memory_ranges[crash_mem_ranges].size = end - base;
270 + crash_mem_ranges++;
271 ++ return 0;
272 + }
273 +
274 +-static void fadump_exclude_reserved_area(unsigned long long start,
275 ++static int fadump_exclude_reserved_area(unsigned long long start,
276 + unsigned long long end)
277 + {
278 + unsigned long long ra_start, ra_end;
279 ++ int ret = 0;
280 +
281 + ra_start = fw_dump.reserve_dump_area_start;
282 + ra_end = ra_start + fw_dump.reserve_dump_area_size;
283 +
284 + if ((ra_start < end) && (ra_end > start)) {
285 + if ((start < ra_start) && (end > ra_end)) {
286 +- fadump_add_crash_memory(start, ra_start);
287 +- fadump_add_crash_memory(ra_end, end);
288 ++ ret = fadump_add_crash_memory(start, ra_start);
289 ++ if (ret)
290 ++ return ret;
291 ++
292 ++ ret = fadump_add_crash_memory(ra_end, end);
293 + } else if (start < ra_start) {
294 +- fadump_add_crash_memory(start, ra_start);
295 ++ ret = fadump_add_crash_memory(start, ra_start);
296 + } else if (ra_end < end) {
297 +- fadump_add_crash_memory(ra_end, end);
298 ++ ret = fadump_add_crash_memory(ra_end, end);
299 + }
300 + } else
301 +- fadump_add_crash_memory(start, end);
302 ++ ret = fadump_add_crash_memory(start, end);
303 ++
304 ++ return ret;
305 + }
306 +
307 + static int fadump_init_elfcore_header(char *bufp)
308 +@@ -793,10 +846,11 @@ static int fadump_init_elfcore_header(char *bufp)
309 + * Traverse through memblock structure and setup crash memory ranges. These
310 + * ranges will be used create PT_LOAD program headers in elfcore header.
311 + */
312 +-static void fadump_setup_crash_memory_ranges(void)
313 ++static int fadump_setup_crash_memory_ranges(void)
314 + {
315 + struct memblock_region *reg;
316 + unsigned long long start, end;
317 ++ int ret;
318 +
319 + pr_debug("Setup crash memory ranges.\n");
320 + crash_mem_ranges = 0;
321 +@@ -807,7 +861,9 @@ static void fadump_setup_crash_memory_ranges(void)
322 + * specified during fadump registration. We need to create a separate
323 + * program header for this chunk with the correct offset.
324 + */
325 +- fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
326 ++ ret = fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
327 ++ if (ret)
328 ++ return ret;
329 +
330 + for_each_memblock(memory, reg) {
331 + start = (unsigned long long)reg->base;
332 +@@ -816,8 +872,12 @@ static void fadump_setup_crash_memory_ranges(void)
333 + start = fw_dump.boot_memory_size;
334 +
335 + /* add this range excluding the reserved dump area. */
336 +- fadump_exclude_reserved_area(start, end);
337 ++ ret = fadump_exclude_reserved_area(start, end);
338 ++ if (ret)
339 ++ return ret;
340 + }
341 ++
342 ++ return 0;
343 + }
344 +
345 + /*
346 +@@ -941,6 +1001,7 @@ static void register_fadump(void)
347 + {
348 + unsigned long addr;
349 + void *vaddr;
350 ++ int ret;
351 +
352 + /*
353 + * If no memory is reserved then we can not register for firmware-
354 +@@ -949,7 +1010,9 @@ static void register_fadump(void)
355 + if (!fw_dump.reserve_dump_area_size)
356 + return;
357 +
358 +- fadump_setup_crash_memory_ranges();
359 ++ ret = fadump_setup_crash_memory_ranges();
360 ++ if (ret)
361 ++ return ret;
362 +
363 + addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
364 + /* Initialize fadump crash info header. */
365 +@@ -1028,6 +1091,7 @@ void fadump_cleanup(void)
366 + } else if (fw_dump.dump_registered) {
367 + /* Un-register Firmware-assisted dump if it was registered. */
368 + fadump_unregister_dump(&fdm);
369 ++ free_crash_memory_ranges();
370 + }
371 + }
372 +
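
The fadump change above replaces a fixed-size crash range array with one grown in PAGE_SIZE increments via krealloc(), freeing everything on allocation failure and propagating errors up the call chain. A rough userspace analogue of that growth and error-handling shape, using realloc() (the identifiers below are invented for the sketch):

    /* grow.c - illustrative analogue of allocate_crash_memory_ranges()
     * above: grow an array of ranges in fixed-size chunks and release
     * everything if the allocator fails.
     * Build: cc grow.c -o grow
     */
    #include <stdio.h>
    #include <stdlib.h>

    #define CHUNK_BYTES 4096        /* stands in for PAGE_SIZE */

    struct range { unsigned long long base, size; };

    static struct range *ranges;
    static size_t ranges_bytes, nr_ranges, max_ranges;

    static void free_ranges(void)
    {
            free(ranges);
            ranges = NULL;
            ranges_bytes = max_ranges = 0;
    }

    static int grow_ranges(void)
    {
            size_t new_bytes = ranges_bytes + CHUNK_BYTES;
            struct range *tmp = realloc(ranges, new_bytes);

            if (!tmp) {     /* realloc failed: old block still valid */
                    free_ranges();
                    return -1;
            }
            ranges = tmp;
            ranges_bytes = new_bytes;
            max_ranges = new_bytes / sizeof(*ranges);
            return 0;
    }

    static int add_range(unsigned long long base, unsigned long long end)
    {
            if (base == end)
                    return 0;
            if (nr_ranges == max_ranges && grow_ranges())
                    return -1;
            ranges[nr_ranges].base = base;
            ranges[nr_ranges].size = end - base;
            nr_ranges++;
            return 0;
    }

    int main(void)
    {
            for (unsigned long long i = 0; i < 1000; i++)
                    if (add_range(i << 20, (i + 1) << 20))
                            return 1;
            printf("%zu ranges, %zu capacity\n", nr_ranges, max_ranges);
            free_ranges();
            return 0;
    }
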
373 +diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
374 +index 3b6647e574b6..f5313a78e5d6 100644
375 +--- a/arch/powerpc/platforms/pseries/ras.c
376 ++++ b/arch/powerpc/platforms/pseries/ras.c
377 +@@ -300,7 +300,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
378 + }
379 +
380 + savep = __va(regs->gpr[3]);
381 +- regs->gpr[3] = savep[0]; /* restore original r3 */
382 ++ regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
383 +
384 + /* If it isn't an extended log we can use the per cpu 64bit buffer */
385 + h = (struct rtas_error_log *)&savep[1];
386 +diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
387 +index 646988d4c1a3..740f43b9b541 100644
388 +--- a/arch/sparc/kernel/sys_sparc_32.c
389 ++++ b/arch/sparc/kernel/sys_sparc_32.c
390 +@@ -201,23 +201,27 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig,
391 +
392 + asmlinkage long sys_getdomainname(char __user *name, int len)
393 + {
394 +- int nlen, err;
395 +-
396 ++ int nlen, err;
397 ++ char tmp[__NEW_UTS_LEN + 1];
398 ++
399 + if (len < 0)
400 + return -EINVAL;
401 +
402 +- down_read(&uts_sem);
403 +-
404 ++ down_read(&uts_sem);
405 ++
406 + nlen = strlen(utsname()->domainname) + 1;
407 + err = -EINVAL;
408 + if (nlen > len)
409 +- goto out;
410 ++ goto out_unlock;
411 ++ memcpy(tmp, utsname()->domainname, nlen);
412 +
413 +- err = -EFAULT;
414 +- if (!copy_to_user(name, utsname()->domainname, nlen))
415 +- err = 0;
416 ++ up_read(&uts_sem);
417 +
418 +-out:
419 ++ if (copy_to_user(name, tmp, nlen))
420 ++ return -EFAULT;
421 ++ return 0;
422 ++
423 ++out_unlock:
424 + up_read(&uts_sem);
425 + return err;
426 + }
427 +diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
428 +index 7f0f7c01b297..f63cd2ea8470 100644
429 +--- a/arch/sparc/kernel/sys_sparc_64.c
430 ++++ b/arch/sparc/kernel/sys_sparc_64.c
431 +@@ -524,23 +524,27 @@ extern void check_pending(int signum);
432 +
433 + SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
434 + {
435 +- int nlen, err;
436 ++ int nlen, err;
437 ++ char tmp[__NEW_UTS_LEN + 1];
438 +
439 + if (len < 0)
440 + return -EINVAL;
441 +
442 +- down_read(&uts_sem);
443 +-
444 ++ down_read(&uts_sem);
445 ++
446 + nlen = strlen(utsname()->domainname) + 1;
447 + err = -EINVAL;
448 + if (nlen > len)
449 +- goto out;
450 ++ goto out_unlock;
451 ++ memcpy(tmp, utsname()->domainname, nlen);
452 ++
453 ++ up_read(&uts_sem);
454 +
455 +- err = -EFAULT;
456 +- if (!copy_to_user(name, utsname()->domainname, nlen))
457 +- err = 0;
458 ++ if (copy_to_user(name, tmp, nlen))
459 ++ return -EFAULT;
460 ++ return 0;
461 +
462 +-out:
463 ++out_unlock:
464 + up_read(&uts_sem);
465 + return err;
466 + }
467 +diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
468 +index 9016b4b70375..6c5020163db0 100644
469 +--- a/arch/x86/include/asm/io.h
470 ++++ b/arch/x86/include/asm/io.h
471 +@@ -351,4 +351,10 @@ extern void arch_phys_wc_del(int handle);
472 + #define arch_phys_wc_add arch_phys_wc_add
473 + #endif
474 +
475 ++#ifdef CONFIG_X86_PAT
476 ++extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
477 ++extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
478 ++#define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
479 ++#endif
480 ++
481 + #endif /* _ASM_X86_IO_H */
482 +diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
483 +index 1007fa80f5a6..0e1dd7d47f05 100644
484 +--- a/arch/x86/mm/pageattr.c
485 ++++ b/arch/x86/mm/pageattr.c
486 +@@ -1079,7 +1079,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
487 + * Map everything starting from the Gb boundary, possibly with 1G pages
488 + */
489 + while (end - start >= PUD_SIZE) {
490 +- set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
491 ++ set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn >> PAGE_SHIFT,
492 + canon_pgprot(pud_pgprot))));
493 +
494 + start += PUD_SIZE;
495 +diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
496 +index 3146b1da6d72..5ff0cb74de55 100644
497 +--- a/arch/x86/mm/pat.c
498 ++++ b/arch/x86/mm/pat.c
499 +@@ -726,6 +726,20 @@ void io_free_memtype(resource_size_t start, resource_size_t end)
500 + free_memtype(start, end);
501 + }
502 +
503 ++int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
504 ++{
505 ++ enum page_cache_mode type = _PAGE_CACHE_MODE_WC;
506 ++
507 ++ return io_reserve_memtype(start, start + size, &type);
508 ++}
509 ++EXPORT_SYMBOL(arch_io_reserve_memtype_wc);
510 ++
511 ++void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
512 ++{
513 ++ io_free_memtype(start, start + size);
514 ++}
515 ++EXPORT_SYMBOL(arch_io_free_memtype_wc);
516 ++
517 + pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
518 + unsigned long size, pgprot_t vma_prot)
519 + {
520 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
521 +index 73628c7599e7..3aca9a9011fb 100644
522 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
523 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
524 +@@ -492,6 +492,10 @@ void amdgpu_bo_force_delete(struct amdgpu_device *adev)
525 +
526 + int amdgpu_bo_init(struct amdgpu_device *adev)
527 + {
528 ++ /* reserve PAT memory space to WC for VRAM */
529 ++ arch_io_reserve_memtype_wc(adev->mc.aper_base,
530 ++ adev->mc.aper_size);
531 ++
532 + /* Add an MTRR for the VRAM */
533 + adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
534 + adev->mc.aper_size);
535 +@@ -507,6 +511,7 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
536 + {
537 + amdgpu_ttm_fini(adev);
538 + arch_phys_wc_del(adev->mc.vram_mtrr);
539 ++ arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
540 + }
541 +
542 + int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
543 +diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
544 +index 08f82eae6939..ac12f74e6b32 100644
545 +--- a/drivers/gpu/drm/ast/ast_ttm.c
546 ++++ b/drivers/gpu/drm/ast/ast_ttm.c
547 +@@ -275,6 +275,8 @@ int ast_mm_init(struct ast_private *ast)
548 + return ret;
549 + }
550 +
551 ++ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
552 ++ pci_resource_len(dev->pdev, 0));
553 + ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
554 + pci_resource_len(dev->pdev, 0));
555 +
556 +@@ -283,11 +285,15 @@ int ast_mm_init(struct ast_private *ast)
557 +
558 + void ast_mm_fini(struct ast_private *ast)
559 + {
560 ++ struct drm_device *dev = ast->dev;
561 ++
562 + ttm_bo_device_release(&ast->ttm.bdev);
563 +
564 + ast_ttm_global_release(ast);
565 +
566 + arch_phys_wc_del(ast->fb_mtrr);
567 ++ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
568 ++ pci_resource_len(dev->pdev, 0));
569 + }
570 +
571 + void ast_ttm_placement(struct ast_bo *bo, int domain)
572 +diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
573 +index dfffd528517a..393967025043 100644
574 +--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
575 ++++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
576 +@@ -275,6 +275,9 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
577 + return ret;
578 + }
579 +
580 ++ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
581 ++ pci_resource_len(dev->pdev, 0));
582 ++
583 + cirrus->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
584 + pci_resource_len(dev->pdev, 0));
585 +
586 +@@ -284,6 +287,8 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
587 +
588 + void cirrus_mm_fini(struct cirrus_device *cirrus)
589 + {
590 ++ struct drm_device *dev = cirrus->dev;
591 ++
592 + if (!cirrus->mm_inited)
593 + return;
594 +
595 +@@ -293,6 +298,8 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
596 +
597 + arch_phys_wc_del(cirrus->fb_mtrr);
598 + cirrus->fb_mtrr = 0;
599 ++ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
600 ++ pci_resource_len(dev->pdev, 0));
601 + }
602 +
603 + void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
604 +diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
605 +index 19fb0bddc1cd..359fe2b8bb8a 100644
606 +--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
607 ++++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
608 +@@ -842,6 +842,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
609 + I915_USERPTR_UNSYNCHRONIZED))
610 + return -EINVAL;
611 +
612 ++ if (!args->user_size)
613 ++ return -EINVAL;
614 ++
615 + if (offset_in_page(args->user_ptr | args->user_size))
616 + return -EINVAL;
617 +
618 +diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
619 +index 05108b505fbf..d9df8d32fc35 100644
620 +--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
621 ++++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
622 +@@ -274,6 +274,9 @@ int mgag200_mm_init(struct mga_device *mdev)
623 + return ret;
624 + }
625 +
626 ++ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
627 ++ pci_resource_len(dev->pdev, 0));
628 ++
629 + mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
630 + pci_resource_len(dev->pdev, 0));
631 +
632 +@@ -282,10 +285,14 @@ int mgag200_mm_init(struct mga_device *mdev)
633 +
634 + void mgag200_mm_fini(struct mga_device *mdev)
635 + {
636 ++ struct drm_device *dev = mdev->dev;
637 ++
638 + ttm_bo_device_release(&mdev->ttm.bdev);
639 +
640 + mgag200_ttm_global_release(mdev);
641 +
642 ++ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
643 ++ pci_resource_len(dev->pdev, 0));
644 + arch_phys_wc_del(mdev->fb_mtrr);
645 + mdev->fb_mtrr = 0;
646 + }
647 +diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
648 +index d2e7d209f651..9835327a3214 100644
649 +--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
650 ++++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
651 +@@ -397,6 +397,9 @@ nouveau_ttm_init(struct nouveau_drm *drm)
652 + /* VRAM init */
653 + drm->gem.vram_available = drm->device.info.ram_user;
654 +
655 ++ arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
656 ++ device->func->resource_size(device, 1));
657 ++
658 + ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
659 + drm->gem.vram_available >> PAGE_SHIFT);
660 + if (ret) {
661 +@@ -429,6 +432,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
662 + void
663 + nouveau_ttm_fini(struct nouveau_drm *drm)
664 + {
665 ++ struct nvkm_device *device = nvxx_device(&drm->device);
666 ++
667 + ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
668 + ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
669 +
670 +@@ -438,4 +443,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
671 +
672 + arch_phys_wc_del(drm->ttm.mtrr);
673 + drm->ttm.mtrr = 0;
674 ++ arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
675 ++ device->func->resource_size(device, 1));
676 ++
677 + }
678 +diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
679 +index 83aee9e814ba..18ec38d0d3f5 100644
680 +--- a/drivers/gpu/drm/radeon/radeon_object.c
681 ++++ b/drivers/gpu/drm/radeon/radeon_object.c
682 +@@ -447,6 +447,10 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
683 +
684 + int radeon_bo_init(struct radeon_device *rdev)
685 + {
686 ++ /* reserve PAT memory space to WC for VRAM */
687 ++ arch_io_reserve_memtype_wc(rdev->mc.aper_base,
688 ++ rdev->mc.aper_size);
689 ++
690 + /* Add an MTRR for the VRAM */
691 + if (!rdev->fastfb_working) {
692 + rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
693 +@@ -464,6 +468,7 @@ void radeon_bo_fini(struct radeon_device *rdev)
694 + {
695 + radeon_ttm_fini(rdev);
696 + arch_phys_wc_del(rdev->mc.vram_mtrr);
697 ++ arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
698 + }
699 +
700 + /* Returns how many bytes TTM can move per IB.
701 +diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
702 +index 44a30f286de1..57b1812a5a18 100644
703 +--- a/drivers/iio/frequency/ad9523.c
704 ++++ b/drivers/iio/frequency/ad9523.c
705 +@@ -507,7 +507,7 @@ static ssize_t ad9523_store(struct device *dev,
706 + return ret;
707 +
708 + if (!state)
709 +- return 0;
710 ++ return len;
711 +
712 + mutex_lock(&indio_dev->mlock);
713 + switch ((u32)this_attr->address) {
714 +@@ -641,7 +641,7 @@ static int ad9523_read_raw(struct iio_dev *indio_dev,
715 + code = (AD9523_CLK_DIST_DIV_PHASE_REV(ret) * 3141592) /
716 + AD9523_CLK_DIST_DIV_REV(ret);
717 + *val = code / 1000000;
718 +- *val2 = (code % 1000000) * 10;
719 ++ *val2 = code % 1000000;
720 + return IIO_VAL_INT_PLUS_MICRO;
721 + default:
722 + return -EINVAL;
723 +diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
724 +index e913a930ac80..5a63e32a4a6b 100644
725 +--- a/drivers/iommu/dmar.c
726 ++++ b/drivers/iommu/dmar.c
727 +@@ -1315,8 +1315,8 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
728 + qi_submit_sync(&desc, iommu);
729 + }
730 +
731 +-void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
732 +- u64 addr, unsigned mask)
733 ++void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
734 ++ u16 qdep, u64 addr, unsigned mask)
735 + {
736 + struct qi_desc desc;
737 +
738 +@@ -1331,7 +1331,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
739 + qdep = 0;
740 +
741 + desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
742 +- QI_DIOTLB_TYPE;
743 ++ QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
744 +
745 + qi_submit_sync(&desc, iommu);
746 + }
747 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
748 +index 4efec2db4ee2..49b266433f4c 100644
749 +--- a/drivers/iommu/intel-iommu.c
750 ++++ b/drivers/iommu/intel-iommu.c
751 +@@ -419,6 +419,7 @@ struct device_domain_info {
752 + struct list_head global; /* link to global list */
753 + u8 bus; /* PCI bus number */
754 + u8 devfn; /* PCI devfn number */
755 ++ u16 pfsid; /* SRIOV physical function source ID */
756 + u8 pasid_supported:3;
757 + u8 pasid_enabled:1;
758 + u8 pri_supported:1;
759 +@@ -1479,6 +1480,20 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
760 + return;
761 +
762 + pdev = to_pci_dev(info->dev);
763 ++ /* For IOMMU that supports device IOTLB throttling (DIT), we assign
764 ++ * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
765 ++ * queue depth at PF level. If DIT is not set, PFSID will be treated as
766 ++ * reserved, which should be set to 0.
767 ++ */
768 ++ if (!ecap_dit(info->iommu->ecap))
769 ++ info->pfsid = 0;
770 ++ else {
771 ++ struct pci_dev *pf_pdev;
772 ++
773 ++ /* pdev will be returned if device is not a vf */
774 ++ pf_pdev = pci_physfn(pdev);
775 ++ info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
776 ++ }
777 +
778 + #ifdef CONFIG_INTEL_IOMMU_SVM
779 + /* The PCIe spec, in its wisdom, declares that the behaviour of
780 +@@ -1537,7 +1552,8 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
781 +
782 + sid = info->bus << 8 | info->devfn;
783 + qdep = info->ats_qdep;
784 +- qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
785 ++ qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
786 ++ qdep, addr, mask);
787 + }
788 + spin_unlock_irqrestore(&device_domain_lock, flags);
789 + }
790 +diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
791 +index f2c0000de613..95a6ae053714 100644
792 +--- a/drivers/md/bcache/writeback.c
793 ++++ b/drivers/md/bcache/writeback.c
794 +@@ -462,8 +462,10 @@ static int bch_writeback_thread(void *arg)
795 + * data on cache. BCACHE_DEV_DETACHING flag is set in
796 + * bch_cached_dev_detach().
797 + */
798 +- if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
799 ++ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
800 ++ up_write(&dc->writeback_lock);
801 + break;
802 ++ }
803 + }
804 +
805 + up_write(&dc->writeback_lock);
806 +diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
807 +index d3c55d7754af..905badc6cb17 100644
808 +--- a/drivers/md/dm-cache-metadata.c
809 ++++ b/drivers/md/dm-cache-metadata.c
810 +@@ -337,7 +337,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
811 + disk_super->version = cpu_to_le32(MAX_CACHE_VERSION);
812 + memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
813 + memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
814 +- disk_super->policy_hint_size = 0;
815 ++ disk_super->policy_hint_size = cpu_to_le32(0);
816 +
817 + __copy_sm_root(cmd, disk_super);
818 +
819 +@@ -652,6 +652,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
820 + disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
821 + disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
822 + disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
823 ++ disk_super->policy_hint_size = cpu_to_le32(cmd->policy_hint_size);
824 +
825 + disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
826 + disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
827 +diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
828 +index 5e047bfc0cc4..518e2dec2aa2 100644
829 +--- a/drivers/misc/vmw_balloon.c
830 ++++ b/drivers/misc/vmw_balloon.c
831 +@@ -341,7 +341,13 @@ static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
832 + success = false;
833 + }
834 +
835 +- if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS)
836 ++ /*
837 ++ * 2MB pages are only supported with batching. If batching is for some
838 ++ * reason disabled, do not use 2MB pages, since otherwise the legacy
839 ++ * mechanism is used with 2MB pages, causing a failure.
840 ++ */
841 ++ if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
842 ++ (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
843 + b->supported_page_sizes = 2;
844 + else
845 + b->supported_page_sizes = 1;
846 +@@ -450,7 +456,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
847 +
848 + pfn32 = (u32)pfn;
849 + if (pfn32 != pfn)
850 +- return -1;
851 ++ return -EINVAL;
852 +
853 + STATS_INC(b->stats.lock[false]);
854 +
855 +@@ -460,7 +466,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
856 +
857 + pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
858 + STATS_INC(b->stats.lock_fail[false]);
859 +- return 1;
860 ++ return -EIO;
861 + }
862 +
863 + static int vmballoon_send_batched_lock(struct vmballoon *b,
864 +@@ -597,11 +603,12 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
865 +
866 + locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
867 + target);
868 +- if (locked > 0) {
869 ++ if (locked) {
870 + STATS_INC(b->stats.refused_alloc[false]);
871 +
872 +- if (hv_status == VMW_BALLOON_ERROR_RESET ||
873 +- hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
874 ++ if (locked == -EIO &&
875 ++ (hv_status == VMW_BALLOON_ERROR_RESET ||
876 ++ hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
877 + vmballoon_free_page(page, false);
878 + return -EIO;
879 + }
880 +@@ -617,7 +624,7 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
881 + } else {
882 + vmballoon_free_page(page, false);
883 + }
884 +- return -EIO;
885 ++ return locked;
886 + }
887 +
888 + /* track allocated page */
889 +@@ -1029,29 +1036,30 @@ static void vmballoon_vmci_cleanup(struct vmballoon *b)
890 + */
891 + static int vmballoon_vmci_init(struct vmballoon *b)
892 + {
893 +- int error = 0;
894 ++ unsigned long error, dummy;
895 +
896 +- if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) {
897 +- error = vmci_doorbell_create(&b->vmci_doorbell,
898 +- VMCI_FLAG_DELAYED_CB,
899 +- VMCI_PRIVILEGE_FLAG_RESTRICTED,
900 +- vmballoon_doorbell, b);
901 +-
902 +- if (error == VMCI_SUCCESS) {
903 +- VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET,
904 +- b->vmci_doorbell.context,
905 +- b->vmci_doorbell.resource, error);
906 +- STATS_INC(b->stats.doorbell_set);
907 +- }
908 +- }
909 ++ if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
910 ++ return 0;
911 +
912 +- if (error != 0) {
913 +- vmballoon_vmci_cleanup(b);
914 ++ error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
915 ++ VMCI_PRIVILEGE_FLAG_RESTRICTED,
916 ++ vmballoon_doorbell, b);
917 +
918 +- return -EIO;
919 +- }
920 ++ if (error != VMCI_SUCCESS)
921 ++ goto fail;
922 ++
923 ++ error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
924 ++ b->vmci_doorbell.resource, dummy);
925 ++
926 ++ STATS_INC(b->stats.doorbell_set);
927 ++
928 ++ if (error != VMW_BALLOON_SUCCESS)
929 ++ goto fail;
930 +
931 + return 0;
932 ++fail:
933 ++ vmballoon_vmci_cleanup(b);
934 ++ return -EIO;
935 + }
936 +
937 + /*
938 +@@ -1289,7 +1297,14 @@ static int __init vmballoon_init(void)
939 +
940 + return 0;
941 + }
942 +-module_init(vmballoon_init);
943 ++
944 ++/*
945 ++ * Using late_initcall() instead of module_init() allows the balloon to use the
946 ++ * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
947 ++ * VMCI is probed only after the balloon is initialized. If the balloon is used
948 ++ * as a module, late_initcall() is equivalent to module_init().
949 ++ */
950 ++late_initcall(vmballoon_init);
951 +
952 + static void __exit vmballoon_exit(void)
953 + {
954 +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
955 +index acec4b565511..1aede726052c 100644
956 +--- a/drivers/net/usb/lan78xx.c
957 ++++ b/drivers/net/usb/lan78xx.c
958 +@@ -902,6 +902,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
959 +
960 + ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
961 + netif_carrier_on(dev->net);
962 ++
963 ++ tasklet_schedule(&dev->bh);
964 + }
965 +
966 + return ret;
967 +@@ -1361,8 +1363,6 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
968 + netif_dbg(dev, ifup, dev->net,
969 + "MAC address set to random addr");
970 + }
971 +-
972 +- tasklet_schedule(&dev->bh);
973 + }
974 +
975 + ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
976 +diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
977 +index 6a41e66015b6..062dff1c902d 100644
978 +--- a/drivers/pwm/pwm-tiehrpwm.c
979 ++++ b/drivers/pwm/pwm-tiehrpwm.c
980 +@@ -384,6 +384,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
981 + aqcsfrc_mask = AQCSFRC_CSFA_MASK;
982 + }
983 +
984 ++ /* Update shadow register first before modifying active register */
985 ++ ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
986 + /*
987 + * Changes to immediate action on Action Qualifier. This puts
988 + * Action Qualifier control on PWM output from next TBCLK
989 +diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
990 +index c872a2e54c4b..2603bee2ce07 100644
991 +--- a/drivers/spi/spi-davinci.c
992 ++++ b/drivers/spi/spi-davinci.c
993 +@@ -220,7 +220,7 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
994 + pdata = &dspi->pdata;
995 +
996 + /* program delay transfers if tx_delay is non zero */
997 +- if (spicfg->wdelay)
998 ++ if (spicfg && spicfg->wdelay)
999 + spidat1 |= SPIDAT1_WDEL;
1000 +
1001 + /*
1002 +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
1003 +index 0705d8883ede..8a29ec5992fd 100644
1004 +--- a/drivers/video/fbdev/core/fbmem.c
1005 ++++ b/drivers/video/fbdev/core/fbmem.c
1006 +@@ -1687,12 +1687,12 @@ static int do_register_framebuffer(struct fb_info *fb_info)
1007 + return 0;
1008 + }
1009 +
1010 +-static int do_unregister_framebuffer(struct fb_info *fb_info)
1011 ++static int unbind_console(struct fb_info *fb_info)
1012 + {
1013 + struct fb_event event;
1014 +- int i, ret = 0;
1015 ++ int ret;
1016 ++ int i = fb_info->node;
1017 +
1018 +- i = fb_info->node;
1019 + if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
1020 + return -EINVAL;
1021 +
1022 +@@ -1707,17 +1707,29 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
1023 + unlock_fb_info(fb_info);
1024 + console_unlock();
1025 +
1026 ++ return ret;
1027 ++}
1028 ++
1029 ++static int __unlink_framebuffer(struct fb_info *fb_info);
1030 ++
1031 ++static int do_unregister_framebuffer(struct fb_info *fb_info)
1032 ++{
1033 ++ struct fb_event event;
1034 ++ int ret;
1035 ++
1036 ++ ret = unbind_console(fb_info);
1037 ++
1038 + if (ret)
1039 + return -EINVAL;
1040 +
1041 + pm_vt_switch_unregister(fb_info->dev);
1042 +
1043 +- unlink_framebuffer(fb_info);
1044 ++ __unlink_framebuffer(fb_info);
1045 + if (fb_info->pixmap.addr &&
1046 + (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
1047 + kfree(fb_info->pixmap.addr);
1048 + fb_destroy_modelist(&fb_info->modelist);
1049 +- registered_fb[i] = NULL;
1050 ++ registered_fb[fb_info->node] = NULL;
1051 + num_registered_fb--;
1052 + fb_cleanup_device(fb_info);
1053 + event.info = fb_info;
1054 +@@ -1730,7 +1742,7 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
1055 + return 0;
1056 + }
1057 +
1058 +-int unlink_framebuffer(struct fb_info *fb_info)
1059 ++static int __unlink_framebuffer(struct fb_info *fb_info)
1060 + {
1061 + int i;
1062 +
1063 +@@ -1742,6 +1754,20 @@ int unlink_framebuffer(struct fb_info *fb_info)
1064 + device_destroy(fb_class, MKDEV(FB_MAJOR, i));
1065 + fb_info->dev = NULL;
1066 + }
1067 ++
1068 ++ return 0;
1069 ++}
1070 ++
1071 ++int unlink_framebuffer(struct fb_info *fb_info)
1072 ++{
1073 ++ int ret;
1074 ++
1075 ++ ret = __unlink_framebuffer(fb_info);
1076 ++ if (ret)
1077 ++ return ret;
1078 ++
1079 ++ unbind_console(fb_info);
1080 ++
1081 + return 0;
1082 + }
1083 + EXPORT_SYMBOL(unlink_framebuffer);
1084 +diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
1085 +index e3d026ac382e..f35168ce426b 100644
1086 +--- a/fs/9p/xattr.c
1087 ++++ b/fs/9p/xattr.c
1088 +@@ -107,7 +107,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
1089 + {
1090 + struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len};
1091 + struct iov_iter from;
1092 +- int retval;
1093 ++ int retval, err;
1094 +
1095 + iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len);
1096 +
1097 +@@ -128,7 +128,9 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
1098 + retval);
1099 + else
1100 + p9_client_write(fid, 0, &from, &retval);
1101 +- p9_client_clunk(fid);
1102 ++ err = p9_client_clunk(fid);
1103 ++ if (!retval && err)
1104 ++ retval = err;
1105 + return retval;
1106 + }
1107 +
1108 +diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
1109 +index a861bbdfe577..fa8b484d035d 100644
1110 +--- a/fs/nfs/blocklayout/dev.c
1111 ++++ b/fs/nfs/blocklayout/dev.c
1112 +@@ -162,7 +162,7 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset,
1113 + chunk = div_u64(offset, dev->chunk_size);
1114 + div_u64_rem(chunk, dev->nr_children, &chunk_idx);
1115 +
1116 +- if (chunk_idx > dev->nr_children) {
1117 ++ if (chunk_idx >= dev->nr_children) {
1118 + dprintk("%s: invalid chunk idx %d (%lld/%lld)\n",
1119 + __func__, chunk_idx, offset, dev->chunk_size);
1120 + /* error, should not happen */
1121 +diff --git a/fs/quota/quota.c b/fs/quota/quota.c
1122 +index 3746367098fd..bb0d643481c8 100644
1123 +--- a/fs/quota/quota.c
1124 ++++ b/fs/quota/quota.c
1125 +@@ -17,6 +17,7 @@
1126 + #include <linux/quotaops.h>
1127 + #include <linux/types.h>
1128 + #include <linux/writeback.h>
1129 ++#include <linux/nospec.h>
1130 +
1131 + static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
1132 + qid_t id)
1133 +@@ -644,6 +645,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
1134 +
1135 + if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
1136 + return -EINVAL;
1137 ++ type = array_index_nospec(type, MAXQUOTAS);
1138 + /*
1139 + * Quota not supported on this fs? Check this before s_quota_types
1140 + * since they needn't be set if quota is not supported at all.
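
The quota hunk above clamps the user-controlled type index with array_index_nospec() before it is used, so a mispredicted bounds check cannot speculatively index out of bounds. A userspace sketch of the branchless mask behind that helper, following the shape of the kernel's generic array_index_mask_nospec() (demo names are my own, and like the kernel's generic version it assumes arithmetic right shift of negative values):

    /* nospec.c - illustrative sketch of the clamping mask used by
     * array_index_nospec(): indices below `size` pass through, anything
     * else becomes 0, with no data-dependent branch.
     * Build: cc nospec.c -o nospec
     */
    #include <stdio.h>

    static unsigned long index_mask_nospec(unsigned long index,
                                           unsigned long size)
    {
            /* All-ones when index < size, all-zeroes otherwise. */
            return ~(long)(index | (size - 1UL - index))
                    >> (sizeof(long) * 8 - 1);
    }

    static unsigned long index_nospec(unsigned long index,
                                      unsigned long size)
    {
            return index & index_mask_nospec(index, size);
    }

    int main(void)
    {
            printf("%lu %lu %lu\n",
                   index_nospec(2, 4),      /* in range: 2 */
                   index_nospec(4, 4),      /* at bound: 0 */
                   index_nospec(99, 4));    /* way out:  0 */
            return 0;
    }
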
1141 +diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
1142 +index 22dba8837a86..539fa934ed93 100644
1143 +--- a/fs/ubifs/journal.c
1144 ++++ b/fs/ubifs/journal.c
1145 +@@ -661,6 +661,11 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
1146 + spin_lock(&ui->ui_lock);
1147 + ui->synced_i_size = ui->ui_size;
1148 + spin_unlock(&ui->ui_lock);
1149 ++ if (xent) {
1150 ++ spin_lock(&host_ui->ui_lock);
1151 ++ host_ui->synced_i_size = host_ui->ui_size;
1152 ++ spin_unlock(&host_ui->ui_lock);
1153 ++ }
1154 + mark_inode_clean(c, ui);
1155 + mark_inode_clean(c, host_ui);
1156 + return 0;
1157 +@@ -1107,7 +1112,7 @@ static int recomp_data_node(const struct ubifs_info *c,
1158 + int err, len, compr_type, out_len;
1159 +
1160 + out_len = le32_to_cpu(dn->size);
1161 +- buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
1162 ++ buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
1163 + if (!buf)
1164 + return -ENOMEM;
1165 +
1166 +@@ -1186,7 +1191,16 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
1167 + else if (err)
1168 + goto out_free;
1169 + else {
1170 +- if (le32_to_cpu(dn->size) <= dlen)
1171 ++ int dn_len = le32_to_cpu(dn->size);
1172 ++
1173 ++ if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) {
1174 ++ ubifs_err(c, "bad data node (block %u, inode %lu)",
1175 ++ blk, inode->i_ino);
1176 ++ ubifs_dump_node(c, dn);
1177 ++ goto out_free;
1178 ++ }
1179 ++
1180 ++ if (dn_len <= dlen)
1181 + dlen = 0; /* Nothing to do */
1182 + else {
1183 + int compr_type = le16_to_cpu(dn->compr_type);
1184 +diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
1185 +index a0011aa3a779..f43f162e36f4 100644
1186 +--- a/fs/ubifs/lprops.c
1187 ++++ b/fs/ubifs/lprops.c
1188 +@@ -1091,10 +1091,6 @@ static int scan_check_cb(struct ubifs_info *c,
1189 + }
1190 + }
1191 +
1192 +- buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
1193 +- if (!buf)
1194 +- return -ENOMEM;
1195 +-
1196 + /*
1197 + * After an unclean unmount, empty and freeable LEBs
1198 + * may contain garbage - do not scan them.
1199 +@@ -1113,6 +1109,10 @@ static int scan_check_cb(struct ubifs_info *c,
1200 + return LPT_SCAN_CONTINUE;
1201 + }
1202 +
1203 ++ buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
1204 ++ if (!buf)
1205 ++ return -ENOMEM;
1206 ++
1207 + sleb = ubifs_scan(c, lnum, 0, buf, 0);
1208 + if (IS_ERR(sleb)) {
1209 + ret = PTR_ERR(sleb);
1210 +diff --git a/fs/xattr.c b/fs/xattr.c
1211 +index 76f01bf4b048..09441c396798 100644
1212 +--- a/fs/xattr.c
1213 ++++ b/fs/xattr.c
1214 +@@ -453,7 +453,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
1215 + if (error > 0) {
1216 + if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
1217 + (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
1218 +- posix_acl_fix_xattr_to_user(kvalue, size);
1219 ++ posix_acl_fix_xattr_to_user(kvalue, error);
1220 + if (size && copy_to_user(value, kvalue, error))
1221 + error = -EFAULT;
1222 + } else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {
1223 +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
1224 +index 23e129ef6726..e353f6600b0b 100644
1225 +--- a/include/linux/intel-iommu.h
1226 ++++ b/include/linux/intel-iommu.h
1227 +@@ -125,6 +125,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
1228 + * Extended Capability Register
1229 + */
1230 +
1231 ++#define ecap_dit(e) ((e >> 41) & 0x1)
1232 + #define ecap_pasid(e) ((e >> 40) & 0x1)
1233 + #define ecap_pss(e) ((e >> 35) & 0x1f)
1234 + #define ecap_eafs(e) ((e >> 34) & 0x1)
1235 +@@ -294,6 +295,7 @@ enum {
1236 + #define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
1237 + #define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
1238 + #define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
1239 ++#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
1240 + #define QI_DEV_IOTLB_SIZE 1
1241 + #define QI_DEV_IOTLB_MAX_INVS 32
1242 +
1243 +@@ -318,6 +320,7 @@ enum {
1244 + #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
1245 + #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
1246 + #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
1247 ++#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
1248 + #define QI_DEV_EIOTLB_MAX_INVS 32
1249 +
1250 + #define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
1251 +@@ -463,9 +466,8 @@ extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
1252 + u8 fm, u64 type);
1253 + extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1254 + unsigned int size_order, u64 type);
1255 +-extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
1256 +- u64 addr, unsigned mask);
1257 +-
1258 ++extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1259 ++ u16 qdep, u64 addr, unsigned mask);
1260 + extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
1261 +
1262 + extern int dmar_ir_support(void);
1263 +diff --git a/include/linux/io.h b/include/linux/io.h
1264 +index de64c1e53612..8ab45611fc35 100644
1265 +--- a/include/linux/io.h
1266 ++++ b/include/linux/io.h
1267 +@@ -154,4 +154,26 @@ enum {
1268 + void *memremap(resource_size_t offset, size_t size, unsigned long flags);
1269 + void memunmap(void *addr);
1270 +
1271 ++/*
1272 ++ * On x86 PAT systems we have memory tracking that keeps track of
1273 ++ * the allowed mappings on memory ranges. This tracking works for
1274 ++ * all the in-kernel mapping APIs (ioremap*), but where the user
1275 ++ * wishes to map a range from a physical device into user memory
1276 ++ * the tracking won't be updated. This API is to be used by
1277 ++ * drivers which remap physical device pages into userspace,
1278 ++ * and wants to make sure they are mapped WC and not UC.
1279 ++ */
1280 ++#ifndef arch_io_reserve_memtype_wc
1281 ++static inline int arch_io_reserve_memtype_wc(resource_size_t base,
1282 ++ resource_size_t size)
1283 ++{
1284 ++ return 0;
1285 ++}
1286 ++
1287 ++static inline void arch_io_free_memtype_wc(resource_size_t base,
1288 ++ resource_size_t size)
1289 ++{
1290 ++}
1291 ++#endif
1292 ++
1293 + #endif /* _LINUX_IO_H */
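
The io.h hunk above uses the standard header-override idiom: the generic header supplies a no-op default unless the arch header has already #define'd arch_io_reserve_memtype_wc to itself (as the asm/io.h hunk earlier does for x86 PAT). A standalone mock of the mechanism, not kernel code (the printed message and addresses are invented):

    /* override.c - illustrative sketch of the #ifndef override idiom in
     * the hunk above. Delete the "arch" block to fall back to the
     * generic no-op. Build: cc override.c -o override
     */
    #include <stdio.h>

    /* --- pretend arch header (x86 PAT case) --- */
    static int arch_io_reserve_memtype_wc(unsigned long start,
                                          unsigned long size)
    {
            printf("x86/PAT: reserve WC [%#lx, +%#lx)\n", start, size);
            return 0;
    }
    #define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc

    /* --- generic fallback, same shape as the io.h hunk above --- */
    #ifndef arch_io_reserve_memtype_wc
    static int arch_io_reserve_memtype_wc(unsigned long start,
                                          unsigned long size)
    {
            (void)start; (void)size;
            return 0;       /* no PAT tracking: nothing to reserve */
    }
    #endif

    int main(void)
    {
            return arch_io_reserve_memtype_wc(0xd0000000UL, 0x1000000UL);
    }
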
1294 +diff --git a/include/video/udlfb.h b/include/video/udlfb.h
1295 +index f9466fa54ba4..2ad9a6d37ff4 100644
1296 +--- a/include/video/udlfb.h
1297 ++++ b/include/video/udlfb.h
1298 +@@ -87,7 +87,7 @@ struct dlfb_data {
1299 + #define MIN_RAW_PIX_BYTES 2
1300 + #define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)
1301 +
1302 +-#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */
1303 ++#define DL_DEFIO_WRITE_DELAY msecs_to_jiffies(HZ <= 300 ? 4 : 10) /* optimal value for 720p video */
1304 + #define DL_DEFIO_WRITE_DISABLE (HZ*60) /* "disable" with long delay */
1305 +
1306 + /* remove these once align.h patch is taken into kernel */
1307 +diff --git a/kernel/kthread.c b/kernel/kthread.c
1308 +index 850b255649a2..ac6849ee3057 100644
1309 +--- a/kernel/kthread.c
1310 ++++ b/kernel/kthread.c
1311 +@@ -313,10 +313,16 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
1312 + task = create->result;
1313 + if (!IS_ERR(task)) {
1314 + static const struct sched_param param = { .sched_priority = 0 };
1315 ++ char name[TASK_COMM_LEN];
1316 + va_list args;
1317 +
1318 + va_start(args, namefmt);
1319 +- vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
1320 ++ /*
1321 ++ * task is already visible to other tasks, so updating
1322 ++ * COMM must be protected.
1323 ++ */
1324 ++ vsnprintf(name, sizeof(name), namefmt, args);
1325 ++ set_task_comm(task, name);
1326 + va_end(args);
1327 + /*
1328 + * root may have changed our (kthreadd's) priority or CPU mask.
1329 +diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
1330 +index 02e8dfaa1ce2..9d76184279fe 100644
1331 +--- a/kernel/power/Kconfig
1332 ++++ b/kernel/power/Kconfig
1333 +@@ -105,6 +105,7 @@ config PM_SLEEP
1334 + def_bool y
1335 + depends on SUSPEND || HIBERNATE_CALLBACKS
1336 + select PM
1337 ++ select SRCU
1338 +
1339 + config PM_SLEEP_SMP
1340 + def_bool y
1341 +diff --git a/kernel/sys.c b/kernel/sys.c
1342 +index f718742e55e6..e2446ade79ba 100644
1343 +--- a/kernel/sys.c
1344 ++++ b/kernel/sys.c
1345 +@@ -1142,18 +1142,19 @@ static int override_release(char __user *release, size_t len)
1346 +
1347 + SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1348 + {
1349 +- int errno = 0;
1350 ++ struct new_utsname tmp;
1351 +
1352 + down_read(&uts_sem);
1353 +- if (copy_to_user(name, utsname(), sizeof *name))
1354 +- errno = -EFAULT;
1355 ++ memcpy(&tmp, utsname(), sizeof(tmp));
1356 + up_read(&uts_sem);
1357 ++ if (copy_to_user(name, &tmp, sizeof(tmp)))
1358 ++ return -EFAULT;
1359 +
1360 +- if (!errno && override_release(name->release, sizeof(name->release)))
1361 +- errno = -EFAULT;
1362 +- if (!errno && override_architecture(name))
1363 +- errno = -EFAULT;
1364 +- return errno;
1365 ++ if (override_release(name->release, sizeof(name->release)))
1366 ++ return -EFAULT;
1367 ++ if (override_architecture(name))
1368 ++ return -EFAULT;
1369 ++ return 0;
1370 + }
1371 +
1372 + #ifdef __ARCH_WANT_SYS_OLD_UNAME
1373 +@@ -1162,55 +1163,46 @@ SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1374 + */
1375 + SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1376 + {
1377 +- int error = 0;
1378 ++ struct old_utsname tmp;
1379 +
1380 + if (!name)
1381 + return -EFAULT;
1382 +
1383 + down_read(&uts_sem);
1384 +- if (copy_to_user(name, utsname(), sizeof(*name)))
1385 +- error = -EFAULT;
1386 ++ memcpy(&tmp, utsname(), sizeof(tmp));
1387 + up_read(&uts_sem);
1388 ++ if (copy_to_user(name, &tmp, sizeof(tmp)))
1389 ++ return -EFAULT;
1390 +
1391 +- if (!error && override_release(name->release, sizeof(name->release)))
1392 +- error = -EFAULT;
1393 +- if (!error && override_architecture(name))
1394 +- error = -EFAULT;
1395 +- return error;
1396 ++ if (override_release(name->release, sizeof(name->release)))
1397 ++ return -EFAULT;
1398 ++ if (override_architecture(name))
1399 ++ return -EFAULT;
1400 ++ return 0;
1401 + }
1402 +
1403 + SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1404 + {
1405 +- int error;
1406 ++ struct oldold_utsname tmp = {};
1407 +
1408 + if (!name)
1409 + return -EFAULT;
1410 +- if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
1411 +- return -EFAULT;
1412 +
1413 + down_read(&uts_sem);
1414 +- error = __copy_to_user(&name->sysname, &utsname()->sysname,
1415 +- __OLD_UTS_LEN);
1416 +- error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
1417 +- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
1418 +- __OLD_UTS_LEN);
1419 +- error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
1420 +- error |= __copy_to_user(&name->release, &utsname()->release,
1421 +- __OLD_UTS_LEN);
1422 +- error |= __put_user(0, name->release + __OLD_UTS_LEN);
1423 +- error |= __copy_to_user(&name->version, &utsname()->version,
1424 +- __OLD_UTS_LEN);
1425 +- error |= __put_user(0, name->version + __OLD_UTS_LEN);
1426 +- error |= __copy_to_user(&name->machine, &utsname()->machine,
1427 +- __OLD_UTS_LEN);
1428 +- error |= __put_user(0, name->machine + __OLD_UTS_LEN);
1429 ++ memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
1430 ++ memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
1431 ++ memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
1432 ++ memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
1433 ++ memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
1434 + up_read(&uts_sem);
1435 ++ if (copy_to_user(name, &tmp, sizeof(tmp)))
1436 ++ return -EFAULT;
1437 +
1438 +- if (!error && override_architecture(name))
1439 +- error = -EFAULT;
1440 +- if (!error && override_release(name->release, sizeof(name->release)))
1441 +- error = -EFAULT;
1442 +- return error ? -EFAULT : 0;
1443 ++ if (override_architecture(name))
1444 ++ return -EFAULT;
1445 ++ if (override_release(name->release, sizeof(name->release)))
1446 ++ return -EFAULT;
1447 ++ return 0;
1448 + }
1449 + #endif
1450 +
1451 +@@ -1224,17 +1216,18 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1452 +
1453 + if (len < 0 || len > __NEW_UTS_LEN)
1454 + return -EINVAL;
1455 +- down_write(&uts_sem);
1456 + errno = -EFAULT;
1457 + if (!copy_from_user(tmp, name, len)) {
1458 +- struct new_utsname *u = utsname();
1459 ++ struct new_utsname *u;
1460 +
1461 ++ down_write(&uts_sem);
1462 ++ u = utsname();
1463 + memcpy(u->nodename, tmp, len);
1464 + memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1465 + errno = 0;
1466 + uts_proc_notify(UTS_PROC_HOSTNAME);
1467 ++ up_write(&uts_sem);
1468 + }
1469 +- up_write(&uts_sem);
1470 + return errno;
1471 + }
1472 +
1473 +@@ -1242,8 +1235,9 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1474 +
1475 + SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1476 + {
1477 +- int i, errno;
1478 ++ int i;
1479 + struct new_utsname *u;
1480 ++ char tmp[__NEW_UTS_LEN + 1];
1481 +
1482 + if (len < 0)
1483 + return -EINVAL;
1484 +@@ -1252,11 +1246,11 @@ SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1485 + i = 1 + strlen(u->nodename);
1486 + if (i > len)
1487 + i = len;
1488 +- errno = 0;
1489 +- if (copy_to_user(name, u->nodename, i))
1490 +- errno = -EFAULT;
1491 ++ memcpy(tmp, u->nodename, i);
1492 + up_read(&uts_sem);
1493 +- return errno;
1494 ++ if (copy_to_user(name, tmp, i))
1495 ++ return -EFAULT;
1496 ++ return 0;
1497 + }
1498 +
1499 + #endif
1500 +@@ -1275,17 +1269,18 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1501 + if (len < 0 || len > __NEW_UTS_LEN)
1502 + return -EINVAL;
1503 +
1504 +- down_write(&uts_sem);
1505 + errno = -EFAULT;
1506 + if (!copy_from_user(tmp, name, len)) {
1507 +- struct new_utsname *u = utsname();
1508 ++ struct new_utsname *u;
1509 +
1510 ++ down_write(&uts_sem);
1511 ++ u = utsname();
1512 + memcpy(u->domainname, tmp, len);
1513 + memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1514 + errno = 0;
1515 + uts_proc_notify(UTS_PROC_DOMAINNAME);
1516 ++ up_write(&uts_sem);
1517 + }
1518 +- up_write(&uts_sem);
1519 + return errno;
1520 + }
1521 +
1522 +diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
1523 +index 7ab5eafea8b2..210b8e726a97 100644
1524 +--- a/kernel/trace/blktrace.c
1525 ++++ b/kernel/trace/blktrace.c
1526 +@@ -1716,6 +1716,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1527 + mutex_lock(&bdev->bd_mutex);
1528 +
1529 + if (attr == &dev_attr_enable) {
1530 ++ if (!!value == !!q->blk_trace) {
1531 ++ ret = 0;
1532 ++ goto out_unlock_bdev;
1533 ++ }
1534 + if (value)
1535 + ret = blk_trace_setup_queue(q, bdev);
1536 + else
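
The blktrace hunk above relies on the !! idiom: both the requested value and the current q->blk_trace pointer are normalized to 0 or 1 so "desired state" and "current state" can be compared directly. A two-line illustration (values invented):

    /* bang.c - the !! normalization used in the blktrace hunk above. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long value = 2;        /* user asked to enable */
            void *blk_trace = &value;       /* trace already set up */

            if (!!value == !!blk_trace)     /* 1 == 1 */
                    puts("already in requested state: return 0");
            return 0;
    }
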
1537 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1538 +index 11761b3dd7ba..e409ddce8754 100644
1539 +--- a/kernel/trace/trace.c
1540 ++++ b/kernel/trace/trace.c
1541 +@@ -6496,7 +6496,9 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
1542 +
1543 + if (buffer) {
1544 + mutex_lock(&trace_types_lock);
1545 +- if (val) {
1546 ++ if (!!val == tracer_tracing_is_on(tr)) {
1547 ++ val = 0; /* do nothing */
1548 ++ } else if (val) {
1549 + tracer_tracing_on(tr);
1550 + if (tr->current_trace->start)
1551 + tr->current_trace->start(tr);
1552 +diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
1553 +index 68bb89ad9d28..1dc887bab085 100644
1554 +--- a/kernel/trace/trace_uprobe.c
1555 ++++ b/kernel/trace/trace_uprobe.c
1556 +@@ -969,7 +969,7 @@ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
1557 +
1558 + list_del_rcu(&link->list);
1559 + /* synchronize with u{,ret}probe_trace_func */
1560 +- synchronize_sched();
1561 ++ synchronize_rcu();
1562 + kfree(link);
1563 +
1564 + if (!list_empty(&tu->tp.files))
1565 +diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
1566 +index 88fefa68c516..a965df4b54f5 100644
1567 +--- a/kernel/user_namespace.c
1568 ++++ b/kernel/user_namespace.c
1569 +@@ -602,9 +602,26 @@ static ssize_t map_write(struct file *file, const char __user *buf,
1570 + struct uid_gid_map new_map;
1571 + unsigned idx;
1572 + struct uid_gid_extent *extent = NULL;
1573 +- unsigned long page = 0;
1574 ++ unsigned long page;
1575 + char *kbuf, *pos, *next_line;
1576 +- ssize_t ret = -EINVAL;
1577 ++ ssize_t ret;
1578 ++
1579 ++ /* Only allow < page size writes at the beginning of the file */
1580 ++ if ((*ppos != 0) || (count >= PAGE_SIZE))
1581 ++ return -EINVAL;
1582 ++
1583 ++ /* Get a buffer */
1584 ++ page = __get_free_page(GFP_TEMPORARY);
1585 ++ kbuf = (char *) page;
1586 ++ if (!page)
1587 ++ return -ENOMEM;
1588 ++
1589 ++ /* Slurp in the user data */
1590 ++ if (copy_from_user(kbuf, buf, count)) {
1591 ++ free_page(page);
1592 ++ return -EFAULT;
1593 ++ }
1594 ++ kbuf[count] = '\0';
1595 +
1596 + /*
1597 + * The userns_state_mutex serializes all writes to any given map.
1598 +@@ -638,24 +655,6 @@ static ssize_t map_write(struct file *file, const char __user *buf,
1599 + if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
1600 + goto out;
1601 +
1602 +- /* Get a buffer */
1603 +- ret = -ENOMEM;
1604 +- page = __get_free_page(GFP_TEMPORARY);
1605 +- kbuf = (char *) page;
1606 +- if (!page)
1607 +- goto out;
1608 +-
1609 +- /* Only allow < page size writes at the beginning of the file */
1610 +- ret = -EINVAL;
1611 +- if ((*ppos != 0) || (count >= PAGE_SIZE))
1612 +- goto out;
1613 +-
1614 +- /* Slurp in the user data */
1615 +- ret = -EFAULT;
1616 +- if (copy_from_user(kbuf, buf, count))
1617 +- goto out;
1618 +- kbuf[count] = '\0';
1619 +-
1620 + /* Parse the user data */
1621 + ret = -EINVAL;
1622 + pos = kbuf;
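
The map_write() hunk hoists the size/offset validation, the buffer allocation, and copy_from_user() in front of the locked region, so the user-memory access, which can fault and block, happens before userns_state_mutex is taken. A userspace analogue of validate-and-slurp-before-locking (mutex, buffer size, and names are illustrative):

/* Sketch: read and validate input fully before taking the lock. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

#define BUF_SIZE 4096

static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;
static char current_map[BUF_SIZE];

static ssize_t map_write(const char *buf, size_t count, long pos)
{
        char *kbuf;

        /* Cheap validation first, no lock held. */
        if (pos != 0 || count >= BUF_SIZE)
                return -EINVAL;

        /* Copy the (possibly slow to fetch) input, still no lock held. */
        kbuf = malloc(BUF_SIZE);
        if (!kbuf)
                return -ENOMEM;
        memcpy(kbuf, buf, count);
        kbuf[count] = '\0';

        /* Only the short parse/commit step runs under the mutex. */
        pthread_mutex_lock(&state_mutex);
        strcpy(current_map, kbuf);
        pthread_mutex_unlock(&state_mutex);

        free(kbuf);
        return (ssize_t)count;
}

int main(void)
{
        printf("%zd\n", map_write("0 1000 1\n", 9, 0));
        printf("%s", current_map);
        return 0;
}
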
1623 +diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
1624 +index c8eac43267e9..d2b3b2973456 100644
1625 +--- a/kernel/utsname_sysctl.c
1626 ++++ b/kernel/utsname_sysctl.c
1627 +@@ -17,7 +17,7 @@
1628 +
1629 + #ifdef CONFIG_PROC_SYSCTL
1630 +
1631 +-static void *get_uts(struct ctl_table *table, int write)
1632 ++static void *get_uts(struct ctl_table *table)
1633 + {
1634 + char *which = table->data;
1635 + struct uts_namespace *uts_ns;
1636 +@@ -25,21 +25,9 @@ static void *get_uts(struct ctl_table *table, int write)
1637 + uts_ns = current->nsproxy->uts_ns;
1638 + which = (which - (char *)&init_uts_ns) + (char *)uts_ns;
1639 +
1640 +- if (!write)
1641 +- down_read(&uts_sem);
1642 +- else
1643 +- down_write(&uts_sem);
1644 + return which;
1645 + }
1646 +
1647 +-static void put_uts(struct ctl_table *table, int write, void *which)
1648 +-{
1649 +- if (!write)
1650 +- up_read(&uts_sem);
1651 +- else
1652 +- up_write(&uts_sem);
1653 +-}
1654 +-
1655 + /*
1656 + * Special case of dostring for the UTS structure. This has locks
1657 + * to observe. Should this be in kernel/sys.c ????
1658 +@@ -49,13 +37,34 @@ static int proc_do_uts_string(struct ctl_table *table, int write,
1659 + {
1660 + struct ctl_table uts_table;
1661 + int r;
1662 ++ char tmp_data[__NEW_UTS_LEN + 1];
1663 ++
1664 + memcpy(&uts_table, table, sizeof(uts_table));
1665 +- uts_table.data = get_uts(table, write);
1666 ++ uts_table.data = tmp_data;
1667 ++
1668 ++ /*
1669 ++ * Buffer the value in tmp_data so that proc_dostring() can be called
1670 ++ * without holding any locks.
1671 ++ * We also need to read the original value in the write==1 case to
1672 ++ * support partial writes.
1673 ++ */
1674 ++ down_read(&uts_sem);
1675 ++ memcpy(tmp_data, get_uts(table), sizeof(tmp_data));
1676 ++ up_read(&uts_sem);
1677 + r = proc_dostring(&uts_table, write, buffer, lenp, ppos);
1678 +- put_uts(table, write, uts_table.data);
1679 +
1680 +- if (write)
1681 ++ if (write) {
1682 ++ /*
1683 ++ * Write back the new value.
1684 ++ * Note that, since we dropped uts_sem, the result can
1685 ++ * theoretically be incorrect if there are two parallel writes
1686 ++ * at non-zero offsets to the same sysctl.
1687 ++ */
1688 ++ down_write(&uts_sem);
1689 ++ memcpy(get_uts(table), tmp_data, sizeof(tmp_data));
1690 ++ up_write(&uts_sem);
1691 + proc_sys_poll_notify(table->poll);
1692 ++ }
1693 +
1694 + return r;
1695 + }
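
proc_do_uts_string() now snapshots the string into tmp_data under a read lock, lets proc_dostring() run with no locks held, and writes the result back under a write lock; the in-hunk comment concedes that two parallel partial writes can interleave. A compact snapshot, edit, write-back analogue with a pthread rwlock (names illustrative):

/* Sketch: snapshot -> lock-free edit -> write back, as in the
 * proc_do_uts_string() change. Two overlapping writers can still
 * interleave, which the kernel comment explicitly accepts. */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define UTS_LEN 64

static pthread_rwlock_t uts_lock = PTHREAD_RWLOCK_INITIALIZER;
static char domainname[UTS_LEN + 1] = "(none)";

static void edit_domainname(const char *newval)
{
        char tmp[UTS_LEN + 1];

        /* 1. Snapshot under the read lock. */
        pthread_rwlock_rdlock(&uts_lock);
        memcpy(tmp, domainname, sizeof(tmp));
        pthread_rwlock_unlock(&uts_lock);

        /* 2. Do the slow work (here: build the new value) lock-free. */
        snprintf(tmp, sizeof(tmp), "%s", newval);

        /* 3. Write the result back under the write lock. */
        pthread_rwlock_wrlock(&uts_lock);
        memcpy(domainname, tmp, sizeof(domainname));
        pthread_rwlock_unlock(&uts_lock);
}

int main(void)
{
        edit_domainname("example.org");
        printf("%s\n", domainname);
        return 0;
}
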
1696 +diff --git a/mm/memory.c b/mm/memory.c
1697 +index 42db644f5ec4..5aee9ec8b8c6 100644
1698 +--- a/mm/memory.c
1699 ++++ b/mm/memory.c
1700 +@@ -361,15 +361,6 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
1701 + {
1702 + struct mmu_table_batch **batch = &tlb->batch;
1703 +
1704 +- /*
1705 +- * When there's less then two users of this mm there cannot be a
1706 +- * concurrent page-table walk.
1707 +- */
1708 +- if (atomic_read(&tlb->mm->mm_users) < 2) {
1709 +- __tlb_remove_table(table);
1710 +- return;
1711 +- }
1712 +-
1713 + if (*batch == NULL) {
1714 + *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
1715 + if (*batch == NULL) {
1716 +diff --git a/net/9p/client.c b/net/9p/client.c
1717 +index 3ff26eb1ea20..ed8738c4dc09 100644
1718 +--- a/net/9p/client.c
1719 ++++ b/net/9p/client.c
1720 +@@ -931,7 +931,7 @@ static int p9_client_version(struct p9_client *c)
1721 + {
1722 + int err = 0;
1723 + struct p9_req_t *req;
1724 +- char *version;
1725 ++ char *version = NULL;
1726 + int msize;
1727 +
1728 + p9_debug(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n",
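
The one-line client.c change initializes version to NULL because the function's error path frees it unconditionally; if the reply parse fails before version is assigned, an uninitialized pointer would otherwise reach kfree(). The shape of the pattern in plain C (function and names hypothetical):

/* Sketch: NULL-init any pointer that a shared error path will free. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_version(int fail_early, char **out)
{
        char *version = NULL;   /* must be NULL: see "error:" below */
        int err = 0;

        if (fail_early) {
                err = -1;
                goto error;     /* version was never assigned */
        }

        version = strdup("9P2000");
        if (!version) {
                err = -1;
                goto error;
        }
        *out = strdup(version);

error:
        free(version);          /* free(NULL) is a safe no-op */
        return err;
}

int main(void)
{
        char *v = NULL;
        int err;

        printf("early failure: %d\n", parse_version(1, &v));
        err = parse_version(0, &v);
        printf("success: %d, %s\n", err, v);
        free(v);
        return 0;
}
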
1729 +diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
1730 +index bced8c074c12..2f68ffda3715 100644
1731 +--- a/net/9p/trans_fd.c
1732 ++++ b/net/9p/trans_fd.c
1733 +@@ -185,6 +185,8 @@ static void p9_mux_poll_stop(struct p9_conn *m)
1734 + spin_lock_irqsave(&p9_poll_lock, flags);
1735 + list_del_init(&m->poll_pending_link);
1736 + spin_unlock_irqrestore(&p9_poll_lock, flags);
1737 ++
1738 ++ flush_work(&p9_poll_work);
1739 + }
1740 +
1741 + /**
1742 +@@ -933,7 +935,7 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
1743 + if (err < 0)
1744 + return err;
1745 +
1746 +- if (valid_ipaddr4(addr) < 0)
1747 ++ if (addr == NULL || valid_ipaddr4(addr) < 0)
1748 + return -EINVAL;
1749 +
1750 + csocket = NULL;
1751 +@@ -981,6 +983,9 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
1752 +
1753 + csocket = NULL;
1754 +
1755 ++ if (addr == NULL)
1756 ++ return -EINVAL;
1757 ++
1758 + if (strlen(addr) >= UNIX_PATH_MAX) {
1759 + pr_err("%s (%d): address too long: %s\n",
1760 + __func__, task_pid_nr(current), addr);
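
The trans_fd hunks do two things: p9_mux_poll_stop() now flushes the poll work item so it cannot run against a connection being torn down, and the TCP and Unix-socket create paths reject a NULL address before dereferencing it (mounting 9p without an addr= option used to crash here; the rdma and virtio hunks below add the same guard). The NULL-guard shape in isolation (names and errno choices illustrative):

/* Sketch: treat a missing address as -EINVAL before any use. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define UNIX_PATH_MAX 108

static int create_unix_transport(const char *addr)
{
        if (addr == NULL)                   /* mount had no addr= option */
                return -EINVAL;

        if (strlen(addr) >= UNIX_PATH_MAX)  /* only now is strlen() safe */
                return -ENAMETOOLONG;

        printf("connecting to %s\n", addr);
        return 0;
}

int main(void)
{
        printf("%d\n", create_unix_transport(NULL));           /* -EINVAL */
        printf("%d\n", create_unix_transport("/tmp/9p.sock")); /* 0 */
        return 0;
}
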
1761 +diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
1762 +index 52b4a2f993f2..f42550dd3560 100644
1763 +--- a/net/9p/trans_rdma.c
1764 ++++ b/net/9p/trans_rdma.c
1765 +@@ -644,6 +644,9 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
1766 + struct ib_qp_init_attr qp_attr;
1767 + struct ib_cq_init_attr cq_attr = {};
1768 +
1769 ++ if (addr == NULL)
1770 ++ return -EINVAL;
1771 ++
1772 + /* Parse the transport specific mount options */
1773 + err = parse_opts(args, &opts);
1774 + if (err < 0)
1775 +diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
1776 +index 2ddeecca5b12..6018a1c0dc28 100644
1777 +--- a/net/9p/trans_virtio.c
1778 ++++ b/net/9p/trans_virtio.c
1779 +@@ -192,7 +192,7 @@ static int pack_sg_list(struct scatterlist *sg, int start,
1780 + s = rest_of_page(data);
1781 + if (s > count)
1782 + s = count;
1783 +- BUG_ON(index > limit);
1784 ++ BUG_ON(index >= limit);
1785 + /* Make sure we don't terminate early. */
1786 + sg_unmark_end(&sg[index]);
1787 + sg_set_buf(&sg[index++], data, s);
1788 +@@ -237,6 +237,7 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit,
1789 + s = PAGE_SIZE - data_off;
1790 + if (s > count)
1791 + s = count;
1792 ++ BUG_ON(index >= limit);
1793 + /* Make sure we don't terminate early. */
1794 + sg_unmark_end(&sg[index]);
1795 + sg_set_page(&sg[index++], pdata[i++], s, data_off);
1796 +@@ -409,6 +410,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
1797 + p9_debug(P9_DEBUG_TRANS, "virtio request\n");
1798 +
1799 + if (uodata) {
1800 ++ __le32 sz;
1801 + int n = p9_get_mapped_pages(chan, &out_pages, uodata,
1802 + outlen, &offs, &need_drop);
1803 + if (n < 0)
1804 +@@ -419,6 +421,12 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
1805 + memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
1806 + outlen = n;
1807 + }
1808 ++ /* The size field of the message must include the length of the
1809 ++ * header and the length of the data. We didn't actually know
1810 ++ * the length of the data until this point so add it in now.
1811 ++ */
1812 ++ sz = cpu_to_le32(req->tc->size + outlen);
1813 ++ memcpy(&req->tc->sdata[0], &sz, sizeof(sz));
1814 + } else if (uidata) {
1815 + int n = p9_get_mapped_pages(chan, &in_pages, uidata,
1816 + inlen, &offs, &need_drop);
1817 +@@ -646,6 +654,9 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
1818 + int ret = -ENOENT;
1819 + int found = 0;
1820 +
1821 ++ if (devname == NULL)
1822 ++ return -EINVAL;
1823 ++
1824 + mutex_lock(&virtio_9p_lock);
1825 + list_for_each_entry(chan, &virtio_chan_list, chan_list) {
1826 + if (!strncmp(devname, chan->tag, chan->tag_len) &&
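
Three fixes land in trans_virtio.c: the scatter-gather packers now check index >= limit before filling each entry (the old > check allowed writing one slot past the end), p9_virtio_create() rejects a NULL device name, and the zero-copy path rewrites the 4-byte little-endian size field at the start of the 9P message once the payload length is finally known. A sketch of patching a length-prefixed message after the fact (the 23-byte Twrite header size is an assumption used only for illustration):

/* Sketch: fix up a little-endian size[4] prefix after the payload
 * length becomes known, as p9_virtio_zc_request() now does. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint8_t msg[64];
        uint32_t hdr_size = 23;   /* e.g. a 9P Twrite header; illustrative */
        uint32_t payload;

        memset(msg, 0, sizeof(msg));
        /* The header is built first with a provisional size. */

        payload = 32;             /* learned only after mapping the pages */

        /* Total size = header + payload, stored LE at offset 0. */
        uint32_t sz = hdr_size + payload;
        uint8_t le[4] = { sz & 0xff, (sz >> 8) & 0xff,
                          (sz >> 16) & 0xff, (sz >> 24) & 0xff };
        memcpy(&msg[0], le, sizeof(le));

        printf("size field: %u\n",
               msg[0] | msg[1] << 8 | msg[2] << 16 | (uint32_t)msg[3] << 24);
        return 0;
}
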
1827 +diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
1828 +index d4353faced35..a10db45b2e1e 100644
1829 +--- a/net/ieee802154/6lowpan/tx.c
1830 ++++ b/net/ieee802154/6lowpan/tx.c
1831 +@@ -265,9 +265,24 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
1832 + /* We must take a copy of the skb before we modify/replace the ipv6
1833 + * header as the header could be used elsewhere
1834 + */
1835 +- skb = skb_unshare(skb, GFP_ATOMIC);
1836 +- if (!skb)
1837 +- return NET_XMIT_DROP;
1838 ++ if (unlikely(skb_headroom(skb) < ldev->needed_headroom ||
1839 ++ skb_tailroom(skb) < ldev->needed_tailroom)) {
1840 ++ struct sk_buff *nskb;
1841 ++
1842 ++ nskb = skb_copy_expand(skb, ldev->needed_headroom,
1843 ++ ldev->needed_tailroom, GFP_ATOMIC);
1844 ++ if (likely(nskb)) {
1845 ++ consume_skb(skb);
1846 ++ skb = nskb;
1847 ++ } else {
1848 ++ kfree_skb(skb);
1849 ++ return NET_XMIT_DROP;
1850 ++ }
1851 ++ } else {
1852 ++ skb = skb_unshare(skb, GFP_ATOMIC);
1853 ++ if (!skb)
1854 ++ return NET_XMIT_DROP;
1855 ++ }
1856 +
1857 + ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
1858 + if (ret < 0) {
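
Rather than unconditionally unsharing the skb, lowpan_xmit() now checks whether it already has the head- and tailroom the device needs and, if not, takes a private copy grown by exactly that much with skb_copy_expand(), consuming the original. A plain-C analogue of growing a buffer with explicit headroom before prepending a header (all types and names invented):

/* Sketch: ensure headroom before prepending, copying only when needed. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
        unsigned char *data;   /* start of payload */
        unsigned char *head;   /* start of allocation */
        size_t len;
};

/* Analogue of skb_copy_expand(): new buffer with extra headroom. */
static struct buf *buf_expand(struct buf *b, size_t headroom)
{
        struct buf *nb = malloc(sizeof(*nb));
        if (!nb)
                return NULL;
        nb->head = malloc(headroom + b->len);
        if (!nb->head) {
                free(nb);
                return NULL;
        }
        nb->data = nb->head + headroom;
        nb->len = b->len;
        memcpy(nb->data, b->data, b->len);
        free(b->head);
        free(b);               /* analogue of consume_skb(skb) */
        return nb;
}

int main(void)
{
        struct buf *b = malloc(sizeof(*b));
        b->head = b->data = malloc(5);
        b->len = 5;
        memcpy(b->data, "hello", 5);

        if ((size_t)(b->data - b->head) < 2) {  /* needed_headroom = 2 */
                b = buf_expand(b, 2);
                if (!b)
                        return 1;
        }
        b->data -= 2;                           /* prepend 2-byte header */
        b->len += 2;
        memcpy(b->data, "HH", 2);

        fwrite(b->data, 1, b->len, stdout);
        putchar('\n');
        free(b->head);
        free(b);
        return 0;
}
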
1859 +diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
1860 +index 3827f359b336..9e1ff9d4cf2d 100644
1861 +--- a/net/mac802154/tx.c
1862 ++++ b/net/mac802154/tx.c
1863 +@@ -72,8 +72,21 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
1864 + int ret;
1865 +
1866 + if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
1867 +- u16 crc = crc_ccitt(0, skb->data, skb->len);
1868 ++ struct sk_buff *nskb;
1869 ++ u16 crc;
1870 ++
1871 ++ if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
1872 ++ nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
1873 ++ GFP_ATOMIC);
1874 ++ if (likely(nskb)) {
1875 ++ consume_skb(skb);
1876 ++ skb = nskb;
1877 ++ } else {
1878 ++ goto err_tx;
1879 ++ }
1880 ++ }
1881 +
1882 ++ crc = crc_ccitt(0, skb->data, skb->len);
1883 + put_unaligned_le16(crc, skb_put(skb, 2));
1884 + }
1885 +
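
ieee802154_tx() appends the two-byte FCS with skb_put(), which traps if the tailroom is missing, so the hunk first expands the skb whenever skb_tailroom() < IEEE802154_FCS_LEN; the checksum is then computed over the whole frame and stored little-endian. A standalone sketch of that checksum step, assuming the reflected 0x8408 polynomial and zero seed that the kernel's crc_ccitt() uses:

/* Sketch: CRC-CCITT (reflected poly 0x8408, seed 0) appended LE,
 * as done for the IEEE 802.15.4 FCS. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t crc_ccitt(uint16_t crc, const uint8_t *p, size_t len)
{
        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)
                        crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
        }
        return crc;
}

int main(void)
{
        uint8_t frame[16 + 2] = "802.15.4 payload";
        size_t len = 16;

        uint16_t crc = crc_ccitt(0, frame, len);
        frame[len]     = crc & 0xff;            /* put_unaligned_le16() */
        frame[len + 1] = crc >> 8;

        printf("FCS = %02x %02x\n", frame[len], frame[len + 1]);
        return 0;
}
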
1886 +diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
1887 +index 7f10430af39c..58426e7d320d 100644
1888 +--- a/tools/perf/util/auxtrace.c
1889 ++++ b/tools/perf/util/auxtrace.c
1890 +@@ -186,6 +186,9 @@ static int auxtrace_queues__grow(struct auxtrace_queues *queues,
1891 + for (i = 0; i < queues->nr_queues; i++) {
1892 + list_splice_tail(&queues->queue_array[i].head,
1893 + &queue_array[i].head);
1894 ++ queue_array[i].tid = queues->queue_array[i].tid;
1895 ++ queue_array[i].cpu = queues->queue_array[i].cpu;
1896 ++ queue_array[i].set = queues->queue_array[i].set;
1897 + queue_array[i].priv = queues->queue_array[i].priv;
1898 + }
1899 +
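
When auxtrace_queues__grow() moves queues into a larger array it splices each buffer list across, but it previously carried over only the priv pointer, so grown queues lost their tid/cpu/set identity; the fix copies those fields too. The kernel copies field by field because of the embedded list heads; where no such field exists, copying whole structs avoids the forgotten-field hazard, as in this sketch (types invented):

/* Sketch: grow an array of structs without losing per-entry state. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct queue {
        int tid, cpu;
        int set;
        void *priv;
        /* list head omitted; lists would be spliced separately */
};

static struct queue *grow(struct queue *old, size_t n, size_t new_n)
{
        struct queue *q = calloc(new_n, sizeof(*q));
        if (!q)
                return NULL;
        /* Copy every field of every live entry in one shot, instead of
         * hand-picking fields (the bug fixed above missed tid/cpu/set). */
        memcpy(q, old, n * sizeof(*old));
        free(old);
        return q;
}

int main(void)
{
        struct queue *q = calloc(2, sizeof(*q));
        q[1].tid = 1234;
        q[1].cpu = 3;
        q[1].set = 1;

        q = grow(q, 2, 8);
        printf("tid=%d cpu=%d set=%d\n", q[1].tid, q[1].cpu, q[1].set);
        free(q);
        return 0;
}
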