From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Sun, 09 Sep 2018 23:26:15
Message-Id: 1536535558.8782652b4e6d0b393bbee273c0723472ced5d772.mpagano@gentoo
1 commit: 8782652b4e6d0b393bbee273c0723472ced5d772
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sun Sep 9 23:25:58 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sun Sep 9 23:25:58 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8782652b
7
8 Linux patch 4.4.155
9
10 0000_README | 4 +
11 1154_linux-4.4.155.patch | 1862 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 1866 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 5a367b5..6b63ef8 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -659,6 +659,10 @@ Patch: 1153_linux-4.4.154.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.4.154
21
22 +Patch: 1154_linux-4.4.155.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.4.155
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1154_linux-4.4.155.patch b/1154_linux-4.4.155.patch
31 new file mode 100644
32 index 0000000..0e4fe23
33 --- /dev/null
34 +++ b/1154_linux-4.4.155.patch
35 @@ -0,0 +1,1862 @@
36 +diff --git a/Makefile b/Makefile
37 +index b184286cf7e6..2d9f89ec8397 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 4
42 + PATCHLEVEL = 4
43 +-SUBLEVEL = 154
44 ++SUBLEVEL = 155
45 + EXTRAVERSION =
46 + NAME = Blurry Fish Butt
47 +
48 +diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
49 +index 63f06a2b1f7f..bbc7cb9faa01 100644
50 +--- a/arch/alpha/kernel/osf_sys.c
51 ++++ b/arch/alpha/kernel/osf_sys.c
52 +@@ -526,24 +526,19 @@ SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path,
53 + SYSCALL_DEFINE1(osf_utsname, char __user *, name)
54 + {
55 + int error;
56 ++ char tmp[5 * 32];
57 +
58 + down_read(&uts_sem);
59 +- error = -EFAULT;
60 +- if (copy_to_user(name + 0, utsname()->sysname, 32))
61 +- goto out;
62 +- if (copy_to_user(name + 32, utsname()->nodename, 32))
63 +- goto out;
64 +- if (copy_to_user(name + 64, utsname()->release, 32))
65 +- goto out;
66 +- if (copy_to_user(name + 96, utsname()->version, 32))
67 +- goto out;
68 +- if (copy_to_user(name + 128, utsname()->machine, 32))
69 +- goto out;
70 ++ memcpy(tmp + 0 * 32, utsname()->sysname, 32);
71 ++ memcpy(tmp + 1 * 32, utsname()->nodename, 32);
72 ++ memcpy(tmp + 2 * 32, utsname()->release, 32);
73 ++ memcpy(tmp + 3 * 32, utsname()->version, 32);
74 ++ memcpy(tmp + 4 * 32, utsname()->machine, 32);
75 ++ up_read(&uts_sem);
76 +
77 +- error = 0;
78 +- out:
79 +- up_read(&uts_sem);
80 +- return error;
81 ++ if (copy_to_user(name, tmp, sizeof(tmp)))
82 ++ return -EFAULT;
83 ++ return 0;
84 + }
85 +
86 + SYSCALL_DEFINE0(getpagesize)
87 +@@ -561,24 +556,22 @@ SYSCALL_DEFINE0(getdtablesize)
88 + */
89 + SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
90 + {
91 +- unsigned len;
92 +- int i;
93 ++ int len, err = 0;
94 ++ char *kname;
95 ++ char tmp[32];
96 +
97 +- if (!access_ok(VERIFY_WRITE, name, namelen))
98 +- return -EFAULT;
99 +-
100 +- len = namelen;
101 +- if (len > 32)
102 +- len = 32;
103 ++ if (namelen < 0 || namelen > 32)
104 ++ namelen = 32;
105 +
106 + down_read(&uts_sem);
107 +- for (i = 0; i < len; ++i) {
108 +- __put_user(utsname()->domainname[i], name + i);
109 +- if (utsname()->domainname[i] == '\0')
110 +- break;
111 +- }
112 ++ kname = utsname()->domainname;
113 ++ len = strnlen(kname, namelen);
114 ++ len = min(len + 1, namelen);
115 ++ memcpy(tmp, kname, len);
116 + up_read(&uts_sem);
117 +
118 ++ if (copy_to_user(name, tmp, len))
119 ++ return -EFAULT;
120 + return 0;
121 + }
122 +
123 +@@ -741,13 +734,14 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
124 + };
125 + unsigned long offset;
126 + const char *res;
127 +- long len, err = -EINVAL;
128 ++ long len;
129 ++ char tmp[__NEW_UTS_LEN + 1];
130 +
131 + offset = command-1;
132 + if (offset >= ARRAY_SIZE(sysinfo_table)) {
133 + /* Digital UNIX has a few unpublished interfaces here */
134 + printk("sysinfo(%d)", command);
135 +- goto out;
136 ++ return -EINVAL;
137 + }
138 +
139 + down_read(&uts_sem);
140 +@@ -755,13 +749,11 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
141 + len = strlen(res)+1;
142 + if ((unsigned long)len > (unsigned long)count)
143 + len = count;
144 +- if (copy_to_user(buf, res, len))
145 +- err = -EFAULT;
146 +- else
147 +- err = 0;
148 ++ memcpy(tmp, res, len);
149 + up_read(&uts_sem);
150 +- out:
151 +- return err;
152 ++ if (copy_to_user(buf, tmp, len))
153 ++ return -EFAULT;
154 ++ return 0;
155 + }
156 +
157 + SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
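Every uts_sem hunk in this patch (the alpha ones above, and the sparc, kernel/sys.c and kernel/utsname_sysctl.c ones below) follows the same shape: snapshot the data into a kernel buffer while holding the semaphore, drop the semaphore, and only then touch userspace, so a page fault during copy_to_user() can never occur with uts_sem held. An illustrative sketch of the pattern, with a hypothetical syscall name, not taken from the patch:

    SYSCALL_DEFINE2(get_nodename, char __user *, name, int, len)
    {
            char tmp[__NEW_UTS_LEN + 1];
            int n;

            if (len < 0)
                    return -EINVAL;

            down_read(&uts_sem);
            n = min_t(int, len, sizeof(tmp));
            memcpy(tmp, utsname()->nodename, n);    /* snapshot under the lock */
            up_read(&uts_sem);                      /* no user access while held */

            if (copy_to_user(name, tmp, n))         /* may fault, lock is gone */
                    return -EFAULT;
            return 0;
    }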
158 +diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
159 +index bb1ca158273c..1922e7a93e40 100644
160 +--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
161 ++++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
162 +@@ -201,6 +201,7 @@
163 + #address-cells = <1>;
164 + #size-cells = <0>;
165 + reg = <0x70>;
166 ++ reset-gpio = <&gpio TEGRA_GPIO(BB, 0) GPIO_ACTIVE_LOW>;
167 + };
168 + };
169 +
170 +diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
171 +index 493e72f64b35..5768ec3c1781 100644
172 +--- a/arch/powerpc/include/asm/fadump.h
173 ++++ b/arch/powerpc/include/asm/fadump.h
174 +@@ -194,9 +194,6 @@ struct fadump_crash_info_header {
175 + struct cpumask cpu_online_mask;
176 + };
177 +
178 +-/* Crash memory ranges */
179 +-#define INIT_CRASHMEM_RANGES (INIT_MEMBLOCK_REGIONS + 2)
180 +-
181 + struct fad_crash_memory_ranges {
182 + unsigned long long base;
183 + unsigned long long size;
184 +diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
185 +index 791d4c3329c3..c3c835290131 100644
186 +--- a/arch/powerpc/kernel/fadump.c
187 ++++ b/arch/powerpc/kernel/fadump.c
188 +@@ -35,6 +35,7 @@
189 + #include <linux/crash_dump.h>
190 + #include <linux/kobject.h>
191 + #include <linux/sysfs.h>
192 ++#include <linux/slab.h>
193 +
194 + #include <asm/page.h>
195 + #include <asm/prom.h>
196 +@@ -48,8 +49,10 @@ static struct fadump_mem_struct fdm;
197 + static const struct fadump_mem_struct *fdm_active;
198 +
199 + static DEFINE_MUTEX(fadump_mutex);
200 +-struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES];
201 ++struct fad_crash_memory_ranges *crash_memory_ranges;
202 ++int crash_memory_ranges_size;
203 + int crash_mem_ranges;
204 ++int max_crash_mem_ranges;
205 +
206 + /* Scan the Firmware Assisted dump configuration details. */
207 + int __init early_init_dt_scan_fw_dump(unsigned long node,
208 +@@ -726,38 +729,88 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
209 + return 0;
210 + }
211 +
212 +-static inline void fadump_add_crash_memory(unsigned long long base,
213 +- unsigned long long end)
214 ++static void free_crash_memory_ranges(void)
215 ++{
216 ++ kfree(crash_memory_ranges);
217 ++ crash_memory_ranges = NULL;
218 ++ crash_memory_ranges_size = 0;
219 ++ max_crash_mem_ranges = 0;
220 ++}
221 ++
222 ++/*
223 ++ * Allocate or reallocate crash memory ranges array in incremental units
224 ++ * of PAGE_SIZE.
225 ++ */
226 ++static int allocate_crash_memory_ranges(void)
227 ++{
228 ++ struct fad_crash_memory_ranges *new_array;
229 ++ u64 new_size;
230 ++
231 ++ new_size = crash_memory_ranges_size + PAGE_SIZE;
232 ++ pr_debug("Allocating %llu bytes of memory for crash memory ranges\n",
233 ++ new_size);
234 ++
235 ++ new_array = krealloc(crash_memory_ranges, new_size, GFP_KERNEL);
236 ++ if (new_array == NULL) {
237 ++ pr_err("Insufficient memory for setting up crash memory ranges\n");
238 ++ free_crash_memory_ranges();
239 ++ return -ENOMEM;
240 ++ }
241 ++
242 ++ crash_memory_ranges = new_array;
243 ++ crash_memory_ranges_size = new_size;
244 ++ max_crash_mem_ranges = (new_size /
245 ++ sizeof(struct fad_crash_memory_ranges));
246 ++ return 0;
247 ++}
248 ++
249 ++static inline int fadump_add_crash_memory(unsigned long long base,
250 ++ unsigned long long end)
251 + {
252 + if (base == end)
253 +- return;
254 ++ return 0;
255 ++
256 ++ if (crash_mem_ranges == max_crash_mem_ranges) {
257 ++ int ret;
258 ++
259 ++ ret = allocate_crash_memory_ranges();
260 ++ if (ret)
261 ++ return ret;
262 ++ }
263 +
264 + pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
265 + crash_mem_ranges, base, end - 1, (end - base));
266 + crash_memory_ranges[crash_mem_ranges].base = base;
267 + crash_memory_ranges[crash_mem_ranges].size = end - base;
268 + crash_mem_ranges++;
269 ++ return 0;
270 + }
271 +
272 +-static void fadump_exclude_reserved_area(unsigned long long start,
273 ++static int fadump_exclude_reserved_area(unsigned long long start,
274 + unsigned long long end)
275 + {
276 + unsigned long long ra_start, ra_end;
277 ++ int ret = 0;
278 +
279 + ra_start = fw_dump.reserve_dump_area_start;
280 + ra_end = ra_start + fw_dump.reserve_dump_area_size;
281 +
282 + if ((ra_start < end) && (ra_end > start)) {
283 + if ((start < ra_start) && (end > ra_end)) {
284 +- fadump_add_crash_memory(start, ra_start);
285 +- fadump_add_crash_memory(ra_end, end);
286 ++ ret = fadump_add_crash_memory(start, ra_start);
287 ++ if (ret)
288 ++ return ret;
289 ++
290 ++ ret = fadump_add_crash_memory(ra_end, end);
291 + } else if (start < ra_start) {
292 +- fadump_add_crash_memory(start, ra_start);
293 ++ ret = fadump_add_crash_memory(start, ra_start);
294 + } else if (ra_end < end) {
295 +- fadump_add_crash_memory(ra_end, end);
296 ++ ret = fadump_add_crash_memory(ra_end, end);
297 + }
298 + } else
299 +- fadump_add_crash_memory(start, end);
300 ++ ret = fadump_add_crash_memory(start, end);
301 ++
302 ++ return ret;
303 + }
304 +
305 + static int fadump_init_elfcore_header(char *bufp)
306 +@@ -793,10 +846,11 @@ static int fadump_init_elfcore_header(char *bufp)
307 + * Traverse through memblock structure and setup crash memory ranges. These
308 + * ranges will be used create PT_LOAD program headers in elfcore header.
309 + */
310 +-static void fadump_setup_crash_memory_ranges(void)
311 ++static int fadump_setup_crash_memory_ranges(void)
312 + {
313 + struct memblock_region *reg;
314 + unsigned long long start, end;
315 ++ int ret;
316 +
317 + pr_debug("Setup crash memory ranges.\n");
318 + crash_mem_ranges = 0;
319 +@@ -807,7 +861,9 @@ static void fadump_setup_crash_memory_ranges(void)
320 + * specified during fadump registration. We need to create a separate
321 + * program header for this chunk with the correct offset.
322 + */
323 +- fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
324 ++ ret = fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
325 ++ if (ret)
326 ++ return ret;
327 +
328 + for_each_memblock(memory, reg) {
329 + start = (unsigned long long)reg->base;
330 +@@ -816,8 +872,12 @@ static void fadump_setup_crash_memory_ranges(void)
331 + start = fw_dump.boot_memory_size;
332 +
333 + /* add this range excluding the reserved dump area. */
334 +- fadump_exclude_reserved_area(start, end);
335 ++ ret = fadump_exclude_reserved_area(start, end);
336 ++ if (ret)
337 ++ return ret;
338 + }
339 ++
340 ++ return 0;
341 + }
342 +
343 + /*
344 +@@ -941,6 +1001,7 @@ static void register_fadump(void)
345 + {
346 + unsigned long addr;
347 + void *vaddr;
348 ++ int ret;
349 +
350 + /*
351 + * If no memory is reserved then we can not register for firmware-
352 +@@ -949,7 +1010,9 @@ static void register_fadump(void)
353 + if (!fw_dump.reserve_dump_area_size)
354 + return;
355 +
356 +- fadump_setup_crash_memory_ranges();
357 ++ ret = fadump_setup_crash_memory_ranges();
358 ++ if (ret)
359 ++ return ret;
360 +
361 + addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
362 + /* Initialize fadump crash info header. */
363 +@@ -1028,6 +1091,7 @@ void fadump_cleanup(void)
364 + } else if (fw_dump.dump_registered) {
365 + /* Un-register Firmware-assisted dump if it was registered. */
366 + fadump_unregister_dump(&fdm);
367 ++ free_crash_memory_ranges();
368 + }
369 + }
370 +
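The fadump rework above replaces the fixed INIT_CRASHMEM_RANGES array with storage grown on demand: allocate_crash_memory_ranges() krealloc()s the buffer one PAGE_SIZE at a time, and every helper in the add/exclude chain now returns int so -ENOMEM can propagate up to registration. Reduced to a sketch, with invented names:

    struct range { unsigned long long base, size; };

    static struct range *ranges;
    static size_t ranges_bytes;
    static int nr_ranges, max_ranges;

    static int add_range(unsigned long long base, unsigned long long end)
    {
            if (nr_ranges == max_ranges) {
                    size_t bytes = ranges_bytes + PAGE_SIZE;
                    struct range *tmp = krealloc(ranges, bytes, GFP_KERNEL);

                    if (!tmp)
                            return -ENOMEM; /* caller unwinds, as fadump does */
                    ranges = tmp;
                    ranges_bytes = bytes;
                    max_ranges = bytes / sizeof(*ranges);
            }
            ranges[nr_ranges].base = base;
            ranges[nr_ranges++].size = end - base;
            return 0;
    }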
371 +diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
372 +index 3b6647e574b6..f5313a78e5d6 100644
373 +--- a/arch/powerpc/platforms/pseries/ras.c
374 ++++ b/arch/powerpc/platforms/pseries/ras.c
375 +@@ -300,7 +300,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
376 + }
377 +
378 + savep = __va(regs->gpr[3]);
379 +- regs->gpr[3] = savep[0]; /* restore original r3 */
380 ++ regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
381 +
382 + /* If it isn't an extended log we can use the per cpu 64bit buffer */
383 + h = (struct rtas_error_log *)&savep[1];
384 +diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
385 +index 646988d4c1a3..740f43b9b541 100644
386 +--- a/arch/sparc/kernel/sys_sparc_32.c
387 ++++ b/arch/sparc/kernel/sys_sparc_32.c
388 +@@ -201,23 +201,27 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig,
389 +
390 + asmlinkage long sys_getdomainname(char __user *name, int len)
391 + {
392 +- int nlen, err;
393 +-
394 ++ int nlen, err;
395 ++ char tmp[__NEW_UTS_LEN + 1];
396 ++
397 + if (len < 0)
398 + return -EINVAL;
399 +
400 +- down_read(&uts_sem);
401 +-
402 ++ down_read(&uts_sem);
403 ++
404 + nlen = strlen(utsname()->domainname) + 1;
405 + err = -EINVAL;
406 + if (nlen > len)
407 +- goto out;
408 ++ goto out_unlock;
409 ++ memcpy(tmp, utsname()->domainname, nlen);
410 +
411 +- err = -EFAULT;
412 +- if (!copy_to_user(name, utsname()->domainname, nlen))
413 +- err = 0;
414 ++ up_read(&uts_sem);
415 +
416 +-out:
417 ++ if (copy_to_user(name, tmp, nlen))
418 ++ return -EFAULT;
419 ++ return 0;
420 ++
421 ++out_unlock:
422 + up_read(&uts_sem);
423 + return err;
424 + }
425 +diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
426 +index 7f0f7c01b297..f63cd2ea8470 100644
427 +--- a/arch/sparc/kernel/sys_sparc_64.c
428 ++++ b/arch/sparc/kernel/sys_sparc_64.c
429 +@@ -524,23 +524,27 @@ extern void check_pending(int signum);
430 +
431 + SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
432 + {
433 +- int nlen, err;
434 ++ int nlen, err;
435 ++ char tmp[__NEW_UTS_LEN + 1];
436 +
437 + if (len < 0)
438 + return -EINVAL;
439 +
440 +- down_read(&uts_sem);
441 +-
442 ++ down_read(&uts_sem);
443 ++
444 + nlen = strlen(utsname()->domainname) + 1;
445 + err = -EINVAL;
446 + if (nlen > len)
447 +- goto out;
448 ++ goto out_unlock;
449 ++ memcpy(tmp, utsname()->domainname, nlen);
450 ++
451 ++ up_read(&uts_sem);
452 +
453 +- err = -EFAULT;
454 +- if (!copy_to_user(name, utsname()->domainname, nlen))
455 +- err = 0;
456 ++ if (copy_to_user(name, tmp, nlen))
457 ++ return -EFAULT;
458 ++ return 0;
459 +
460 +-out:
461 ++out_unlock:
462 + up_read(&uts_sem);
463 + return err;
464 + }
465 +diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
466 +index 9016b4b70375..6c5020163db0 100644
467 +--- a/arch/x86/include/asm/io.h
468 ++++ b/arch/x86/include/asm/io.h
469 +@@ -351,4 +351,10 @@ extern void arch_phys_wc_del(int handle);
470 + #define arch_phys_wc_add arch_phys_wc_add
471 + #endif
472 +
473 ++#ifdef CONFIG_X86_PAT
474 ++extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
475 ++extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
476 ++#define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
477 ++#endif
478 ++
479 + #endif /* _ASM_X86_IO_H */
480 +diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
481 +index 1007fa80f5a6..0e1dd7d47f05 100644
482 +--- a/arch/x86/mm/pageattr.c
483 ++++ b/arch/x86/mm/pageattr.c
484 +@@ -1079,7 +1079,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
485 + * Map everything starting from the Gb boundary, possibly with 1G pages
486 + */
487 + while (end - start >= PUD_SIZE) {
488 +- set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
489 ++ set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn >> PAGE_SHIFT,
490 + canon_pgprot(pud_pgprot))));
491 +
492 + start += PUD_SIZE;
493 +diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
494 +index 3146b1da6d72..5ff0cb74de55 100644
495 +--- a/arch/x86/mm/pat.c
496 ++++ b/arch/x86/mm/pat.c
497 +@@ -726,6 +726,20 @@ void io_free_memtype(resource_size_t start, resource_size_t end)
498 + free_memtype(start, end);
499 + }
500 +
501 ++int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
502 ++{
503 ++ enum page_cache_mode type = _PAGE_CACHE_MODE_WC;
504 ++
505 ++ return io_reserve_memtype(start, start + size, &type);
506 ++}
507 ++EXPORT_SYMBOL(arch_io_reserve_memtype_wc);
508 ++
509 ++void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
510 ++{
511 ++ io_free_memtype(start, start + size);
512 ++}
513 ++EXPORT_SYMBOL(arch_io_free_memtype_wc);
514 ++
515 + pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
516 + unsigned long size, pgprot_t vma_prot)
517 + {
518 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
519 +index 73628c7599e7..3aca9a9011fb 100644
520 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
521 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
522 +@@ -492,6 +492,10 @@ void amdgpu_bo_force_delete(struct amdgpu_device *adev)
523 +
524 + int amdgpu_bo_init(struct amdgpu_device *adev)
525 + {
526 ++ /* reserve PAT memory space to WC for VRAM */
527 ++ arch_io_reserve_memtype_wc(adev->mc.aper_base,
528 ++ adev->mc.aper_size);
529 ++
530 + /* Add an MTRR for the VRAM */
531 + adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
532 + adev->mc.aper_size);
533 +@@ -507,6 +511,7 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
534 + {
535 + amdgpu_ttm_fini(adev);
536 + arch_phys_wc_del(adev->mc.vram_mtrr);
537 ++ arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
538 + }
539 +
540 + int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
541 +diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
542 +index 08f82eae6939..ac12f74e6b32 100644
543 +--- a/drivers/gpu/drm/ast/ast_ttm.c
544 ++++ b/drivers/gpu/drm/ast/ast_ttm.c
545 +@@ -275,6 +275,8 @@ int ast_mm_init(struct ast_private *ast)
546 + return ret;
547 + }
548 +
549 ++ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
550 ++ pci_resource_len(dev->pdev, 0));
551 + ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
552 + pci_resource_len(dev->pdev, 0));
553 +
554 +@@ -283,11 +285,15 @@ int ast_mm_init(struct ast_private *ast)
555 +
556 + void ast_mm_fini(struct ast_private *ast)
557 + {
558 ++ struct drm_device *dev = ast->dev;
559 ++
560 + ttm_bo_device_release(&ast->ttm.bdev);
561 +
562 + ast_ttm_global_release(ast);
563 +
564 + arch_phys_wc_del(ast->fb_mtrr);
565 ++ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
566 ++ pci_resource_len(dev->pdev, 0));
567 + }
568 +
569 + void ast_ttm_placement(struct ast_bo *bo, int domain)
570 +diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
571 +index dfffd528517a..393967025043 100644
572 +--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
573 ++++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
574 +@@ -275,6 +275,9 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
575 + return ret;
576 + }
577 +
578 ++ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
579 ++ pci_resource_len(dev->pdev, 0));
580 ++
581 + cirrus->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
582 + pci_resource_len(dev->pdev, 0));
583 +
584 +@@ -284,6 +287,8 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
585 +
586 + void cirrus_mm_fini(struct cirrus_device *cirrus)
587 + {
588 ++ struct drm_device *dev = cirrus->dev;
589 ++
590 + if (!cirrus->mm_inited)
591 + return;
592 +
593 +@@ -293,6 +298,8 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
594 +
595 + arch_phys_wc_del(cirrus->fb_mtrr);
596 + cirrus->fb_mtrr = 0;
597 ++ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
598 ++ pci_resource_len(dev->pdev, 0));
599 + }
600 +
601 + void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
602 +diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
603 +index 19fb0bddc1cd..359fe2b8bb8a 100644
604 +--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
605 ++++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
606 +@@ -842,6 +842,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
607 + I915_USERPTR_UNSYNCHRONIZED))
608 + return -EINVAL;
609 +
610 ++ if (!args->user_size)
611 ++ return -EINVAL;
612 ++
613 + if (offset_in_page(args->user_ptr | args->user_size))
614 + return -EINVAL;
615 +
616 +diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
617 +index 05108b505fbf..d9df8d32fc35 100644
618 +--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
619 ++++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
620 +@@ -274,6 +274,9 @@ int mgag200_mm_init(struct mga_device *mdev)
621 + return ret;
622 + }
623 +
624 ++ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
625 ++ pci_resource_len(dev->pdev, 0));
626 ++
627 + mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
628 + pci_resource_len(dev->pdev, 0));
629 +
630 +@@ -282,10 +285,14 @@ int mgag200_mm_init(struct mga_device *mdev)
631 +
632 + void mgag200_mm_fini(struct mga_device *mdev)
633 + {
634 ++ struct drm_device *dev = mdev->dev;
635 ++
636 + ttm_bo_device_release(&mdev->ttm.bdev);
637 +
638 + mgag200_ttm_global_release(mdev);
639 +
640 ++ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
641 ++ pci_resource_len(dev->pdev, 0));
642 + arch_phys_wc_del(mdev->fb_mtrr);
643 + mdev->fb_mtrr = 0;
644 + }
645 +diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
646 +index d2e7d209f651..9835327a3214 100644
647 +--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
648 ++++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
649 +@@ -397,6 +397,9 @@ nouveau_ttm_init(struct nouveau_drm *drm)
650 + /* VRAM init */
651 + drm->gem.vram_available = drm->device.info.ram_user;
652 +
653 ++ arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
654 ++ device->func->resource_size(device, 1));
655 ++
656 + ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
657 + drm->gem.vram_available >> PAGE_SHIFT);
658 + if (ret) {
659 +@@ -429,6 +432,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
660 + void
661 + nouveau_ttm_fini(struct nouveau_drm *drm)
662 + {
663 ++ struct nvkm_device *device = nvxx_device(&drm->device);
664 ++
665 + ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
666 + ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
667 +
668 +@@ -438,4 +443,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
669 +
670 + arch_phys_wc_del(drm->ttm.mtrr);
671 + drm->ttm.mtrr = 0;
672 ++ arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
673 ++ device->func->resource_size(device, 1));
674 ++
675 + }
676 +diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
677 +index 83aee9e814ba..18ec38d0d3f5 100644
678 +--- a/drivers/gpu/drm/radeon/radeon_object.c
679 ++++ b/drivers/gpu/drm/radeon/radeon_object.c
680 +@@ -447,6 +447,10 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
681 +
682 + int radeon_bo_init(struct radeon_device *rdev)
683 + {
684 ++ /* reserve PAT memory space to WC for VRAM */
685 ++ arch_io_reserve_memtype_wc(rdev->mc.aper_base,
686 ++ rdev->mc.aper_size);
687 ++
688 + /* Add an MTRR for the VRAM */
689 + if (!rdev->fastfb_working) {
690 + rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
691 +@@ -464,6 +468,7 @@ void radeon_bo_fini(struct radeon_device *rdev)
692 + {
693 + radeon_ttm_fini(rdev);
694 + arch_phys_wc_del(rdev->mc.vram_mtrr);
695 ++ arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
696 + }
697 +
698 + /* Returns how many bytes TTM can move per IB.
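The seven DRM/TTM drivers touched above all gain the same symmetric pair: at init, arch_io_reserve_memtype_wc() records the VRAM aperture as write-combining in the PAT tracking next to the existing arch_phys_wc_add() MTRR call, and teardown releases the two in reverse order. The pairing, sketched for a hypothetical driver type:

    static int foo_vram_init(struct foo_device *fdev)
    {
            /* tell PAT that user mappings of this BAR should be WC, not UC */
            arch_io_reserve_memtype_wc(fdev->aper_base, fdev->aper_size);
            fdev->vram_mtrr = arch_phys_wc_add(fdev->aper_base,
                                               fdev->aper_size);
            return 0;
    }

    static void foo_vram_fini(struct foo_device *fdev)
    {
            arch_phys_wc_del(fdev->vram_mtrr);
            arch_io_free_memtype_wc(fdev->aper_base, fdev->aper_size);
    }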
699 +diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
700 +index 44a30f286de1..57b1812a5a18 100644
701 +--- a/drivers/iio/frequency/ad9523.c
702 ++++ b/drivers/iio/frequency/ad9523.c
703 +@@ -507,7 +507,7 @@ static ssize_t ad9523_store(struct device *dev,
704 + return ret;
705 +
706 + if (!state)
707 +- return 0;
708 ++ return len;
709 +
710 + mutex_lock(&indio_dev->mlock);
711 + switch ((u32)this_attr->address) {
712 +@@ -641,7 +641,7 @@ static int ad9523_read_raw(struct iio_dev *indio_dev,
713 + code = (AD9523_CLK_DIST_DIV_PHASE_REV(ret) * 3141592) /
714 + AD9523_CLK_DIST_DIV_REV(ret);
715 + *val = code / 1000000;
716 +- *val2 = (code % 1000000) * 10;
717 ++ *val2 = code % 1000000;
718 + return IIO_VAL_INT_PLUS_MICRO;
719 + default:
720 + return -EINVAL;
721 +diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
722 +index e913a930ac80..5a63e32a4a6b 100644
723 +--- a/drivers/iommu/dmar.c
724 ++++ b/drivers/iommu/dmar.c
725 +@@ -1315,8 +1315,8 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
726 + qi_submit_sync(&desc, iommu);
727 + }
728 +
729 +-void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
730 +- u64 addr, unsigned mask)
731 ++void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
732 ++ u16 qdep, u64 addr, unsigned mask)
733 + {
734 + struct qi_desc desc;
735 +
736 +@@ -1331,7 +1331,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
737 + qdep = 0;
738 +
739 + desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
740 +- QI_DIOTLB_TYPE;
741 ++ QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
742 +
743 + qi_submit_sync(&desc, iommu);
744 + }
745 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
746 +index 4efec2db4ee2..49b266433f4c 100644
747 +--- a/drivers/iommu/intel-iommu.c
748 ++++ b/drivers/iommu/intel-iommu.c
749 +@@ -419,6 +419,7 @@ struct device_domain_info {
750 + struct list_head global; /* link to global list */
751 + u8 bus; /* PCI bus number */
752 + u8 devfn; /* PCI devfn number */
753 ++ u16 pfsid; /* SRIOV physical function source ID */
754 + u8 pasid_supported:3;
755 + u8 pasid_enabled:1;
756 + u8 pri_supported:1;
757 +@@ -1479,6 +1480,20 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
758 + return;
759 +
760 + pdev = to_pci_dev(info->dev);
761 ++ /* For IOMMU that supports device IOTLB throttling (DIT), we assign
762 ++ * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
763 ++ * queue depth at PF level. If DIT is not set, PFSID will be treated as
764 ++ * reserved, which should be set to 0.
765 ++ */
766 ++ if (!ecap_dit(info->iommu->ecap))
767 ++ info->pfsid = 0;
768 ++ else {
769 ++ struct pci_dev *pf_pdev;
770 ++
771 ++ /* pdev will be returned if device is not a vf */
772 ++ pf_pdev = pci_physfn(pdev);
773 ++ info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
774 ++ }
775 +
776 + #ifdef CONFIG_INTEL_IOMMU_SVM
777 + /* The PCIe spec, in its wisdom, declares that the behaviour of
778 +@@ -1537,7 +1552,8 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
779 +
780 + sid = info->bus << 8 | info->devfn;
781 + qdep = info->ats_qdep;
782 +- qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
783 ++ qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
784 ++ qdep, addr, mask);
785 + }
786 + spin_unlock_irqrestore(&device_domain_lock, flags);
787 + }
788 +diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
789 +index f2c0000de613..95a6ae053714 100644
790 +--- a/drivers/md/bcache/writeback.c
791 ++++ b/drivers/md/bcache/writeback.c
792 +@@ -462,8 +462,10 @@ static int bch_writeback_thread(void *arg)
793 + * data on cache. BCACHE_DEV_DETACHING flag is set in
794 + * bch_cached_dev_detach().
795 + */
796 +- if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
797 ++ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
798 ++ up_write(&dc->writeback_lock);
799 + break;
800 ++ }
801 + }
802 +
803 + up_write(&dc->writeback_lock);
804 +diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
805 +index d3c55d7754af..905badc6cb17 100644
806 +--- a/drivers/md/dm-cache-metadata.c
807 ++++ b/drivers/md/dm-cache-metadata.c
808 +@@ -337,7 +337,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
809 + disk_super->version = cpu_to_le32(MAX_CACHE_VERSION);
810 + memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
811 + memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
812 +- disk_super->policy_hint_size = 0;
813 ++ disk_super->policy_hint_size = cpu_to_le32(0);
814 +
815 + __copy_sm_root(cmd, disk_super);
816 +
817 +@@ -652,6 +652,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
818 + disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
819 + disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
820 + disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
821 ++ disk_super->policy_hint_size = cpu_to_le32(cmd->policy_hint_size);
822 +
823 + disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
824 + disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
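Two endianness fixes sit in this patch: pseries/ras.c above now runs the firmware-saved register through be64_to_cpu(), and the dm-cache hunk makes policy_hint_size pass through cpu_to_le32() like every neighbouring on-disk field (and adds the write-back on commit that __commit_transaction previously omitted). The underlying rule, sketched with a made-up superblock:

    struct foo_super {
            __le32 hint_size;       /* fixed little-endian on disk */
    };

    static void foo_pack(struct foo_super *ds, u32 hint_size)
    {
            ds->hint_size = cpu_to_le32(hint_size); /* CPU -> disk */
    }

    static u32 foo_unpack(const struct foo_super *ds)
    {
            return le32_to_cpu(ds->hint_size);      /* disk -> CPU */
    }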
825 +diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
826 +index 5e047bfc0cc4..518e2dec2aa2 100644
827 +--- a/drivers/misc/vmw_balloon.c
828 ++++ b/drivers/misc/vmw_balloon.c
829 +@@ -341,7 +341,13 @@ static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
830 + success = false;
831 + }
832 +
833 +- if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS)
834 ++ /*
835 ++ * 2MB pages are only supported with batching. If batching is for some
836 ++ * reason disabled, do not use 2MB pages, since otherwise the legacy
837 ++ * mechanism is used with 2MB pages, causing a failure.
838 ++ */
839 ++ if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
840 ++ (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
841 + b->supported_page_sizes = 2;
842 + else
843 + b->supported_page_sizes = 1;
844 +@@ -450,7 +456,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
845 +
846 + pfn32 = (u32)pfn;
847 + if (pfn32 != pfn)
848 +- return -1;
849 ++ return -EINVAL;
850 +
851 + STATS_INC(b->stats.lock[false]);
852 +
853 +@@ -460,7 +466,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
854 +
855 + pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
856 + STATS_INC(b->stats.lock_fail[false]);
857 +- return 1;
858 ++ return -EIO;
859 + }
860 +
861 + static int vmballoon_send_batched_lock(struct vmballoon *b,
862 +@@ -597,11 +603,12 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
863 +
864 + locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
865 + target);
866 +- if (locked > 0) {
867 ++ if (locked) {
868 + STATS_INC(b->stats.refused_alloc[false]);
869 +
870 +- if (hv_status == VMW_BALLOON_ERROR_RESET ||
871 +- hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
872 ++ if (locked == -EIO &&
873 ++ (hv_status == VMW_BALLOON_ERROR_RESET ||
874 ++ hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
875 + vmballoon_free_page(page, false);
876 + return -EIO;
877 + }
878 +@@ -617,7 +624,7 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
879 + } else {
880 + vmballoon_free_page(page, false);
881 + }
882 +- return -EIO;
883 ++ return locked;
884 + }
885 +
886 + /* track allocated page */
887 +@@ -1029,29 +1036,30 @@ static void vmballoon_vmci_cleanup(struct vmballoon *b)
888 + */
889 + static int vmballoon_vmci_init(struct vmballoon *b)
890 + {
891 +- int error = 0;
892 ++ unsigned long error, dummy;
893 +
894 +- if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) {
895 +- error = vmci_doorbell_create(&b->vmci_doorbell,
896 +- VMCI_FLAG_DELAYED_CB,
897 +- VMCI_PRIVILEGE_FLAG_RESTRICTED,
898 +- vmballoon_doorbell, b);
899 +-
900 +- if (error == VMCI_SUCCESS) {
901 +- VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET,
902 +- b->vmci_doorbell.context,
903 +- b->vmci_doorbell.resource, error);
904 +- STATS_INC(b->stats.doorbell_set);
905 +- }
906 +- }
907 ++ if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
908 ++ return 0;
909 +
910 +- if (error != 0) {
911 +- vmballoon_vmci_cleanup(b);
912 ++ error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
913 ++ VMCI_PRIVILEGE_FLAG_RESTRICTED,
914 ++ vmballoon_doorbell, b);
915 +
916 +- return -EIO;
917 +- }
918 ++ if (error != VMCI_SUCCESS)
919 ++ goto fail;
920 ++
921 ++ error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
922 ++ b->vmci_doorbell.resource, dummy);
923 ++
924 ++ STATS_INC(b->stats.doorbell_set);
925 ++
926 ++ if (error != VMW_BALLOON_SUCCESS)
927 ++ goto fail;
928 +
929 + return 0;
930 ++fail:
931 ++ vmballoon_vmci_cleanup(b);
932 ++ return -EIO;
933 + }
934 +
935 + /*
936 +@@ -1289,7 +1297,14 @@ static int __init vmballoon_init(void)
937 +
938 + return 0;
939 + }
940 +-module_init(vmballoon_init);
941 ++
942 ++/*
943 ++ * Using late_initcall() instead of module_init() allows the balloon to use the
944 ++ * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
945 ++ * VMCI is probed only after the balloon is initialized. If the balloon is used
946 ++ * as a module, late_initcall() is equivalent to module_init().
947 ++ */
948 ++late_initcall(vmballoon_init);
949 +
950 + static void __exit vmballoon_exit(void)
951 + {
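Besides the late_initcall() move its comment already explains, the balloon changes switch vmballoon_send_lock_page() from the old -1/1 sentinels to real negative errnos, which lets vmballoon_lock_page() simply "return locked;" instead of re-encoding the failure. The convention in sketch form, with hypothetical helper names:

    static bool hypervisor_accepts(unsigned long pfn);     /* hypothetical */

    static int send_lock(unsigned long pfn)
    {
            if (pfn != (u32)pfn)
                    return -EINVAL;         /* caller sees what went wrong */
            if (!hypervisor_accepts(pfn))
                    return -EIO;
            return 0;
    }

    static int lock_page_sketch(unsigned long pfn)
    {
            int ret = send_lock(pfn);

            if (ret)
                    return ret;             /* propagate unchanged */
            return 0;
    }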
952 +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
953 +index acec4b565511..1aede726052c 100644
954 +--- a/drivers/net/usb/lan78xx.c
955 ++++ b/drivers/net/usb/lan78xx.c
956 +@@ -902,6 +902,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
957 +
958 + ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
959 + netif_carrier_on(dev->net);
960 ++
961 ++ tasklet_schedule(&dev->bh);
962 + }
963 +
964 + return ret;
965 +@@ -1361,8 +1363,6 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
966 + netif_dbg(dev, ifup, dev->net,
967 + "MAC address set to random addr");
968 + }
969 +-
970 +- tasklet_schedule(&dev->bh);
971 + }
972 +
973 + ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
974 +diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
975 +index 6a41e66015b6..062dff1c902d 100644
976 +--- a/drivers/pwm/pwm-tiehrpwm.c
977 ++++ b/drivers/pwm/pwm-tiehrpwm.c
978 +@@ -384,6 +384,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
979 + aqcsfrc_mask = AQCSFRC_CSFA_MASK;
980 + }
981 +
982 ++ /* Update shadow register first before modifying active register */
983 ++ ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
984 + /*
985 + * Changes to immediate action on Action Qualifier. This puts
986 + * Action Qualifier control on PWM output from next TBCLK
987 +diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
988 +index c872a2e54c4b..2603bee2ce07 100644
989 +--- a/drivers/spi/spi-davinci.c
990 ++++ b/drivers/spi/spi-davinci.c
991 +@@ -220,7 +220,7 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
992 + pdata = &dspi->pdata;
993 +
994 + /* program delay transfers if tx_delay is non zero */
995 +- if (spicfg->wdelay)
996 ++ if (spicfg && spicfg->wdelay)
997 + spidat1 |= SPIDAT1_WDEL;
998 +
999 + /*
1000 +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
1001 +index 0705d8883ede..8a29ec5992fd 100644
1002 +--- a/drivers/video/fbdev/core/fbmem.c
1003 ++++ b/drivers/video/fbdev/core/fbmem.c
1004 +@@ -1687,12 +1687,12 @@ static int do_register_framebuffer(struct fb_info *fb_info)
1005 + return 0;
1006 + }
1007 +
1008 +-static int do_unregister_framebuffer(struct fb_info *fb_info)
1009 ++static int unbind_console(struct fb_info *fb_info)
1010 + {
1011 + struct fb_event event;
1012 +- int i, ret = 0;
1013 ++ int ret;
1014 ++ int i = fb_info->node;
1015 +
1016 +- i = fb_info->node;
1017 + if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
1018 + return -EINVAL;
1019 +
1020 +@@ -1707,17 +1707,29 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
1021 + unlock_fb_info(fb_info);
1022 + console_unlock();
1023 +
1024 ++ return ret;
1025 ++}
1026 ++
1027 ++static int __unlink_framebuffer(struct fb_info *fb_info);
1028 ++
1029 ++static int do_unregister_framebuffer(struct fb_info *fb_info)
1030 ++{
1031 ++ struct fb_event event;
1032 ++ int ret;
1033 ++
1034 ++ ret = unbind_console(fb_info);
1035 ++
1036 + if (ret)
1037 + return -EINVAL;
1038 +
1039 + pm_vt_switch_unregister(fb_info->dev);
1040 +
1041 +- unlink_framebuffer(fb_info);
1042 ++ __unlink_framebuffer(fb_info);
1043 + if (fb_info->pixmap.addr &&
1044 + (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
1045 + kfree(fb_info->pixmap.addr);
1046 + fb_destroy_modelist(&fb_info->modelist);
1047 +- registered_fb[i] = NULL;
1048 ++ registered_fb[fb_info->node] = NULL;
1049 + num_registered_fb--;
1050 + fb_cleanup_device(fb_info);
1051 + event.info = fb_info;
1052 +@@ -1730,7 +1742,7 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
1053 + return 0;
1054 + }
1055 +
1056 +-int unlink_framebuffer(struct fb_info *fb_info)
1057 ++static int __unlink_framebuffer(struct fb_info *fb_info)
1058 + {
1059 + int i;
1060 +
1061 +@@ -1742,6 +1754,20 @@ int unlink_framebuffer(struct fb_info *fb_info)
1062 + device_destroy(fb_class, MKDEV(FB_MAJOR, i));
1063 + fb_info->dev = NULL;
1064 + }
1065 ++
1066 ++ return 0;
1067 ++}
1068 ++
1069 ++int unlink_framebuffer(struct fb_info *fb_info)
1070 ++{
1071 ++ int ret;
1072 ++
1073 ++ ret = __unlink_framebuffer(fb_info);
1074 ++ if (ret)
1075 ++ return ret;
1076 ++
1077 ++ unbind_console(fb_info);
1078 ++
1079 + return 0;
1080 + }
1081 + EXPORT_SYMBOL(unlink_framebuffer);
1082 +diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
1083 +index e3d026ac382e..f35168ce426b 100644
1084 +--- a/fs/9p/xattr.c
1085 ++++ b/fs/9p/xattr.c
1086 +@@ -107,7 +107,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
1087 + {
1088 + struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len};
1089 + struct iov_iter from;
1090 +- int retval;
1091 ++ int retval, err;
1092 +
1093 + iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len);
1094 +
1095 +@@ -128,7 +128,9 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
1096 + retval);
1097 + else
1098 + p9_client_write(fid, 0, &from, &retval);
1099 +- p9_client_clunk(fid);
1100 ++ err = p9_client_clunk(fid);
1101 ++ if (!retval && err)
1102 ++ retval = err;
1103 + return retval;
1104 + }
1105 +
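The 9p hunk starts checking p9_client_clunk()'s result, but with a precedence rule: a clunk failure is reported only when the write itself succeeded, so the first error always wins. The idiom, sketched generically with hypothetical helpers:

    int do_op(void);        /* hypothetical main operation */
    int do_cleanup(void);   /* hypothetical cleanup, must always run */

    int op_with_cleanup(void)
    {
            int retval = do_op();
            int err = do_cleanup();

            if (!retval && err)
                    retval = err;   /* surface the cleanup error only
                                     * if the op itself was fine */
            return retval;
    }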
1106 +diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
1107 +index a861bbdfe577..fa8b484d035d 100644
1108 +--- a/fs/nfs/blocklayout/dev.c
1109 ++++ b/fs/nfs/blocklayout/dev.c
1110 +@@ -162,7 +162,7 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset,
1111 + chunk = div_u64(offset, dev->chunk_size);
1112 + div_u64_rem(chunk, dev->nr_children, &chunk_idx);
1113 +
1114 +- if (chunk_idx > dev->nr_children) {
1115 ++ if (chunk_idx >= dev->nr_children) {
1116 + dprintk("%s: invalid chunk idx %d (%lld/%lld)\n",
1117 + __func__, chunk_idx, offset, dev->chunk_size);
1118 + /* error, should not happen */
1119 +diff --git a/fs/quota/quota.c b/fs/quota/quota.c
1120 +index 3746367098fd..bb0d643481c8 100644
1121 +--- a/fs/quota/quota.c
1122 ++++ b/fs/quota/quota.c
1123 +@@ -17,6 +17,7 @@
1124 + #include <linux/quotaops.h>
1125 + #include <linux/types.h>
1126 + #include <linux/writeback.h>
1127 ++#include <linux/nospec.h>
1128 +
1129 + static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
1130 + qid_t id)
1131 +@@ -644,6 +645,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
1132 +
1133 + if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
1134 + return -EINVAL;
1135 ++ type = array_index_nospec(type, MAXQUOTAS);
1136 + /*
1137 + * Quota not supported on this fs? Check this before s_quota_types
1138 + * since they needn't be set if quota is not supported at all.
1139 +diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
1140 +index 22dba8837a86..539fa934ed93 100644
1141 +--- a/fs/ubifs/journal.c
1142 ++++ b/fs/ubifs/journal.c
1143 +@@ -661,6 +661,11 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
1144 + spin_lock(&ui->ui_lock);
1145 + ui->synced_i_size = ui->ui_size;
1146 + spin_unlock(&ui->ui_lock);
1147 ++ if (xent) {
1148 ++ spin_lock(&host_ui->ui_lock);
1149 ++ host_ui->synced_i_size = host_ui->ui_size;
1150 ++ spin_unlock(&host_ui->ui_lock);
1151 ++ }
1152 + mark_inode_clean(c, ui);
1153 + mark_inode_clean(c, host_ui);
1154 + return 0;
1155 +@@ -1107,7 +1112,7 @@ static int recomp_data_node(const struct ubifs_info *c,
1156 + int err, len, compr_type, out_len;
1157 +
1158 + out_len = le32_to_cpu(dn->size);
1159 +- buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
1160 ++ buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
1161 + if (!buf)
1162 + return -ENOMEM;
1163 +
1164 +@@ -1186,7 +1191,16 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
1165 + else if (err)
1166 + goto out_free;
1167 + else {
1168 +- if (le32_to_cpu(dn->size) <= dlen)
1169 ++ int dn_len = le32_to_cpu(dn->size);
1170 ++
1171 ++ if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) {
1172 ++ ubifs_err(c, "bad data node (block %u, inode %lu)",
1173 ++ blk, inode->i_ino);
1174 ++ ubifs_dump_node(c, dn);
1175 ++ goto out_free;
1176 ++ }
1177 ++
1178 ++ if (dn_len <= dlen)
1179 + dlen = 0; /* Nothing to do */
1180 + else {
1181 + int compr_type = le16_to_cpu(dn->compr_type);
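ubifs_jnl_truncate() now validates the length it reads from the data node before using it, since dn->size comes off the media and a corrupted node could otherwise drive an oversized copy or decompression. Generic shape of the check; the node type and limit below are placeholders:

    struct data_node { __le32 size; /* ... */ };   /* placeholder type */

    static int use_node(const struct data_node *dn)
    {
            int len = le32_to_cpu(dn->size);

            if (len <= 0 || len > MAX_NODE_SIZE) {  /* reject before any use */
                    pr_err("bad data node length %d\n", len);
                    return -EUCLEAN;                /* fs-corruption errno */
            }
            /* len is now safe to pass to memcpy/decompression */
            return 0;
    }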
1182 +diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
1183 +index a0011aa3a779..f43f162e36f4 100644
1184 +--- a/fs/ubifs/lprops.c
1185 ++++ b/fs/ubifs/lprops.c
1186 +@@ -1091,10 +1091,6 @@ static int scan_check_cb(struct ubifs_info *c,
1187 + }
1188 + }
1189 +
1190 +- buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
1191 +- if (!buf)
1192 +- return -ENOMEM;
1193 +-
1194 + /*
1195 + * After an unclean unmount, empty and freeable LEBs
1196 + * may contain garbage - do not scan them.
1197 +@@ -1113,6 +1109,10 @@ static int scan_check_cb(struct ubifs_info *c,
1198 + return LPT_SCAN_CONTINUE;
1199 + }
1200 +
1201 ++ buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
1202 ++ if (!buf)
1203 ++ return -ENOMEM;
1204 ++
1205 + sleb = ubifs_scan(c, lnum, 0, buf, 0);
1206 + if (IS_ERR(sleb)) {
1207 + ret = PTR_ERR(sleb);
1208 +diff --git a/fs/xattr.c b/fs/xattr.c
1209 +index 76f01bf4b048..09441c396798 100644
1210 +--- a/fs/xattr.c
1211 ++++ b/fs/xattr.c
1212 +@@ -453,7 +453,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
1213 + if (error > 0) {
1214 + if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
1215 + (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
1216 +- posix_acl_fix_xattr_to_user(kvalue, size);
1217 ++ posix_acl_fix_xattr_to_user(kvalue, error);
1218 + if (size && copy_to_user(value, kvalue, error))
1219 + error = -EFAULT;
1220 + } else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {
1221 +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
1222 +index 23e129ef6726..e353f6600b0b 100644
1223 +--- a/include/linux/intel-iommu.h
1224 ++++ b/include/linux/intel-iommu.h
1225 +@@ -125,6 +125,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
1226 + * Extended Capability Register
1227 + */
1228 +
1229 ++#define ecap_dit(e) ((e >> 41) & 0x1)
1230 + #define ecap_pasid(e) ((e >> 40) & 0x1)
1231 + #define ecap_pss(e) ((e >> 35) & 0x1f)
1232 + #define ecap_eafs(e) ((e >> 34) & 0x1)
1233 +@@ -294,6 +295,7 @@ enum {
1234 + #define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
1235 + #define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
1236 + #define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
1237 ++#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
1238 + #define QI_DEV_IOTLB_SIZE 1
1239 + #define QI_DEV_IOTLB_MAX_INVS 32
1240 +
1241 +@@ -318,6 +320,7 @@ enum {
1242 + #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
1243 + #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
1244 + #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
1245 ++#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
1246 + #define QI_DEV_EIOTLB_MAX_INVS 32
1247 +
1248 + #define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
1249 +@@ -463,9 +466,8 @@ extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
1250 + u8 fm, u64 type);
1251 + extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1252 + unsigned int size_order, u64 type);
1253 +-extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
1254 +- u64 addr, unsigned mask);
1255 +-
1256 ++extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1257 ++ u16 qdep, u64 addr, unsigned mask);
1258 + extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
1259 +
1260 + extern int dmar_ir_support(void);
1261 +diff --git a/include/linux/io.h b/include/linux/io.h
1262 +index de64c1e53612..8ab45611fc35 100644
1263 +--- a/include/linux/io.h
1264 ++++ b/include/linux/io.h
1265 +@@ -154,4 +154,26 @@ enum {
1266 + void *memremap(resource_size_t offset, size_t size, unsigned long flags);
1267 + void memunmap(void *addr);
1268 +
1269 ++/*
1270 ++ * On x86 PAT systems we have memory tracking that keeps track of
1271 ++ * the allowed mappings on memory ranges. This tracking works for
1272 ++ * all the in-kernel mapping APIs (ioremap*), but where the user
1273 ++ * wishes to map a range from a physical device into user memory
1274 ++ * the tracking won't be updated. This API is to be used by
1275 ++ * drivers which remap physical device pages into userspace,
1276 ++ * and wants to make sure they are mapped WC and not UC.
1277 ++ */
1278 ++#ifndef arch_io_reserve_memtype_wc
1279 ++static inline int arch_io_reserve_memtype_wc(resource_size_t base,
1280 ++ resource_size_t size)
1281 ++{
1282 ++ return 0;
1283 ++}
1284 ++
1285 ++static inline void arch_io_free_memtype_wc(resource_size_t base,
1286 ++ resource_size_t size)
1287 ++{
1288 ++}
1289 ++#endif
1290 ++
1291 + #endif /* _LINUX_IO_H */
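The generic header supplies no-op stubs and lets an architecture replace them: the x86 <asm/io.h> hunk earlier both declares the real functions and #defines the symbol to itself, so the #ifndef above compiles the stubs out there. The idiom in miniature, with an invented function name:

    /* arch header, only where a real implementation exists: */
    int arch_frob(int x);
    #define arch_frob arch_frob     /* signals the override to generic code */

    /* generic header: */
    #ifndef arch_frob
    static inline int arch_frob(int x)
    {
            return 0;               /* harmless default everywhere else */
    }
    #endif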
1292 +diff --git a/include/video/udlfb.h b/include/video/udlfb.h
1293 +index f9466fa54ba4..2ad9a6d37ff4 100644
1294 +--- a/include/video/udlfb.h
1295 ++++ b/include/video/udlfb.h
1296 +@@ -87,7 +87,7 @@ struct dlfb_data {
1297 + #define MIN_RAW_PIX_BYTES 2
1298 + #define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)
1299 +
1300 +-#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */
1301 ++#define DL_DEFIO_WRITE_DELAY msecs_to_jiffies(HZ <= 300 ? 4 : 10) /* optimal value for 720p video */
1302 + #define DL_DEFIO_WRITE_DISABLE (HZ*60) /* "disable" with long delay */
1303 +
1304 + /* remove these once align.h patch is taken into kernel */
1305 +diff --git a/kernel/kthread.c b/kernel/kthread.c
1306 +index 850b255649a2..ac6849ee3057 100644
1307 +--- a/kernel/kthread.c
1308 ++++ b/kernel/kthread.c
1309 +@@ -313,10 +313,16 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
1310 + task = create->result;
1311 + if (!IS_ERR(task)) {
1312 + static const struct sched_param param = { .sched_priority = 0 };
1313 ++ char name[TASK_COMM_LEN];
1314 + va_list args;
1315 +
1316 + va_start(args, namefmt);
1317 +- vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
1318 ++ /*
1319 ++ * task is already visible to other tasks, so updating
1320 ++ * COMM must be protected.
1321 ++ */
1322 ++ vsnprintf(name, sizeof(name), namefmt, args);
1323 ++ set_task_comm(task, name);
1324 + va_end(args);
1325 + /*
1326 + * root may have changed our (kthreadd's) priority or CPU mask.
1327 +diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
1328 +index 02e8dfaa1ce2..9d76184279fe 100644
1329 +--- a/kernel/power/Kconfig
1330 ++++ b/kernel/power/Kconfig
1331 +@@ -105,6 +105,7 @@ config PM_SLEEP
1332 + def_bool y
1333 + depends on SUSPEND || HIBERNATE_CALLBACKS
1334 + select PM
1335 ++ select SRCU
1336 +
1337 + config PM_SLEEP_SMP
1338 + def_bool y
1339 +diff --git a/kernel/sys.c b/kernel/sys.c
1340 +index f718742e55e6..e2446ade79ba 100644
1341 +--- a/kernel/sys.c
1342 ++++ b/kernel/sys.c
1343 +@@ -1142,18 +1142,19 @@ static int override_release(char __user *release, size_t len)
1344 +
1345 + SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1346 + {
1347 +- int errno = 0;
1348 ++ struct new_utsname tmp;
1349 +
1350 + down_read(&uts_sem);
1351 +- if (copy_to_user(name, utsname(), sizeof *name))
1352 +- errno = -EFAULT;
1353 ++ memcpy(&tmp, utsname(), sizeof(tmp));
1354 + up_read(&uts_sem);
1355 ++ if (copy_to_user(name, &tmp, sizeof(tmp)))
1356 ++ return -EFAULT;
1357 +
1358 +- if (!errno && override_release(name->release, sizeof(name->release)))
1359 +- errno = -EFAULT;
1360 +- if (!errno && override_architecture(name))
1361 +- errno = -EFAULT;
1362 +- return errno;
1363 ++ if (override_release(name->release, sizeof(name->release)))
1364 ++ return -EFAULT;
1365 ++ if (override_architecture(name))
1366 ++ return -EFAULT;
1367 ++ return 0;
1368 + }
1369 +
1370 + #ifdef __ARCH_WANT_SYS_OLD_UNAME
1371 +@@ -1162,55 +1163,46 @@ SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1372 + */
1373 + SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1374 + {
1375 +- int error = 0;
1376 ++ struct old_utsname tmp;
1377 +
1378 + if (!name)
1379 + return -EFAULT;
1380 +
1381 + down_read(&uts_sem);
1382 +- if (copy_to_user(name, utsname(), sizeof(*name)))
1383 +- error = -EFAULT;
1384 ++ memcpy(&tmp, utsname(), sizeof(tmp));
1385 + up_read(&uts_sem);
1386 ++ if (copy_to_user(name, &tmp, sizeof(tmp)))
1387 ++ return -EFAULT;
1388 +
1389 +- if (!error && override_release(name->release, sizeof(name->release)))
1390 +- error = -EFAULT;
1391 +- if (!error && override_architecture(name))
1392 +- error = -EFAULT;
1393 +- return error;
1394 ++ if (override_release(name->release, sizeof(name->release)))
1395 ++ return -EFAULT;
1396 ++ if (override_architecture(name))
1397 ++ return -EFAULT;
1398 ++ return 0;
1399 + }
1400 +
1401 + SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1402 + {
1403 +- int error;
1404 ++ struct oldold_utsname tmp = {};
1405 +
1406 + if (!name)
1407 + return -EFAULT;
1408 +- if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
1409 +- return -EFAULT;
1410 +
1411 + down_read(&uts_sem);
1412 +- error = __copy_to_user(&name->sysname, &utsname()->sysname,
1413 +- __OLD_UTS_LEN);
1414 +- error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
1415 +- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
1416 +- __OLD_UTS_LEN);
1417 +- error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
1418 +- error |= __copy_to_user(&name->release, &utsname()->release,
1419 +- __OLD_UTS_LEN);
1420 +- error |= __put_user(0, name->release + __OLD_UTS_LEN);
1421 +- error |= __copy_to_user(&name->version, &utsname()->version,
1422 +- __OLD_UTS_LEN);
1423 +- error |= __put_user(0, name->version + __OLD_UTS_LEN);
1424 +- error |= __copy_to_user(&name->machine, &utsname()->machine,
1425 +- __OLD_UTS_LEN);
1426 +- error |= __put_user(0, name->machine + __OLD_UTS_LEN);
1427 ++ memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
1428 ++ memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
1429 ++ memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
1430 ++ memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
1431 ++ memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
1432 + up_read(&uts_sem);
1433 ++ if (copy_to_user(name, &tmp, sizeof(tmp)))
1434 ++ return -EFAULT;
1435 +
1436 +- if (!error && override_architecture(name))
1437 +- error = -EFAULT;
1438 +- if (!error && override_release(name->release, sizeof(name->release)))
1439 +- error = -EFAULT;
1440 +- return error ? -EFAULT : 0;
1441 ++ if (override_architecture(name))
1442 ++ return -EFAULT;
1443 ++ if (override_release(name->release, sizeof(name->release)))
1444 ++ return -EFAULT;
1445 ++ return 0;
1446 + }
1447 + #endif
1448 +
1449 +@@ -1224,17 +1216,18 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1450 +
1451 + if (len < 0 || len > __NEW_UTS_LEN)
1452 + return -EINVAL;
1453 +- down_write(&uts_sem);
1454 + errno = -EFAULT;
1455 + if (!copy_from_user(tmp, name, len)) {
1456 +- struct new_utsname *u = utsname();
1457 ++ struct new_utsname *u;
1458 +
1459 ++ down_write(&uts_sem);
1460 ++ u = utsname();
1461 + memcpy(u->nodename, tmp, len);
1462 + memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1463 + errno = 0;
1464 + uts_proc_notify(UTS_PROC_HOSTNAME);
1465 ++ up_write(&uts_sem);
1466 + }
1467 +- up_write(&uts_sem);
1468 + return errno;
1469 + }
1470 +
1471 +@@ -1242,8 +1235,9 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1472 +
1473 + SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1474 + {
1475 +- int i, errno;
1476 ++ int i;
1477 + struct new_utsname *u;
1478 ++ char tmp[__NEW_UTS_LEN + 1];
1479 +
1480 + if (len < 0)
1481 + return -EINVAL;
1482 +@@ -1252,11 +1246,11 @@ SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1483 + i = 1 + strlen(u->nodename);
1484 + if (i > len)
1485 + i = len;
1486 +- errno = 0;
1487 +- if (copy_to_user(name, u->nodename, i))
1488 +- errno = -EFAULT;
1489 ++ memcpy(tmp, u->nodename, i);
1490 + up_read(&uts_sem);
1491 +- return errno;
1492 ++ if (copy_to_user(name, tmp, i))
1493 ++ return -EFAULT;
1494 ++ return 0;
1495 + }
1496 +
1497 + #endif
1498 +@@ -1275,17 +1269,18 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1499 + if (len < 0 || len > __NEW_UTS_LEN)
1500 + return -EINVAL;
1501 +
1502 +- down_write(&uts_sem);
1503 + errno = -EFAULT;
1504 + if (!copy_from_user(tmp, name, len)) {
1505 +- struct new_utsname *u = utsname();
1506 ++ struct new_utsname *u;
1507 +
1508 ++ down_write(&uts_sem);
1509 ++ u = utsname();
1510 + memcpy(u->domainname, tmp, len);
1511 + memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1512 + errno = 0;
1513 + uts_proc_notify(UTS_PROC_DOMAINNAME);
1514 ++ up_write(&uts_sem);
1515 + }
1516 +- up_write(&uts_sem);
1517 + return errno;
1518 + }
1519 +
1520 +diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
1521 +index 7ab5eafea8b2..210b8e726a97 100644
1522 +--- a/kernel/trace/blktrace.c
1523 ++++ b/kernel/trace/blktrace.c
1524 +@@ -1716,6 +1716,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1525 + mutex_lock(&bdev->bd_mutex);
1526 +
1527 + if (attr == &dev_attr_enable) {
1528 ++ if (!!value == !!q->blk_trace) {
1529 ++ ret = 0;
1530 ++ goto out_unlock_bdev;
1531 ++ }
1532 + if (value)
1533 + ret = blk_trace_setup_queue(q, bdev);
1534 + else
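This hunk and the rb_simple_write() one below make the enable attributes idempotent: !! folds the requested value and the current state (a pointer, in blktrace's case) to plain booleans before comparing, so re-writing the current state becomes a no-op instead of a second setup or teardown. The comparison idiom, with hypothetical setup/remove helpers:

    static int trace_setup(struct request_queue *q);    /* hypothetical */
    static int trace_remove(struct request_queue *q);   /* hypothetical */

    static int set_enabled(struct request_queue *q, u64 value)
    {
            bool want = !!value;            /* requested on/off */
            bool have = !!q->blk_trace;     /* non-NULL pointer means "on" */

            if (want == have)
                    return 0;               /* already there, do nothing */
            return want ? trace_setup(q) : trace_remove(q);
    }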
1535 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1536 +index 11761b3dd7ba..e409ddce8754 100644
1537 +--- a/kernel/trace/trace.c
1538 ++++ b/kernel/trace/trace.c
1539 +@@ -6496,7 +6496,9 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
1540 +
1541 + if (buffer) {
1542 + mutex_lock(&trace_types_lock);
1543 +- if (val) {
1544 ++ if (!!val == tracer_tracing_is_on(tr)) {
1545 ++ val = 0; /* do nothing */
1546 ++ } else if (val) {
1547 + tracer_tracing_on(tr);
1548 + if (tr->current_trace->start)
1549 + tr->current_trace->start(tr);
1550 +diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
1551 +index 68bb89ad9d28..1dc887bab085 100644
1552 +--- a/kernel/trace/trace_uprobe.c
1553 ++++ b/kernel/trace/trace_uprobe.c
1554 +@@ -969,7 +969,7 @@ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
1555 +
1556 + list_del_rcu(&link->list);
1557 + /* synchronize with u{,ret}probe_trace_func */
1558 +- synchronize_sched();
1559 ++ synchronize_rcu();
1560 + kfree(link);
1561 +
1562 + if (!list_empty(&tu->tp.files))
1563 +diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
1564 +index 88fefa68c516..a965df4b54f5 100644
1565 +--- a/kernel/user_namespace.c
1566 ++++ b/kernel/user_namespace.c
1567 +@@ -602,9 +602,26 @@ static ssize_t map_write(struct file *file, const char __user *buf,
1568 + struct uid_gid_map new_map;
1569 + unsigned idx;
1570 + struct uid_gid_extent *extent = NULL;
1571 +- unsigned long page = 0;
1572 ++ unsigned long page;
1573 + char *kbuf, *pos, *next_line;
1574 +- ssize_t ret = -EINVAL;
1575 ++ ssize_t ret;
1576 ++
1577 ++ /* Only allow < page size writes at the beginning of the file */
1578 ++ if ((*ppos != 0) || (count >= PAGE_SIZE))
1579 ++ return -EINVAL;
1580 ++
1581 ++ /* Get a buffer */
1582 ++ page = __get_free_page(GFP_TEMPORARY);
1583 ++ kbuf = (char *) page;
1584 ++ if (!page)
1585 ++ return -ENOMEM;
1586 ++
1587 ++ /* Slurp in the user data */
1588 ++ if (copy_from_user(kbuf, buf, count)) {
1589 ++ free_page(page);
1590 ++ return -EFAULT;
1591 ++ }
1592 ++ kbuf[count] = '\0';
1593 +
1594 + /*
1595 + * The userns_state_mutex serializes all writes to any given map.
1596 +@@ -638,24 +655,6 @@ static ssize_t map_write(struct file *file, const char __user *buf,
1597 + if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
1598 + goto out;
1599 +
1600 +- /* Get a buffer */
1601 +- ret = -ENOMEM;
1602 +- page = __get_free_page(GFP_TEMPORARY);
1603 +- kbuf = (char *) page;
1604 +- if (!page)
1605 +- goto out;
1606 +-
1607 +- /* Only allow < page size writes at the beginning of the file */
1608 +- ret = -EINVAL;
1609 +- if ((*ppos != 0) || (count >= PAGE_SIZE))
1610 +- goto out;
1611 +-
1612 +- /* Slurp in the user data */
1613 +- ret = -EFAULT;
1614 +- if (copy_from_user(kbuf, buf, count))
1615 +- goto out;
1616 +- kbuf[count] = '\0';
1617 +-
1618 + /* Parse the user data */
1619 + ret = -EINVAL;
1620 + pos = kbuf;
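The map_write() hunks above reorder the function so that everything touching user memory happens before userns_state_mutex is taken: the offset/size check, the page allocation, and the copy_from_user() all run lock-free, and the mutex then only covers parsing and installing the new map. A userspace sketch of that shape (illustrative names; a pthread mutex stands in for userns_state_mutex, memcpy for copy_from_user()):

    #include <errno.h>
    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    #define BUF_MAX 4096

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

    static int map_write(const char *buf, size_t count, long pos)
    {
        char *kbuf;

        /* Only allow < buffer size writes at the beginning of the file */
        if (pos != 0 || count >= BUF_MAX)
            return -EINVAL;

        kbuf = malloc(BUF_MAX);
        if (!kbuf)
            return -ENOMEM;

        memcpy(kbuf, buf, count);   /* stands in for copy_from_user() */
        kbuf[count] = '\0';

        pthread_mutex_lock(&state_lock);
        /* ... parse kbuf and install the new map here ... */
        pthread_mutex_unlock(&state_lock);

        free(kbuf);
        return 0;
    }

    int main(void)
    {
        return map_write("0 0 1\n", 6, 0) ? 1 : 0;
    }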
1621 +diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
1622 +index c8eac43267e9..d2b3b2973456 100644
1623 +--- a/kernel/utsname_sysctl.c
1624 ++++ b/kernel/utsname_sysctl.c
1625 +@@ -17,7 +17,7 @@
1626 +
1627 + #ifdef CONFIG_PROC_SYSCTL
1628 +
1629 +-static void *get_uts(struct ctl_table *table, int write)
1630 ++static void *get_uts(struct ctl_table *table)
1631 + {
1632 + char *which = table->data;
1633 + struct uts_namespace *uts_ns;
1634 +@@ -25,21 +25,9 @@ static void *get_uts(struct ctl_table *table, int write)
1635 + uts_ns = current->nsproxy->uts_ns;
1636 + which = (which - (char *)&init_uts_ns) + (char *)uts_ns;
1637 +
1638 +- if (!write)
1639 +- down_read(&uts_sem);
1640 +- else
1641 +- down_write(&uts_sem);
1642 + return which;
1643 + }
1644 +
1645 +-static void put_uts(struct ctl_table *table, int write, void *which)
1646 +-{
1647 +- if (!write)
1648 +- up_read(&uts_sem);
1649 +- else
1650 +- up_write(&uts_sem);
1651 +-}
1652 +-
1653 + /*
1654 + * Special case of dostring for the UTS structure. This has locks
1655 + * to observe. Should this be in kernel/sys.c ????
1656 +@@ -49,13 +37,34 @@ static int proc_do_uts_string(struct ctl_table *table, int write,
1657 + {
1658 + struct ctl_table uts_table;
1659 + int r;
1660 ++ char tmp_data[__NEW_UTS_LEN + 1];
1661 ++
1662 + memcpy(&uts_table, table, sizeof(uts_table));
1663 +- uts_table.data = get_uts(table, write);
1664 ++ uts_table.data = tmp_data;
1665 ++
1666 ++ /*
1667 ++ * Buffer the value in tmp_data so that proc_dostring() can be called
1668 ++ * without holding any locks.
1669 ++ * We also need to read the original value in the write==1 case to
1670 ++ * support partial writes.
1671 ++ */
1672 ++ down_read(&uts_sem);
1673 ++ memcpy(tmp_data, get_uts(table), sizeof(tmp_data));
1674 ++ up_read(&uts_sem);
1675 + r = proc_dostring(&uts_table, write, buffer, lenp, ppos);
1676 +- put_uts(table, write, uts_table.data);
1677 +
1678 +- if (write)
1679 ++ if (write) {
1680 ++ /*
1681 ++ * Write back the new value.
1682 ++ * Note that, since we dropped uts_sem, the result can
1683 ++ * theoretically be incorrect if there are two parallel writes
1684 ++ * at non-zero offsets to the same sysctl.
1685 ++ */
1686 ++ down_write(&uts_sem);
1687 ++ memcpy(get_uts(table), tmp_data, sizeof(tmp_data));
1688 ++ up_write(&uts_sem);
1689 + proc_sys_poll_notify(table->poll);
1690 ++ }
1691 +
1692 + return r;
1693 + }
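The sysctl hunks above replace the lock-type juggling in get_uts()/put_uts() with a snapshot scheme: copy the string out under the read lock, let proc_dostring() work on the private buffer with no lock held, and, for writes, copy the result back under the write lock. As the added comment concedes, dropping the lock in between means two racing partial writes to the same sysctl can interleave. A compact analogue (illustrative names, pthreads in place of uts_sem):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    #define UTS_LEN 64

    static pthread_rwlock_t uts_lock = PTHREAD_RWLOCK_INITIALIZER;
    static char hostname[UTS_LEN + 1] = "old-name";

    static void do_uts_string(int write, const char *input)
    {
        char tmp[UTS_LEN + 1];

        pthread_rwlock_rdlock(&uts_lock);
        memcpy(tmp, hostname, sizeof(tmp));   /* snapshot under read lock */
        pthread_rwlock_unlock(&uts_lock);

        if (write) {
            /* stands in for proc_dostring() filling the buffer */
            strncpy(tmp, input, UTS_LEN);
            tmp[UTS_LEN] = '\0';

            pthread_rwlock_wrlock(&uts_lock);
            memcpy(hostname, tmp, sizeof(tmp));   /* write back */
            pthread_rwlock_unlock(&uts_lock);
        } else {
            printf("%s\n", tmp);
        }
    }

    int main(void)
    {
        do_uts_string(1, "new-name");
        do_uts_string(0, NULL);
        return 0;
    }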
1694 +diff --git a/mm/memory.c b/mm/memory.c
1695 +index 42db644f5ec4..5aee9ec8b8c6 100644
1696 +--- a/mm/memory.c
1697 ++++ b/mm/memory.c
1698 +@@ -361,15 +361,6 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
1699 + {
1700 + struct mmu_table_batch **batch = &tlb->batch;
1701 +
1702 +- /*
1703 +- * When there's less then two users of this mm there cannot be a
1704 +- * concurrent page-table walk.
1705 +- */
1706 +- if (atomic_read(&tlb->mm->mm_users) < 2) {
1707 +- __tlb_remove_table(table);
1708 +- return;
1709 +- }
1710 +-
1711 + if (*batch == NULL) {
1712 + *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
1713 + if (*batch == NULL) {
1714 +diff --git a/net/9p/client.c b/net/9p/client.c
1715 +index 3ff26eb1ea20..ed8738c4dc09 100644
1716 +--- a/net/9p/client.c
1717 ++++ b/net/9p/client.c
1718 +@@ -931,7 +931,7 @@ static int p9_client_version(struct p9_client *c)
1719 + {
1720 + int err = 0;
1721 + struct p9_req_t *req;
1722 +- char *version;
1723 ++ char *version = NULL;
1724 + int msize;
1725 +
1726 + p9_debug(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n",
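The one-line client.c change above is the usual "initialize what the error path frees" fix: if the version handshake bails out before the version string is parsed from the reply, the cleanup path frees the pointer, so it must start out NULL. Minimal shape (illustrative names, not the 9p code):

    #include <stdlib.h>

    static int negotiate(void)
    {
        char *version = NULL;   /* freed on every exit path below */
        int err = -1;           /* pretend the request failed early */

        if (err)
            goto error;
        /* ... version = strdup(...); use it ... */
    error:
        free(version);          /* safe: free(NULL) is a no-op */
        return err;
    }

    int main(void)
    {
        return negotiate() ? 0 : 1;
    }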
1727 +diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
1728 +index bced8c074c12..2f68ffda3715 100644
1729 +--- a/net/9p/trans_fd.c
1730 ++++ b/net/9p/trans_fd.c
1731 +@@ -185,6 +185,8 @@ static void p9_mux_poll_stop(struct p9_conn *m)
1732 + spin_lock_irqsave(&p9_poll_lock, flags);
1733 + list_del_init(&m->poll_pending_link);
1734 + spin_unlock_irqrestore(&p9_poll_lock, flags);
1735 ++
1736 ++ flush_work(&p9_poll_work);
1737 + }
1738 +
1739 + /**
1740 +@@ -933,7 +935,7 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
1741 + if (err < 0)
1742 + return err;
1743 +
1744 +- if (valid_ipaddr4(addr) < 0)
1745 ++ if (addr == NULL || valid_ipaddr4(addr) < 0)
1746 + return -EINVAL;
1747 +
1748 + csocket = NULL;
1749 +@@ -981,6 +983,9 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
1750 +
1751 + csocket = NULL;
1752 +
1753 ++ if (addr == NULL)
1754 ++ return -EINVAL;
1755 ++
1756 + if (strlen(addr) >= UNIX_PATH_MAX) {
1757 + pr_err("%s (%d): address too long: %s\n",
1758 + __func__, task_pid_nr(current), addr);
1759 +diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
1760 +index 52b4a2f993f2..f42550dd3560 100644
1761 +--- a/net/9p/trans_rdma.c
1762 ++++ b/net/9p/trans_rdma.c
1763 +@@ -644,6 +644,9 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
1764 + struct ib_qp_init_attr qp_attr;
1765 + struct ib_cq_init_attr cq_attr = {};
1766 +
1767 ++ if (addr == NULL)
1768 ++ return -EINVAL;
1769 ++
1770 + /* Parse the transport specific mount options */
1771 + err = parse_opts(args, &opts);
1772 + if (err < 0)
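This NULL check, like the one added to the fd transport above and the virtio transport below, guards mounts that name a 9p transport but supply no address: previously the missing address string could be dereferenced during parsing. The guard itself is plain (illustrative names):

    #include <errno.h>
    #include <string.h>

    static int create_trans(const char *addr)
    {
        if (addr == NULL)
            return -EINVAL;   /* fail the mount instead of oopsing */
        if (strlen(addr) == 0)    /* safe now: addr is non-NULL */
            return -EINVAL;
        /* ... parse addr and set up the transport ... */
        return 0;
    }

    int main(void)
    {
        return create_trans(NULL) == -EINVAL ? 0 : 1;
    }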
1773 +diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
1774 +index 2ddeecca5b12..6018a1c0dc28 100644
1775 +--- a/net/9p/trans_virtio.c
1776 ++++ b/net/9p/trans_virtio.c
1777 +@@ -192,7 +192,7 @@ static int pack_sg_list(struct scatterlist *sg, int start,
1778 + s = rest_of_page(data);
1779 + if (s > count)
1780 + s = count;
1781 +- BUG_ON(index > limit);
1782 ++ BUG_ON(index >= limit);
1783 + /* Make sure we don't terminate early. */
1784 + sg_unmark_end(&sg[index]);
1785 + sg_set_buf(&sg[index++], data, s);
1786 +@@ -237,6 +237,7 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit,
1787 + s = PAGE_SIZE - data_off;
1788 + if (s > count)
1789 + s = count;
1790 ++ BUG_ON(index >= limit);
1791 + /* Make sure we don't terminate early. */
1792 + sg_unmark_end(&sg[index]);
1793 + sg_set_page(&sg[index++], pdata[i++], s, data_off);
1794 +@@ -409,6 +410,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
1795 + p9_debug(P9_DEBUG_TRANS, "virtio request\n");
1796 +
1797 + if (uodata) {
1798 ++ __le32 sz;
1799 + int n = p9_get_mapped_pages(chan, &out_pages, uodata,
1800 + outlen, &offs, &need_drop);
1801 + if (n < 0)
1802 +@@ -419,6 +421,12 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
1803 + memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
1804 + outlen = n;
1805 + }
1806 ++ /* The size field of the message must include the length of the
1807 ++ * header and the length of the data. We didn't actually know
1808 ++ * the length of the data until this point so add it in now.
1809 ++ */
1810 ++ sz = cpu_to_le32(req->tc->size + outlen);
1811 ++ memcpy(&req->tc->sdata[0], &sz, sizeof(sz));
1812 + } else if (uidata) {
1813 + int n = p9_get_mapped_pages(chan, &in_pages, uidata,
1814 + inlen, &offs, &need_drop);
1815 +@@ -646,6 +654,9 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
1816 + int ret = -ENOENT;
1817 + int found = 0;
1818 +
1819 ++ if (devname == NULL)
1820 ++ return -EINVAL;
1821 ++
1822 + mutex_lock(&virtio_9p_lock);
1823 + list_for_each_entry(chan, &virtio_chan_list, chan_list) {
1824 + if (!strncmp(devname, chan->tag, chan->tag_len) &&
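Besides the NULL-device guard, the virtio hunks do two things: the BUG_ON() bounds checks now trip before writing one element past the scatterlist (index >= limit rather than index > limit), and zero-copy requests patch the 9p size field after the fact. A 9p message begins with a little-endian 32-bit total length; with a zero-copy payload those bytes never pass through the header buffer, so the size stored when the header was built is short by outlen. A standalone sketch of the header patch-up (illustrative names):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static void set_msg_size(unsigned char *hdr, uint32_t hdr_len,
                             uint32_t payload_len)
    {
        uint32_t total = hdr_len + payload_len;
        unsigned char le[4] = {
            total & 0xff, (total >> 8) & 0xff,
            (total >> 16) & 0xff, (total >> 24) & 0xff,
        };

        memcpy(hdr, le, sizeof(le));   /* size field is the first 4 bytes */
    }

    int main(void)
    {
        unsigned char hdr[16] = { 0 };

        set_msg_size(hdr, sizeof(hdr), 4096);
        printf("%u\n", (unsigned)(hdr[0] | hdr[1] << 8 |
                                  hdr[2] << 16 | hdr[3] << 24));
        return 0;
    }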
1825 +diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
1826 +index d4353faced35..a10db45b2e1e 100644
1827 +--- a/net/ieee802154/6lowpan/tx.c
1828 ++++ b/net/ieee802154/6lowpan/tx.c
1829 +@@ -265,9 +265,24 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
1830 + /* We must take a copy of the skb before we modify/replace the ipv6
1831 + * header as the header could be used elsewhere
1832 + */
1833 +- skb = skb_unshare(skb, GFP_ATOMIC);
1834 +- if (!skb)
1835 +- return NET_XMIT_DROP;
1836 ++ if (unlikely(skb_headroom(skb) < ldev->needed_headroom ||
1837 ++ skb_tailroom(skb) < ldev->needed_tailroom)) {
1838 ++ struct sk_buff *nskb;
1839 ++
1840 ++ nskb = skb_copy_expand(skb, ldev->needed_headroom,
1841 ++ ldev->needed_tailroom, GFP_ATOMIC);
1842 ++ if (likely(nskb)) {
1843 ++ consume_skb(skb);
1844 ++ skb = nskb;
1845 ++ } else {
1846 ++ kfree_skb(skb);
1847 ++ return NET_XMIT_DROP;
1848 ++ }
1849 ++ } else {
1850 ++ skb = skb_unshare(skb, GFP_ATOMIC);
1851 ++ if (!skb)
1852 ++ return NET_XMIT_DROP;
1853 ++ }
1854 +
1855 + ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
1856 + if (ret < 0) {
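The 6LoWPAN transmit path above previously only unshared the skb; if the device needed more headroom or tailroom than the skb carried, the subsequent header construction could run out of room. The fix copies into an expanded buffer only in that rare case and keeps the cheap unshare otherwise. A userspace analogue of the decision (all names are illustrative stand-ins for the skb helpers):

    #include <stdlib.h>
    #include <string.h>

    struct buf {
        unsigned char *mem;            /* headroom | data | tailroom */
        size_t headroom, len, tailroom;
    };

    static void buf_free(struct buf *b)
    {
        if (b) {
            free(b->mem);
            free(b);
        }
    }

    static struct buf *buf_copy_expand(const struct buf *b,
                                       size_t head, size_t tail)
    {
        struct buf *n = calloc(1, sizeof(*n));

        if (!n || !(n->mem = malloc(head + b->len + tail))) {
            free(n);
            return NULL;
        }
        n->headroom = head;
        n->len = b->len;
        n->tailroom = tail;
        memcpy(n->mem + head, b->mem + b->headroom, b->len);
        return n;
    }

    /* Expand only when the device's needs exceed what the buffer has. */
    static struct buf *prepare_xmit(struct buf *b,
                                    size_t need_head, size_t need_tail)
    {
        if (b->headroom < need_head || b->tailroom < need_tail) {
            struct buf *n = buf_copy_expand(b, need_head, need_tail);

            buf_free(b);           /* consume the original either way */
            return n;              /* NULL means the caller drops */
        }
        return b;  /* enough room; the kernel would still unshare here */
    }

    int main(void)
    {
        struct buf *b = calloc(1, sizeof(*b));
        int ok;

        if (!b || !(b->mem = calloc(1, 8)))
            return 1;
        b->len = 8;
        b = prepare_xmit(b, 16, 4);
        ok = b != NULL;
        buf_free(b);
        return ok ? 0 : 1;
    }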
1857 +diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
1858 +index 3827f359b336..9e1ff9d4cf2d 100644
1859 +--- a/net/mac802154/tx.c
1860 ++++ b/net/mac802154/tx.c
1861 +@@ -72,8 +72,21 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
1862 + int ret;
1863 +
1864 + if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
1865 +- u16 crc = crc_ccitt(0, skb->data, skb->len);
1866 ++ struct sk_buff *nskb;
1867 ++ u16 crc;
1868 ++
1869 ++ if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
1870 ++ nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
1871 ++ GFP_ATOMIC);
1872 ++ if (likely(nskb)) {
1873 ++ consume_skb(skb);
1874 ++ skb = nskb;
1875 ++ } else {
1876 ++ goto err_tx;
1877 ++ }
1878 ++ }
1879 +
1880 ++ crc = crc_ccitt(0, skb->data, skb->len);
1881 + put_unaligned_le16(crc, skb_put(skb, 2));
1882 + }
1883 +
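The mac802154 hunk mirrors the 6LoWPAN fix: before appending the two-byte FCS, the stack now guarantees two bytes of tailroom, copying into an expanded skb when needed. The checksum is then stored little-endian per IEEE 802.15.4. A bitwise standalone version that should match the kernel's table-driven crc_ccitt() (reflected polynomial 0x8408, initial value 0, as passed in the hunk above):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t crc_ccitt(uint16_t crc, const uint8_t *buf, size_t len)
    {
        while (len--) {
            crc ^= *buf++;
            for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ ((crc & 1) ? 0x8408 : 0);
        }
        return crc;
    }

    int main(void)
    {
        uint8_t frame[16] = { 0x41, 0x88, 0x01 }; /* 3 bytes + FCS room */
        size_t len = 3;
        uint16_t crc = crc_ccitt(0, frame, len);

        /* put_unaligned_le16(): FCS goes on the air little-endian */
        frame[len] = crc & 0xff;
        frame[len + 1] = crc >> 8;

        printf("fcs=%04x\n", crc);
        return 0;
    }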
1884 +diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
1885 +index 7f10430af39c..58426e7d320d 100644
1886 +--- a/tools/perf/util/auxtrace.c
1887 ++++ b/tools/perf/util/auxtrace.c
1888 +@@ -186,6 +186,9 @@ static int auxtrace_queues__grow(struct auxtrace_queues *queues,
1889 + for (i = 0; i < queues->nr_queues; i++) {
1890 + list_splice_tail(&queues->queue_array[i].head,
1891 + &queue_array[i].head);
1892 ++ queue_array[i].tid = queues->queue_array[i].tid;
1893 ++ queue_array[i].cpu = queues->queue_array[i].cpu;
1894 ++ queue_array[i].set = queues->queue_array[i].set;
1895 + queue_array[i].priv = queues->queue_array[i].priv;
1896 + }
1897 +
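The auxtrace fix above completes a field-by-field copy in the queue-array grow path that had silently dropped tid, cpu, and set, leaving grown queues misattributed. The perf code has to copy member by member because the list heads need list_splice_tail() rather than a bitwise copy; where no member needs such handling, whole-struct assignment copies every field and cannot fall out of date as the struct grows. An illustrative sketch of that alternative (not the perf code itself):

    #include <stdlib.h>

    struct queue {
        int tid, cpu;
        int set;
        void *priv;
    };

    static struct queue *grow(struct queue *old, size_t n, size_t new_n)
    {
        struct queue *arr = calloc(new_n, sizeof(*arr));

        if (!arr)
            return NULL;
        for (size_t i = 0; i < n; i++)
            arr[i] = old[i];   /* whole-struct copy: no missed fields */
        free(old);
        return arr;
    }

    int main(void)
    {
        struct queue *q = calloc(4, sizeof(*q));

        if (!q)
            return 1;
        q[0].tid = 42;
        q = grow(q, 4, 8);
        return q && q[0].tid == 42 ? 0 : 1;
    }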