
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Wed, 12 Apr 2017 17:59:56
Message-Id: 1492019979.5dc3041f3bac29d77549d3e6c30940707f468149.mpagano@gentoo
commit: 5dc3041f3bac29d77549d3e6c30940707f468149
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 12 17:59:39 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 12 17:59:39 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5dc3041f

Linux patch 4.4.61

 0000_README             |    4 +
 1060_linux-4.4.61.patch | 1527 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1531 insertions(+)

14 diff --git a/0000_README b/0000_README
15 index 6cc653c..84c1648 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -283,6 +283,10 @@ Patch: 1059_linux-4.4.60.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.4.60
21
22 +Patch: 1060_linux-4.4.61.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.4.61
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1060_linux-4.4.61.patch b/1060_linux-4.4.61.patch
31 new file mode 100644
32 index 0000000..642a5bd
33 --- /dev/null
34 +++ b/1060_linux-4.4.61.patch
35 @@ -0,0 +1,1527 @@
36 +diff --git a/Makefile b/Makefile
37 +index fb7c2b40753d..ef5045b8201d 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 4
42 + PATCHLEVEL = 4
43 +-SUBLEVEL = 60
44 ++SUBLEVEL = 61
45 + EXTRAVERSION =
46 + NAME = Blurry Fish Butt
47 +
48 +diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
49 +index 11b6595c2672..f91ee2f27b41 100644
50 +--- a/arch/arm/kvm/mmu.c
51 ++++ b/arch/arm/kvm/mmu.c
52 +@@ -796,6 +796,7 @@ void stage2_unmap_vm(struct kvm *kvm)
53 + int idx;
54 +
55 + idx = srcu_read_lock(&kvm->srcu);
56 ++ down_read(&current->mm->mmap_sem);
57 + spin_lock(&kvm->mmu_lock);
58 +
59 + slots = kvm_memslots(kvm);
60 +@@ -803,6 +804,7 @@ void stage2_unmap_vm(struct kvm *kvm)
61 + stage2_unmap_memslot(kvm, memslot);
62 +
63 + spin_unlock(&kvm->mmu_lock);
64 ++ up_read(&current->mm->mmap_sem);
65 + srcu_read_unlock(&kvm->srcu, idx);
66 + }
67 +
68 +@@ -1759,6 +1761,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
69 + (KVM_PHYS_SIZE >> PAGE_SHIFT))
70 + return -EFAULT;
71 +
72 ++ down_read(&current->mm->mmap_sem);
73 + /*
74 + * A memory region could potentially cover multiple VMAs, and any holes
75 + * between them, so iterate over all of them to find out if we can map
76 +@@ -1802,8 +1805,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
77 + pa += vm_start - vma->vm_start;
78 +
79 + /* IO region dirty page logging not allowed */
80 +- if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
81 +- return -EINVAL;
82 ++ if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
83 ++ ret = -EINVAL;
84 ++ goto out;
85 ++ }
86 +
87 + ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
88 + vm_end - vm_start,
89 +@@ -1815,7 +1820,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
90 + } while (hva < reg_end);
91 +
92 + if (change == KVM_MR_FLAGS_ONLY)
93 +- return ret;
94 ++ goto out;
95 +
96 + spin_lock(&kvm->mmu_lock);
97 + if (ret)
98 +@@ -1823,6 +1828,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
99 + else
100 + stage2_flush_memslot(kvm, memslot);
101 + spin_unlock(&kvm->mmu_lock);
102 ++out:
103 ++ up_read(&current->mm->mmap_sem);
104 + return ret;
105 + }
106 +
107 +diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
108 +index 273e61225c27..07238b39638c 100644
109 +--- a/arch/metag/include/asm/uaccess.h
110 ++++ b/arch/metag/include/asm/uaccess.h
111 +@@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);
112 +
113 + #define strlen_user(str) strnlen_user(str, 32767)
114 +
115 +-extern unsigned long __must_check __copy_user_zeroing(void *to,
116 +- const void __user *from,
117 +- unsigned long n);
118 ++extern unsigned long raw_copy_from_user(void *to, const void __user *from,
119 ++ unsigned long n);
120 +
121 + static inline unsigned long
122 + copy_from_user(void *to, const void __user *from, unsigned long n)
123 + {
124 ++ unsigned long res = n;
125 + if (likely(access_ok(VERIFY_READ, from, n)))
126 +- return __copy_user_zeroing(to, from, n);
127 +- memset(to, 0, n);
128 +- return n;
129 ++ res = raw_copy_from_user(to, from, n);
130 ++ if (unlikely(res))
131 ++ memset(to + (n - res), 0, res);
132 ++ return res;
133 + }
134 +
135 +-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
136 ++#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
137 + #define __copy_from_user_inatomic __copy_from_user
138 +
139 + extern unsigned long __must_check __copy_user(void __user *to,
140 +diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
141 +index b3ebfe9c8e88..2792fc621088 100644
142 +--- a/arch/metag/lib/usercopy.c
143 ++++ b/arch/metag/lib/usercopy.c
144 +@@ -29,7 +29,6 @@
145 + COPY \
146 + "1:\n" \
147 + " .section .fixup,\"ax\"\n" \
148 +- " MOV D1Ar1,#0\n" \
149 + FIXUP \
150 + " MOVT D1Ar1,#HI(1b)\n" \
151 + " JUMP D1Ar1,#LO(1b)\n" \
152 +@@ -260,27 +259,31 @@
153 + "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
154 + "22:\n" \
155 + "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
156 +- "SUB %3, %3, #32\n" \
157 + "23:\n" \
158 +- "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
159 ++ "SUB %3, %3, #32\n" \
160 + "24:\n" \
161 ++ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
162 ++ "25:\n" \
163 + "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
164 ++ "26:\n" \
165 + "SUB %3, %3, #32\n" \
166 + "DCACHE [%1+#-64], D0Ar6\n" \
167 + "BR $Lloop"id"\n" \
168 + \
169 + "MOV RAPF, %1\n" \
170 +- "25:\n" \
171 ++ "27:\n" \
172 + "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
173 +- "26:\n" \
174 ++ "28:\n" \
175 + "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
176 ++ "29:\n" \
177 + "SUB %3, %3, #32\n" \
178 +- "27:\n" \
179 ++ "30:\n" \
180 + "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
181 +- "28:\n" \
182 ++ "31:\n" \
183 + "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
184 ++ "32:\n" \
185 + "SUB %0, %0, #8\n" \
186 +- "29:\n" \
187 ++ "33:\n" \
188 + "SETL [%0++], D0.7, D1.7\n" \
189 + "SUB %3, %3, #32\n" \
190 + "1:" \
191 +@@ -312,11 +315,15 @@
192 + " .long 26b,3b\n" \
193 + " .long 27b,3b\n" \
194 + " .long 28b,3b\n" \
195 +- " .long 29b,4b\n" \
196 ++ " .long 29b,3b\n" \
197 ++ " .long 30b,3b\n" \
198 ++ " .long 31b,3b\n" \
199 ++ " .long 32b,3b\n" \
200 ++ " .long 33b,4b\n" \
201 + " .previous\n" \
202 + : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
203 + : "0" (to), "1" (from), "2" (ret), "3" (n) \
204 +- : "D1Ar1", "D0Ar2", "memory")
205 ++ : "D1Ar1", "D0Ar2", "cc", "memory")
206 +
207 + /* rewind 'to' and 'from' pointers when a fault occurs
208 + *
209 +@@ -342,7 +349,7 @@
210 + #define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
211 + __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
212 + "LSR D0Ar2, D0Ar2, #8\n" \
213 +- "AND D0Ar2, D0Ar2, #0x7\n" \
214 ++ "ANDS D0Ar2, D0Ar2, #0x7\n" \
215 + "ADDZ D0Ar2, D0Ar2, #4\n" \
216 + "SUB D0Ar2, D0Ar2, #1\n" \
217 + "MOV D1Ar1, #4\n" \
218 +@@ -403,47 +410,55 @@
219 + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
220 + "22:\n" \
221 + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
222 +- "SUB %3, %3, #16\n" \
223 + "23:\n" \
224 +- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
225 +- "24:\n" \
226 +- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
227 + "SUB %3, %3, #16\n" \
228 +- "25:\n" \
229 ++ "24:\n" \
230 + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
231 +- "26:\n" \
232 ++ "25:\n" \
233 + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
234 ++ "26:\n" \
235 + "SUB %3, %3, #16\n" \
236 + "27:\n" \
237 + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
238 + "28:\n" \
239 + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
240 ++ "29:\n" \
241 ++ "SUB %3, %3, #16\n" \
242 ++ "30:\n" \
243 ++ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
244 ++ "31:\n" \
245 ++ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
246 ++ "32:\n" \
247 + "SUB %3, %3, #16\n" \
248 + "DCACHE [%1+#-64], D0Ar6\n" \
249 + "BR $Lloop"id"\n" \
250 + \
251 + "MOV RAPF, %1\n" \
252 +- "29:\n" \
253 ++ "33:\n" \
254 + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
255 +- "30:\n" \
256 ++ "34:\n" \
257 + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
258 ++ "35:\n" \
259 + "SUB %3, %3, #16\n" \
260 +- "31:\n" \
261 ++ "36:\n" \
262 + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
263 +- "32:\n" \
264 ++ "37:\n" \
265 + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
266 ++ "38:\n" \
267 + "SUB %3, %3, #16\n" \
268 +- "33:\n" \
269 ++ "39:\n" \
270 + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
271 +- "34:\n" \
272 ++ "40:\n" \
273 + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
274 ++ "41:\n" \
275 + "SUB %3, %3, #16\n" \
276 +- "35:\n" \
277 ++ "42:\n" \
278 + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
279 +- "36:\n" \
280 ++ "43:\n" \
281 + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
282 ++ "44:\n" \
283 + "SUB %0, %0, #4\n" \
284 +- "37:\n" \
285 ++ "45:\n" \
286 + "SETD [%0++], D0.7\n" \
287 + "SUB %3, %3, #16\n" \
288 + "1:" \
289 +@@ -483,11 +498,19 @@
290 + " .long 34b,3b\n" \
291 + " .long 35b,3b\n" \
292 + " .long 36b,3b\n" \
293 +- " .long 37b,4b\n" \
294 ++ " .long 37b,3b\n" \
295 ++ " .long 38b,3b\n" \
296 ++ " .long 39b,3b\n" \
297 ++ " .long 40b,3b\n" \
298 ++ " .long 41b,3b\n" \
299 ++ " .long 42b,3b\n" \
300 ++ " .long 43b,3b\n" \
301 ++ " .long 44b,3b\n" \
302 ++ " .long 45b,4b\n" \
303 + " .previous\n" \
304 + : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
305 + : "0" (to), "1" (from), "2" (ret), "3" (n) \
306 +- : "D1Ar1", "D0Ar2", "memory")
307 ++ : "D1Ar1", "D0Ar2", "cc", "memory")
308 +
309 + /* rewind 'to' and 'from' pointers when a fault occurs
310 + *
311 +@@ -513,7 +536,7 @@
312 + #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
313 + __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
314 + "LSR D0Ar2, D0Ar2, #8\n" \
315 +- "AND D0Ar2, D0Ar2, #0x7\n" \
316 ++ "ANDS D0Ar2, D0Ar2, #0x7\n" \
317 + "ADDZ D0Ar2, D0Ar2, #4\n" \
318 + "SUB D0Ar2, D0Ar2, #1\n" \
319 + "MOV D1Ar1, #4\n" \
320 +@@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
321 + if ((unsigned long) src & 1) {
322 + __asm_copy_to_user_1(dst, src, retn);
323 + n--;
324 ++ if (retn)
325 ++ return retn + n;
326 + }
327 + if ((unsigned long) dst & 1) {
328 + /* Worst case - byte copy */
329 + while (n > 0) {
330 + __asm_copy_to_user_1(dst, src, retn);
331 + n--;
332 ++ if (retn)
333 ++ return retn + n;
334 + }
335 + }
336 + if (((unsigned long) src & 2) && n >= 2) {
337 + __asm_copy_to_user_2(dst, src, retn);
338 + n -= 2;
339 ++ if (retn)
340 ++ return retn + n;
341 + }
342 + if ((unsigned long) dst & 2) {
343 + /* Second worst case - word copy */
344 + while (n >= 2) {
345 + __asm_copy_to_user_2(dst, src, retn);
346 + n -= 2;
347 ++ if (retn)
348 ++ return retn + n;
349 + }
350 + }
351 +
352 +@@ -569,6 +600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
353 + while (n >= 8) {
354 + __asm_copy_to_user_8x64(dst, src, retn);
355 + n -= 8;
356 ++ if (retn)
357 ++ return retn + n;
358 + }
359 + }
360 + if (n >= RAPF_MIN_BUF_SIZE) {
361 +@@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
362 + while (n >= 8) {
363 + __asm_copy_to_user_8x64(dst, src, retn);
364 + n -= 8;
365 ++ if (retn)
366 ++ return retn + n;
367 + }
368 + }
369 + #endif
370 +@@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
371 + while (n >= 16) {
372 + __asm_copy_to_user_16(dst, src, retn);
373 + n -= 16;
374 ++ if (retn)
375 ++ return retn + n;
376 + }
377 +
378 + while (n >= 4) {
379 + __asm_copy_to_user_4(dst, src, retn);
380 + n -= 4;
381 ++ if (retn)
382 ++ return retn + n;
383 + }
384 +
385 + switch (n) {
386 +@@ -609,6 +648,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
387 + break;
388 + }
389 +
390 ++ /*
391 ++ * If we get here, retn correctly reflects the number of failing
392 ++ * bytes.
393 ++ */
394 + return retn;
395 + }
396 + EXPORT_SYMBOL(__copy_user);
397 +@@ -617,16 +660,14 @@ EXPORT_SYMBOL(__copy_user);
398 + __asm_copy_user_cont(to, from, ret, \
399 + " GETB D1Ar1,[%1++]\n" \
400 + "2: SETB [%0++],D1Ar1\n", \
401 +- "3: ADD %2,%2,#1\n" \
402 +- " SETB [%0++],D1Ar1\n", \
403 ++ "3: ADD %2,%2,#1\n", \
404 + " .long 2b,3b\n")
405 +
406 + #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
407 + __asm_copy_user_cont(to, from, ret, \
408 + " GETW D1Ar1,[%1++]\n" \
409 + "2: SETW [%0++],D1Ar1\n" COPY, \
410 +- "3: ADD %2,%2,#2\n" \
411 +- " SETW [%0++],D1Ar1\n" FIXUP, \
412 ++ "3: ADD %2,%2,#2\n" FIXUP, \
413 + " .long 2b,3b\n" TENTRY)
414 +
415 + #define __asm_copy_from_user_2(to, from, ret) \
416 +@@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user);
417 + __asm_copy_from_user_2x_cont(to, from, ret, \
418 + " GETB D1Ar1,[%1++]\n" \
419 + "4: SETB [%0++],D1Ar1\n", \
420 +- "5: ADD %2,%2,#1\n" \
421 +- " SETB [%0++],D1Ar1\n", \
422 ++ "5: ADD %2,%2,#1\n", \
423 + " .long 4b,5b\n")
424 +
425 + #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
426 + __asm_copy_user_cont(to, from, ret, \
427 + " GETD D1Ar1,[%1++]\n" \
428 + "2: SETD [%0++],D1Ar1\n" COPY, \
429 +- "3: ADD %2,%2,#4\n" \
430 +- " SETD [%0++],D1Ar1\n" FIXUP, \
431 ++ "3: ADD %2,%2,#4\n" FIXUP, \
432 + " .long 2b,3b\n" TENTRY)
433 +
434 + #define __asm_copy_from_user_4(to, from, ret) \
435 + __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
436 +
437 +-#define __asm_copy_from_user_5(to, from, ret) \
438 +- __asm_copy_from_user_4x_cont(to, from, ret, \
439 +- " GETB D1Ar1,[%1++]\n" \
440 +- "4: SETB [%0++],D1Ar1\n", \
441 +- "5: ADD %2,%2,#1\n" \
442 +- " SETB [%0++],D1Ar1\n", \
443 +- " .long 4b,5b\n")
444 +-
445 +-#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
446 +- __asm_copy_from_user_4x_cont(to, from, ret, \
447 +- " GETW D1Ar1,[%1++]\n" \
448 +- "4: SETW [%0++],D1Ar1\n" COPY, \
449 +- "5: ADD %2,%2,#2\n" \
450 +- " SETW [%0++],D1Ar1\n" FIXUP, \
451 +- " .long 4b,5b\n" TENTRY)
452 +-
453 +-#define __asm_copy_from_user_6(to, from, ret) \
454 +- __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
455 +-
456 +-#define __asm_copy_from_user_7(to, from, ret) \
457 +- __asm_copy_from_user_6x_cont(to, from, ret, \
458 +- " GETB D1Ar1,[%1++]\n" \
459 +- "6: SETB [%0++],D1Ar1\n", \
460 +- "7: ADD %2,%2,#1\n" \
461 +- " SETB [%0++],D1Ar1\n", \
462 +- " .long 6b,7b\n")
463 +-
464 +-#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
465 +- __asm_copy_from_user_4x_cont(to, from, ret, \
466 +- " GETD D1Ar1,[%1++]\n" \
467 +- "4: SETD [%0++],D1Ar1\n" COPY, \
468 +- "5: ADD %2,%2,#4\n" \
469 +- " SETD [%0++],D1Ar1\n" FIXUP, \
470 +- " .long 4b,5b\n" TENTRY)
471 +-
472 +-#define __asm_copy_from_user_8(to, from, ret) \
473 +- __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
474 +-
475 +-#define __asm_copy_from_user_9(to, from, ret) \
476 +- __asm_copy_from_user_8x_cont(to, from, ret, \
477 +- " GETB D1Ar1,[%1++]\n" \
478 +- "6: SETB [%0++],D1Ar1\n", \
479 +- "7: ADD %2,%2,#1\n" \
480 +- " SETB [%0++],D1Ar1\n", \
481 +- " .long 6b,7b\n")
482 +-
483 +-#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
484 +- __asm_copy_from_user_8x_cont(to, from, ret, \
485 +- " GETW D1Ar1,[%1++]\n" \
486 +- "6: SETW [%0++],D1Ar1\n" COPY, \
487 +- "7: ADD %2,%2,#2\n" \
488 +- " SETW [%0++],D1Ar1\n" FIXUP, \
489 +- " .long 6b,7b\n" TENTRY)
490 +-
491 +-#define __asm_copy_from_user_10(to, from, ret) \
492 +- __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
493 +-
494 +-#define __asm_copy_from_user_11(to, from, ret) \
495 +- __asm_copy_from_user_10x_cont(to, from, ret, \
496 +- " GETB D1Ar1,[%1++]\n" \
497 +- "8: SETB [%0++],D1Ar1\n", \
498 +- "9: ADD %2,%2,#1\n" \
499 +- " SETB [%0++],D1Ar1\n", \
500 +- " .long 8b,9b\n")
501 +-
502 +-#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
503 +- __asm_copy_from_user_8x_cont(to, from, ret, \
504 +- " GETD D1Ar1,[%1++]\n" \
505 +- "6: SETD [%0++],D1Ar1\n" COPY, \
506 +- "7: ADD %2,%2,#4\n" \
507 +- " SETD [%0++],D1Ar1\n" FIXUP, \
508 +- " .long 6b,7b\n" TENTRY)
509 +-
510 +-#define __asm_copy_from_user_12(to, from, ret) \
511 +- __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
512 +-
513 +-#define __asm_copy_from_user_13(to, from, ret) \
514 +- __asm_copy_from_user_12x_cont(to, from, ret, \
515 +- " GETB D1Ar1,[%1++]\n" \
516 +- "8: SETB [%0++],D1Ar1\n", \
517 +- "9: ADD %2,%2,#1\n" \
518 +- " SETB [%0++],D1Ar1\n", \
519 +- " .long 8b,9b\n")
520 +-
521 +-#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
522 +- __asm_copy_from_user_12x_cont(to, from, ret, \
523 +- " GETW D1Ar1,[%1++]\n" \
524 +- "8: SETW [%0++],D1Ar1\n" COPY, \
525 +- "9: ADD %2,%2,#2\n" \
526 +- " SETW [%0++],D1Ar1\n" FIXUP, \
527 +- " .long 8b,9b\n" TENTRY)
528 +-
529 +-#define __asm_copy_from_user_14(to, from, ret) \
530 +- __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
531 +-
532 +-#define __asm_copy_from_user_15(to, from, ret) \
533 +- __asm_copy_from_user_14x_cont(to, from, ret, \
534 +- " GETB D1Ar1,[%1++]\n" \
535 +- "10: SETB [%0++],D1Ar1\n", \
536 +- "11: ADD %2,%2,#1\n" \
537 +- " SETB [%0++],D1Ar1\n", \
538 +- " .long 10b,11b\n")
539 +-
540 +-#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
541 +- __asm_copy_from_user_12x_cont(to, from, ret, \
542 +- " GETD D1Ar1,[%1++]\n" \
543 +- "8: SETD [%0++],D1Ar1\n" COPY, \
544 +- "9: ADD %2,%2,#4\n" \
545 +- " SETD [%0++],D1Ar1\n" FIXUP, \
546 +- " .long 8b,9b\n" TENTRY)
547 +-
548 +-#define __asm_copy_from_user_16(to, from, ret) \
549 +- __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
550 +-
551 + #define __asm_copy_from_user_8x64(to, from, ret) \
552 + asm volatile ( \
553 + " GETL D0Ar2,D1Ar1,[%1++]\n" \
554 + "2: SETL [%0++],D0Ar2,D1Ar1\n" \
555 + "1:\n" \
556 + " .section .fixup,\"ax\"\n" \
557 +- " MOV D1Ar1,#0\n" \
558 +- " MOV D0Ar2,#0\n" \
559 + "3: ADD %2,%2,#8\n" \
560 +- " SETL [%0++],D0Ar2,D1Ar1\n" \
561 + " MOVT D0Ar2,#HI(1b)\n" \
562 + " JUMP D0Ar2,#LO(1b)\n" \
563 + " .previous\n" \
564 +@@ -789,36 +711,57 @@ EXPORT_SYMBOL(__copy_user);
565 + *
566 + * Rationale:
567 + * A fault occurs while reading from user buffer, which is the
568 +- * source. Since the fault is at a single address, we only
569 +- * need to rewind by 8 bytes.
570 ++ * source.
571 + * Since we don't write to kernel buffer until we read first,
572 + * the kernel buffer is at the right state and needn't be
573 +- * corrected.
574 ++ * corrected, but the source must be rewound to the beginning of
575 ++ * the block, which is LSM_STEP*8 bytes.
576 ++ * LSM_STEP is bits 10:8 in TXSTATUS which is already read
577 ++ * and stored in D0Ar2
578 ++ *
579 ++ * NOTE: If a fault occurs at the last operation in M{G,S}ETL
580 ++ * LSM_STEP will be 0. ie: we do 4 writes in our case, if
581 ++ * a fault happens at the 4th write, LSM_STEP will be 0
582 ++ * instead of 4. The code copes with that.
583 + */
584 + #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
585 + __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
586 +- "SUB %1, %1, #8\n")
587 ++ "LSR D0Ar2, D0Ar2, #5\n" \
588 ++ "ANDS D0Ar2, D0Ar2, #0x38\n" \
589 ++ "ADDZ D0Ar2, D0Ar2, #32\n" \
590 ++ "SUB %1, %1, D0Ar2\n")
591 +
592 + /* rewind 'from' pointer when a fault occurs
593 + *
594 + * Rationale:
595 + * A fault occurs while reading from user buffer, which is the
596 +- * source. Since the fault is at a single address, we only
597 +- * need to rewind by 4 bytes.
598 ++ * source.
599 + * Since we don't write to kernel buffer until we read first,
600 + * the kernel buffer is at the right state and needn't be
601 +- * corrected.
602 ++ * corrected, but the source must be rewound to the beginning of
603 ++ * the block, which is LSM_STEP*4 bytes.
604 ++ * LSM_STEP is bits 10:8 in TXSTATUS which is already read
605 ++ * and stored in D0Ar2
606 ++ *
607 ++ * NOTE: If a fault occurs at the last operation in M{G,S}ETL
608 ++ * LSM_STEP will be 0. ie: we do 4 writes in our case, if
609 ++ * a fault happens at the 4th write, LSM_STEP will be 0
610 ++ * instead of 4. The code copes with that.
611 + */
612 + #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
613 + __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
614 +- "SUB %1, %1, #4\n")
615 ++ "LSR D0Ar2, D0Ar2, #6\n" \
616 ++ "ANDS D0Ar2, D0Ar2, #0x1c\n" \
617 ++ "ADDZ D0Ar2, D0Ar2, #16\n" \
618 ++ "SUB %1, %1, D0Ar2\n")
619 +
620 +
621 +-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
622 +- userland. The return-value is the number of bytes that were
623 +- inaccessible. */
624 +-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
625 +- unsigned long n)
626 ++/*
627 ++ * Copy from user to kernel. The return-value is the number of bytes that were
628 ++ * inaccessible.
629 ++ */
630 ++unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
631 ++ unsigned long n)
632 + {
633 + register char *dst asm ("A0.2") = pdst;
634 + register const char __user *src asm ("A1.2") = psrc;
635 +@@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
636 + if ((unsigned long) src & 1) {
637 + __asm_copy_from_user_1(dst, src, retn);
638 + n--;
639 ++ if (retn)
640 ++ return retn + n;
641 + }
642 + if ((unsigned long) dst & 1) {
643 + /* Worst case - byte copy */
644 +@@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
645 + __asm_copy_from_user_1(dst, src, retn);
646 + n--;
647 + if (retn)
648 +- goto copy_exception_bytes;
649 ++ return retn + n;
650 + }
651 + }
652 + if (((unsigned long) src & 2) && n >= 2) {
653 + __asm_copy_from_user_2(dst, src, retn);
654 + n -= 2;
655 ++ if (retn)
656 ++ return retn + n;
657 + }
658 + if ((unsigned long) dst & 2) {
659 + /* Second worst case - word copy */
660 +@@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
661 + __asm_copy_from_user_2(dst, src, retn);
662 + n -= 2;
663 + if (retn)
664 +- goto copy_exception_bytes;
665 ++ return retn + n;
666 + }
667 + }
668 +
669 +- /* We only need one check after the unalignment-adjustments,
670 +- because if both adjustments were done, either both or
671 +- neither reference had an exception. */
672 +- if (retn != 0)
673 +- goto copy_exception_bytes;
674 +-
675 + #ifdef USE_RAPF
676 + /* 64 bit copy loop */
677 + if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
678 +@@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
679 + __asm_copy_from_user_8x64(dst, src, retn);
680 + n -= 8;
681 + if (retn)
682 +- goto copy_exception_bytes;
683 ++ return retn + n;
684 + }
685 + }
686 +
687 +@@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
688 + __asm_copy_from_user_8x64(dst, src, retn);
689 + n -= 8;
690 + if (retn)
691 +- goto copy_exception_bytes;
692 ++ return retn + n;
693 + }
694 + }
695 + #endif
696 +@@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
697 + n -= 4;
698 +
699 + if (retn)
700 +- goto copy_exception_bytes;
701 ++ return retn + n;
702 + }
703 +
704 + /* If we get here, there were no memory read faults. */
705 +@@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
706 + /* If we get here, retn correctly reflects the number of failing
707 + bytes. */
708 + return retn;
709 +-
710 +- copy_exception_bytes:
711 +- /* We already have "retn" bytes cleared, and need to clear the
712 +- remaining "n" bytes. A non-optimized simple byte-for-byte in-line
713 +- memset is preferred here, since this isn't speed-critical code and
714 +- we'd rather have this a leaf-function than calling memset. */
715 +- {
716 +- char *endp;
717 +- for (endp = dst + n; dst < endp; dst++)
718 +- *dst = 0;
719 +- }
720 +-
721 +- return retn + n;
722 + }
723 +-EXPORT_SYMBOL(__copy_user_zeroing);
724 ++EXPORT_SYMBOL(raw_copy_from_user);
725 +
726 + #define __asm_clear_8x64(to, ret) \
727 + asm volatile ( \
728 +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
729 +index db459612de44..75bfca69e418 100644
730 +--- a/arch/mips/Kconfig
731 ++++ b/arch/mips/Kconfig
732 +@@ -1412,7 +1412,7 @@ config CPU_MIPS32_R6
733 + select CPU_SUPPORTS_MSA
734 + select GENERIC_CSUM
735 + select HAVE_KVM
736 +- select MIPS_O32_FP64_SUPPORT
737 ++ select MIPS_O32_FP64_SUPPORT if 32BIT
738 + help
739 + Choose this option to build a kernel for release 6 or later of the
740 + MIPS32 architecture. New MIPS processors, starting with the Warrior
741 +diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
742 +index 40196bebe849..2365ce0ad8f2 100644
743 +--- a/arch/mips/include/asm/spinlock.h
744 ++++ b/arch/mips/include/asm/spinlock.h
745 +@@ -112,7 +112,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
746 + " andi %[ticket], %[ticket], 0xffff \n"
747 + " bne %[ticket], %[my_ticket], 4f \n"
748 + " subu %[ticket], %[my_ticket], %[ticket] \n"
749 +- "2: \n"
750 ++ "2: .insn \n"
751 + " .subsection 2 \n"
752 + "4: andi %[ticket], %[ticket], 0xffff \n"
753 + " sll %[ticket], 5 \n"
754 +@@ -187,7 +187,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
755 + " sc %[ticket], %[ticket_ptr] \n"
756 + " beqz %[ticket], 1b \n"
757 + " li %[ticket], 1 \n"
758 +- "2: \n"
759 ++ "2: .insn \n"
760 + " .subsection 2 \n"
761 + "3: b 2b \n"
762 + " li %[ticket], 0 \n"
763 +@@ -367,7 +367,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
764 + " .set reorder \n"
765 + __WEAK_LLSC_MB
766 + " li %2, 1 \n"
767 +- "2: \n"
768 ++ "2: .insn \n"
769 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
770 + : GCC_OFF_SMALL_ASM() (rw->lock)
771 + : "memory");
772 +@@ -407,7 +407,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
773 + " lui %1, 0x8000 \n"
774 + " sc %1, %0 \n"
775 + " li %2, 1 \n"
776 +- "2: \n"
777 ++ "2: .insn \n"
778 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
779 + "=&r" (ret)
780 + : GCC_OFF_SMALL_ASM() (rw->lock)
781 +diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
782 +index 3e390a4e3897..daf580ce5ca2 100644
783 +--- a/arch/mips/lantiq/xway/sysctrl.c
784 ++++ b/arch/mips/lantiq/xway/sysctrl.c
785 +@@ -467,7 +467,7 @@ void __init ltq_soc_init(void)
786 +
787 + if (!np_xbar)
788 + panic("Failed to load xbar nodes from devicetree");
789 +- if (of_address_to_resource(np_pmu, 0, &res_xbar))
790 ++ if (of_address_to_resource(np_xbar, 0, &res_xbar))
791 + panic("Failed to get xbar resources");
792 + if (request_mem_region(res_xbar.start, resource_size(&res_xbar),
793 + res_xbar.name) < 0)
794 +diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
795 +index 29f73e00253d..63b7d6f82d24 100644
796 +--- a/arch/mips/mm/tlbex.c
797 ++++ b/arch/mips/mm/tlbex.c
798 +@@ -757,7 +757,8 @@ static void build_huge_update_entries(u32 **p, unsigned int pte,
799 + static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
800 + struct uasm_label **l,
801 + unsigned int pte,
802 +- unsigned int ptr)
803 ++ unsigned int ptr,
804 ++ unsigned int flush)
805 + {
806 + #ifdef CONFIG_SMP
807 + UASM_i_SC(p, pte, 0, ptr);
808 +@@ -766,6 +767,22 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
809 + #else
810 + UASM_i_SW(p, pte, 0, ptr);
811 + #endif
812 ++ if (cpu_has_ftlb && flush) {
813 ++ BUG_ON(!cpu_has_tlbinv);
814 ++
815 ++ UASM_i_MFC0(p, ptr, C0_ENTRYHI);
816 ++ uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
817 ++ UASM_i_MTC0(p, ptr, C0_ENTRYHI);
818 ++ build_tlb_write_entry(p, l, r, tlb_indexed);
819 ++
820 ++ uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
821 ++ UASM_i_MTC0(p, ptr, C0_ENTRYHI);
822 ++ build_huge_update_entries(p, pte, ptr);
823 ++ build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
824 ++
825 ++ return;
826 ++ }
827 ++
828 + build_huge_update_entries(p, pte, ptr);
829 + build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
830 + }
831 +@@ -2082,7 +2099,7 @@ static void build_r4000_tlb_load_handler(void)
832 + uasm_l_tlbl_goaround2(&l, p);
833 + }
834 + uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
835 +- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
836 ++ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
837 + #endif
838 +
839 + uasm_l_nopage_tlbl(&l, p);
840 +@@ -2137,7 +2154,7 @@ static void build_r4000_tlb_store_handler(void)
841 + build_tlb_probe_entry(&p);
842 + uasm_i_ori(&p, wr.r1, wr.r1,
843 + _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
844 +- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
845 ++ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
846 + #endif
847 +
848 + uasm_l_nopage_tlbs(&l, p);
849 +@@ -2193,7 +2210,7 @@ static void build_r4000_tlb_modify_handler(void)
850 + build_tlb_probe_entry(&p);
851 + uasm_i_ori(&p, wr.r1, wr.r1,
852 + _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
853 +- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
854 ++ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
855 + #endif
856 +
857 + uasm_l_nopage_tlbm(&l, p);
858 +diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
859 +index f42834c7f007..3c575093f8f1 100644
860 +--- a/arch/mips/ralink/rt3883.c
861 ++++ b/arch/mips/ralink/rt3883.c
862 +@@ -36,7 +36,7 @@ static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
863 + static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
864 + static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
865 + static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
866 +-static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) };
867 ++static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
868 + static struct rt2880_pmx_func pci_func[] = {
869 + FUNC("pci-dev", 0, 40, 32),
870 + FUNC("pci-host2", 1, 40, 32),
871 +@@ -44,7 +44,7 @@ static struct rt2880_pmx_func pci_func[] = {
872 + FUNC("pci-fnc", 3, 40, 32)
873 + };
874 + static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
875 +-static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) };
876 ++static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
877 +
878 + static struct rt2880_pmx_group rt3883_pinmux_data[] = {
879 + GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
880 +diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
881 +index 718dd197909f..de73beb36910 100644
882 +--- a/arch/nios2/kernel/prom.c
883 ++++ b/arch/nios2/kernel/prom.c
884 +@@ -48,6 +48,13 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
885 + return alloc_bootmem_align(size, align);
886 + }
887 +
888 ++int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
889 ++ bool nomap)
890 ++{
891 ++ reserve_bootmem(base, size, BOOTMEM_DEFAULT);
892 ++ return 0;
893 ++}
894 ++
895 + void __init early_init_devtree(void *params)
896 + {
897 + __be32 *dtb = (u32 *)__dtb_start;
898 +diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
899 +index a4ff86d58d5c..6c4e351a7930 100644
900 +--- a/arch/nios2/kernel/setup.c
901 ++++ b/arch/nios2/kernel/setup.c
902 +@@ -195,6 +195,9 @@ void __init setup_arch(char **cmdline_p)
903 + }
904 + #endif /* CONFIG_BLK_DEV_INITRD */
905 +
906 ++ early_init_fdt_reserve_self();
907 ++ early_init_fdt_scan_reserved_mem();
908 ++
909 + unflatten_and_copy_device_tree();
910 +
911 + setup_cpuinfo();
912 +diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
913 +index 86150fbb42c3..91e5c1758b5c 100644
914 +--- a/arch/powerpc/kernel/align.c
915 ++++ b/arch/powerpc/kernel/align.c
916 +@@ -808,14 +808,25 @@ int fix_alignment(struct pt_regs *regs)
917 + nb = aligninfo[instr].len;
918 + flags = aligninfo[instr].flags;
919 +
920 +- /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
921 +- if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
922 +- nb = 8;
923 +- flags = LD+SW;
924 +- } else if (IS_XFORM(instruction) &&
925 +- ((instruction >> 1) & 0x3ff) == 660) {
926 +- nb = 8;
927 +- flags = ST+SW;
928 ++ /*
929 ++ * Handle some cases which give overlaps in the DSISR values.
930 ++ */
931 ++ if (IS_XFORM(instruction)) {
932 ++ switch (get_xop(instruction)) {
933 ++ case 532: /* ldbrx */
934 ++ nb = 8;
935 ++ flags = LD+SW;
936 ++ break;
937 ++ case 660: /* stdbrx */
938 ++ nb = 8;
939 ++ flags = ST+SW;
940 ++ break;
941 ++ case 20: /* lwarx */
942 ++ case 84: /* ldarx */
943 ++ case 116: /* lharx */
944 ++ case 276: /* lqarx */
945 ++ return 0; /* not emulated ever */
946 ++ }
947 + }
948 +
949 + /* Byteswap little endian loads and stores */
950 +diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
951 +index c8822af10a58..19d9b2d2d212 100644
952 +--- a/arch/powerpc/mm/hash_native_64.c
953 ++++ b/arch/powerpc/mm/hash_native_64.c
954 +@@ -645,6 +645,10 @@ static void native_flush_hash_range(unsigned long number, int local)
955 + unsigned long psize = batch->psize;
956 + int ssize = batch->ssize;
957 + int i;
958 ++ unsigned int use_local;
959 ++
960 ++ use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
961 ++ mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
962 +
963 + local_irq_save(flags);
964 +
965 +@@ -671,8 +675,7 @@ static void native_flush_hash_range(unsigned long number, int local)
966 + } pte_iterate_hashed_end();
967 + }
968 +
969 +- if (mmu_has_feature(MMU_FTR_TLBIEL) &&
970 +- mmu_psize_defs[psize].tlbiel && local) {
971 ++ if (use_local) {
972 + asm volatile("ptesync":::"memory");
973 + for (i = 0; i < number; i++) {
974 + vpn = batch->vpn[i];
975 +diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
976 +index 4da604ebf6fd..ca15613eaaa4 100644
977 +--- a/arch/s390/boot/compressed/misc.c
978 ++++ b/arch/s390/boot/compressed/misc.c
979 +@@ -141,31 +141,34 @@ static void check_ipl_parmblock(void *start, unsigned long size)
980 +
981 + unsigned long decompress_kernel(void)
982 + {
983 +- unsigned long output_addr;
984 +- unsigned char *output;
985 ++ void *output, *kernel_end;
986 +
987 +- output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
988 +- check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
989 +- memset(&_bss, 0, &_ebss - &_bss);
990 +- free_mem_ptr = (unsigned long)&_end;
991 +- free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
992 +- output = (unsigned char *) output_addr;
993 ++ output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
994 ++ kernel_end = output + SZ__bss_start;
995 ++ check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
996 +
997 + #ifdef CONFIG_BLK_DEV_INITRD
998 + /*
999 + * Move the initrd right behind the end of the decompressed
1000 +- * kernel image.
1001 ++ * kernel image. This also prevents initrd corruption caused by
1002 ++ * bss clearing since kernel_end will always be located behind the
1003 ++ * current bss section..
1004 + */
1005 +- if (INITRD_START && INITRD_SIZE &&
1006 +- INITRD_START < (unsigned long) output + SZ__bss_start) {
1007 +- check_ipl_parmblock(output + SZ__bss_start,
1008 +- INITRD_START + INITRD_SIZE);
1009 +- memmove(output + SZ__bss_start,
1010 +- (void *) INITRD_START, INITRD_SIZE);
1011 +- INITRD_START = (unsigned long) output + SZ__bss_start;
1012 ++ if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
1013 ++ check_ipl_parmblock(kernel_end, INITRD_SIZE);
1014 ++ memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
1015 ++ INITRD_START = (unsigned long) kernel_end;
1016 + }
1017 + #endif
1018 +
1019 ++ /*
1020 ++ * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
1021 ++ * initialized afterwards since they reside in bss.
1022 ++ */
1023 ++ memset(&_bss, 0, &_ebss - &_bss);
1024 ++ free_mem_ptr = (unsigned long) &_end;
1025 ++ free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
1026 ++
1027 + puts("Uncompressing Linux... ");
1028 + __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
1029 + puts("Ok, booting the kernel.\n");
1030 +diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
1031 +index 5c7381c5ad7f..c8d837f0fbbc 100644
1032 +--- a/arch/s390/include/asm/uaccess.h
1033 ++++ b/arch/s390/include/asm/uaccess.h
1034 +@@ -150,7 +150,7 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from,
1035 + " jg 2b\n" \
1036 + ".popsection\n" \
1037 + EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
1038 +- : "=d" (__rc), "=Q" (*(to)) \
1039 ++ : "=d" (__rc), "+Q" (*(to)) \
1040 + : "d" (size), "Q" (*(from)), \
1041 + "d" (__reg0), "K" (-EFAULT) \
1042 + : "cc"); \
1043 +diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
1044 +index 4f5fa8d65fe9..144367c0c28f 100644
1045 +--- a/drivers/gpu/drm/ttm/ttm_object.c
1046 ++++ b/drivers/gpu/drm/ttm/ttm_object.c
1047 +@@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
1048 + if (unlikely(ret != 0))
1049 + goto out_err0;
1050 +
1051 +- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
1052 ++ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
1053 + if (unlikely(ret != 0))
1054 + goto out_err1;
1055 +
1056 +@@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists);
1057 +
1058 + int ttm_ref_object_add(struct ttm_object_file *tfile,
1059 + struct ttm_base_object *base,
1060 +- enum ttm_ref_type ref_type, bool *existed)
1061 ++ enum ttm_ref_type ref_type, bool *existed,
1062 ++ bool require_existed)
1063 + {
1064 + struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
1065 + struct ttm_ref_object *ref;
1066 +@@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
1067 + }
1068 +
1069 + rcu_read_unlock();
1070 ++ if (require_existed)
1071 ++ return -EPERM;
1072 ++
1073 + ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
1074 + false, false);
1075 + if (unlikely(ret != 0))
1076 +@@ -635,7 +639,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
1077 + prime = (struct ttm_prime_object *) dma_buf->priv;
1078 + base = &prime->base;
1079 + *handle = base->hash.key;
1080 +- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
1081 ++ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
1082 +
1083 + dma_buf_put(dma_buf);
1084 +
1085 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
1086 +index 8e689b439890..6c649f7b5929 100644
1087 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
1088 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
1089 +@@ -539,7 +539,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
1090 + struct vmw_fence_obj **p_fence)
1091 + {
1092 + struct vmw_fence_obj *fence;
1093 +- int ret;
1094 ++ int ret;
1095 +
1096 + fence = kzalloc(sizeof(*fence), GFP_KERNEL);
1097 + if (unlikely(fence == NULL))
1098 +@@ -702,6 +702,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
1099 + }
1100 +
1101 +
1102 ++/**
1103 ++ * vmw_fence_obj_lookup - Look up a user-space fence object
1104 ++ *
1105 ++ * @tfile: A struct ttm_object_file identifying the caller.
1106 ++ * @handle: A handle identifying the fence object.
1107 ++ * @return: A struct vmw_user_fence base ttm object on success or
1108 ++ * an error pointer on failure.
1109 ++ *
1110 ++ * The fence object is looked up and type-checked. The caller needs
1111 ++ * to have opened the fence object first, but since that happens on
1112 ++ * creation and fence objects aren't shareable, that's not an
1113 ++ * issue currently.
1114 ++ */
1115 ++static struct ttm_base_object *
1116 ++vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
1117 ++{
1118 ++ struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
1119 ++
1120 ++ if (!base) {
1121 ++ pr_err("Invalid fence object handle 0x%08lx.\n",
1122 ++ (unsigned long)handle);
1123 ++ return ERR_PTR(-EINVAL);
1124 ++ }
1125 ++
1126 ++ if (base->refcount_release != vmw_user_fence_base_release) {
1127 ++ pr_err("Invalid fence object handle 0x%08lx.\n",
1128 ++ (unsigned long)handle);
1129 ++ ttm_base_object_unref(&base);
1130 ++ return ERR_PTR(-EINVAL);
1131 ++ }
1132 ++
1133 ++ return base;
1134 ++}
1135 ++
1136 ++
1137 + int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
1138 + struct drm_file *file_priv)
1139 + {
1140 +@@ -727,13 +762,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
1141 + arg->kernel_cookie = jiffies + wait_timeout;
1142 + }
1143 +
1144 +- base = ttm_base_object_lookup(tfile, arg->handle);
1145 +- if (unlikely(base == NULL)) {
1146 +- printk(KERN_ERR "Wait invalid fence object handle "
1147 +- "0x%08lx.\n",
1148 +- (unsigned long)arg->handle);
1149 +- return -EINVAL;
1150 +- }
1151 ++ base = vmw_fence_obj_lookup(tfile, arg->handle);
1152 ++ if (IS_ERR(base))
1153 ++ return PTR_ERR(base);
1154 +
1155 + fence = &(container_of(base, struct vmw_user_fence, base)->fence);
1156 +
1157 +@@ -772,13 +803,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
1158 + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1159 + struct vmw_private *dev_priv = vmw_priv(dev);
1160 +
1161 +- base = ttm_base_object_lookup(tfile, arg->handle);
1162 +- if (unlikely(base == NULL)) {
1163 +- printk(KERN_ERR "Fence signaled invalid fence object handle "
1164 +- "0x%08lx.\n",
1165 +- (unsigned long)arg->handle);
1166 +- return -EINVAL;
1167 +- }
1168 ++ base = vmw_fence_obj_lookup(tfile, arg->handle);
1169 ++ if (IS_ERR(base))
1170 ++ return PTR_ERR(base);
1171 +
1172 + fence = &(container_of(base, struct vmw_user_fence, base)->fence);
1173 + fman = fman_from_fence(fence);
1174 +@@ -1093,6 +1120,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
1175 + (struct drm_vmw_fence_event_arg *) data;
1176 + struct vmw_fence_obj *fence = NULL;
1177 + struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
1178 ++ struct ttm_object_file *tfile = vmw_fp->tfile;
1179 + struct drm_vmw_fence_rep __user *user_fence_rep =
1180 + (struct drm_vmw_fence_rep __user *)(unsigned long)
1181 + arg->fence_rep;
1182 +@@ -1106,24 +1134,18 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
1183 + */
1184 + if (arg->handle) {
1185 + struct ttm_base_object *base =
1186 +- ttm_base_object_lookup_for_ref(dev_priv->tdev,
1187 +- arg->handle);
1188 +-
1189 +- if (unlikely(base == NULL)) {
1190 +- DRM_ERROR("Fence event invalid fence object handle "
1191 +- "0x%08lx.\n",
1192 +- (unsigned long)arg->handle);
1193 +- return -EINVAL;
1194 +- }
1195 ++ vmw_fence_obj_lookup(tfile, arg->handle);
1196 ++
1197 ++ if (IS_ERR(base))
1198 ++ return PTR_ERR(base);
1199 ++
1200 + fence = &(container_of(base, struct vmw_user_fence,
1201 + base)->fence);
1202 + (void) vmw_fence_obj_reference(fence);
1203 +
1204 + if (user_fence_rep != NULL) {
1205 +- bool existed;
1206 +-
1207 + ret = ttm_ref_object_add(vmw_fp->tfile, base,
1208 +- TTM_REF_USAGE, &existed);
1209 ++ TTM_REF_USAGE, NULL, false);
1210 + if (unlikely(ret != 0)) {
1211 + DRM_ERROR("Failed to reference a fence "
1212 + "object.\n");
1213 +@@ -1166,8 +1188,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
1214 + return 0;
1215 + out_no_create:
1216 + if (user_fence_rep != NULL)
1217 +- ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1218 +- handle, TTM_REF_USAGE);
1219 ++ ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
1220 + out_no_ref_obj:
1221 + vmw_fence_obj_unreference(&fence);
1222 + return ret;
1223 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
1224 +index b8c6a03c8c54..5ec24fd801cd 100644
1225 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
1226 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
1227 +@@ -114,8 +114,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
1228 + param->value = dev_priv->has_dx;
1229 + break;
1230 + default:
1231 +- DRM_ERROR("Illegal vmwgfx get param request: %d\n",
1232 +- param->param);
1233 + return -EINVAL;
1234 + }
1235 +
1236 +@@ -186,7 +184,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
1237 + bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
1238 + struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
1239 +
1240 +- if (unlikely(arg->pad64 != 0)) {
1241 ++ if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
1242 + DRM_ERROR("Illegal GET_3D_CAP argument.\n");
1243 + return -EINVAL;
1244 + }
1245 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
1246 +index e57667ca7557..dbca128a9aa6 100644
1247 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
1248 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
1249 +@@ -591,7 +591,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
1250 + return ret;
1251 +
1252 + ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
1253 +- TTM_REF_SYNCCPU_WRITE, &existed);
1254 ++ TTM_REF_SYNCCPU_WRITE, &existed, false);
1255 + if (ret != 0 || existed)
1256 + ttm_bo_synccpu_write_release(&user_bo->dma.base);
1257 +
1258 +@@ -775,7 +775,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
1259 +
1260 + *handle = user_bo->prime.base.hash.key;
1261 + return ttm_ref_object_add(tfile, &user_bo->prime.base,
1262 +- TTM_REF_USAGE, NULL);
1263 ++ TTM_REF_USAGE, NULL, false);
1264 + }
1265 +
1266 + /*
1267 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
1268 +index 7d620e82e000..c9c04ccccdd9 100644
1269 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
1270 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
1271 +@@ -715,11 +715,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
1272 + 128;
1273 +
1274 + num_sizes = 0;
1275 +- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
1276 ++ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
1277 ++ if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
1278 ++ return -EINVAL;
1279 + num_sizes += req->mip_levels[i];
1280 ++ }
1281 +
1282 +- if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
1283 +- DRM_VMW_MAX_MIP_LEVELS)
1284 ++ if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
1285 ++ num_sizes == 0)
1286 + return -EINVAL;
1287 +
1288 + size = vmw_user_surface_size + 128 +
1289 +@@ -904,17 +907,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
1290 + uint32_t handle;
1291 + struct ttm_base_object *base;
1292 + int ret;
1293 ++ bool require_exist = false;
1294 +
1295 + if (handle_type == DRM_VMW_HANDLE_PRIME) {
1296 + ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
1297 + if (unlikely(ret != 0))
1298 + return ret;
1299 + } else {
1300 +- if (unlikely(drm_is_render_client(file_priv))) {
1301 +- DRM_ERROR("Render client refused legacy "
1302 +- "surface reference.\n");
1303 +- return -EACCES;
1304 +- }
1305 ++ if (unlikely(drm_is_render_client(file_priv)))
1306 ++ require_exist = true;
1307 ++
1308 + if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
1309 + DRM_ERROR("Locked master refused legacy "
1310 + "surface reference.\n");
1311 +@@ -942,17 +944,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
1312 +
1313 + /*
1314 + * Make sure the surface creator has the same
1315 +- * authenticating master.
1316 ++ * authenticating master, or is already registered with us.
1317 + */
1318 + if (drm_is_primary_client(file_priv) &&
1319 +- user_srf->master != file_priv->master) {
1320 +- DRM_ERROR("Trying to reference surface outside of"
1321 +- " master domain.\n");
1322 +- ret = -EACCES;
1323 +- goto out_bad_resource;
1324 +- }
1325 ++ user_srf->master != file_priv->master)
1326 ++ require_exist = true;
1327 +
1328 +- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
1329 ++ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
1330 ++ require_exist);
1331 + if (unlikely(ret != 0)) {
1332 + DRM_ERROR("Could not add a reference to a surface.\n");
1333 + goto out_bad_resource;
1334 +diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
1335 +index acb3b303d800..90841abd3ce4 100644
1336 +--- a/drivers/iio/gyro/bmg160_core.c
1337 ++++ b/drivers/iio/gyro/bmg160_core.c
1338 +@@ -28,6 +28,7 @@
1339 + #include <linux/iio/trigger_consumer.h>
1340 + #include <linux/iio/triggered_buffer.h>
1341 + #include <linux/regmap.h>
1342 ++#include <linux/delay.h>
1343 + #include "bmg160.h"
1344 +
1345 + #define BMG160_IRQ_NAME "bmg160_event"
1346 +@@ -53,6 +54,9 @@
1347 + #define BMG160_NO_FILTER 0
1348 + #define BMG160_DEF_BW 100
1349 +
1350 ++#define BMG160_GYRO_REG_RESET 0x14
1351 ++#define BMG160_GYRO_RESET_VAL 0xb6
1352 ++
1353 + #define BMG160_REG_INT_MAP_0 0x17
1354 + #define BMG160_INT_MAP_0_BIT_ANY BIT(1)
1355 +
1356 +@@ -186,6 +190,14 @@ static int bmg160_chip_init(struct bmg160_data *data)
1357 + int ret;
1358 + unsigned int val;
1359 +
1360 ++ /*
1361 ++ * Reset chip to get it in a known good state. A delay of 30ms after
1362 ++ * reset is required according to the datasheet.
1363 ++ */
1364 ++ regmap_write(data->regmap, BMG160_GYRO_REG_RESET,
1365 ++ BMG160_GYRO_RESET_VAL);
1366 ++ usleep_range(30000, 30700);
1367 ++
1368 + ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val);
1369 + if (ret < 0) {
1370 + dev_err(data->dev, "Error reading reg_chip_id\n");
1371 +diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
1372 +index 3f2a3d611e4b..9c6357c03905 100644
1373 +--- a/drivers/staging/android/ashmem.c
1374 ++++ b/drivers/staging/android/ashmem.c
1375 +@@ -392,6 +392,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
1376 + ret = PTR_ERR(vmfile);
1377 + goto out;
1378 + }
1379 ++ vmfile->f_mode |= FMODE_LSEEK;
1380 + asma->file = vmfile;
1381 + }
1382 + get_file(asma->file);
1383 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
1384 +index 2fa754c5fd62..6cb5c4b30e78 100644
1385 +--- a/fs/cifs/smb2pdu.c
1386 ++++ b/fs/cifs/smb2pdu.c
1387 +@@ -952,6 +952,10 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
1388 + return -EINVAL;
1389 + }
1390 +
1391 ++ /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
1392 ++ if (tcon)
1393 ++ tcon->tid = 0;
1394 ++
1395 + rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
1396 + if (rc) {
1397 + kfree(unc_path);
1398 +diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
1399 +index b803213d1307..39c75a86c67f 100644
1400 +--- a/fs/sysfs/file.c
1401 ++++ b/fs/sysfs/file.c
1402 +@@ -108,7 +108,7 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
1403 + {
1404 + const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
1405 + struct kobject *kobj = of->kn->parent->priv;
1406 +- size_t len;
1407 ++ ssize_t len;
1408 +
1409 + /*
1410 + * If buf != of->prealloc_buf, we don't know how
1411 +@@ -117,13 +117,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
1412 + if (WARN_ON_ONCE(buf != of->prealloc_buf))
1413 + return 0;
1414 + len = ops->show(kobj, of->kn->priv, buf);
1415 ++ if (len < 0)
1416 ++ return len;
1417 + if (pos) {
1418 + if (len <= pos)
1419 + return 0;
1420 + len -= pos;
1421 + memmove(buf, buf + pos, len);
1422 + }
1423 +- return min(count, len);
1424 ++ return min_t(ssize_t, count, len);
1425 + }
1426 +
1427 + /* kernfs write callback for regular sysfs files */
1428 +diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
1429 +index ed953f98f0e1..1487011fe057 100644
1430 +--- a/include/drm/ttm/ttm_object.h
1431 ++++ b/include/drm/ttm/ttm_object.h
1432 +@@ -229,6 +229,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
1433 + * @ref_type: The type of reference.
1434 + * @existed: Upon completion, indicates that an identical reference object
1435 + * already existed, and the refcount was upped on that object instead.
1436 ++ * @require_existed: Fail with -EPERM if an identical ref object didn't
1437 ++ * already exist.
1438 + *
1439 + * Checks that the base object is shareable and adds a ref object to it.
1440 + *
1441 +@@ -243,7 +245,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
1442 + */
1443 + extern int ttm_ref_object_add(struct ttm_object_file *tfile,
1444 + struct ttm_base_object *base,
1445 +- enum ttm_ref_type ref_type, bool *existed);
1446 ++ enum ttm_ref_type ref_type, bool *existed,
1447 ++ bool require_existed);
1448 +
1449 + extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
1450 + struct ttm_base_object *base);
1451 +diff --git a/kernel/ptrace.c b/kernel/ptrace.c
1452 +index a46c40bfb5f6..c7e8ed99c953 100644
1453 +--- a/kernel/ptrace.c
1454 ++++ b/kernel/ptrace.c
1455 +@@ -151,11 +151,17 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
1456 +
1457 + WARN_ON(!task->ptrace || task->parent != current);
1458 +
1459 ++ /*
1460 ++ * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
1461 ++ * Recheck state under the lock to close this race.
1462 ++ */
1463 + spin_lock_irq(&task->sighand->siglock);
1464 +- if (__fatal_signal_pending(task))
1465 +- wake_up_state(task, __TASK_TRACED);
1466 +- else
1467 +- task->state = TASK_TRACED;
1468 ++ if (task->state == __TASK_TRACED) {
1469 ++ if (__fatal_signal_pending(task))
1470 ++ wake_up_state(task, __TASK_TRACED);
1471 ++ else
1472 ++ task->state = TASK_TRACED;
1473 ++ }
1474 + spin_unlock_irq(&task->sighand->siglock);
1475 + }
1476 +
1477 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
1478 +index acbb0e73d3a2..7d7f99b0db47 100644
1479 +--- a/kernel/trace/ring_buffer.c
1480 ++++ b/kernel/trace/ring_buffer.c
1481 +@@ -4875,9 +4875,9 @@ static __init int test_ringbuffer(void)
1482 + rb_data[cpu].cnt = cpu;
1483 + rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
1484 + "rbtester/%d", cpu);
1485 +- if (WARN_ON(!rb_threads[cpu])) {
1486 ++ if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
1487 + pr_cont("FAILED\n");
1488 +- ret = -1;
1489 ++ ret = PTR_ERR(rb_threads[cpu]);
1490 + goto out_free;
1491 + }
1492 +
1493 +@@ -4887,9 +4887,9 @@ static __init int test_ringbuffer(void)
1494 +
1495 + /* Now create the rb hammer! */
1496 + rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
1497 +- if (WARN_ON(!rb_hammer)) {
1498 ++ if (WARN_ON(IS_ERR(rb_hammer))) {
1499 + pr_cont("FAILED\n");
1500 +- ret = -1;
1501 ++ ret = PTR_ERR(rb_hammer);
1502 + goto out_free;
1503 + }
1504 +
1505 +diff --git a/mm/mempolicy.c b/mm/mempolicy.c
1506 +index a4217fe60dff..e09b1a0e2cfe 100644
1507 +--- a/mm/mempolicy.c
1508 ++++ b/mm/mempolicy.c
1509 +@@ -1492,7 +1492,6 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1510 + COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1511 + compat_ulong_t, maxnode)
1512 + {
1513 +- long err = 0;
1514 + unsigned long __user *nm = NULL;
1515 + unsigned long nr_bits, alloc_size;
1516 + DECLARE_BITMAP(bm, MAX_NUMNODES);
1517 +@@ -1501,14 +1500,13 @@ COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1518 + alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1519 +
1520 + if (nmask) {
1521 +- err = compat_get_bitmap(bm, nmask, nr_bits);
1522 ++ if (compat_get_bitmap(bm, nmask, nr_bits))
1523 ++ return -EFAULT;
1524 + nm = compat_alloc_user_space(alloc_size);
1525 +- err |= copy_to_user(nm, bm, alloc_size);
1526 ++ if (copy_to_user(nm, bm, alloc_size))
1527 ++ return -EFAULT;
1528 + }
1529 +
1530 +- if (err)
1531 +- return -EFAULT;
1532 +-
1533 + return sys_set_mempolicy(mode, nm, nr_bits+1);
1534 + }
1535 +
1536 +@@ -1516,7 +1514,6 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1537 + compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1538 + compat_ulong_t, maxnode, compat_ulong_t, flags)
1539 + {
1540 +- long err = 0;
1541 + unsigned long __user *nm = NULL;
1542 + unsigned long nr_bits, alloc_size;
1543 + nodemask_t bm;
1544 +@@ -1525,14 +1522,13 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1545 + alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1546 +
1547 + if (nmask) {
1548 +- err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1549 ++ if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1550 ++ return -EFAULT;
1551 + nm = compat_alloc_user_space(alloc_size);
1552 +- err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1553 ++ if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1554 ++ return -EFAULT;
1555 + }
1556 +
1557 +- if (err)
1558 +- return -EFAULT;
1559 +-
1560 + return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1561 + }
1562 +