From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.12 commit in: /
Date: Fri, 25 Aug 2017 11:00:21
Message-Id: 1503658807.8911ed4b99ac4a15838eeaaf9bc095b327cb3d23.mpagano@gentoo

commit: 8911ed4b99ac4a15838eeaaf9bc095b327cb3d23
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Aug 25 11:00:07 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Aug 25 11:00:07 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8911ed4b

Linux patch 4.12.9

0000_README | 4 +
1008_linux-4.12.9.patch | 1644 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1648 insertions(+)

diff --git a/0000_README b/0000_README
index 47efe0d..90242d0 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch: 1007_linux-4.12.8.patch
From: http://www.kernel.org
Desc: Linux 4.12.8

+Patch: 1008_linux-4.12.9.patch
+From: http://www.kernel.org
+Desc: Linux 4.12.9
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1008_linux-4.12.9.patch b/1008_linux-4.12.9.patch
new file mode 100644
index 0000000..21e964c
--- /dev/null
+++ b/1008_linux-4.12.9.patch
@@ -0,0 +1,1644 @@
+diff --git a/Makefile b/Makefile
+index 6da481d08441..a6c2a5e7a48d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 12
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
+index 559da17297ef..651299c242ec 100644
+--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
+@@ -507,7 +507,7 @@
+ pinctrl_pcie: pciegrp {
+ fsl,pins = <
+ /* PCIe reset */
+- MX6QDL_PAD_EIM_BCLK__GPIO6_IO31 0x030b0
++ MX6QDL_PAD_EIM_DA0__GPIO3_IO00 0x030b0
+ MX6QDL_PAD_EIM_DA4__GPIO3_IO04 0x030b0
+ >;
+ };
+@@ -668,7 +668,7 @@
+ &pcie {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie>;
+- reset-gpio = <&gpio6 31 GPIO_ACTIVE_LOW>;
++ reset-gpio = <&gpio3 0 GPIO_ACTIVE_LOW>;
+ status = "okay";
+ };
+
+diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
+index 4e6e88a6b2f4..2244a94ed9c9 100644
+--- a/arch/arm/include/asm/bug.h
++++ b/arch/arm/include/asm/bug.h
+@@ -37,7 +37,7 @@ do { \
+ ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
+ "2:\t.asciz " #__file "\n" \
+ ".popsection\n" \
+- ".pushsection __bug_table,\"a\"\n" \
++ ".pushsection __bug_table,\"aw\"\n" \
+ ".align 2\n" \
+ "3:\t.word 1b, 2b\n" \
+ "\t.hword " #__line ", 0\n" \
+diff --git a/arch/arm64/include/asm/bug.h b/arch/arm64/include/asm/bug.h
+index 366448eb0fb7..a02a57186f56 100644
+--- a/arch/arm64/include/asm/bug.h
++++ b/arch/arm64/include/asm/bug.h
+@@ -36,7 +36,7 @@
+ #ifdef CONFIG_GENERIC_BUG
+
+ #define __BUG_ENTRY(flags) \
+- ".pushsection __bug_table,\"a\"\n\t" \
++ ".pushsection __bug_table,\"aw\"\n\t" \
+ ".align 2\n\t" \
+ "0: .long 1f - 0b\n\t" \
+ _BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
+diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
+index acae781f7359..3288c2b36731 100644
+--- a/arch/arm64/include/asm/elf.h
++++ b/arch/arm64/include/asm/elf.h
+@@ -114,10 +114,10 @@
+
+ /*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
++ * 64-bit, this is above 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+-#define ELF_ET_DYN_BASE 0x100000000UL
++#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
+
+ #ifndef __ASSEMBLY__
+
+diff --git a/arch/blackfin/include/asm/bug.h b/arch/blackfin/include/asm/bug.h
+index 8d9b1eba89c4..76b2e82ee730 100644
+--- a/arch/blackfin/include/asm/bug.h
++++ b/arch/blackfin/include/asm/bug.h
+@@ -21,7 +21,7 @@
+ #define _BUG_OR_WARN(flags) \
+ asm volatile( \
+ "1: .hword %0\n" \
+- " .section __bug_table,\"a\",@progbits\n" \
++ " .section __bug_table,\"aw\",@progbits\n" \
+ "2: .long 1b\n" \
+ " .long %1\n" \
+ " .short %2\n" \
+@@ -38,7 +38,7 @@
+ #define _BUG_OR_WARN(flags) \
+ asm volatile( \
+ "1: .hword %0\n" \
+- " .section __bug_table,\"a\",@progbits\n" \
++ " .section __bug_table,\"aw\",@progbits\n" \
+ "2: .long 1b\n" \
+ " .short %1\n" \
+ " .org 2b + %2\n" \
+diff --git a/arch/mn10300/include/asm/bug.h b/arch/mn10300/include/asm/bug.h
+index aa6a38886391..811414fb002d 100644
+--- a/arch/mn10300/include/asm/bug.h
++++ b/arch/mn10300/include/asm/bug.h
+@@ -21,7 +21,7 @@ do { \
+ asm volatile( \
+ " syscall 15 \n" \
+ "0: \n" \
+- " .section __bug_table,\"a\" \n" \
++ " .section __bug_table,\"aw\" \n" \
+ " .long 0b,%0,%1 \n" \
+ " .previous \n" \
+ : \
+diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
+index d2742273a685..07ea467f22fc 100644
+--- a/arch/parisc/include/asm/bug.h
++++ b/arch/parisc/include/asm/bug.h
+@@ -27,7 +27,7 @@
+ do { \
+ asm volatile("\n" \
+ "1:\t" PARISC_BUG_BREAK_ASM "\n" \
+- "\t.pushsection __bug_table,\"a\"\n" \
++ "\t.pushsection __bug_table,\"aw\"\n" \
+ "2:\t" ASM_WORD_INSN "1b, %c0\n" \
+ "\t.short %c1, %c2\n" \
+ "\t.org 2b+%c3\n" \
+@@ -50,7 +50,7 @@
+ do { \
+ asm volatile("\n" \
+ "1:\t" PARISC_BUG_BREAK_ASM "\n" \
+- "\t.pushsection __bug_table,\"a\"\n" \
++ "\t.pushsection __bug_table,\"aw\"\n" \
+ "2:\t" ASM_WORD_INSN "1b, %c0\n" \
+ "\t.short %c1, %c2\n" \
+ "\t.org 2b+%c3\n" \
+@@ -64,7 +64,7 @@
+ do { \
+ asm volatile("\n" \
+ "1:\t" PARISC_BUG_BREAK_ASM "\n" \
+- "\t.pushsection __bug_table,\"a\"\n" \
++ "\t.pushsection __bug_table,\"aw\"\n" \
+ "2:\t" ASM_WORD_INSN "1b\n" \
+ "\t.short %c0\n" \
+ "\t.org 2b+%c1\n" \
+diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
+index 0151af6c2a50..87fcc1948817 100644
+--- a/arch/powerpc/include/asm/bug.h
++++ b/arch/powerpc/include/asm/bug.h
+@@ -18,7 +18,7 @@
+ #include <asm/asm-offsets.h>
+ #ifdef CONFIG_DEBUG_BUGVERBOSE
+ .macro EMIT_BUG_ENTRY addr,file,line,flags
+- .section __bug_table,"a"
++ .section __bug_table,"aw"
+ 5001: PPC_LONG \addr, 5002f
+ .short \line, \flags
+ .org 5001b+BUG_ENTRY_SIZE
+@@ -29,7 +29,7 @@
+ .endm
+ #else
+ .macro EMIT_BUG_ENTRY addr,file,line,flags
+- .section __bug_table,"a"
++ .section __bug_table,"aw"
+ 5001: PPC_LONG \addr
+ .short \flags
+ .org 5001b+BUG_ENTRY_SIZE
+@@ -42,14 +42,14 @@
+ sizeof(struct bug_entry), respectively */
+ #ifdef CONFIG_DEBUG_BUGVERBOSE
+ #define _EMIT_BUG_ENTRY \
+- ".section __bug_table,\"a\"\n" \
++ ".section __bug_table,\"aw\"\n" \
+ "2:\t" PPC_LONG "1b, %0\n" \
+ "\t.short %1, %2\n" \
+ ".org 2b+%3\n" \
+ ".previous\n"
+ #else
+ #define _EMIT_BUG_ENTRY \
+- ".section __bug_table,\"a\"\n" \
++ ".section __bug_table,\"aw\"\n" \
+ "2:\t" PPC_LONG "1b\n" \
+ "\t.short %2\n" \
+ ".org 2b+%3\n" \
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 2ad725ef4368..318738f3aa05 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -362,7 +362,8 @@ void enable_kernel_vsx(void)
+
+ cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
+
+- if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
++ if (current->thread.regs &&
++ (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
+ check_if_tm_restore_required(current);
+ /*
+ * If a thread has already been reclaimed then the
+@@ -386,7 +387,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
+ {
+ if (tsk->thread.regs) {
+ preempt_disable();
+- if (tsk->thread.regs->msr & MSR_VSX) {
++ if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
+ BUG_ON(tsk != current);
+ giveup_vsx(tsk);
+ }
+diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
+index 1bbd9dbfe4e0..ce9cc123988b 100644
+--- a/arch/s390/include/asm/bug.h
++++ b/arch/s390/include/asm/bug.h
+@@ -14,7 +14,7 @@
+ ".section .rodata.str,\"aMS\",@progbits,1\n" \
+ "2: .asciz \""__FILE__"\"\n" \
+ ".previous\n" \
+- ".section __bug_table,\"a\"\n" \
++ ".section __bug_table,\"aw\"\n" \
+ "3: .long 1b-3b,2b-3b\n" \
+ " .short %0,%1\n" \
+ " .org 3b+%2\n" \
+@@ -30,7 +30,7 @@
+ asm volatile( \
+ "0: j 0b+2\n" \
+ "1:\n" \
+- ".section __bug_table,\"a\"\n" \
++ ".section __bug_table,\"aw\"\n" \
+ "2: .long 1b-2b\n" \
+ " .short %0\n" \
+ " .org 2b+%1\n" \
+diff --git a/arch/sh/include/asm/bug.h b/arch/sh/include/asm/bug.h
+index 1b77f068be2b..986c8781d89f 100644
+--- a/arch/sh/include/asm/bug.h
++++ b/arch/sh/include/asm/bug.h
+@@ -24,14 +24,14 @@
+ */
+ #ifdef CONFIG_DEBUG_BUGVERBOSE
+ #define _EMIT_BUG_ENTRY \
+- "\t.pushsection __bug_table,\"a\"\n" \
++ "\t.pushsection __bug_table,\"aw\"\n" \
+ "2:\t.long 1b, %O1\n" \
+ "\t.short %O2, %O3\n" \
+ "\t.org 2b+%O4\n" \
+ "\t.popsection\n"
+ #else
+ #define _EMIT_BUG_ENTRY \
+- "\t.pushsection __bug_table,\"a\"\n" \
++ "\t.pushsection __bug_table,\"aw\"\n" \
+ "2:\t.long 1b\n" \
+ "\t.short %O3\n" \
+ "\t.org 2b+%O4\n" \
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 0efb4c9497bc..ae1d55548f5a 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -94,6 +94,7 @@ config X86
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
+ select GENERIC_TIME_VSYSCALL
++ select HARDLOCKUP_CHECK_TIMESTAMP if X86_64
+ select HAVE_ACPI_APEI if ACPI
+ select HAVE_ACPI_APEI_NMI if ACPI
+ select HAVE_ALIGNED_STRUCT_PAGE if SLUB
+diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
+index 1cd792db15ef..1eab79c9ac48 100644
+--- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S
++++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
+@@ -117,11 +117,10 @@
+ .set T1, REG_T1
+ .endm
+
+-#define K_BASE %r8
+ #define HASH_PTR %r9
++#define BLOCKS_CTR %r8
+ #define BUFFER_PTR %r10
+ #define BUFFER_PTR2 %r13
+-#define BUFFER_END %r11
+
+ #define PRECALC_BUF %r14
+ #define WK_BUF %r15
+@@ -205,14 +204,14 @@
+ * blended AVX2 and ALU instruction scheduling
+ * 1 vector iteration per 8 rounds
+ */
+- vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP
++ vmovdqu (i * 2)(BUFFER_PTR), W_TMP
+ .elseif ((i & 7) == 1)
+- vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\
++ vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\
+ WY_TMP, WY_TMP
+ .elseif ((i & 7) == 2)
+ vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY
+ .elseif ((i & 7) == 4)
+- vpaddd K_XMM(K_BASE), WY, WY_TMP
++ vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
+ .elseif ((i & 7) == 7)
+ vmovdqu WY_TMP, PRECALC_WK(i&~7)
+
+@@ -255,7 +254,7 @@
+ vpxor WY, WY_TMP, WY_TMP
+ .elseif ((i & 7) == 7)
+ vpxor WY_TMP2, WY_TMP, WY
+- vpaddd K_XMM(K_BASE), WY, WY_TMP
++ vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
+ vmovdqu WY_TMP, PRECALC_WK(i&~7)
+
+ PRECALC_ROTATE_WY
+@@ -291,7 +290,7 @@
+ vpsrld $30, WY, WY
+ vpor WY, WY_TMP, WY
+ .elseif ((i & 7) == 7)
+- vpaddd K_XMM(K_BASE), WY, WY_TMP
++ vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
+ vmovdqu WY_TMP, PRECALC_WK(i&~7)
+
+ PRECALC_ROTATE_WY
+@@ -446,6 +445,16 @@
+
+ .endm
+
++/* Add constant only if (%2 > %3) condition met (uses RTA as temp)
++ * %1 + %2 >= %3 ? %4 : 0
++ */
++.macro ADD_IF_GE a, b, c, d
++ mov \a, RTA
++ add $\d, RTA
++ cmp $\c, \b
++ cmovge RTA, \a
++.endm
++
+ /*
+ * macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining
+ */
+@@ -463,13 +472,16 @@
+ lea (2*4*80+32)(%rsp), WK_BUF
+
+ # Precalc WK for first 2 blocks
+- PRECALC_OFFSET = 0
++ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64
+ .set i, 0
+ .rept 160
+ PRECALC i
+ .set i, i + 1
+ .endr
+- PRECALC_OFFSET = 128
++
++ /* Go to next block if needed */
++ ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128
++ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
+ xchg WK_BUF, PRECALC_BUF
+
+ .align 32
+@@ -479,8 +491,8 @@ _loop:
+ * we use K_BASE value as a signal of a last block,
+ * it is set below by: cmovae BUFFER_PTR, K_BASE
+ */
+- cmp K_BASE, BUFFER_PTR
+- jne _begin
++ test BLOCKS_CTR, BLOCKS_CTR
++ jnz _begin
+ .align 32
+ jmp _end
+ .align 32
+@@ -512,10 +524,10 @@ _loop0:
+ .set j, j+2
+ .endr
+
+- add $(2*64), BUFFER_PTR /* move to next odd-64-byte block */
+- cmp BUFFER_END, BUFFER_PTR /* is current block the last one? */
+- cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */
+-
++ /* Update Counter */
++ sub $1, BLOCKS_CTR
++ /* Move to the next block only if needed*/
++ ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128
+ /*
+ * rounds
+ * 60,62,64,66,68
+@@ -532,8 +544,8 @@ _loop0:
+ UPDATE_HASH 12(HASH_PTR), D
+ UPDATE_HASH 16(HASH_PTR), E
+
+- cmp K_BASE, BUFFER_PTR /* is current block the last one? */
+- je _loop
++ test BLOCKS_CTR, BLOCKS_CTR
++ jz _loop
+
+ mov TB, B
+
+@@ -575,10 +587,10 @@ _loop2:
+ .set j, j+2
+ .endr
+
+- add $(2*64), BUFFER_PTR2 /* move to next even-64-byte block */
+-
+- cmp BUFFER_END, BUFFER_PTR2 /* is current block the last one */
+- cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */
++ /* update counter */
++ sub $1, BLOCKS_CTR
++ /* Move to the next block only if needed*/
++ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
+
+ jmp _loop3
+ _loop3:
+@@ -641,19 +653,12 @@ _loop3:
+
+ avx2_zeroupper
+
+- lea K_XMM_AR(%rip), K_BASE
+-
++ /* Setup initial values */
+ mov CTX, HASH_PTR
+ mov BUF, BUFFER_PTR
+- lea 64(BUF), BUFFER_PTR2
+-
+- shl $6, CNT /* mul by 64 */
+- add BUF, CNT
+- add $64, CNT
+- mov CNT, BUFFER_END
+
+- cmp BUFFER_END, BUFFER_PTR2
+- cmovae K_BASE, BUFFER_PTR2
++ mov BUF, BUFFER_PTR2
++ mov CNT, BLOCKS_CTR
+
+ xmm_mov BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP
+
+diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
+index f960a043cdeb..fc61739150e7 100644
+--- a/arch/x86/crypto/sha1_ssse3_glue.c
++++ b/arch/x86/crypto/sha1_ssse3_glue.c
+@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
+
+ static bool avx2_usable(void)
+ {
+- if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
++ if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+ && boot_cpu_has(X86_FEATURE_BMI1)
+ && boot_cpu_has(X86_FEATURE_BMI2))
+ return true;
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 4a4c0834f965..22f2281b942b 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -1209,6 +1209,8 @@ ENTRY(nmi)
+ * other IST entries.
+ */
+
++ ASM_CLAC
++
+ /* Use %rdx as our temp variable throughout */
+ pushq %rdx
+
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 580b60f5ac83..c138835c5547 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2105,7 +2105,7 @@ static void refresh_pce(void *ignored)
+ load_mm_cr4(current->active_mm);
+ }
+
+-static void x86_pmu_event_mapped(struct perf_event *event)
++static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
+ {
+ if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
+ return;
+@@ -2120,22 +2120,20 @@ static void x86_pmu_event_mapped(struct perf_event *event)
+ * For now, this can't happen because all callers hold mmap_sem
+ * for write. If this changes, we'll need a different solution.
+ */
+- lockdep_assert_held_exclusive(&current->mm->mmap_sem);
++ lockdep_assert_held_exclusive(&mm->mmap_sem);
+
+- if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
+- on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
++ if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
++ on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
+ }
+
+-static void x86_pmu_event_unmapped(struct perf_event *event)
++static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
+ {
+- if (!current->mm)
+- return;
+
+ if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
+ return;
+
+- if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
+- on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
++ if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
++ on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
+ }
+
+ static int x86_pmu_event_idx(struct perf_event *event)
+diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
+index 39e702d90cdb..aa6b2023d8f8 100644
+--- a/arch/x86/include/asm/bug.h
++++ b/arch/x86/include/asm/bug.h
+@@ -35,7 +35,7 @@
+ #define _BUG_FLAGS(ins, flags) \
+ do { \
+ asm volatile("1:\t" ins "\n" \
+- ".pushsection __bug_table,\"a\"\n" \
++ ".pushsection __bug_table,\"aw\"\n" \
+ "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
+ "\t" __BUG_REL(%c0) "\t# bug_entry::file\n" \
+ "\t.word %c1" "\t# bug_entry::line\n" \
+@@ -52,7 +52,7 @@ do { \
+ #define _BUG_FLAGS(ins, flags) \
+ do { \
+ asm volatile("1:\t" ins "\n" \
+- ".pushsection __bug_table,\"a\"\n" \
++ ".pushsection __bug_table,\"aw\"\n" \
+ "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
+ "\t.word %c0" "\t# bug_entry::flags\n" \
+ "\t.org 2b+%c1\n" \
+diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
+index 1c18d83d3f09..9aeb91935ce0 100644
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -247,11 +247,11 @@ extern int force_personality32;
+
+ /*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
++ * 64-bit, this is above 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+ #define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
+- 0x100000000UL)
++ (TASK_SIZE / 3 * 2))
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
+index 19ad095b41df..81db3e92dc76 100644
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -50,8 +50,7 @@ unsigned long tasksize_64bit(void)
+ static unsigned long stack_maxrandom_size(unsigned long task_size)
+ {
+ unsigned long max = 0;
+- if ((current->flags & PF_RANDOMIZE) &&
+- !(current->personality & ADDR_NO_RANDOMIZE)) {
++ if (current->flags & PF_RANDOMIZE) {
+ max = (-1UL) & __STACK_RND_MASK(task_size == tasksize_32bit());
+ max <<= PAGE_SHIFT;
+ }
+@@ -82,13 +81,13 @@ static int mmap_is_legacy(void)
+
+ static unsigned long arch_rnd(unsigned int rndbits)
+ {
++ if (!(current->flags & PF_RANDOMIZE))
++ return 0;
+ return (get_random_long() & ((1UL << rndbits) - 1)) << PAGE_SHIFT;
+ }
+
+ unsigned long arch_mmap_rnd(void)
+ {
+- if (!(current->flags & PF_RANDOMIZE))
+- return 0;
+ return arch_rnd(mmap_is_ia32() ? mmap32_rnd_bits : mmap64_rnd_bits);
+ }
+
+diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
+index 0c3354cf3552..76944e3271bf 100644
+--- a/block/blk-mq-pci.c
++++ b/block/blk-mq-pci.c
+@@ -36,12 +36,18 @@ int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev)
+ for (queue = 0; queue < set->nr_hw_queues; queue++) {
+ mask = pci_irq_get_affinity(pdev, queue);
+ if (!mask)
+- return -EINVAL;
++ goto fallback;
+
+ for_each_cpu(cpu, mask)
+ set->mq_map[cpu] = queue;
+ }
+
+ return 0;
++
++fallback:
++ WARN_ON_ONCE(set->nr_hw_queues > 1);
++ for_each_possible_cpu(cpu)
++ set->mq_map[cpu] = 0;
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 39459631667c..b49547c5f2c2 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -2119,9 +2119,9 @@ static int blkfront_resume(struct xenbus_device *dev)
+ /*
+ * Get the bios in the request so we can re-queue them.
+ */
+- if (req_op(shadow[i].request) == REQ_OP_FLUSH ||
+- req_op(shadow[i].request) == REQ_OP_DISCARD ||
+- req_op(shadow[i].request) == REQ_OP_SECURE_ERASE ||
++ if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
++ req_op(shadow[j].request) == REQ_OP_DISCARD ||
++ req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
+ shadow[j].request->cmd_flags & REQ_FUA) {
+ /*
+ * Flush operations don't contain bios, so
+diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
+index 771dd26c7076..6719e346b790 100644
+--- a/drivers/crypto/ixp4xx_crypto.c
++++ b/drivers/crypto/ixp4xx_crypto.c
+@@ -1074,7 +1074,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
+ req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
+ &crypt->icv_rev_aes);
+ if (unlikely(!req_ctx->hmac_virt))
+- goto free_buf_src;
++ goto free_buf_dst;
+ if (!encrypt) {
+ scatterwalk_map_and_copy(req_ctx->hmac_virt,
+ req->src, cryptlen, authsize, 0);
+@@ -1089,10 +1089,10 @@ static int aead_perform(struct aead_request *req, int encrypt,
+ BUG_ON(qmgr_stat_overflow(SEND_QID));
+ return -EINPROGRESS;
+
+-free_buf_src:
+- free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+ free_buf_dst:
+ free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
++free_buf_src:
++ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+ crypt->ctl_flags = CTL_FLAG_UNUSED;
+ return -ENOMEM;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+index ed814e6d0207..28c1112e520c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+@@ -244,6 +244,12 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ struct dma_fence *f = e->fence;
+ struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
++ if (dma_fence_is_signaled(f)) {
++ hash_del(&e->node);
++ dma_fence_put(f);
++ kmem_cache_free(amdgpu_sync_slab, e);
++ continue;
++ }
+ if (ring && s_fence) {
+ /* For fences from the same ring it is sufficient
+ * when they are scheduled.
+@@ -256,13 +262,6 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ }
+ }
+
+- if (dma_fence_is_signaled(f)) {
+- hash_del(&e->node);
+- dma_fence_put(f);
+- kmem_cache_free(amdgpu_sync_slab, e);
+- continue;
+- }
+-
+ return f;
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
+index 7032c542a9b1..4dd4c2159a92 100644
+--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
++++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
+@@ -242,6 +242,10 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
+ goto err_unpin;
+ }
+
++ ret = req->engine->emit_flush(req, EMIT_INVALIDATE);
++ if (ret)
++ goto err_unpin;
++
+ ret = req->engine->emit_bb_start(req,
+ so->batch_offset, so->batch_size,
+ I915_DISPATCH_SECURE);
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index d5ab9ddef3e3..3b0e9fb33afe 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1224,6 +1224,10 @@ static const struct acpi_device_id elan_acpi_id[] = {
+ { "ELAN0100", 0 },
+ { "ELAN0600", 0 },
+ { "ELAN0605", 0 },
++ { "ELAN0608", 0 },
++ { "ELAN0605", 0 },
++ { "ELAN0609", 0 },
++ { "ELAN060B", 0 },
+ { "ELAN1000", 0 },
+ { }
+ };
+diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
+index 28b26c80f4cf..056507099725 100644
+--- a/drivers/irqchip/irq-atmel-aic-common.c
++++ b/drivers/irqchip/irq-atmel-aic-common.c
+@@ -142,9 +142,9 @@ void __init aic_common_rtc_irq_fixup(struct device_node *root)
+ struct device_node *np;
+ void __iomem *regs;
+
+- np = of_find_compatible_node(root, NULL, "atmel,at91rm9200-rtc");
++ np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-rtc");
+ if (!np)
+- np = of_find_compatible_node(root, NULL,
++ np = of_find_compatible_node(NULL, NULL,
+ "atmel,at91sam9x5-rtc");
+
+ if (!np)
+@@ -196,7 +196,6 @@ static void __init aic_common_irq_fixup(const struct of_device_id *matches)
+ return;
+
+ match = of_match_node(matches, root);
+- of_node_put(root);
+
+ if (match) {
+ void (*fixup)(struct device_node *) = match->data;
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index d7847014821a..caca5d689cdc 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -7979,7 +7979,7 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
+ if (mddev->safemode == 1)
+ mddev->safemode = 0;
+ /* sync_checkers is always 0 when writes_pending is in per-cpu mode */
+- if (mddev->in_sync || !mddev->sync_checkers) {
++ if (mddev->in_sync || mddev->sync_checkers) {
+ spin_lock(&mddev->lock);
+ if (mddev->in_sync) {
+ mddev->in_sync = 0;
+@@ -8639,6 +8639,9 @@ void md_check_recovery(struct mddev *mddev)
+ if (mddev_trylock(mddev)) {
+ int spares = 0;
+
++ if (!mddev->external && mddev->safemode == 1)
++ mddev->safemode = 0;
++
+ if (mddev->ro) {
+ struct md_rdev *rdev;
+ if (!mddev->external && mddev->in_sync)
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index c42153a985be..473f91322368 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1175,6 +1175,7 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
+ {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
+ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
++ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
+ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
+ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
+ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
+diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
+index 5c63b920b471..ed92c1254cff 100644
+--- a/drivers/parisc/dino.c
++++ b/drivers/parisc/dino.c
+@@ -956,7 +956,7 @@ static int __init dino_probe(struct parisc_device *dev)
+
+ dino_dev->hba.dev = dev;
+ dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096);
+- dino_dev->hba.lmmio_space_offset = 0; /* CPU addrs == bus addrs */
++ dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
+ spin_lock_init(&dino_dev->dinosaur_pen);
+ dino_dev->hba.iommu = ccio_get_iommu(dev);
+
+diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
+index 2776cfe64c09..ef9cf4a21afe 100644
+--- a/drivers/usb/core/usb-acpi.c
++++ b/drivers/usb/core/usb-acpi.c
+@@ -127,6 +127,22 @@ static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle,
+ */
+ #define USB_ACPI_LOCATION_VALID (1 << 31)
+
++static struct acpi_device *usb_acpi_find_port(struct acpi_device *parent,
++ int raw)
++{
++ struct acpi_device *adev;
++
++ if (!parent)
++ return NULL;
++
++ list_for_each_entry(adev, &parent->children, node) {
++ if (acpi_device_adr(adev) == raw)
++ return adev;
++ }
++
++ return acpi_find_child_device(parent, raw, false);
++}
++
+ static struct acpi_device *usb_acpi_find_companion(struct device *dev)
+ {
+ struct usb_device *udev;
+@@ -174,8 +190,10 @@ static struct acpi_device *usb_acpi_find_companion(struct device *dev)
+ int raw;
+
+ raw = usb_hcd_find_raw_port_number(hcd, port1);
+- adev = acpi_find_child_device(ACPI_COMPANION(&udev->dev),
+- raw, false);
++
++ adev = usb_acpi_find_port(ACPI_COMPANION(&udev->dev),
++ raw);
++
+ if (!adev)
+ return NULL;
+ } else {
+@@ -186,7 +204,9 @@ static struct acpi_device *usb_acpi_find_companion(struct device *dev)
+ return NULL;
+
+ acpi_bus_get_device(parent_handle, &adev);
+- adev = acpi_find_child_device(adev, port1, false);
++
++ adev = usb_acpi_find_port(adev, port1);
++
+ if (!adev)
+ return NULL;
+ }
+diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
+index 4da69dbf7dca..1bdd02a6d6ac 100644
+--- a/drivers/xen/biomerge.c
++++ b/drivers/xen/biomerge.c
+@@ -10,8 +10,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+ unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
+ unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
+
+- return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
+- ((bfn1 == bfn2) || ((bfn1+1) == bfn2));
++ return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
+ #else
+ /*
+ * XXX: Add support for merging bio_vec when using different page
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 7465c3ea5dd5..9867eda73769 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -666,8 +666,7 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
+ {
+ unsigned long random_variable = 0;
+
+- if ((current->flags & PF_RANDOMIZE) &&
+- !(current->personality & ADDR_NO_RANDOMIZE)) {
++ if (current->flags & PF_RANDOMIZE) {
+ random_variable = get_random_long();
+ random_variable &= STACK_RND_MASK;
+ random_variable <<= PAGE_SHIFT;
+diff --git a/include/linux/memblock.h b/include/linux/memblock.h
+index 8098695e5d8d..2526c501622f 100644
+--- a/include/linux/memblock.h
++++ b/include/linux/memblock.h
+@@ -65,6 +65,7 @@ extern bool movable_node_enabled;
+ #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+ #define __init_memblock __meminit
+ #define __initdata_memblock __meminitdata
++void memblock_discard(void);
+ #else
+ #define __init_memblock
+ #define __initdata_memblock
+@@ -78,8 +79,6 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
+ int nid, ulong flags);
+ phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
+ phys_addr_t size, phys_addr_t align);
+-phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
+-phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr);
+ void memblock_allow_resize(void);
+ int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
+ int memblock_add(phys_addr_t base, phys_addr_t size);
+@@ -114,6 +113,9 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
+ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
+ phys_addr_t *out_end);
+
++void __memblock_free_early(phys_addr_t base, phys_addr_t size);
++void __memblock_free_late(phys_addr_t base, phys_addr_t size);
++
+ /**
+ * for_each_mem_range - iterate through memblock areas from type_a and not
+ * included in type_b. Or just type_a if type_b is NULL.
+diff --git a/include/linux/nmi.h b/include/linux/nmi.h
+index aa3cd0878270..a8d4fc3356d2 100644
+--- a/include/linux/nmi.h
++++ b/include/linux/nmi.h
+@@ -155,6 +155,14 @@ extern int sysctl_hardlockup_all_cpu_backtrace;
+ #define sysctl_softlockup_all_cpu_backtrace 0
+ #define sysctl_hardlockup_all_cpu_backtrace 0
+ #endif
++
++#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
++ defined(CONFIG_HARDLOCKUP_DETECTOR)
++void watchdog_update_hrtimer_threshold(u64 period);
++#else
++static inline void watchdog_update_hrtimer_threshold(u64 period) { }
++#endif
++
+ extern bool is_hardlockup(void);
+ struct ctl_table;
+ extern int proc_watchdog(struct ctl_table *, int ,
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 24a635887f28..fc32347473a9 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -310,8 +310,8 @@ struct pmu {
+ * Notification that the event was mapped or unmapped. Called
+ * in the context of the mapping task.
+ */
+- void (*event_mapped) (struct perf_event *event); /*optional*/
+- void (*event_unmapped) (struct perf_event *event); /*optional*/
++ void (*event_mapped) (struct perf_event *event, struct mm_struct *mm); /* optional */
++ void (*event_unmapped) (struct perf_event *event, struct mm_struct *mm); /* optional */
+
+ /*
+ * Flags for ->add()/->del()/ ->start()/->stop(). There are
+diff --git a/include/linux/pid.h b/include/linux/pid.h
+index 4d179316e431..719582744a2e 100644
+--- a/include/linux/pid.h
++++ b/include/linux/pid.h
+@@ -8,7 +8,9 @@ enum pid_type
+ PIDTYPE_PID,
+ PIDTYPE_PGID,
+ PIDTYPE_SID,
+- PIDTYPE_MAX
++ PIDTYPE_MAX,
++ /* only valid to __task_pid_nr_ns() */
++ __PIDTYPE_TGID
+ };
+
+ /*
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 7f2a1eff2997..35f4517eeba9 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1132,13 +1132,6 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk)
+ return tsk->tgid;
+ }
+
+-extern pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
+-
+-static inline pid_t task_tgid_vnr(struct task_struct *tsk)
+-{
+- return pid_vnr(task_tgid(tsk));
+-}
+-
+ /**
+ * pid_alive - check that a task structure is not stale
+ * @p: Task structure to be checked.
+@@ -1154,23 +1147,6 @@ static inline int pid_alive(const struct task_struct *p)
+ return p->pids[PIDTYPE_PID].pid != NULL;
+ }
+
+-static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
+-{
+- pid_t pid = 0;
+-
+- rcu_read_lock();
+- if (pid_alive(tsk))
+- pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
+- rcu_read_unlock();
+-
+- return pid;
+-}
+-
+-static inline pid_t task_ppid_nr(const struct task_struct *tsk)
+-{
+- return task_ppid_nr_ns(tsk, &init_pid_ns);
+-}
+-
+ static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+ {
+ return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
+@@ -1192,6 +1168,33 @@ static inline pid_t task_session_vnr(struct task_struct *tsk)
+ return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
+ }
+
++static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
++{
++ return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);
++}
++
++static inline pid_t task_tgid_vnr(struct task_struct *tsk)
++{
++ return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
++}
++
++static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
++{
++ pid_t pid = 0;
++
++ rcu_read_lock();
++ if (pid_alive(tsk))
++ pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
++ rcu_read_unlock();
++
++ return pid;
++}
++
++static inline pid_t task_ppid_nr(const struct task_struct *tsk)
++{
++ return task_ppid_nr_ns(tsk, &init_pid_ns);
++}
++
+ /* Obsolete, do not use: */
+ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+ {
+diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
+index 62d686d96581..ed748ee40029 100644
+--- a/kernel/audit_watch.c
++++ b/kernel/audit_watch.c
+@@ -457,13 +457,15 @@ void audit_remove_watch_rule(struct audit_krule *krule)
+ list_del(&krule->rlist);
+
+ if (list_empty(&watch->rules)) {
++ /*
++ * audit_remove_watch() drops our reference to 'parent' which
++ * can get freed. Grab our own reference to be safe.
++ */
++ audit_get_parent(parent);
+ audit_remove_watch(watch);
+-
+- if (list_empty(&parent->watches)) {
+- audit_get_parent(parent);
++ if (list_empty(&parent->watches))
+ fsnotify_destroy_mark(&parent->mark, audit_watch_group);
+- audit_put_parent(parent);
+- }
++ audit_put_parent(parent);
+ }
+ }
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index d7f726747341..dbb3d273d497 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5084,7 +5084,7 @@ static void perf_mmap_open(struct vm_area_struct *vma)
+ atomic_inc(&event->rb->aux_mmap_count);
+
+ if (event->pmu->event_mapped)
+- event->pmu->event_mapped(event);
++ event->pmu->event_mapped(event, vma->vm_mm);
+ }
+
+ static void perf_pmu_output_stop(struct perf_event *event);
+@@ -5107,7 +5107,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
+ unsigned long size = perf_data_size(rb);
+
+ if (event->pmu->event_unmapped)
+- event->pmu->event_unmapped(event);
++ event->pmu->event_unmapped(event, vma->vm_mm);
+
+ /*
+ * rb->aux_mmap_count will always drop before rb->mmap_count and
+@@ -5405,7 +5405,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ vma->vm_ops = &perf_mmap_vmops;
+
+ if (event->pmu->event_mapped)
+- event->pmu->event_mapped(event);
++ event->pmu->event_mapped(event, vma->vm_mm);
+
+ return ret;
+ }
+diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
+index c94da688ee9b..cdf94ce959d8 100644
+--- a/kernel/irq/chip.c
++++ b/kernel/irq/chip.c
+@@ -898,13 +898,15 @@ EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
+
+ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
+ {
+- unsigned long flags;
++ unsigned long flags, trigger, tmp;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
+
+ if (!desc)
+ return;
+ irq_settings_clr_and_set(desc, clr, set);
+
++ trigger = irqd_get_trigger_type(&desc->irq_data);
++
+ irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
+ IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
+ if (irq_settings_has_no_balance_set(desc))
+@@ -916,7 +918,11 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
+ if (irq_settings_is_level(desc))
+ irqd_set(&desc->irq_data, IRQD_LEVEL);
+
+- irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
++ tmp = irq_settings_get_trigger_mask(desc);
++ if (tmp != IRQ_TYPE_NONE)
++ trigger = tmp;
++
++ irqd_set(&desc->irq_data, trigger);
+
+ irq_put_desc_unlock(desc, flags);
+ }
+diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
+index 1a9abc1c8ea0..259a22aa9934 100644
+--- a/kernel/irq/ipi.c
++++ b/kernel/irq/ipi.c
+@@ -165,7 +165,7 @@ irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
+ struct irq_data *data = irq_get_irq_data(irq);
+ struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
+
+- if (!data || !ipimask || cpu > nr_cpu_ids)
++ if (!data || !ipimask || cpu >= nr_cpu_ids)
+ return INVALID_HWIRQ;
+
+ if (!cpumask_test_cpu(cpu, ipimask))
+@@ -195,7 +195,7 @@ static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
+ if (!chip->ipi_send_single && !chip->ipi_send_mask)
+ return -EINVAL;
+
+- if (cpu > nr_cpu_ids)
++ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ if (dest) {
+diff --git a/kernel/pid.c b/kernel/pid.c
+index fd1cde1e4576..eeb892e728f8 100644
+--- a/kernel/pid.c
++++ b/kernel/pid.c
+@@ -527,8 +527,11 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+ if (!ns)
+ ns = task_active_pid_ns(current);
+ if (likely(pid_alive(task))) {
+- if (type != PIDTYPE_PID)
++ if (type != PIDTYPE_PID) {
++ if (type == __PIDTYPE_TGID)
++ type = PIDTYPE_PID;
+ task = task->group_leader;
++ }
+ nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns);
+ }
+ rcu_read_unlock();
+@@ -537,12 +540,6 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+ }
+ EXPORT_SYMBOL(__task_pid_nr_ns);
+
+-pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+-{
+- return pid_nr_ns(task_tgid(tsk), ns);
+-}
+-EXPORT_SYMBOL(task_tgid_nr_ns);
+-
+ struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
+ {
+ return ns_of_pid(task_pid(tsk));
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index 03e0b69bb5bf..b8e938c7273f 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -161,6 +161,7 @@ static void set_sample_period(void)
+ * hardlockup detector generates a warning
+ */
+ sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
++ watchdog_update_hrtimer_threshold(sample_period);
+ }
+
+ /* Commands for resetting the watchdog */
+diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
+index 54a427d1f344..cd0986b69cbc 100644
+--- a/kernel/watchdog_hld.c
++++ b/kernel/watchdog_hld.c
+@@ -70,6 +70,62 @@ void touch_nmi_watchdog(void)
+ }
+ EXPORT_SYMBOL(touch_nmi_watchdog);
+
++#ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP
++static DEFINE_PER_CPU(ktime_t, last_timestamp);
++static DEFINE_PER_CPU(unsigned int, nmi_rearmed);
++static ktime_t watchdog_hrtimer_sample_threshold __read_mostly;
++
++void watchdog_update_hrtimer_threshold(u64 period)
++{
++ /*
++ * The hrtimer runs with a period of (watchdog_threshold * 2) / 5
++ *
++ * So it runs effectively with 2.5 times the rate of the NMI
++ * watchdog. That means the hrtimer should fire 2-3 times before
++ * the NMI watchdog expires. The NMI watchdog on x86 is based on
++ * unhalted CPU cycles, so if Turbo-Mode is enabled the CPU cycles
++ * might run way faster than expected and the NMI fires in a
++ * smaller period than the one deduced from the nominal CPU
++ * frequency. Depending on the Turbo-Mode factor this might be fast
++ * enough to get the NMI period smaller than the hrtimer watchdog
++ * period and trigger false positives.
++ *
++ * The sample threshold is used to check in the NMI handler whether
++ * the minimum time between two NMI samples has elapsed. That
++ * prevents false positives.
++ *
++ * Set this to 4/5 of the actual watchdog threshold period so the
++ * hrtimer is guaranteed to fire at least once within the real
++ * watchdog threshold.
++ */
++ watchdog_hrtimer_sample_threshold = period * 2;
++}
++
++static bool watchdog_check_timestamp(void)
++{
++ ktime_t delta, now = ktime_get_mono_fast_ns();
++
++ delta = now - __this_cpu_read(last_timestamp);
++ if (delta < watchdog_hrtimer_sample_threshold) {
++ /*
++ * If ktime is jiffies based, a stalled timer would prevent
++ * jiffies from being incremented and the filter would look
++ * at a stale timestamp and never trigger.
++ */
++ if (__this_cpu_inc_return(nmi_rearmed) < 10)
++ return false;
++ }
++ __this_cpu_write(nmi_rearmed, 0);
++ __this_cpu_write(last_timestamp, now);
++ return true;
++}
++#else
++static inline bool watchdog_check_timestamp(void)
++{
++ return true;
++}
++#endif
++
+ static struct perf_event_attr wd_hw_attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+@@ -94,6 +150,9 @@ static void watchdog_overflow_callback(struct perf_event *event,
+ return;
+ }
+
++ if (!watchdog_check_timestamp())
++ return;
+
+ /* check for a hardlockup
+ * This is done by making sure our timer interrupt
+ * is incrementing. The timer interrupt should have
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index e4587ebe52c7..1f1cb51005de 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -344,6 +344,13 @@ config SECTION_MISMATCH_WARN_ONLY
+
+ If unsure, say Y.
+
++#
++# Enables a timestamp based low pass filter to compensate for perf based
++# hard lockup detection which runs too fast due to turbo modes.
++#
++config HARDLOCKUP_CHECK_TIMESTAMP
++ bool
++
+ #
+ # Select this config option from the architecture Kconfig, if it
+ # is preferred to always offer frame pointers as a config
+diff --git a/mm/cma_debug.c b/mm/cma_debug.c
+index 595b757bef72..c03ccbc405a0 100644
+--- a/mm/cma_debug.c
++++ b/mm/cma_debug.c
+@@ -167,7 +167,7 @@ static void cma_debugfs_add_one(struct cma *cma, int idx)
+ char name[16];
+ int u32s;
+
+- sprintf(name, "cma-%s", cma->name);
++ scnprintf(name, sizeof(name), "cma-%s", cma->name);
+
+ tmp = debugfs_create_dir(name, cma_debugfs_root);
+
+diff --git a/mm/memblock.c b/mm/memblock.c
+index 7b8a5db76a2f..7087d5578866 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -288,31 +288,27 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
+ }
+
+ #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+-
+-phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
+- phys_addr_t *addr)
+-{
+- if (memblock.reserved.regions == memblock_reserved_init_regions)
+- return 0;
+-
+- *addr = __pa(memblock.reserved.regions);
+-
+- return PAGE_ALIGN(sizeof(struct memblock_region) *
+- memblock.reserved.max);
+-}
+-
+-phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
+- phys_addr_t *addr)
++/**
++ * Discard memory and reserved arrays if they were allocated
++ */
++void __init memblock_discard(void)
+ {
+- if (memblock.memory.regions == memblock_memory_init_regions)
+- return 0;
++ phys_addr_t addr, size;
+
+- *addr = __pa(memblock.memory.regions);
++ if (memblock.reserved.regions != memblock_reserved_init_regions) {
++ addr = __pa(memblock.reserved.regions);
++ size = PAGE_ALIGN(sizeof(struct memblock_region) *
++ memblock.reserved.max);
++ __memblock_free_late(addr, size);
++ }
+
+- return PAGE_ALIGN(sizeof(struct memblock_region) *
+- memblock.memory.max);
++ if (memblock.memory.regions == memblock_memory_init_regions) {
++ addr = __pa(memblock.memory.regions);
++ size = PAGE_ALIGN(sizeof(struct memblock_region) *
++ memblock.memory.max);
++ __memblock_free_late(addr, size);
++ }
+ }
+-
+ #endif
+
+ /**
+diff --git a/mm/memory.c b/mm/memory.c
+index b0c3d1556a94..9e50ffcf9639 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3882,8 +3882,18 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+ * further.
+ */
+ if (unlikely((current->flags & PF_KTHREAD) && !(ret & VM_FAULT_ERROR)
+- && test_bit(MMF_UNSTABLE, &vma->vm_mm->flags)))
++ && test_bit(MMF_UNSTABLE, &vma->vm_mm->flags))) {
++
++ /*
++ * We are going to enforce SIGBUS but the PF path might have
++ * dropped the mmap_sem already so take it again so that
++ * we do not break expectations of all arch specific PF paths
++ * and g-u-p
++ */
++ if (ret & VM_FAULT_RETRY)
++ down_read(&vma->vm_mm->mmap_sem);
+ ret = VM_FAULT_SIGBUS;
++ }
+
+ return ret;
+ }
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 37d0b334bfe9..e0157546e6b5 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -931,11 +931,6 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
+ *policy |= (pol->flags & MPOL_MODE_FLAGS);
+ }
+
+- if (vma) {
+- up_read(&current->mm->mmap_sem);
+- vma = NULL;
+- }
+-
+ err = 0;
+ if (nmask) {
+ if (mpol_store_user_nodemask(pol)) {
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 89a0a1707f4c..2586d5ab9b99 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -41,6 +41,7 @@
+ #include <linux/page_idle.h>
+ #include <linux/page_owner.h>
+ #include <linux/sched/mm.h>
++#include <linux/ptrace.h>
+
+ #include <asm/tlbflush.h>
+
+@@ -1649,7 +1650,6 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+ const int __user *, nodes,
+ int __user *, status, int, flags)
+ {
+- const struct cred *cred = current_cred(), *tcred;
+ struct task_struct *task;
+ struct mm_struct *mm;
+ int err;
+@@ -1673,14 +1673,9 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+
+ /*
+ * Check if this process has the right to modify the specified
+- * process. The right exists if the process has administrative
+- * capabilities, superuser privileges or the same
+- * userid as the target process.
++ * process. Use the regular "ptrace_may_access()" checks.
+ */
+- tcred = __task_cred(task);
+- if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
+- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
+- !capable(CAP_SYS_NICE)) {
++ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
+ rcu_read_unlock();
+ err = -EPERM;
+ goto out;
+diff --git a/mm/nobootmem.c b/mm/nobootmem.c
+index 487dad610731..ab998125f04d 100644
+--- a/mm/nobootmem.c
++++ b/mm/nobootmem.c
+@@ -146,22 +146,6 @@ static unsigned long __init free_low_memory_core_early(void)
+ NULL)
+ count += __free_memory_core(start, end);
+
+-#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+- {
+- phys_addr_t size;
+-
+- /* Free memblock.reserved array if it was allocated */
+- size = get_allocated_memblock_reserved_regions_info(&start);
+- if (size)
+- count += __free_memory_core(start, start + size);
+-
+- /* Free memblock.memory array if it was allocated */
+- size = get_allocated_memblock_memory_regions_info(&start);
+- if (size)
+- count += __free_memory_core(start, start + size);
+- }
+-#endif
+-
+ return count;
+ }
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 07569fa25760..4d16ef9d42a9 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1582,6 +1582,10 @@ void __init page_alloc_init_late(void)
+ /* Reinit limits that are based on free pages after the kernel is up */
+ files_maxfiles_init();
+ #endif
++#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
++ /* Discard memblock private memory */
++ memblock_discard();
++#endif
+
+ for_each_populated_zone(zone)
+ set_zone_contiguous(zone);
+diff --git a/mm/slub.c b/mm/slub.c
+index 8addc535bcdc..a0f3c56611c6 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -5637,13 +5637,14 @@ static void sysfs_slab_remove_workfn(struct work_struct *work)
+ * A cache is never shut down before deactivation is
+ * complete, so no need to worry about synchronization.
+ */
+- return;
++ goto out;
+
+ #ifdef CONFIG_MEMCG
+ kset_unregister(s->memcg_kset);
+ #endif
+ kobject_uevent(&s->kobj, KOBJ_REMOVE);
+ kobject_del(&s->kobj);
++out:
+ kobject_put(&s->kobj);
+ }
+
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index ecc97f74ab18..104eb720ba43 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -1669,7 +1669,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+ struct page **pages;
+ unsigned int nr_pages, array_size, i;
+ const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
+- const gfp_t alloc_mask = gfp_mask | __GFP_HIGHMEM | __GFP_NOWARN;
++ const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
++ const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
++ 0 :
++ __GFP_HIGHMEM;
+
+ nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
+ array_size = (nr_pages * sizeof(struct page *));
+@@ -1677,7 +1680,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+ area->nr_pages = nr_pages;
+ /* Please note that the recursion is strictly bounded. */
+ if (array_size > PAGE_SIZE) {
+- pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
++ pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
+ PAGE_KERNEL, node, area->caller);
+ } else {
+ pages = kmalloc_node(array_size, nested_gfp, node);
+@@ -1698,9 +1701,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+ }
+
+ if (node == NUMA_NO_NODE)
+- page = alloc_page(alloc_mask);
++ page = alloc_page(alloc_mask|highmem_mask);
+ else
+- page = alloc_pages_node(node, alloc_mask, 0);
++ page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
+
+ if (unlikely(!page)) {
+ /* Successfully allocated i pages, free them in __vunmap() */
+ area->nr_pages = i;
+ goto fail;
+ }
+ area->pages[i] = page;
+- if (gfpflags_allow_blocking(gfp_mask))
++ if (gfpflags_allow_blocking(gfp_mask|highmem_mask))
+ cond_resched();
+ }
+
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index f3b1d7f50b81..67c4c68ce041 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1502,16 +1502,11 @@ static int snd_seq_ioctl_unsubscribe_port(struct snd_seq_client *client,
+ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
+ {
+ struct snd_seq_queue_info *info = arg;
+- int result;
+ struct snd_seq_queue *q;
+
+- result = snd_seq_queue_alloc(client->number, info->locked, info->flags);
+- if (result < 0)
+- return result;
+-
+- q = queueptr(result);
+- if (q == NULL)
+- return -EINVAL;
++ q = snd_seq_queue_alloc(client->number, info->locked, info->flags);
++ if (IS_ERR(q))
++ return PTR_ERR(q);
+
+ info->queue = q->queue;
+ info->locked = q->locked;
+@@ -1521,7 +1516,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
+ if (!info->name[0])
+ snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
+ strlcpy(q->name, info->name, sizeof(q->name));
+- queuefree(q);
++ snd_use_lock_free(&q->use_lock);
+
+ return 0;
+ }
+diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
+index 450c5187eecb..79e0c5604ef8 100644
+--- a/sound/core/seq/seq_queue.c
++++ b/sound/core/seq/seq_queue.c
+@@ -184,22 +184,26 @@ void __exit snd_seq_queues_delete(void)
+ static void queue_use(struct snd_seq_queue *queue, int client, int use);
+
+ /* allocate a new queue -
+- * return queue index value or negative value for error
++ * return pointer to new queue or ERR_PTR(-errno) for error
++ * The new queue's use_lock is set to 1. It is the caller's responsibility to
++ * call snd_use_lock_free(&q->use_lock).
+ */
+-int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
++struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
+ {
+ struct snd_seq_queue *q;
+
+ q = queue_new(client, locked);
+ if (q == NULL)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+ q->info_flags = info_flags;
+ queue_use(q, client, 1);
++ snd_use_lock_use(&q->use_lock);
+ if (queue_list_add(q) < 0) {
++ snd_use_lock_free(&q->use_lock);
+ queue_delete(q);
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+ }
+- return q->queue;
++ return q;
+ }
+
+ /* delete a queue - queue must be owned by the client */
+diff --git a/sound/core/seq/seq_queue.h b/sound/core/seq/seq_queue.h
+index 30c8111477f6..719093489a2c 100644
+--- a/sound/core/seq/seq_queue.h
++++ b/sound/core/seq/seq_queue.h
+@@ -71,7 +71,7 @@ void snd_seq_queues_delete(void);
+
+
+ /* create new queue (constructor) */
+-int snd_seq_queue_alloc(int client, int locked, unsigned int flags);
++struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int flags);
+
+ /* delete queue (destructor) */
+ int snd_seq_queue_delete(int client, int queueid);
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 082736c539bc..e630813c5008 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -542,6 +542,8 @@ int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
+
+ if (size < sizeof(scale))
+ return -ENOMEM;
++ if (cval->min_mute)
++ scale[0] = SNDRV_CTL_TLVT_DB_MINMAX_MUTE;
+ scale[2] = cval->dBmin;
+ scale[3] = cval->dBmax;
+ if (copy_to_user(_tlv, scale, sizeof(scale)))
+diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
+index 3417ef347e40..2b4b067646ab 100644
+--- a/sound/usb/mixer.h
++++ b/sound/usb/mixer.h
+@@ -64,6 +64,7 @@ struct usb_mixer_elem_info {
+ int cached;
+ int cache_val[MAX_CHANNELS];
+ u8 initialized;
++ u8 min_mute;
+ void *private_data;
+ };
+
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index 4fa0053a40af..7fbc90f5c6de 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -1878,6 +1878,12 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
+ if (unitid == 7 && cval->control == UAC_FU_VOLUME)
+ snd_dragonfly_quirk_db_scale(mixer, cval, kctl);
+ break;
++ /* lowest playback value is muted on C-Media devices */
++ case USB_ID(0x0d8c, 0x000c):
++ case USB_ID(0x0d8c, 0x0014):
++ if (strstr(kctl->id.name, "Playback"))
++ cval->min_mute = 1;
++ break;
+ }
+ }
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index d7b0b0a3a2db..6a03f9697039 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1142,6 +1142,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
+ case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
+ case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
++ case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
+ case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
+ case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
+ case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
+@@ -1374,6 +1375,10 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ }
+ }
+ break;
++ case USB_ID(0x16d0, 0x0a23):
++ if (fp->altsetting == 2)
++ return SNDRV_PCM_FMTBIT_DSD_U32_BE;
++ break;
+
+ default:
+ break;