Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Wed, 30 May 2018 11:39:29
Message-Id: 1527680351.53b8f4f2e5ab3aa75bce79d5934ede887e4e2854.mpagano@gentoo
1 commit: 53b8f4f2e5ab3aa75bce79d5934ede887e4e2854
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed May 30 11:39:11 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed May 30 11:39:11 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=53b8f4f2
7
8 Linux patch 4.9.104
9
10 0000_README | 4 +
11 1103_linux-4.9.104.patch | 13079 +++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 13083 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 5e90d97..e808f94 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -455,6 +455,10 @@ Patch: 1102_linux-4.9.103.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.9.103
21
22 +Patch: 1103_linux-4.9.104.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.9.104
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1103_linux-4.9.104.patch b/1103_linux-4.9.104.patch
31 new file mode 100644
32 index 0000000..9f689d2
33 --- /dev/null
34 +++ b/1103_linux-4.9.104.patch
35 @@ -0,0 +1,13079 @@
36 +diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
37 +index 1699a55b7b70..ef639960b272 100644
38 +--- a/Documentation/device-mapper/thin-provisioning.txt
39 ++++ b/Documentation/device-mapper/thin-provisioning.txt
40 +@@ -112,9 +112,11 @@ $low_water_mark is expressed in blocks of size $data_block_size. If
41 + free space on the data device drops below this level then a dm event
42 + will be triggered which a userspace daemon should catch allowing it to
43 + extend the pool device. Only one such event will be sent.
44 +-Resuming a device with a new table itself triggers an event so the
45 +-userspace daemon can use this to detect a situation where a new table
46 +-already exceeds the threshold.
47 ++
48 ++No special event is triggered if a just resumed device's free space is below
49 ++the low water mark. However, resuming a device always triggers an
50 ++event; a userspace daemon should verify that free space exceeds the low
51 ++water mark when handling this event.
52 +
53 + A low water mark for the metadata device is maintained in the kernel and
54 + will trigger a dm event if free space on the metadata device drops below
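
The documentation change above means a userspace daemon cannot treat the resume event as proof that the threshold was crossed; it has to re-read the pool status itself. A minimal sketch of that check, assuming a hypothetical pool name and threshold and parsing "dmsetup status" output (this is not dmeventd's actual implementation):

/* Minimal sketch (not dmeventd): after a dm event on a thin-pool --
 * including the one fired by resume -- re-check data usage against the
 * low water mark instead of assuming the threshold was crossed. The
 * pool name and threshold are hypothetical inputs. */
#include <stdio.h>

/* 1 if free data blocks < low_water_mark, 0 if not, -1 on error. */
static int pool_below_low_water(const char *pool,
				unsigned long long low_water_mark)
{
	char cmd[256], line[512];
	unsigned long long used, total;
	FILE *f;

	snprintf(cmd, sizeof(cmd), "dmsetup status %s", pool);
	f = popen(cmd, "r");
	if (!f)
		return -1;
	/* thin-pool status line: <start> <len> thin-pool <transaction>
	 * <used_meta>/<total_meta> <used_data>/<total_data> ... */
	if (!fgets(line, sizeof(line), f) ||
	    sscanf(line, "%*u %*u thin-pool %*u %*u/%*u %llu/%llu",
		   &used, &total) != 2) {
		pclose(f);
		return -1;
	}
	pclose(f);
	return (total - used) < low_water_mark;
}
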
55 +diff --git a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
56 +index 217a90eaabe7..9c38bbe7e6d7 100644
57 +--- a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
58 ++++ b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
59 +@@ -11,7 +11,11 @@ Required properties:
60 + interrupts.
61 +
62 + Optional properties:
63 +-- clocks: Optional reference to the clock used by the XOR engine.
64 ++- clocks: Optional reference to the clocks used by the XOR engine.
65 ++- clock-names: mandatory if there is a second clock, in this case the
66 ++ name must be "core" for the first clock and "reg" for the second
67 ++ one
68 ++
69 +
70 + Example:
71 +
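
On the driver side, the named clocks in this binding map onto the standard clk API. A hedged sketch of how a consumer might honour it -- not the literal mv_xor_v2.c probe code, and the fallback for a single unnamed clock (no clock-names) is omitted for brevity:

/* Fetch the mandatory "core" clock and the optional "reg" clock by the
 * names the binding defines. */
#include <linux/clk.h>
#include <linux/platform_device.h>

static int xor_clks_get(struct platform_device *pdev)
{
	struct clk *core_clk, *reg_clk;
	int ret;

	reg_clk = devm_clk_get(&pdev->dev, "reg");
	if (IS_ERR(reg_clk)) {
		if (PTR_ERR(reg_clk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		reg_clk = NULL;			/* second clock is optional */
	} else {
		ret = clk_prepare_enable(reg_clk);
		if (ret)
			return ret;
	}

	core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(core_clk)) {
		ret = PTR_ERR(core_clk);
		goto err_reg_clk;
	}
	ret = clk_prepare_enable(core_clk);
	if (!ret)
		return 0;

err_reg_clk:
	clk_disable_unprepare(reg_clk);	/* NULL-safe no-op if absent */
	return ret;
}
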
72 +diff --git a/Makefile b/Makefile
73 +index 6090f655fb32..780dcc8033b2 100644
74 +--- a/Makefile
75 ++++ b/Makefile
76 +@@ -1,6 +1,6 @@
77 + VERSION = 4
78 + PATCHLEVEL = 9
79 +-SUBLEVEL = 103
80 ++SUBLEVEL = 104
81 + EXTRAVERSION =
82 + NAME = Roaring Lionus
83 +
84 +diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
85 +index 0ca9724597c1..7081e52291d0 100644
86 +--- a/arch/alpha/include/asm/xchg.h
87 ++++ b/arch/alpha/include/asm/xchg.h
88 +@@ -11,6 +11,10 @@
89 + * Atomic exchange.
90 + * Since it can be used to implement critical sections
91 + * it must clobber "memory" (also for interrupts in UP).
92 ++ *
93 ++ * The leading and the trailing memory barriers guarantee that these
94 ++ * operations are fully ordered.
95 ++ *
96 + */
97 +
98 + static inline unsigned long
99 +@@ -18,6 +22,7 @@ ____xchg(_u8, volatile char *m, unsigned long val)
100 + {
101 + unsigned long ret, tmp, addr64;
102 +
103 ++ smp_mb();
104 + __asm__ __volatile__(
105 + " andnot %4,7,%3\n"
106 + " insbl %1,%4,%1\n"
107 +@@ -42,6 +47,7 @@ ____xchg(_u16, volatile short *m, unsigned long val)
108 + {
109 + unsigned long ret, tmp, addr64;
110 +
111 ++ smp_mb();
112 + __asm__ __volatile__(
113 + " andnot %4,7,%3\n"
114 + " inswl %1,%4,%1\n"
115 +@@ -66,6 +72,7 @@ ____xchg(_u32, volatile int *m, unsigned long val)
116 + {
117 + unsigned long dummy;
118 +
119 ++ smp_mb();
120 + __asm__ __volatile__(
121 + "1: ldl_l %0,%4\n"
122 + " bis $31,%3,%1\n"
123 +@@ -86,6 +93,7 @@ ____xchg(_u64, volatile long *m, unsigned long val)
124 + {
125 + unsigned long dummy;
126 +
127 ++ smp_mb();
128 + __asm__ __volatile__(
129 + "1: ldq_l %0,%4\n"
130 + " bis $31,%3,%1\n"
131 +@@ -127,10 +135,12 @@ ____xchg(, volatile void *ptr, unsigned long x, int size)
132 + * store NEW in MEM. Return the initial value in MEM. Success is
133 + * indicated by comparing RETURN with OLD.
134 + *
135 +- * The memory barrier should be placed in SMP only when we actually
136 +- * make the change. If we don't change anything (so if the returned
137 +- * prev is equal to old) then we aren't acquiring anything new and
138 +- * we don't need any memory barrier as far I can tell.
139 ++ * The leading and the trailing memory barriers guarantee that these
140 ++ * operations are fully ordered.
141 ++ *
142 ++ * The trailing memory barrier is placed in SMP unconditionally, in
143 ++ * order to guarantee that dependency ordering is preserved when a
144 ++ * dependency is headed by an unsuccessful operation.
145 + */
146 +
147 + static inline unsigned long
148 +@@ -138,6 +148,7 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
149 + {
150 + unsigned long prev, tmp, cmp, addr64;
151 +
152 ++ smp_mb();
153 + __asm__ __volatile__(
154 + " andnot %5,7,%4\n"
155 + " insbl %1,%5,%1\n"
156 +@@ -149,8 +160,8 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
157 + " or %1,%2,%2\n"
158 + " stq_c %2,0(%4)\n"
159 + " beq %2,3f\n"
160 +- __ASM__MB
161 + "2:\n"
162 ++ __ASM__MB
163 + ".subsection 2\n"
164 + "3: br 1b\n"
165 + ".previous"
166 +@@ -165,6 +176,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
167 + {
168 + unsigned long prev, tmp, cmp, addr64;
169 +
170 ++ smp_mb();
171 + __asm__ __volatile__(
172 + " andnot %5,7,%4\n"
173 + " inswl %1,%5,%1\n"
174 +@@ -176,8 +188,8 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
175 + " or %1,%2,%2\n"
176 + " stq_c %2,0(%4)\n"
177 + " beq %2,3f\n"
178 +- __ASM__MB
179 + "2:\n"
180 ++ __ASM__MB
181 + ".subsection 2\n"
182 + "3: br 1b\n"
183 + ".previous"
184 +@@ -192,6 +204,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
185 + {
186 + unsigned long prev, cmp;
187 +
188 ++ smp_mb();
189 + __asm__ __volatile__(
190 + "1: ldl_l %0,%5\n"
191 + " cmpeq %0,%3,%1\n"
192 +@@ -199,8 +212,8 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
193 + " mov %4,%1\n"
194 + " stl_c %1,%2\n"
195 + " beq %1,3f\n"
196 +- __ASM__MB
197 + "2:\n"
198 ++ __ASM__MB
199 + ".subsection 2\n"
200 + "3: br 1b\n"
201 + ".previous"
202 +@@ -215,6 +228,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
203 + {
204 + unsigned long prev, cmp;
205 +
206 ++ smp_mb();
207 + __asm__ __volatile__(
208 + "1: ldq_l %0,%5\n"
209 + " cmpeq %0,%3,%1\n"
210 +@@ -222,8 +236,8 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
211 + " mov %4,%1\n"
212 + " stq_c %1,%2\n"
213 + " beq %1,3f\n"
214 +- __ASM__MB
215 + "2:\n"
216 ++ __ASM__MB
217 + ".subsection 2\n"
218 + "3: br 1b\n"
219 + ".previous"
220 +diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
221 +index 249e10190d20..b7b78cb09a37 100644
222 +--- a/arch/arc/Kconfig
223 ++++ b/arch/arc/Kconfig
224 +@@ -495,7 +495,6 @@ config ARC_CURR_IN_REG
225 +
226 + config ARC_EMUL_UNALIGNED
227 + bool "Emulate unaligned memory access (userspace only)"
228 +- default N
229 + select SYSCTL_ARCH_UNALIGN_NO_WARN
230 + select SYSCTL_ARCH_UNALIGN_ALLOW
231 + depends on ISA_ARCOMPACT
232 +diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
233 +index 74dd21b7373c..c51b88ee3cec 100644
234 +--- a/arch/arm/boot/dts/bcm283x.dtsi
235 ++++ b/arch/arm/boot/dts/bcm283x.dtsi
236 +@@ -146,8 +146,8 @@
237 +
238 + i2s: i2s@7e203000 {
239 + compatible = "brcm,bcm2835-i2s";
240 +- reg = <0x7e203000 0x20>,
241 +- <0x7e101098 0x02>;
242 ++ reg = <0x7e203000 0x24>;
243 ++ clocks = <&clocks BCM2835_CLOCK_PCM>;
244 +
245 + dmas = <&dma 2>,
246 + <&dma 3>;
247 +diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
248 +index a1658d0721b8..cf0de77f09c4 100644
249 +--- a/arch/arm/boot/dts/bcm958625hr.dts
250 ++++ b/arch/arm/boot/dts/bcm958625hr.dts
251 +@@ -49,7 +49,7 @@
252 +
253 + memory {
254 + device_type = "memory";
255 +- reg = <0x60000000 0x80000000>;
256 ++ reg = <0x60000000 0x20000000>;
257 + };
258 +
259 + gpio-restart {
260 +diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
261 +index 58b09bf1ba2d..205130600853 100644
262 +--- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
263 ++++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
264 +@@ -213,37 +213,37 @@
265 + &iomuxc {
266 + pinctrl_enet1: enet1grp {
267 + fsl,pins = <
268 +- MX7D_PAD_SD2_CD_B__ENET1_MDIO 0x3
269 +- MX7D_PAD_SD2_WP__ENET1_MDC 0x3
270 +- MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC 0x1
271 +- MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0 0x1
272 +- MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1 0x1
273 +- MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2 0x1
274 +- MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3 0x1
275 +- MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL 0x1
276 +- MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC 0x1
277 +- MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x1
278 +- MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x1
279 +- MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2 0x1
280 +- MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3 0x1
281 +- MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x1
282 ++ MX7D_PAD_SD2_CD_B__ENET1_MDIO 0x30
283 ++ MX7D_PAD_SD2_WP__ENET1_MDC 0x30
284 ++ MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC 0x11
285 ++ MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0 0x11
286 ++ MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1 0x11
287 ++ MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2 0x11
288 ++ MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3 0x11
289 ++ MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL 0x11
290 ++ MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC 0x11
291 ++ MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x11
292 ++ MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x11
293 ++ MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2 0x11
294 ++ MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3 0x11
295 ++ MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x11
296 + >;
297 + };
298 +
299 + pinctrl_enet2: enet2grp {
300 + fsl,pins = <
301 +- MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC 0x1
302 +- MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0 0x1
303 +- MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1 0x1
304 +- MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2 0x1
305 +- MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3 0x1
306 +- MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL 0x1
307 +- MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC 0x1
308 +- MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0 0x1
309 +- MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1 0x1
310 +- MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2 0x1
311 +- MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3 0x1
312 +- MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL 0x1
313 ++ MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC 0x11
314 ++ MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0 0x11
315 ++ MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1 0x11
316 ++ MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2 0x11
317 ++ MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3 0x11
318 ++ MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL 0x11
319 ++ MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC 0x11
320 ++ MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0 0x11
321 ++ MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1 0x11
322 ++ MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2 0x11
323 ++ MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3 0x11
324 ++ MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL 0x11
325 + >;
326 + };
327 +
328 +diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
329 +index 6761d11d3f9e..db0239c7e6c7 100644
330 +--- a/arch/arm/boot/dts/r8a7791-porter.dts
331 ++++ b/arch/arm/boot/dts/r8a7791-porter.dts
332 +@@ -428,7 +428,7 @@
333 + "dclkin.0", "dclkin.1";
334 +
335 + ports {
336 +- port@1 {
337 ++ port@0 {
338 + endpoint {
339 + remote-endpoint = <&adv7511_in>;
340 + };
341 +diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
342 +index 9f48141270b8..f0702d8063d9 100644
343 +--- a/arch/arm/boot/dts/socfpga.dtsi
344 ++++ b/arch/arm/boot/dts/socfpga.dtsi
345 +@@ -759,7 +759,7 @@
346 + timer@fffec600 {
347 + compatible = "arm,cortex-a9-twd-timer";
348 + reg = <0xfffec600 0x100>;
349 +- interrupts = <1 13 0xf04>;
350 ++ interrupts = <1 13 0xf01>;
351 + clocks = <&mpu_periph_clk>;
352 + };
353 +
354 +diff --git a/arch/arm/include/asm/vdso.h b/arch/arm/include/asm/vdso.h
355 +index d0295f1dd1a3..ff65b6d96c7e 100644
356 +--- a/arch/arm/include/asm/vdso.h
357 ++++ b/arch/arm/include/asm/vdso.h
358 +@@ -11,8 +11,6 @@ struct mm_struct;
359 +
360 + void arm_install_vdso(struct mm_struct *mm, unsigned long addr);
361 +
362 +-extern char vdso_start, vdso_end;
363 +-
364 + extern unsigned int vdso_total_pages;
365 +
366 + #else /* CONFIG_VDSO */
367 +diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
368 +index 53cf86cf2d1a..890439737374 100644
369 +--- a/arch/arm/kernel/vdso.c
370 ++++ b/arch/arm/kernel/vdso.c
371 +@@ -39,6 +39,8 @@
372 +
373 + static struct page **vdso_text_pagelist;
374 +
375 ++extern char vdso_start[], vdso_end[];
376 ++
377 + /* Total number of pages needed for the data and text portions of the VDSO. */
378 + unsigned int vdso_total_pages __ro_after_init;
379 +
380 +@@ -179,13 +181,13 @@ static int __init vdso_init(void)
381 + unsigned int text_pages;
382 + int i;
383 +
384 +- if (memcmp(&vdso_start, "\177ELF", 4)) {
385 ++ if (memcmp(vdso_start, "\177ELF", 4)) {
386 + pr_err("VDSO is not a valid ELF object!\n");
387 + return -ENOEXEC;
388 + }
389 +
390 +- text_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
391 +- pr_debug("vdso: %i text pages at base %p\n", text_pages, &vdso_start);
392 ++ text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
393 ++ pr_debug("vdso: %i text pages at base %p\n", text_pages, vdso_start);
394 +
395 + /* Allocate the VDSO text pagelist */
396 + vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
397 +@@ -200,7 +202,7 @@ static int __init vdso_init(void)
398 + for (i = 0; i < text_pages; i++) {
399 + struct page *page;
400 +
401 +- page = virt_to_page(&vdso_start + i * PAGE_SIZE);
402 ++ page = virt_to_page(vdso_start + i * PAGE_SIZE);
403 + vdso_text_pagelist[i] = page;
404 + }
405 +
406 +@@ -211,7 +213,7 @@ static int __init vdso_init(void)
407 +
408 + cntvct_ok = cntvct_functional();
409 +
410 +- patch_vdso(&vdso_start);
411 ++ patch_vdso(vdso_start);
412 +
413 + return 0;
414 + }
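
The declaration change is the whole point of this hunk: with "extern char vdso_start;" the symbol is a one-byte object, so arithmetic like "&vdso_start + i * PAGE_SIZE" steps outside it, which is undefined behaviour and can trip newer compilers' bounds checks. Declaring it as an incomplete array keeps the arithmetic well-formed (illustration; "vdso_blob" is a hypothetical name):

/* Two ways to declare a linker-provided symbol: */
extern char vdso_start;	/* old: one char; &vdso_start + n is UB for n > 1 */
extern char vdso_blob[];	/* new style: object of unknown size, so
				 * vdso_blob + n and memcmp(vdso_blob, ...)
				 * are well-defined pointer arithmetic */
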
415 +diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
416 +index 4f5fd4a084c0..034b89499bd7 100644
417 +--- a/arch/arm/mach-omap1/clock.c
418 ++++ b/arch/arm/mach-omap1/clock.c
419 +@@ -1031,17 +1031,17 @@ static int clk_debugfs_register_one(struct clk *c)
420 + return -ENOMEM;
421 + c->dent = d;
422 +
423 +- d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
424 ++ d = debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount);
425 + if (!d) {
426 + err = -ENOMEM;
427 + goto err_out;
428 + }
429 +- d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
430 ++ d = debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate);
431 + if (!d) {
432 + err = -ENOMEM;
433 + goto err_out;
434 + }
435 +- d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
436 ++ d = debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags);
437 + if (!d) {
438 + err = -ENOMEM;
439 + goto err_out;
440 +diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
441 +index 678d2a31dcb8..3202015ecb83 100644
442 +--- a/arch/arm/mach-omap2/pm.c
443 ++++ b/arch/arm/mach-omap2/pm.c
444 +@@ -225,7 +225,7 @@ static void omap_pm_end(void)
445 + cpu_idle_poll_ctrl(false);
446 + }
447 +
448 +-static void omap_pm_finish(void)
449 ++static void omap_pm_wake(void)
450 + {
451 + if (cpu_is_omap34xx())
452 + omap_prcm_irq_complete();
453 +@@ -235,7 +235,7 @@ static const struct platform_suspend_ops omap_pm_ops = {
454 + .begin = omap_pm_begin,
455 + .end = omap_pm_end,
456 + .enter = omap_pm_enter,
457 +- .finish = omap_pm_finish,
458 ++ .wake = omap_pm_wake,
459 + .valid = suspend_valid_only_mem,
460 + };
461 +
462 +diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
463 +index b2f2448bfa6d..a4cab2814655 100644
464 +--- a/arch/arm/mach-omap2/timer.c
465 ++++ b/arch/arm/mach-omap2/timer.c
466 +@@ -136,12 +136,6 @@ static struct clock_event_device clockevent_gpt = {
467 + .tick_resume = omap2_gp_timer_shutdown,
468 + };
469 +
470 +-static struct property device_disabled = {
471 +- .name = "status",
472 +- .length = sizeof("disabled"),
473 +- .value = "disabled",
474 +-};
475 +-
476 + static const struct of_device_id omap_timer_match[] __initconst = {
477 + { .compatible = "ti,omap2420-timer", },
478 + { .compatible = "ti,omap3430-timer", },
479 +@@ -183,8 +177,17 @@ static struct device_node * __init omap_get_timer_dt(const struct of_device_id *
480 + of_get_property(np, "ti,timer-secure", NULL)))
481 + continue;
482 +
483 +- if (!of_device_is_compatible(np, "ti,omap-counter32k"))
484 +- of_add_property(np, &device_disabled);
485 ++ if (!of_device_is_compatible(np, "ti,omap-counter32k")) {
486 ++ struct property *prop;
487 ++
488 ++ prop = kzalloc(sizeof(*prop), GFP_KERNEL);
489 ++ if (!prop)
490 ++ return NULL;
491 ++ prop->name = "status";
492 ++ prop->value = "disabled";
493 ++ prop->length = strlen(prop->value);
494 ++ of_add_property(np, prop);
495 ++ }
496 + return np;
497 + }
498 +
499 +diff --git a/arch/arm/mach-orion5x/Kconfig b/arch/arm/mach-orion5x/Kconfig
500 +index 89bb0fc796bd..72905a442106 100644
501 +--- a/arch/arm/mach-orion5x/Kconfig
502 ++++ b/arch/arm/mach-orion5x/Kconfig
503 +@@ -57,7 +57,6 @@ config MACH_KUROBOX_PRO
504 +
505 + config MACH_DNS323
506 + bool "D-Link DNS-323"
507 +- select GENERIC_NET_UTILS
508 + select I2C_BOARDINFO if I2C
509 + help
510 + Say 'Y' here if you want your kernel to support the
511 +@@ -65,7 +64,6 @@ config MACH_DNS323
512 +
513 + config MACH_TS209
514 + bool "QNAP TS-109/TS-209"
515 +- select GENERIC_NET_UTILS
516 + help
517 + Say 'Y' here if you want your kernel to support the
518 + QNAP TS-109/TS-209 platform.
519 +@@ -107,7 +105,6 @@ config MACH_LINKSTATION_LS_HGL
520 +
521 + config MACH_TS409
522 + bool "QNAP TS-409"
523 +- select GENERIC_NET_UTILS
524 + help
525 + Say 'Y' here if you want your kernel to support the
526 + QNAP TS-409 platform.
527 +diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
528 +index cd483bfb5ca8..d13344b2ddcd 100644
529 +--- a/arch/arm/mach-orion5x/dns323-setup.c
530 ++++ b/arch/arm/mach-orion5x/dns323-setup.c
531 +@@ -173,10 +173,42 @@ static struct mv643xx_eth_platform_data dns323_eth_data = {
532 + .phy_addr = MV643XX_ETH_PHY_ADDR(8),
533 + };
534 +
535 ++/* dns323_parse_hex_*() taken from tsx09-common.c; should a common copy of these
536 ++ * functions be kept somewhere?
537 ++ */
538 ++static int __init dns323_parse_hex_nibble(char n)
539 ++{
540 ++ if (n >= '0' && n <= '9')
541 ++ return n - '0';
542 ++
543 ++ if (n >= 'A' && n <= 'F')
544 ++ return n - 'A' + 10;
545 ++
546 ++ if (n >= 'a' && n <= 'f')
547 ++ return n - 'a' + 10;
548 ++
549 ++ return -1;
550 ++}
551 ++
552 ++static int __init dns323_parse_hex_byte(const char *b)
553 ++{
554 ++ int hi;
555 ++ int lo;
556 ++
557 ++ hi = dns323_parse_hex_nibble(b[0]);
558 ++ lo = dns323_parse_hex_nibble(b[1]);
559 ++
560 ++ if (hi < 0 || lo < 0)
561 ++ return -1;
562 ++
563 ++ return (hi << 4) | lo;
564 ++}
565 ++
566 + static int __init dns323_read_mac_addr(void)
567 + {
568 + u_int8_t addr[6];
569 +- void __iomem *mac_page;
570 ++ int i;
571 ++ char *mac_page;
572 +
573 + /* MAC address is stored as a regular ol' string in /dev/mtdblock4
574 + * (0x007d0000-0x00800000) starting at offset 196480 (0x2ff80).
575 +@@ -185,8 +217,23 @@ static int __init dns323_read_mac_addr(void)
576 + if (!mac_page)
577 + return -ENOMEM;
578 +
579 +- if (!mac_pton((__force const char *) mac_page, addr))
580 +- goto error_fail;
581 ++ /* Sanity check the string we're looking at */
582 ++ for (i = 0; i < 5; i++) {
583 ++ if (*(mac_page + (i * 3) + 2) != ':') {
584 ++ goto error_fail;
585 ++ }
586 ++ }
587 ++
588 ++ for (i = 0; i < 6; i++) {
589 ++ int byte;
590 ++
591 ++ byte = dns323_parse_hex_byte(mac_page + (i * 3));
592 ++ if (byte < 0) {
593 ++ goto error_fail;
594 ++ }
595 ++
596 ++ addr[i] = byte;
597 ++ }
598 +
599 + iounmap(mac_page);
600 + printk("DNS-323: Found ethernet MAC address: %pM\n", addr);
601 +diff --git a/arch/arm/mach-orion5x/tsx09-common.c b/arch/arm/mach-orion5x/tsx09-common.c
602 +index 89774985d380..905d4f2dd0b8 100644
603 +--- a/arch/arm/mach-orion5x/tsx09-common.c
604 ++++ b/arch/arm/mach-orion5x/tsx09-common.c
605 +@@ -53,12 +53,53 @@ struct mv643xx_eth_platform_data qnap_tsx09_eth_data = {
606 + .phy_addr = MV643XX_ETH_PHY_ADDR(8),
607 + };
608 +
609 ++static int __init qnap_tsx09_parse_hex_nibble(char n)
610 ++{
611 ++ if (n >= '0' && n <= '9')
612 ++ return n - '0';
613 ++
614 ++ if (n >= 'A' && n <= 'F')
615 ++ return n - 'A' + 10;
616 ++
617 ++ if (n >= 'a' && n <= 'f')
618 ++ return n - 'a' + 10;
619 ++
620 ++ return -1;
621 ++}
622 ++
623 ++static int __init qnap_tsx09_parse_hex_byte(const char *b)
624 ++{
625 ++ int hi;
626 ++ int lo;
627 ++
628 ++ hi = qnap_tsx09_parse_hex_nibble(b[0]);
629 ++ lo = qnap_tsx09_parse_hex_nibble(b[1]);
630 ++
631 ++ if (hi < 0 || lo < 0)
632 ++ return -1;
633 ++
634 ++ return (hi << 4) | lo;
635 ++}
636 ++
637 + static int __init qnap_tsx09_check_mac_addr(const char *addr_str)
638 + {
639 + u_int8_t addr[6];
640 ++ int i;
641 +
642 +- if (!mac_pton(addr_str, addr))
643 +- return -1;
644 ++ for (i = 0; i < 6; i++) {
645 ++ int byte;
646 ++
647 ++ /*
648 ++ * Enforce "xx:xx:xx:xx:xx:xx\n" format.
649 ++ */
650 ++ if (addr_str[(i * 3) + 2] != ((i < 5) ? ':' : '\n'))
651 ++ return -1;
652 ++
653 ++ byte = qnap_tsx09_parse_hex_byte(addr_str + (i * 3));
654 ++ if (byte < 0)
655 ++ return -1;
656 ++ addr[i] = byte;
657 ++ }
658 +
659 + printk(KERN_INFO "tsx09: found ethernet mac address %pM\n", addr);
660 +
661 +@@ -77,12 +118,12 @@ void __init qnap_tsx09_find_mac_addr(u32 mem_base, u32 size)
662 + unsigned long addr;
663 +
664 + for (addr = mem_base; addr < (mem_base + size); addr += 1024) {
665 +- void __iomem *nor_page;
666 ++ char *nor_page;
667 + int ret = 0;
668 +
669 + nor_page = ioremap(addr, 1024);
670 + if (nor_page != NULL) {
671 +- ret = qnap_tsx09_check_mac_addr((__force const char *)nor_page);
672 ++ ret = qnap_tsx09_check_mac_addr(nor_page);
673 + iounmap(nor_page);
674 + }
675 +
676 +diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
677 +index 7a327bd32521..ebef8aacea83 100644
678 +--- a/arch/arm/plat-omap/dmtimer.c
679 ++++ b/arch/arm/plat-omap/dmtimer.c
680 +@@ -890,11 +890,8 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
681 + timer->irq = irq->start;
682 + timer->pdev = pdev;
683 +
684 +- /* Skip pm_runtime_enable for OMAP1 */
685 +- if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) {
686 +- pm_runtime_enable(dev);
687 +- pm_runtime_irq_safe(dev);
688 +- }
689 ++ pm_runtime_enable(dev);
690 ++ pm_runtime_irq_safe(dev);
691 +
692 + if (!timer->reserved) {
693 + ret = pm_runtime_get_sync(dev);
694 +diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
695 +index 338f82a7fdc7..2c93de7fffe5 100644
696 +--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
697 ++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
698 +@@ -326,8 +326,8 @@
699 + blsp2_spi5: spi@075ba000{
700 + compatible = "qcom,spi-qup-v2.2.1";
701 + reg = <0x075ba000 0x600>;
702 +- interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
703 +- clocks = <&gcc GCC_BLSP2_QUP5_SPI_APPS_CLK>,
704 ++ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
705 ++ clocks = <&gcc GCC_BLSP2_QUP6_SPI_APPS_CLK>,
706 + <&gcc GCC_BLSP2_AHB_CLK>;
707 + clock-names = "core", "iface";
708 + pinctrl-names = "default", "sleep";
709 +diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
710 +index cae331d553f8..a9d2dd03c977 100644
711 +--- a/arch/arm64/include/asm/spinlock.h
712 ++++ b/arch/arm64/include/asm/spinlock.h
713 +@@ -141,8 +141,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
714 + " cbnz %w1, 1f\n"
715 + " add %w1, %w0, %3\n"
716 + " casa %w0, %w1, %2\n"
717 +- " and %w1, %w1, #0xffff\n"
718 +- " eor %w1, %w1, %w0, lsr #16\n"
719 ++ " sub %w1, %w1, %3\n"
720 ++ " eor %w1, %w1, %w0\n"
721 + "1:")
722 + : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
723 + : "I" (1 << TICKET_SHIFT)
724 +diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
725 +index 801a16dbbdf6..7d2a15a0f625 100644
726 +--- a/arch/arm64/include/asm/stacktrace.h
727 ++++ b/arch/arm64/include/asm/stacktrace.h
728 +@@ -23,7 +23,7 @@ struct stackframe {
729 + unsigned long sp;
730 + unsigned long pc;
731 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER
732 +- unsigned int graph;
733 ++ int graph;
734 + #endif
735 + };
736 +
737 +diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
738 +index 74107134cc30..2de62aa91303 100644
739 +--- a/arch/arm64/kernel/cpu_errata.c
740 ++++ b/arch/arm64/kernel/cpu_errata.c
741 +@@ -160,7 +160,7 @@ static int enable_smccc_arch_workaround_1(void *data)
742 + case PSCI_CONDUIT_HVC:
743 + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
744 + ARM_SMCCC_ARCH_WORKAROUND_1, &res);
745 +- if (res.a0)
746 ++ if ((int)res.a0 < 0)
747 + return 0;
748 + cb = call_hvc_arch_workaround_1;
749 + smccc_start = __smccc_workaround_1_hvc_start;
750 +@@ -170,7 +170,7 @@ static int enable_smccc_arch_workaround_1(void *data)
751 + case PSCI_CONDUIT_SMC:
752 + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
753 + ARM_SMCCC_ARCH_WORKAROUND_1, &res);
754 +- if (res.a0)
755 ++ if ((int)res.a0 < 0)
756 + return 0;
757 + cb = call_smc_arch_workaround_1;
758 + smccc_start = __smccc_workaround_1_smc_start;
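
The sign test matters because ARM_SMCCC_ARCH_FEATURES returns a signed value in a0: negative (the SMCCC spec's NOT_SUPPORTED value is -1) means the workaround is absent, while zero and some positive values mean it is present. A sketch of the corrected probe shape, assuming the arm_smccc_1_1_* helpers this same patch relies on:

#include <linux/arm-smccc.h>
#include <linux/types.h>

static bool have_workaround_1_smc(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
	return (int)res.a0 >= 0;	/* res.a0 itself is unsigned long */
}
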
759 +diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
760 +index c2efddfca18c..0cc01e0d38eb 100644
761 +--- a/arch/arm64/kernel/stacktrace.c
762 ++++ b/arch/arm64/kernel/stacktrace.c
763 +@@ -72,6 +72,11 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
764 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER
765 + if (tsk->ret_stack &&
766 + (frame->pc == (unsigned long)return_to_handler)) {
767 ++ if (WARN_ON_ONCE(frame->graph == -1))
768 ++ return -EINVAL;
769 ++ if (frame->graph < -1)
770 ++ frame->graph += FTRACE_NOTRACE_DEPTH;
771 ++
772 + /*
773 + * This is a case where function graph tracer has
774 + * modified a return address (LR) in a stack frame
775 +diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
776 +index 59779699a1a4..5d9076e86200 100644
777 +--- a/arch/arm64/kernel/time.c
778 ++++ b/arch/arm64/kernel/time.c
779 +@@ -53,7 +53,7 @@ unsigned long profile_pc(struct pt_regs *regs)
780 + frame.sp = regs->sp;
781 + frame.pc = regs->pc;
782 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER
783 +- frame.graph = -1; /* no task info */
784 ++ frame.graph = current->curr_ret_stack;
785 + #endif
786 + do {
787 + int ret = unwind_frame(NULL, &frame);
788 +diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
789 +index 5ed0ea92c5bf..f851c9d651f0 100644
790 +--- a/arch/ia64/kernel/err_inject.c
791 ++++ b/arch/ia64/kernel/err_inject.c
792 +@@ -142,7 +142,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
793 + u64 virt_addr=simple_strtoull(buf, NULL, 16);
794 + int ret;
795 +
796 +- ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL);
797 ++ ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
798 + if (ret<=0) {
799 + #ifdef ERR_INJ_DEBUG
800 + printk("Virtual address %lx is not existing.\n",virt_addr);
801 +diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c
802 +index a0fc0c192427..3e8be0f54a44 100644
803 +--- a/arch/m68k/coldfire/device.c
804 ++++ b/arch/m68k/coldfire/device.c
805 +@@ -135,7 +135,11 @@ static struct platform_device mcf_fec0 = {
806 + .id = 0,
807 + .num_resources = ARRAY_SIZE(mcf_fec0_resources),
808 + .resource = mcf_fec0_resources,
809 +- .dev.platform_data = FEC_PDATA,
810 ++ .dev = {
811 ++ .dma_mask = &mcf_fec0.dev.coherent_dma_mask,
812 ++ .coherent_dma_mask = DMA_BIT_MASK(32),
813 ++ .platform_data = FEC_PDATA,
814 ++ }
815 + };
816 +
817 + #ifdef MCFFEC_BASE1
818 +@@ -167,7 +171,11 @@ static struct platform_device mcf_fec1 = {
819 + .id = 1,
820 + .num_resources = ARRAY_SIZE(mcf_fec1_resources),
821 + .resource = mcf_fec1_resources,
822 +- .dev.platform_data = FEC_PDATA,
823 ++ .dev = {
824 ++ .dma_mask = &mcf_fec1.dev.coherent_dma_mask,
825 ++ .coherent_dma_mask = DMA_BIT_MASK(32),
826 ++ .platform_data = FEC_PDATA,
827 ++ }
828 + };
829 + #endif /* MCFFEC_BASE1 */
830 + #endif /* CONFIG_FEC */
831 +diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
832 +index 6ed1ded87b8f..6420c83c29d1 100644
833 +--- a/arch/mips/cavium-octeon/octeon-irq.c
834 ++++ b/arch/mips/cavium-octeon/octeon-irq.c
835 +@@ -2271,7 +2271,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
836 +
837 + parent_irq = irq_of_parse_and_map(ciu_node, 0);
838 + if (!parent_irq) {
839 +- pr_err("ERROR: Couldn't acquire parent_irq for %s\n.",
840 ++ pr_err("ERROR: Couldn't acquire parent_irq for %s\n",
841 + ciu_node->name);
842 + return -EINVAL;
843 + }
844 +@@ -2283,7 +2283,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
845 +
846 + addr = of_get_address(ciu_node, 0, NULL, NULL);
847 + if (!addr) {
848 +- pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name);
849 ++ pr_err("ERROR: Couldn't acquire reg(0) %s\n", ciu_node->name);
850 + return -EINVAL;
851 + }
852 + host_data->raw_reg = (u64)phys_to_virt(
853 +@@ -2291,7 +2291,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
854 +
855 + addr = of_get_address(ciu_node, 1, NULL, NULL);
856 + if (!addr) {
857 +- pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name);
858 ++ pr_err("ERROR: Couldn't acquire reg(1) %s\n", ciu_node->name);
859 + return -EINVAL;
860 + }
861 + host_data->en_reg = (u64)phys_to_virt(
862 +@@ -2299,7 +2299,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
863 +
864 + r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
865 + if (r) {
866 +- pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.",
867 ++ pr_err("ERROR: Couldn't read cavium,max-bits from %s\n",
868 + ciu_node->name);
869 + return r;
870 + }
871 +@@ -2309,7 +2309,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
872 + &octeon_irq_domain_cib_ops,
873 + host_data);
874 + if (!cib_domain) {
875 +- pr_err("ERROR: Couldn't irq_domain_add_linear()\n.");
876 ++ pr_err("ERROR: Couldn't irq_domain_add_linear()\n");
877 + return -ENOMEM;
878 + }
879 +
880 +diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
881 +index aa3800c82332..d99ca862dae3 100644
882 +--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
883 ++++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
884 +@@ -167,7 +167,7 @@
885 + #define AR71XX_AHB_DIV_MASK 0x7
886 +
887 + #define AR724X_PLL_REG_CPU_CONFIG 0x00
888 +-#define AR724X_PLL_REG_PCIE_CONFIG 0x18
889 ++#define AR724X_PLL_REG_PCIE_CONFIG 0x10
890 +
891 + #define AR724X_PLL_FB_SHIFT 0
892 + #define AR724X_PLL_FB_MASK 0x3ff
893 +diff --git a/arch/mips/include/asm/machine.h b/arch/mips/include/asm/machine.h
894 +index 6b444cd9526f..db930cdc715f 100644
895 +--- a/arch/mips/include/asm/machine.h
896 ++++ b/arch/mips/include/asm/machine.h
897 +@@ -52,7 +52,7 @@ mips_machine_is_compatible(const struct mips_machine *mach, const void *fdt)
898 + if (!mach->matches)
899 + return NULL;
900 +
901 +- for (match = mach->matches; match->compatible; match++) {
902 ++ for (match = mach->matches; match->compatible[0]; match++) {
903 + if (fdt_node_check_compatible(fdt, 0, match->compatible) == 0)
904 + return match;
905 + }
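
The fix relies on how OF match tables are terminated: the sentinel is an all-zero entry, and since of_device_id's "compatible" is a char array embedded in the struct -- never a NULL pointer -- the loop must test its first byte. Sketch with hypothetical compatible strings:

#include <linux/mod_devicetable.h>

static const struct of_device_id demo_matches[] = {
	{ .compatible = "vendor,board-a" },	/* hypothetical strings */
	{ .compatible = "vendor,board-b" },
	{ /* sentinel */ },
};

static int count_entries(const struct of_device_id *m)
{
	int n = 0;

	/* m->compatible is char[128], so its address is never NULL;
	 * the terminator test must be on the first byte, as above. */
	for (; m->compatible[0]; m++)
		n++;
	return n;
}
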
906 +diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
907 +index 0c8ae2cc6380..8f7bf74d1c0b 100644
908 +--- a/arch/mips/kernel/ptrace.c
909 ++++ b/arch/mips/kernel/ptrace.c
910 +@@ -483,7 +483,7 @@ static int fpr_get_msa(struct task_struct *target,
911 + /*
912 + * Copy the floating-point context to the supplied NT_PRFPREG buffer.
913 + * Choose the appropriate helper for general registers, and then copy
914 +- * the FCSR register separately.
915 ++ * the FCSR and FIR registers separately.
916 + */
917 + static int fpr_get(struct task_struct *target,
918 + const struct user_regset *regset,
919 +@@ -491,6 +491,7 @@ static int fpr_get(struct task_struct *target,
920 + void *kbuf, void __user *ubuf)
921 + {
922 + const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
923 ++ const int fir_pos = fcr31_pos + sizeof(u32);
924 + int err;
925 +
926 + if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
927 +@@ -503,6 +504,12 @@ static int fpr_get(struct task_struct *target,
928 + err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
929 + &target->thread.fpu.fcr31,
930 + fcr31_pos, fcr31_pos + sizeof(u32));
931 ++ if (err)
932 ++ return err;
933 ++
934 ++ err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
935 ++ &boot_cpu_data.fpu_id,
936 ++ fir_pos, fir_pos + sizeof(u32));
937 +
938 + return err;
939 + }
940 +@@ -551,7 +558,8 @@ static int fpr_set_msa(struct task_struct *target,
941 + /*
942 + * Copy the supplied NT_PRFPREG buffer to the floating-point context.
943 + * Choose the appropriate helper for general registers, and then copy
944 +- * the FCSR register separately.
945 ++ * the FCSR register separately. Ignore the incoming FIR register
946 ++ * contents though, as the register is read-only.
947 + *
948 + * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
949 + * which is supposed to have been guaranteed by the kernel before
950 +@@ -565,6 +573,7 @@ static int fpr_set(struct task_struct *target,
951 + const void *kbuf, const void __user *ubuf)
952 + {
953 + const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
954 ++ const int fir_pos = fcr31_pos + sizeof(u32);
955 + u32 fcr31;
956 + int err;
957 +
958 +@@ -592,6 +601,11 @@ static int fpr_set(struct task_struct *target,
959 + ptrace_setfcr31(target, fcr31);
960 + }
961 +
962 ++ if (count > 0)
963 ++ err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
964 ++ fir_pos,
965 ++ fir_pos + sizeof(u32));
966 ++
967 + return err;
968 + }
969 +
970 +@@ -813,7 +827,7 @@ long arch_ptrace(struct task_struct *child, long request,
971 + fregs = get_fpu_regs(child);
972 +
973 + #ifdef CONFIG_32BIT
974 +- if (test_thread_flag(TIF_32BIT_FPREGS)) {
975 ++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
976 + /*
977 + * The odd registers are actually the high
978 + * order bits of the values stored in the even
979 +@@ -902,7 +916,7 @@ long arch_ptrace(struct task_struct *child, long request,
980 +
981 + init_fp_ctx(child);
982 + #ifdef CONFIG_32BIT
983 +- if (test_thread_flag(TIF_32BIT_FPREGS)) {
984 ++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
985 + /*
986 + * The odd registers are actually the high
987 + * order bits of the values stored in the even
988 +diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
989 +index 5fcbdcd7abd0..bc9afbabbe14 100644
990 +--- a/arch/mips/kernel/ptrace32.c
991 ++++ b/arch/mips/kernel/ptrace32.c
992 +@@ -97,7 +97,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
993 + break;
994 + }
995 + fregs = get_fpu_regs(child);
996 +- if (test_thread_flag(TIF_32BIT_FPREGS)) {
997 ++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
998 + /*
999 + * The odd registers are actually the high
1000 + * order bits of the values stored in the even
1001 +@@ -204,7 +204,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1002 + sizeof(child->thread.fpu));
1003 + child->thread.fpu.fcr31 = 0;
1004 + }
1005 +- if (test_thread_flag(TIF_32BIT_FPREGS)) {
1006 ++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
1007 + /*
1008 + * The odd registers are actually the high
1009 + * order bits of the values stored in the even
1010 +diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
1011 +index 29ec9ab3fd55..a2c46f539e3e 100644
1012 +--- a/arch/mips/kvm/mips.c
1013 ++++ b/arch/mips/kvm/mips.c
1014 +@@ -42,7 +42,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
1015 + { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
1016 + { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
1017 + { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
1018 +- { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
1019 ++ { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
1020 + { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
1021 + { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
1022 + { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
1023 +diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
1024 +index 9d0107fbb169..43fa682e55da 100644
1025 +--- a/arch/mips/mm/c-r4k.c
1026 ++++ b/arch/mips/mm/c-r4k.c
1027 +@@ -851,9 +851,12 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
1028 + /*
1029 + * Either no secondary cache or the available caches don't have the
1030 + * subset property so we have to flush the primary caches
1031 +- * explicitly
1032 ++ * explicitly.
1033 ++ * If we would need IPI to perform an INDEX-type operation, then
1034 ++ * we have to use the HIT-type alternative as IPI cannot be used
1035 ++ * here due to interrupts possibly being disabled.
1036 + */
1037 +- if (size >= dcache_size) {
1038 ++ if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
1039 + r4k_blast_dcache();
1040 + } else {
1041 + R4600_HIT_CACHEOP_WAR_IMPL;
1042 +@@ -890,7 +893,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
1043 + return;
1044 + }
1045 +
1046 +- if (size >= dcache_size) {
1047 ++ if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
1048 + r4k_blast_dcache();
1049 + } else {
1050 + R4600_HIT_CACHEOP_WAR_IMPL;
1051 +diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c
1052 +index 8b937300fb7f..fd26fadc8617 100644
1053 +--- a/arch/mips/txx9/rbtx4939/setup.c
1054 ++++ b/arch/mips/txx9/rbtx4939/setup.c
1055 +@@ -186,7 +186,7 @@ static void __init rbtx4939_update_ioc_pen(void)
1056 +
1057 + #define RBTX4939_MAX_7SEGLEDS 8
1058 +
1059 +-#if IS_ENABLED(CONFIG_LEDS_CLASS)
1060 ++#if IS_BUILTIN(CONFIG_LEDS_CLASS)
1061 + static u8 led_val[RBTX4939_MAX_7SEGLEDS];
1062 + struct rbtx4939_led_data {
1063 + struct led_classdev cdev;
1064 +@@ -261,7 +261,7 @@ static inline void rbtx4939_led_setup(void)
1065 +
1066 + static void __rbtx4939_7segled_putc(unsigned int pos, unsigned char val)
1067 + {
1068 +-#if IS_ENABLED(CONFIG_LEDS_CLASS)
1069 ++#if IS_BUILTIN(CONFIG_LEDS_CLASS)
1070 + unsigned long flags;
1071 + local_irq_save(flags);
1072 + /* bit7: reserved for LED class */
1073 +diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
1074 +index 9d47f2efa830..bb69f3955b59 100644
1075 +--- a/arch/powerpc/boot/Makefile
1076 ++++ b/arch/powerpc/boot/Makefile
1077 +@@ -92,7 +92,8 @@ $(addprefix $(obj)/,$(zlib-y)): \
1078 + libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
1079 + libfdtheader := fdt.h libfdt.h libfdt_internal.h
1080 +
1081 +-$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o): \
1082 ++$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o \
1083 ++ treeboot-akebono.o treeboot-currituck.o treeboot-iss4xx.o): \
1084 + $(addprefix $(obj)/,$(libfdtheader))
1085 +
1086 + src-wlib-y := string.S crt0.S crtsavres.S stdio.c decompress.c main.c \
1087 +diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
1088 +index 744fd54de374..1bcc84903930 100644
1089 +--- a/arch/powerpc/include/asm/irq_work.h
1090 ++++ b/arch/powerpc/include/asm/irq_work.h
1091 +@@ -5,5 +5,6 @@ static inline bool arch_irq_work_has_interrupt(void)
1092 + {
1093 + return true;
1094 + }
1095 ++extern void arch_irq_work_raise(void);
1096 +
1097 + #endif /* _ASM_POWERPC_IRQ_WORK_H */
1098 +diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
1099 +index 218cba2f5699..0a2b247dbc6b 100644
1100 +--- a/arch/powerpc/kvm/book3s_hv.c
1101 ++++ b/arch/powerpc/kvm/book3s_hv.c
1102 +@@ -3107,15 +3107,17 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
1103 + goto up_out;
1104 +
1105 + psize = vma_kernel_pagesize(vma);
1106 +- porder = __ilog2(psize);
1107 +
1108 + up_read(&current->mm->mmap_sem);
1109 +
1110 + /* We can handle 4k, 64k or 16M pages in the VRMA */
1111 +- err = -EINVAL;
1112 +- if (!(psize == 0x1000 || psize == 0x10000 ||
1113 +- psize == 0x1000000))
1114 +- goto out_srcu;
1115 ++ if (psize >= 0x1000000)
1116 ++ psize = 0x1000000;
1117 ++ else if (psize >= 0x10000)
1118 ++ psize = 0x10000;
1119 ++ else
1120 ++ psize = 0x1000;
1121 ++ porder = __ilog2(psize);
1122 +
1123 + /* Update VRMASD field in the LPCR */
1124 + senc = slb_pgsize_encoding(psize);
1125 +diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
1126 +index a51c188b81f3..6cff96e0d77b 100644
1127 +--- a/arch/powerpc/mm/numa.c
1128 ++++ b/arch/powerpc/mm/numa.c
1129 +@@ -551,7 +551,7 @@ static int numa_setup_cpu(unsigned long lcpu)
1130 + nid = of_node_to_nid_single(cpu);
1131 +
1132 + out_present:
1133 +- if (nid < 0 || !node_online(nid))
1134 ++ if (nid < 0 || !node_possible(nid))
1135 + nid = first_online_node;
1136 +
1137 + map_cpu_to_node(lcpu, nid);
1138 +@@ -904,6 +904,32 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
1139 + NODE_DATA(nid)->node_spanned_pages = spanned_pages;
1140 + }
1141 +
1142 ++static void __init find_possible_nodes(void)
1143 ++{
1144 ++ struct device_node *rtas;
1145 ++ u32 numnodes, i;
1146 ++
1147 ++ if (min_common_depth <= 0)
1148 ++ return;
1149 ++
1150 ++ rtas = of_find_node_by_path("/rtas");
1151 ++ if (!rtas)
1152 ++ return;
1153 ++
1154 ++ if (of_property_read_u32_index(rtas,
1155 ++ "ibm,max-associativity-domains",
1156 ++ min_common_depth, &numnodes))
1157 ++ goto out;
1158 ++
1159 ++ for (i = 0; i < numnodes; i++) {
1160 ++ if (!node_possible(i))
1161 ++ node_set(i, node_possible_map);
1162 ++ }
1163 ++
1164 ++out:
1165 ++ of_node_put(rtas);
1166 ++}
1167 ++
1168 + void __init initmem_init(void)
1169 + {
1170 + int nid, cpu;
1171 +@@ -917,12 +943,15 @@ void __init initmem_init(void)
1172 + memblock_dump_all();
1173 +
1174 + /*
1175 +- * Reduce the possible NUMA nodes to the online NUMA nodes,
1176 +- * since we do not support node hotplug. This ensures that we
1177 +- * lower the maximum NUMA node ID to what is actually present.
1178 ++ * Modify the set of possible NUMA nodes to reflect information
1179 ++ * available about the set of online nodes, and the set of nodes
1180 ++ * that we expect to make use of for this platform's affinity
1181 ++ * calculations.
1182 + */
1183 + nodes_and(node_possible_map, node_possible_map, node_online_map);
1184 +
1185 ++ find_possible_nodes();
1186 ++
1187 + for_each_online_node(nid) {
1188 + unsigned long start_pfn, end_pfn;
1189 +
1190 +@@ -1274,6 +1303,40 @@ static long vphn_get_associativity(unsigned long cpu,
1191 + return rc;
1192 + }
1193 +
1194 ++static inline int find_and_online_cpu_nid(int cpu)
1195 ++{
1196 ++ __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
1197 ++ int new_nid;
1198 ++
1199 ++ /* Use associativity from first thread for all siblings */
1200 ++ vphn_get_associativity(cpu, associativity);
1201 ++ new_nid = associativity_to_nid(associativity);
1202 ++ if (new_nid < 0 || !node_possible(new_nid))
1203 ++ new_nid = first_online_node;
1204 ++
1205 ++ if (NODE_DATA(new_nid) == NULL) {
1206 ++#ifdef CONFIG_MEMORY_HOTPLUG
1207 ++ /*
1208 ++ * Need to ensure that NODE_DATA is initialized for a node from
1209 ++ * available memory (see memblock_alloc_try_nid). If unable to
1210 ++ * init the node, then default to nearest node that has memory
1211 ++ * installed.
1212 ++ */
1213 ++ if (try_online_node(new_nid))
1214 ++ new_nid = first_online_node;
1215 ++#else
1216 ++ /*
1217 ++ * Default to using the nearest node that has memory installed.
1218 ++ * Otherwise, it would be necessary to patch the kernel MM code
1219 ++ * to deal with more memoryless-node error conditions.
1220 ++ */
1221 ++ new_nid = first_online_node;
1222 ++#endif
1223 ++ }
1224 ++
1225 ++ return new_nid;
1226 ++}
1227 ++
1228 + /*
1229 + * Update the CPU maps and sysfs entries for a single CPU when its NUMA
1230 + * characteristics change. This function doesn't perform any locking and is
1231 +@@ -1339,7 +1402,6 @@ int arch_update_cpu_topology(void)
1232 + {
1233 + unsigned int cpu, sibling, changed = 0;
1234 + struct topology_update_data *updates, *ud;
1235 +- __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
1236 + cpumask_t updated_cpus;
1237 + struct device *dev;
1238 + int weight, new_nid, i = 0;
1239 +@@ -1374,11 +1436,7 @@ int arch_update_cpu_topology(void)
1240 + continue;
1241 + }
1242 +
1243 +- /* Use associativity from first thread for all siblings */
1244 +- vphn_get_associativity(cpu, associativity);
1245 +- new_nid = associativity_to_nid(associativity);
1246 +- if (new_nid < 0 || !node_online(new_nid))
1247 +- new_nid = first_online_node;
1248 ++ new_nid = find_and_online_cpu_nid(cpu);
1249 +
1250 + if (new_nid == numa_cpu_lookup_table[cpu]) {
1251 + cpumask_andnot(&cpu_associativity_changes_mask,
1252 +diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
1253 +index 7e706f36e364..9c58194c7ea5 100644
1254 +--- a/arch/powerpc/net/bpf_jit_comp.c
1255 ++++ b/arch/powerpc/net/bpf_jit_comp.c
1256 +@@ -329,6 +329,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
1257 + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
1258 + PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
1259 + break;
1260 ++ case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
1261 ++ PPC_LWZ_OFFS(r_A, r_skb, K);
1262 ++ break;
1263 + case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
1264 + PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
1265 + break;
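
The new case exists because seccomp filters are classic BPF: the kernel's seccomp_check_filter() rewrites BPF_LD|BPF_W|BPF_ABS loads from struct seccomp_data into the BPF_LDX|BPF_W|BPF_ABS form the JIT sees here. A minimal userspace filter that generates such a load (hypothetical allow-all policy, sketch only):

#include <stddef.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <sys/prctl.h>

int install_allow_all_filter(void)
{
	struct sock_filter insns[] = {
		/* A = seccomp_data.nr; becomes LDX|W|ABS in-kernel */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return -1;
	return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
}
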
1266 +diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
1267 +index bf949623de90..771edffa2d40 100644
1268 +--- a/arch/powerpc/perf/core-book3s.c
1269 ++++ b/arch/powerpc/perf/core-book3s.c
1270 +@@ -448,6 +448,16 @@ static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
1271 + /* invalid entry */
1272 + continue;
1273 +
1274 ++ /*
1275 ++ * BHRB rolling buffer could very much contain the kernel
1276 ++ * addresses at this point. Check the privileges before
1277 ++ * exporting it to userspace (avoid exposure of regions
1278 ++ * where we could have speculative execution)
1279 ++ */
1280 ++ if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) &&
1281 ++ is_kernel_addr(addr))
1282 ++ continue;
1283 ++
1284 + /* Branches are read most recent first (ie. mfbhrb 0 is
1285 + * the most recent branch).
1286 + * There are two types of valid entries:
1287 +@@ -1188,6 +1198,7 @@ static void power_pmu_disable(struct pmu *pmu)
1288 + */
1289 + write_mmcr0(cpuhw, val);
1290 + mb();
1291 ++ isync();
1292 +
1293 + /*
1294 + * Disable instruction sampling if it was enabled
1295 +@@ -1196,12 +1207,26 @@ static void power_pmu_disable(struct pmu *pmu)
1296 + mtspr(SPRN_MMCRA,
1297 + cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
1298 + mb();
1299 ++ isync();
1300 + }
1301 +
1302 + cpuhw->disabled = 1;
1303 + cpuhw->n_added = 0;
1304 +
1305 + ebb_switch_out(mmcr0);
1306 ++
1307 ++#ifdef CONFIG_PPC64
1308 ++ /*
1309 ++ * These are readable by userspace, may contain kernel
1310 ++ * addresses and are not switched by context switch, so clear
1311 ++ * them now to avoid leaking anything to userspace in general
1312 ++ * including to another process.
1313 ++ */
1314 ++ if (ppmu->flags & PPMU_ARCH_207S) {
1315 ++ mtspr(SPRN_SDAR, 0);
1316 ++ mtspr(SPRN_SIAR, 0);
1317 ++ }
1318 ++#endif
1319 + }
1320 +
1321 + local_irq_restore(flags);
1322 +diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
1323 +index b9aac951a90f..f37567ed640c 100644
1324 +--- a/arch/powerpc/sysdev/mpic.c
1325 ++++ b/arch/powerpc/sysdev/mpic.c
1326 +@@ -626,7 +626,7 @@ static inline u32 mpic_physmask(u32 cpumask)
1327 + int i;
1328 + u32 mask = 0;
1329 +
1330 +- for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1)
1331 ++ for (i = 0; i < min(32, NR_CPUS) && cpu_possible(i); ++i, cpumask >>= 1)
1332 + mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
1333 + return mask;
1334 + }
1335 +diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
1336 +index ced6c9b8f04d..51f842c0a175 100644
1337 +--- a/arch/s390/kvm/vsie.c
1338 ++++ b/arch/s390/kvm/vsie.c
1339 +@@ -549,7 +549,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1340 +
1341 + gpa = scb_o->itdba & ~0xffUL;
1342 + if (gpa && (scb_s->ecb & 0x10U)) {
1343 +- if (!(gpa & ~0x1fffU)) {
1344 ++ if (!(gpa & ~0x1fffUL)) {
1345 + rc = set_validity_icpt(scb_s, 0x0080U);
1346 + goto unpin;
1347 + }
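
The one-character fix addresses an integer-promotion trap: ~0x1fffU is computed in 32-bit arithmetic and then zero-extended, so ANDing it with a 64-bit guest address silently discards the upper 32 bits and the validity check misses high addresses. A standalone demonstration (assumes an LP64 target where unsigned long is 64-bit):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gpa = 0x100000000ULL;	/* 4 GiB, clearly above 8 KiB */

	/* ~0x1fffU is the 32-bit value 0xffffe000, zero-extended for
	 * the AND, so bits 32..63 of gpa are dropped: prints 0 */
	printf("%#llx\n", (unsigned long long)(gpa & ~0x1fffU));

	/* with a 64-bit constant the high bits survive: 0x100000000 */
	printf("%#llx\n", (unsigned long long)(gpa & ~0x1fffUL));
	return 0;
}
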
1348 +diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
1349 +index c001f782c5f1..28cc61216b64 100644
1350 +--- a/arch/sh/kernel/entry-common.S
1351 ++++ b/arch/sh/kernel/entry-common.S
1352 +@@ -255,7 +255,7 @@ debug_trap:
1353 + mov.l @r8, r8
1354 + jsr @r8
1355 + nop
1356 +- bra __restore_all
1357 ++ bra ret_from_exception
1358 + nop
1359 + CFI_ENDPROC
1360 +
1361 +diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
1362 +index 24827a3f733a..89d299ccdfa6 100644
1363 +--- a/arch/sparc/include/asm/atomic_64.h
1364 ++++ b/arch/sparc/include/asm/atomic_64.h
1365 +@@ -82,7 +82,11 @@ ATOMIC_OPS(xor)
1366 + #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
1367 +
1368 + #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
1369 +-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1370 ++
1371 ++static inline int atomic_xchg(atomic_t *v, int new)
1372 ++{
1373 ++ return xchg(&v->counter, new);
1374 ++}
1375 +
1376 + static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1377 + {
1378 +diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
1379 +index b6802b978140..81ad06a1672f 100644
1380 +--- a/arch/sparc/include/asm/pgtable_64.h
1381 ++++ b/arch/sparc/include/asm/pgtable_64.h
1382 +@@ -952,7 +952,7 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
1383 + pmd_t *pmd);
1384 +
1385 + #define __HAVE_ARCH_PMDP_INVALIDATE
1386 +-extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
1387 ++extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
1388 + pmd_t *pmdp);
1389 +
1390 + #define __HAVE_ARCH_PGTABLE_DEPOSIT
1391 +diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
1392 +index c56a195c9071..b2722ed31053 100644
1393 +--- a/arch/sparc/mm/tlb.c
1394 ++++ b/arch/sparc/mm/tlb.c
1395 +@@ -219,17 +219,28 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1396 + }
1397 + }
1398 +
1399 ++static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
1400 ++ unsigned long address, pmd_t *pmdp, pmd_t pmd)
1401 ++{
1402 ++ pmd_t old;
1403 ++
1404 ++ do {
1405 ++ old = *pmdp;
1406 ++ } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
1407 ++
1408 ++ return old;
1409 ++}
1410 ++
1411 + /*
1412 + * This routine is only called when splitting a THP
1413 + */
1414 +-void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
1415 ++pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
1416 + pmd_t *pmdp)
1417 + {
1418 +- pmd_t entry = *pmdp;
1419 +-
1420 +- pmd_val(entry) &= ~_PAGE_VALID;
1421 ++ pmd_t old, entry;
1422 +
1423 +- set_pmd_at(vma->vm_mm, address, pmdp, entry);
1424 ++ entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
1425 ++ old = pmdp_establish(vma, address, pmdp, entry);
1426 + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
1427 +
1428 + /*
1429 +@@ -240,6 +251,8 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
1430 + if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
1431 + !is_huge_zero_page(pmd_page(entry)))
1432 + (vma->vm_mm)->context.thp_pte_count--;
1433 ++
1434 ++ return old;
1435 + }
1436 +
1437 + void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
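
pmdp_establish() above is the standard cmpxchg loop for replacing a live page-table entry without losing concurrent hardware updates such as access/dirty bits: retry the swap until it succeeds against an unchanged snapshot, then return what was replaced. The generic shape, as a C11 sketch:

#include <stdatomic.h>
#include <stdint.h>

/* Atomically replace a 64-bit entry and hand back the value it
 * replaced, so the caller (here, pmdp_invalidate) sees every bit
 * the hardware may have set in the meantime. */
static uint64_t establish(_Atomic uint64_t *entry, uint64_t new_val)
{
	uint64_t old = atomic_load_explicit(entry, memory_order_relaxed);

	/* on failure, 'old' is refreshed with the current value */
	while (!atomic_compare_exchange_weak(entry, &old, new_val))
		;
	return old;
}
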
1438 +diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
1439 +index 02e547f9ca3f..655a65eaf105 100644
1440 +--- a/arch/x86/events/core.c
1441 ++++ b/arch/x86/events/core.c
1442 +@@ -1155,16 +1155,13 @@ int x86_perf_event_set_period(struct perf_event *event)
1443 +
1444 + per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1445 +
1446 +- if (!(hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) ||
1447 +- local64_read(&hwc->prev_count) != (u64)-left) {
1448 +- /*
1449 +- * The hw event starts counting from this event offset,
1450 +- * mark it to be able to extra future deltas:
1451 +- */
1452 +- local64_set(&hwc->prev_count, (u64)-left);
1453 ++ /*
1454 ++ * The hw event starts counting from this event offset,
1455 ++ * mark it to be able to extra future deltas:
1456 ++ */
1457 ++ local64_set(&hwc->prev_count, (u64)-left);
1458 +
1459 +- wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
1460 +- }
1461 ++ wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
1462 +
1463 + /*
1464 + * Due to erratum on certan cpu we need
1465 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
1466 +index 6f353a874178..815039327932 100644
1467 +--- a/arch/x86/events/intel/core.c
1468 ++++ b/arch/x86/events/intel/core.c
1469 +@@ -2066,9 +2066,15 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
1470 + int bit, loops;
1471 + u64 status;
1472 + int handled;
1473 ++ int pmu_enabled;
1474 +
1475 + cpuc = this_cpu_ptr(&cpu_hw_events);
1476 +
1477 ++ /*
1478 ++ * Save the PMU state.
1479 ++ * It needs to be restored when leaving the handler.
1480 ++ */
1481 ++ pmu_enabled = cpuc->enabled;
1482 + /*
1483 + * No known reason to not always do late ACK,
1484 + * but just in case do it opt-in.
1485 +@@ -2076,6 +2082,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
1486 + if (!x86_pmu.late_ack)
1487 + apic_write(APIC_LVTPC, APIC_DM_NMI);
1488 + intel_bts_disable_local();
1489 ++ cpuc->enabled = 0;
1490 + __intel_pmu_disable_all();
1491 + handled = intel_pmu_drain_bts_buffer();
1492 + handled += intel_bts_interrupt();
1493 +@@ -2173,7 +2180,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
1494 +
1495 + done:
1496 + /* Only restore PMU state when it's active. See x86_pmu_disable(). */
1497 +- if (cpuc->enabled)
1498 ++ cpuc->enabled = pmu_enabled;
1499 ++ if (pmu_enabled)
1500 + __intel_pmu_enable_all(0, true);
1501 + intel_bts_enable_local();
1502 +
1503 +@@ -3019,7 +3027,7 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
1504 + * Therefore the effective (average) period matches the requested period,
1505 + * despite coarser hardware granularity.
1506 + */
1507 +-static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
1508 ++static u64 bdw_limit_period(struct perf_event *event, u64 left)
1509 + {
1510 + if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
1511 + X86_CONFIG(.event=0xc0, .umask=0x01)) {
1512 +diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
1513 +index 8e7a3f1df3a5..f26e26e4d84f 100644
1514 +--- a/arch/x86/events/intel/ds.c
1515 ++++ b/arch/x86/events/intel/ds.c
1516 +@@ -1110,6 +1110,7 @@ static void setup_pebs_sample_data(struct perf_event *event,
1517 + if (pebs == NULL)
1518 + return;
1519 +
1520 ++ regs->flags &= ~PERF_EFLAGS_EXACT;
1521 + sample_type = event->attr.sample_type;
1522 + dsrc = sample_type & PERF_SAMPLE_DATA_SRC;
1523 +
1524 +@@ -1154,7 +1155,6 @@ static void setup_pebs_sample_data(struct perf_event *event,
1525 + */
1526 + *regs = *iregs;
1527 + regs->flags = pebs->flags;
1528 +- set_linear_ip(regs, pebs->ip);
1529 +
1530 + if (sample_type & PERF_SAMPLE_REGS_INTR) {
1531 + regs->ax = pebs->ax;
1532 +@@ -1190,13 +1190,22 @@ static void setup_pebs_sample_data(struct perf_event *event,
1533 + #endif
1534 + }
1535 +
1536 +- if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) {
1537 +- regs->ip = pebs->real_ip;
1538 +- regs->flags |= PERF_EFLAGS_EXACT;
1539 +- } else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(regs))
1540 +- regs->flags |= PERF_EFLAGS_EXACT;
1541 +- else
1542 +- regs->flags &= ~PERF_EFLAGS_EXACT;
1543 ++ if (event->attr.precise_ip > 1) {
1544 ++ /* Haswell and later have the eventing IP, so use it: */
1545 ++ if (x86_pmu.intel_cap.pebs_format >= 2) {
1546 ++ set_linear_ip(regs, pebs->real_ip);
1547 ++ regs->flags |= PERF_EFLAGS_EXACT;
1548 ++ } else {
1549 ++ /* Otherwise use PEBS off-by-1 IP: */
1550 ++ set_linear_ip(regs, pebs->ip);
1551 ++
1552 ++ /* ... and try to fix it up using the LBR entries: */
1553 ++ if (intel_pmu_pebs_fixup_ip(regs))
1554 ++ regs->flags |= PERF_EFLAGS_EXACT;
1555 ++ }
1556 ++ } else
1557 ++ set_linear_ip(regs, pebs->ip);
1558 ++
1559 +
1560 + if ((sample_type & PERF_SAMPLE_ADDR) &&
1561 + x86_pmu.intel_cap.pebs_format >= 1)
1562 +@@ -1263,17 +1272,84 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit)
1563 + return NULL;
1564 + }
1565 +
1566 ++/*
1567 ++ * Special variant of intel_pmu_save_and_restart() for auto-reload.
1568 ++ */
1569 ++static int
1570 ++intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
1571 ++{
1572 ++ struct hw_perf_event *hwc = &event->hw;
1573 ++ int shift = 64 - x86_pmu.cntval_bits;
1574 ++ u64 period = hwc->sample_period;
1575 ++ u64 prev_raw_count, new_raw_count;
1576 ++ s64 new, old;
1577 ++
1578 ++ WARN_ON(!period);
1579 ++
1580 ++ /*
1581 ++ * drain_pebs() only happens when the PMU is disabled.
1582 ++ */
1583 ++ WARN_ON(this_cpu_read(cpu_hw_events.enabled));
1584 ++
1585 ++ prev_raw_count = local64_read(&hwc->prev_count);
1586 ++ rdpmcl(hwc->event_base_rdpmc, new_raw_count);
1587 ++ local64_set(&hwc->prev_count, new_raw_count);
1588 ++
1589 ++ /*
1590 ++ * Since the counter increments a negative counter value and
1591 ++ * overflows on the sign switch, giving the interval:
1592 ++ *
1593 ++ * [-period, 0]
1594 ++ *
1595 ++ * the difference between two consecutive reads is:
1596 ++ *
1597 ++ * A) value2 - value1;
1598 ++ * when no overflows have happened in between,
1599 ++ *
1600 ++ * B) (0 - value1) + (value2 - (-period));
1601 ++ * when one overflow happened in between,
1602 ++ *
1603 ++ * C) (0 - value1) + (n - 1) * (period) + (value2 - (-period));
1604 ++ * when @n overflows happened in between.
1605 ++ *
1606 ++ * Here A) is the obvious difference; B) is the extension across
1607 ++ * the interval boundary, where the first term runs to the top of
1608 ++ * the interval and the second term starts from the bottom of the
1609 ++ * next interval; and C) is the extension to multiple intervals,
1610 ++ * where the middle term covers the whole intervals in between.
1611 ++ *
1612 ++ * An equivalent of C, by reduction, is:
1613 ++ *
1614 ++ * value2 - value1 + n * period
1615 ++ */
1616 ++ new = ((s64)(new_raw_count << shift) >> shift);
1617 ++ old = ((s64)(prev_raw_count << shift) >> shift);
1618 ++ local64_add(new - old + count * period, &event->count);
1619 ++
1620 ++ perf_event_update_userpage(event);
1621 ++
1622 ++ return 0;
1623 ++}
1624 ++
1625 + static void __intel_pmu_pebs_event(struct perf_event *event,
1626 + struct pt_regs *iregs,
1627 + void *base, void *top,
1628 + int bit, int count)
1629 + {
1630 ++ struct hw_perf_event *hwc = &event->hw;
1631 + struct perf_sample_data data;
1632 + struct pt_regs regs;
1633 + void *at = get_next_pebs_record_by_bit(base, top, bit);
1634 +
1635 +- if (!intel_pmu_save_and_restart(event) &&
1636 +- !(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD))
1637 ++ if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
1638 ++ /*
1639 ++ * For now, auto-reload is only enabled in fixed period mode.
1640 ++ * The reload value is always hwc->sample_period.
1641 ++ * This may need to change if auto-reload gets enabled in
1642 ++ * freq mode later.
1643 ++ */
1644 ++ intel_pmu_save_and_restart_reload(event, count);
1645 ++ } else if (!intel_pmu_save_and_restart(event))
1646 + return;
1647 +
1648 + while (count > 1) {
1649 +@@ -1325,8 +1401,11 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
1650 + return;
1651 +
1652 + n = top - at;
1653 +- if (n <= 0)
1654 ++ if (n <= 0) {
1655 ++ if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
1656 ++ intel_pmu_save_and_restart_reload(event, 0);
1657 + return;
1658 ++ }
1659 +
1660 + __intel_pmu_pebs_event(event, iregs, at, top, 0, n);
1661 + }
1662 +@@ -1349,8 +1428,22 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
1663 +
1664 + ds->pebs_index = ds->pebs_buffer_base;
1665 +
1666 +- if (unlikely(base >= top))
1667 ++ if (unlikely(base >= top)) {
1668 ++ /*
1669 ++ * drain_pebs() can be called twice in a short period for an
1670 ++ * auto-reload event in pmu::read(), with no overflows having
1671 ++ * happened in between. intel_pmu_save_and_restart_reload()
1672 ++ * still needs to be called to update the event->count for
1673 ++ * this case.
1674 ++ */
1675 ++ for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled,
1676 ++ x86_pmu.max_pebs_events) {
1677 ++ event = cpuc->events[bit];
1678 ++ if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
1679 ++ intel_pmu_save_and_restart_reload(event, 0);
1680 ++ }
1681 + return;
1682 ++ }
1683 +
1684 + for (at = base; at < top; at += x86_pmu.pebs_record_size) {
1685 + struct pebs_record_nhm *p = at;
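
The interval arithmetic in the intel_pmu_save_and_restart_reload() comment can be
checked with concrete numbers. The standalone sketch below assumes a 48-bit
counter (x86_pmu.cntval_bits = 48) and a period of 100, with one overflow between
the two reads, so case B applies: (0 - value1) + (value2 - (-period)) reduces to
new - old + 1 * period.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int shift = 64 - 48;          /* assume cntval_bits = 48 */
	const uint64_t mask = (1ULL << 48) - 1;
	uint64_t period = 100;
	uint64_t prev_raw = (uint64_t)-70 & mask;  /* value1 = -70 */
	uint64_t new_raw  = (uint64_t)-90 & mask;  /* value2 = -90 */
	int count = 1;                      /* one record = one overflow */

	/* same sign extension as the kernel code */
	int64_t new = (int64_t)(new_raw << shift) >> shift;
	int64_t old = (int64_t)(prev_raw << shift) >> shift;

	/* case B: (0 - -70) + (-90 - -100) = 70 + 10 = 80 */
	printf("delta = %lld\n",
	       (long long)(new - old + count * (int64_t)period));
	return 0;
}
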
1686 +diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
1687 +index bcbb1d2ae10b..f3563179290b 100644
1688 +--- a/arch/x86/events/perf_event.h
1689 ++++ b/arch/x86/events/perf_event.h
1690 +@@ -548,7 +548,7 @@ struct x86_pmu {
1691 + struct x86_pmu_quirk *quirks;
1692 + int perfctr_second_write;
1693 + bool late_ack;
1694 +- unsigned (*limit_period)(struct perf_event *event, unsigned l);
1695 ++ u64 (*limit_period)(struct perf_event *event, u64 l);
1696 +
1697 + /*
1698 + * sysfs attrs
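
The prototype change above from unsigned to u64 matters because sample periods
are 64-bit quantities: on LP64, `unsigned` is 32 bits, so a large period is
silently truncated on its way through the callback. A small demonstration
(standalone C, illustrative names):

#include <stdio.h>
#include <stdint.h>

static unsigned limit_period_32(unsigned left)  { return left; }
static uint64_t limit_period_64(uint64_t left)  { return left; }

int main(void)
{
	uint64_t left = 0x100000001ULL;  /* a period larger than UINT_MAX */

	/* the implicit conversion truncates: prints 1 */
	printf("unsigned: %llu\n",
	       (unsigned long long)limit_period_32(left));
	/* the u64 callback preserves the value: prints 4294967297 */
	printf("u64:      %llu\n",
	       (unsigned long long)limit_period_64(left));
	return 0;
}
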
1699 +diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
1700 +index 39bcefc20de7..bb078786a323 100644
1701 +--- a/arch/x86/include/asm/i8259.h
1702 ++++ b/arch/x86/include/asm/i8259.h
1703 +@@ -68,6 +68,11 @@ struct legacy_pic {
1704 + extern struct legacy_pic *legacy_pic;
1705 + extern struct legacy_pic null_legacy_pic;
1706 +
1707 ++static inline bool has_legacy_pic(void)
1708 ++{
1709 ++ return legacy_pic != &null_legacy_pic;
1710 ++}
1711 ++
1712 + static inline int nr_legacy_irqs(void)
1713 + {
1714 + return legacy_pic->nr_legacy_irqs;
1715 +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
1716 +index c6583efdbdaf..76cf21f887bd 100644
1717 +--- a/arch/x86/kernel/apic/apic.c
1718 ++++ b/arch/x86/kernel/apic/apic.c
1719 +@@ -1403,7 +1403,7 @@ void setup_local_APIC(void)
1720 + * TODO: set up through-local-APIC from through-I/O-APIC? --macro
1721 + */
1722 + value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
1723 +- if (!cpu && (pic_mode || !value)) {
1724 ++ if (!cpu && (pic_mode || !value || skip_ioapic_setup)) {
1725 + value = APIC_DM_EXTINT;
1726 + apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
1727 + } else {
1728 +diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
1729 +index 3fe45f84ced4..7a07b15b451c 100644
1730 +--- a/arch/x86/kernel/devicetree.c
1731 ++++ b/arch/x86/kernel/devicetree.c
1732 +@@ -11,6 +11,7 @@
1733 + #include <linux/of_address.h>
1734 + #include <linux/of_platform.h>
1735 + #include <linux/of_irq.h>
1736 ++#include <linux/libfdt.h>
1737 + #include <linux/slab.h>
1738 + #include <linux/pci.h>
1739 + #include <linux/of_pci.h>
1740 +@@ -199,19 +200,22 @@ static struct of_ioapic_type of_ioapic_type[] =
1741 + static int dt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
1742 + unsigned int nr_irqs, void *arg)
1743 + {
1744 +- struct of_phandle_args *irq_data = (void *)arg;
1745 ++ struct irq_fwspec *fwspec = (struct irq_fwspec *)arg;
1746 + struct of_ioapic_type *it;
1747 + struct irq_alloc_info tmp;
1748 ++ int type_index;
1749 +
1750 +- if (WARN_ON(irq_data->args_count < 2))
1751 ++ if (WARN_ON(fwspec->param_count < 2))
1752 + return -EINVAL;
1753 +- if (irq_data->args[1] >= ARRAY_SIZE(of_ioapic_type))
1754 ++
1755 ++ type_index = fwspec->param[1];
1756 ++ if (type_index >= ARRAY_SIZE(of_ioapic_type))
1757 + return -EINVAL;
1758 +
1759 +- it = &of_ioapic_type[irq_data->args[1]];
1760 ++ it = &of_ioapic_type[type_index];
1761 + ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->trigger, it->polarity);
1762 + tmp.ioapic_id = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain));
1763 +- tmp.ioapic_pin = irq_data->args[0];
1764 ++ tmp.ioapic_pin = fwspec->param[0];
1765 +
1766 + return mp_irqdomain_alloc(domain, virq, nr_irqs, &tmp);
1767 + }
1768 +@@ -276,14 +280,15 @@ static void __init x86_flattree_get_config(void)
1769 +
1770 + map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), (u64)128);
1771 +
1772 +- initial_boot_params = dt = early_memremap(initial_dtb, map_len);
1773 +- size = of_get_flat_dt_size();
1774 ++ dt = early_memremap(initial_dtb, map_len);
1775 ++ size = fdt_totalsize(dt);
1776 + if (map_len < size) {
1777 + early_memunmap(dt, map_len);
1778 +- initial_boot_params = dt = early_memremap(initial_dtb, size);
1779 ++ dt = early_memremap(initial_dtb, size);
1780 + map_len = size;
1781 + }
1782 +
1783 ++ early_init_dt_verify(dt);
1784 + unflatten_and_copy_device_tree();
1785 + early_memunmap(dt, map_len);
1786 + }
1787 +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
1788 +index cb945146b7c8..10b22fc6ef5a 100644
1789 +--- a/arch/x86/kernel/smpboot.c
1790 ++++ b/arch/x86/kernel/smpboot.c
1791 +@@ -1497,6 +1497,7 @@ static void remove_siblinginfo(int cpu)
1792 + cpumask_clear(topology_core_cpumask(cpu));
1793 + c->phys_proc_id = 0;
1794 + c->cpu_core_id = 0;
1795 ++ c->booted_cores = 0;
1796 + cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
1797 + recompute_smt_state();
1798 + }
1799 +diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
1800 +index da6a287a11e4..769c370011d6 100644
1801 +--- a/arch/x86/kernel/tsc.c
1802 ++++ b/arch/x86/kernel/tsc.c
1803 +@@ -24,6 +24,7 @@
1804 + #include <asm/geode.h>
1805 + #include <asm/apic.h>
1806 + #include <asm/intel-family.h>
1807 ++#include <asm/i8259.h>
1808 +
1809 + unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
1810 + EXPORT_SYMBOL(cpu_khz);
1811 +@@ -456,6 +457,20 @@ static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
1812 + unsigned long tscmin, tscmax;
1813 + int pitcnt;
1814 +
1815 ++ if (!has_legacy_pic()) {
1816 ++ /*
1817 ++ * Relies on tsc_early_delay_calibrate() to have given us a
1818 ++ * semi-usable udelay(); wait for the same 50ms we would have
1819 ++ * spent in the PIT loop below.
1820 ++ */
1821 ++ udelay(10 * USEC_PER_MSEC);
1822 ++ udelay(10 * USEC_PER_MSEC);
1823 ++ udelay(10 * USEC_PER_MSEC);
1824 ++ udelay(10 * USEC_PER_MSEC);
1825 ++ udelay(10 * USEC_PER_MSEC);
1826 ++ return ULONG_MAX;
1827 ++ }
1828 ++
1829 + /* Set the Gate high, disable speaker */
1830 + outb((inb(0x61) & ~0x02) | 0x01, 0x61);
1831 +
1832 +@@ -580,6 +595,9 @@ static unsigned long quick_pit_calibrate(void)
1833 + u64 tsc, delta;
1834 + unsigned long d1, d2;
1835 +
1836 ++ if (!has_legacy_pic())
1837 ++ return 0;
1838 ++
1839 + /* Set the Gate high, disable speaker */
1840 + outb((inb(0x61) & ~0x02) | 0x01, 0x61);
1841 +
1842 +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
1843 +index a69f18d4676c..7e5119c1d15c 100644
1844 +--- a/arch/x86/kvm/cpuid.c
1845 ++++ b/arch/x86/kvm/cpuid.c
1846 +@@ -382,7 +382,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1847 +
1848 + /* cpuid 7.0.edx*/
1849 + const u32 kvm_cpuid_7_0_edx_x86_features =
1850 +- F(SPEC_CTRL) | F(SSBD) | F(ARCH_CAPABILITIES);
1851 ++ F(SPEC_CTRL) | F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);
1852 +
1853 + /* all calls to cpuid_count() should be made on the same cpu */
1854 + get_cpu();
1855 +@@ -468,6 +468,11 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1856 + entry->ecx &= ~F(PKU);
1857 + entry->edx &= kvm_cpuid_7_0_edx_x86_features;
1858 + cpuid_mask(&entry->edx, CPUID_7_EDX);
1859 ++ /*
1860 ++ * We emulate ARCH_CAPABILITIES in software even
1861 ++ * if the host doesn't support it.
1862 ++ */
1863 ++ entry->edx |= F(ARCH_CAPABILITIES);
1864 + } else {
1865 + entry->ebx = 0;
1866 + entry->ecx = 0;
1867 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
1868 +index 5c3d416fff17..a8a86be8cf15 100644
1869 +--- a/arch/x86/kvm/lapic.c
1870 ++++ b/arch/x86/kvm/lapic.c
1871 +@@ -299,8 +299,16 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu)
1872 + if (!lapic_in_kernel(vcpu))
1873 + return;
1874 +
1875 ++ /*
1876 ++ * KVM emulates the 82093AA datasheet (with the in-kernel IOAPIC
1877 ++ * implementation), which has no EOI register. Some buggy OSes
1878 ++ * (e.g. Windows with the Hyper-V role) disable EOI broadcast in
1879 ++ * the lapic without first checking the IOAPIC version, so
1880 ++ * level-triggered interrupts never get EOIed in the IOAPIC.
1881 ++ */
1882 + feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
1883 +- if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
1884 ++ if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))) &&
1885 ++ !ioapic_in_kernel(vcpu->kvm))
1886 + v |= APIC_LVR_DIRECTED_EOI;
1887 + kvm_lapic_set_reg(apic, APIC_LVR, v);
1888 + }
1889 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1890 +index d92523afb425..2827a9622d97 100644
1891 +--- a/arch/x86/kvm/vmx.c
1892 ++++ b/arch/x86/kvm/vmx.c
1893 +@@ -2558,6 +2558,8 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
1894 + return;
1895 + }
1896 +
1897 ++ WARN_ON_ONCE(vmx->emulation_required);
1898 ++
1899 + if (kvm_exception_is_soft(nr)) {
1900 + vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
1901 + vmx->vcpu.arch.event_exit_inst_len);
1902 +@@ -6430,12 +6432,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
1903 + goto out;
1904 + }
1905 +
1906 +- if (err != EMULATE_DONE) {
1907 +- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1908 +- vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1909 +- vcpu->run->internal.ndata = 0;
1910 +- return 0;
1911 +- }
1912 ++ if (err != EMULATE_DONE)
1913 ++ goto emulation_error;
1914 ++
1915 ++ if (vmx->emulation_required && !vmx->rmode.vm86_active &&
1916 ++ vcpu->arch.exception.pending)
1917 ++ goto emulation_error;
1918 +
1919 + if (vcpu->arch.halt_request) {
1920 + vcpu->arch.halt_request = 0;
1921 +@@ -6451,6 +6453,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
1922 +
1923 + out:
1924 + return ret;
1925 ++
1926 ++emulation_error:
1927 ++ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1928 ++ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1929 ++ vcpu->run->internal.ndata = 0;
1930 ++ return 0;
1931 + }
1932 +
1933 + static int __grow_ple_window(int val)
1934 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1935 +index a0cb85f30c94..4aa265ae8cf7 100644
1936 +--- a/arch/x86/kvm/x86.c
1937 ++++ b/arch/x86/kvm/x86.c
1938 +@@ -4131,13 +4131,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
1939 + mutex_unlock(&kvm->lock);
1940 + break;
1941 + case KVM_XEN_HVM_CONFIG: {
1942 ++ struct kvm_xen_hvm_config xhc;
1943 + r = -EFAULT;
1944 +- if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
1945 +- sizeof(struct kvm_xen_hvm_config)))
1946 ++ if (copy_from_user(&xhc, argp, sizeof(xhc)))
1947 + goto out;
1948 + r = -EINVAL;
1949 +- if (kvm->arch.xen_hvm_config.flags)
1950 ++ if (xhc.flags)
1951 + goto out;
1952 ++ memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc));
1953 + r = 0;
1954 + break;
1955 + }
1956 +@@ -7258,6 +7259,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1957 + {
1958 + struct msr_data apic_base_msr;
1959 + int mmu_reset_needed = 0;
1960 ++ int cpuid_update_needed = 0;
1961 + int pending_vec, max_bits, idx;
1962 + struct desc_ptr dt;
1963 +
1964 +@@ -7289,8 +7291,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1965 + vcpu->arch.cr0 = sregs->cr0;
1966 +
1967 + mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
1968 ++ cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) &
1969 ++ (X86_CR4_OSXSAVE | X86_CR4_PKE));
1970 + kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
1971 +- if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE))
1972 ++ if (cpuid_update_needed)
1973 + kvm_update_cpuid(vcpu);
1974 +
1975 + idx = srcu_read_lock(&vcpu->kvm->srcu);
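
The KVM_XEN_HVM_CONFIG hunk above switches to the copy-into-a-local, validate,
then publish pattern: a failed check can no longer leave partially copied user
data behind in kvm->arch.xen_hvm_config. A userspace model of the same shape
(struct and names are illustrative):

#include <stdio.h>
#include <string.h>

struct hvm_config { unsigned flags; unsigned msr; };

static struct hvm_config live;  /* kvm->arch.xen_hvm_config stand-in */

static int set_config(const struct hvm_config *user)
{
	struct hvm_config xhc;

	memcpy(&xhc, user, sizeof(xhc));   /* copy_from_user() stand-in */
	if (xhc.flags)
		return -1;                 /* reject; `live` untouched */
	memcpy(&live, &xhc, sizeof(xhc));  /* publish only on success */
	return 0;
}

int main(void)
{
	struct hvm_config bad = { .flags = 1, .msr = 42 };

	set_config(&bad);
	printf("live.msr = %u (still unset)\n", live.msr);
	return 0;
}
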
1976 +diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
1977 +index 7df8e3a79dc0..d35d0e4bbf99 100644
1978 +--- a/arch/x86/mm/init_64.c
1979 ++++ b/arch/x86/mm/init_64.c
1980 +@@ -1014,8 +1014,7 @@ void __init mem_init(void)
1981 + after_bootmem = 1;
1982 +
1983 + /* Register memory areas for /proc/kcore */
1984 +- kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
1985 +- PAGE_SIZE, KCORE_OTHER);
1986 ++ kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);
1987 +
1988 + mem_init_print_info(NULL);
1989 + }
1990 +diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
1991 +index 73dcb0e18c1b..dcd671467154 100644
1992 +--- a/arch/x86/mm/pageattr.c
1993 ++++ b/arch/x86/mm/pageattr.c
1994 +@@ -279,9 +279,11 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
1995 +
1996 + /*
1997 + * The .rodata section needs to be read-only. Using the pfn
1998 +- * catches all aliases.
1999 ++ * catches all aliases. This also includes __ro_after_init,
2000 ++ * so do not enforce until kernel_set_to_readonly is true.
2001 + */
2002 +- if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
2003 ++ if (kernel_set_to_readonly &&
2004 ++ within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
2005 + __pa_symbol(__end_rodata) >> PAGE_SHIFT))
2006 + pgprot_val(forbidden) |= _PAGE_RW;
2007 +
2008 +diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
2009 +index b97ef29c940f..a3b63e5a527c 100644
2010 +--- a/arch/x86/mm/pgtable.c
2011 ++++ b/arch/x86/mm/pgtable.c
2012 +@@ -1,5 +1,6 @@
2013 + #include <linux/mm.h>
2014 + #include <linux/gfp.h>
2015 ++#include <linux/hugetlb.h>
2016 + #include <asm/pgalloc.h>
2017 + #include <asm/pgtable.h>
2018 + #include <asm/tlb.h>
2019 +@@ -577,6 +578,10 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
2020 + (mtrr != MTRR_TYPE_WRBACK))
2021 + return 0;
2022 +
2023 ++ /* Bail out if we are on a populated non-leaf entry: */
2024 ++ if (pud_present(*pud) && !pud_huge(*pud))
2025 ++ return 0;
2026 ++
2027 + prot = pgprot_4k_2_large(prot);
2028 +
2029 + set_pte((pte_t *)pud, pfn_pte(
2030 +@@ -605,6 +610,10 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
2031 + return 0;
2032 + }
2033 +
2034 ++ /* Bail out if we are on a populated non-leaf entry: */
2035 ++ if (pmd_present(*pmd) && !pmd_huge(*pmd))
2036 ++ return 0;
2037 ++
2038 + prot = pgprot_4k_2_large(prot);
2039 +
2040 + set_pte((pte_t *)pmd, pfn_pte(
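
pud_set_huge()/pmd_set_huge() now refuse to overwrite an entry that is present
but not a leaf, because doing so would orphan the page table that entry still
points to. A userspace analogy of the guard (illustrative types, not the
kernel's):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { bool present; bool leaf; void *child; };

static bool set_huge(struct entry *e)
{
	/* present but not a leaf: overwriting would orphan e->child */
	if (e->present && !e->leaf)
		return false;

	e->present = true;
	e->leaf = true;   /* becomes a large-page style mapping */
	return true;
}

int main(void)
{
	struct entry e = { .present = true, .leaf = false,
			   .child = malloc(16) };

	printf("set_huge: %s\n", set_huge(&e) ? "ok" : "refused");
	free(e.child);
	return 0;
}
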
2041 +diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
2042 +index 9f14bd34581d..74b516cb39df 100644
2043 +--- a/arch/x86/power/hibernate_32.c
2044 ++++ b/arch/x86/power/hibernate_32.c
2045 +@@ -142,7 +142,7 @@ static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
2046 + #endif
2047 + }
2048 +
2049 +-int swsusp_arch_resume(void)
2050 ++asmlinkage int swsusp_arch_resume(void)
2051 + {
2052 + int error;
2053 +
2054 +diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
2055 +index 9634557a5444..0cb1dd461529 100644
2056 +--- a/arch/x86/power/hibernate_64.c
2057 ++++ b/arch/x86/power/hibernate_64.c
2058 +@@ -149,7 +149,7 @@ static int relocate_restore_code(void)
2059 + return 0;
2060 + }
2061 +
2062 +-int swsusp_arch_resume(void)
2063 ++asmlinkage int swsusp_arch_resume(void)
2064 + {
2065 + int error;
2066 +
2067 +diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
2068 +index f6a009d88a33..52e5ea3b8e40 100644
2069 +--- a/crypto/asymmetric_keys/pkcs7_trust.c
2070 ++++ b/crypto/asymmetric_keys/pkcs7_trust.c
2071 +@@ -106,6 +106,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
2072 + pr_devel("sinfo %u: Direct signer is key %x\n",
2073 + sinfo->index, key_serial(key));
2074 + x509 = NULL;
2075 ++ sig = sinfo->sig;
2076 + goto matched;
2077 + }
2078 + if (PTR_ERR(key) != -ENOKEY)
2079 +diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
2080 +index eb76a4c10dbf..8ce203f84ec4 100644
2081 +--- a/drivers/acpi/acpi_pad.c
2082 ++++ b/drivers/acpi/acpi_pad.c
2083 +@@ -109,6 +109,7 @@ static void round_robin_cpu(unsigned int tsk_index)
2084 + cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
2085 + if (cpumask_empty(tmp)) {
2086 + mutex_unlock(&round_robin_lock);
2087 ++ free_cpumask_var(tmp);
2088 + return;
2089 + }
2090 + for_each_cpu(cpu, tmp) {
2091 +@@ -126,6 +127,8 @@ static void round_robin_cpu(unsigned int tsk_index)
2092 + mutex_unlock(&round_robin_lock);
2093 +
2094 + set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
2095 ++
2096 ++ free_cpumask_var(tmp);
2097 + }
2098 +
2099 + static void exit_round_robin(unsigned int tsk_index)
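
The round_robin_cpu() fix above is the classic every-exit-path-must-free rule:
the function allocates a cpumask up front, and the early "no eligible CPU"
return previously leaked it on each call. The same fix is often written with a
single exit label instead of freeing at every return (standalone sketch,
illustrative names):

#include <stdio.h>
#include <stdlib.h>

static int round_robin_step(int nothing_eligible)
{
	char *tmp = malloc(64);  /* alloc_cpumask_var() stand-in */
	int ret = 0;

	if (!tmp)
		return -1;

	if (nothing_eligible) {  /* the early exit that used to leak */
		ret = -2;
		goto out;
	}
	/* ... pick and record the preferred cpu ... */
out:
	free(tmp);               /* free_cpumask_var() on every path */
	return ret;
}

int main(void)
{
	printf("ret = %d\n", round_robin_step(1));
	return 0;
}
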
2100 +diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
2101 +index 80fc0b9b11e5..f362841881e6 100644
2102 +--- a/drivers/acpi/acpica/evevent.c
2103 ++++ b/drivers/acpi/acpica/evevent.c
2104 +@@ -204,6 +204,7 @@ u32 acpi_ev_fixed_event_detect(void)
2105 + u32 fixed_status;
2106 + u32 fixed_enable;
2107 + u32 i;
2108 ++ acpi_status status;
2109 +
2110 + ACPI_FUNCTION_NAME(ev_fixed_event_detect);
2111 +
2112 +@@ -211,8 +212,12 @@ u32 acpi_ev_fixed_event_detect(void)
2113 + * Read the fixed feature status and enable registers, as all the cases
2114 + * depend on their values. Ignore errors here.
2115 + */
2116 +- (void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
2117 +- (void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
2118 ++ status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
2119 ++ status |=
2120 ++ acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
2121 ++ if (ACPI_FAILURE(status)) {
2122 ++ return (int_status);
2123 ++ }
2124 +
2125 + ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
2126 + "Fixed Event Block: Enable %08X Status %08X\n",
2127 +diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
2128 +index 5d59cfcef6f4..c5d6701a5ad2 100644
2129 +--- a/drivers/acpi/acpica/nseval.c
2130 ++++ b/drivers/acpi/acpica/nseval.c
2131 +@@ -308,6 +308,14 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
2132 + /* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */
2133 +
2134 + status = AE_OK;
2135 ++ } else if (ACPI_FAILURE(status)) {
2136 ++
2137 ++ /* If return_object exists, delete it */
2138 ++
2139 ++ if (info->return_object) {
2140 ++ acpi_ut_remove_reference(info->return_object);
2141 ++ info->return_object = NULL;
2142 ++ }
2143 + }
2144 +
2145 + ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
2146 +diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
2147 +index bb01dea39fdc..9825780a1cd2 100644
2148 +--- a/drivers/acpi/processor_perflib.c
2149 ++++ b/drivers/acpi/processor_perflib.c
2150 +@@ -161,7 +161,7 @@ int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
2151 + {
2152 + int ret;
2153 +
2154 +- if (ignore_ppc) {
2155 ++ if (ignore_ppc || !pr->performance) {
2156 + /*
2157 + * Only when it is notification event, the _OST object
2158 + * will be evaluated. Otherwise it is skipped.
2159 +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
2160 +index cf725d581cae..145dcf293c6f 100644
2161 +--- a/drivers/acpi/scan.c
2162 ++++ b/drivers/acpi/scan.c
2163 +@@ -1422,6 +1422,8 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
2164 + device_initialize(&device->dev);
2165 + dev_set_uevent_suppress(&device->dev, true);
2166 + acpi_init_coherency(device);
2167 ++ /* Assume there are unmet deps until acpi_device_dep_initialize() runs */
2168 ++ device->dep_unmet = 1;
2169 + }
2170 +
2171 + void acpi_device_add_finalize(struct acpi_device *device)
2172 +@@ -1445,6 +1447,14 @@ static int acpi_add_single_object(struct acpi_device **child,
2173 + }
2174 +
2175 + acpi_init_device_object(device, handle, type, sta);
2176 ++ /*
2177 ++ * For ACPI_BUS_TYPE_DEVICE getting the status is delayed till here so
2178 ++ * that we can call acpi_bus_get_status() and use its quirk handling.
2179 ++ * Note this must be done before the get power-/wakeup_dev-flags calls.
2180 ++ */
2181 ++ if (type == ACPI_BUS_TYPE_DEVICE)
2182 ++ acpi_bus_get_status(device);
2183 ++
2184 + acpi_bus_get_power_flags(device);
2185 + acpi_bus_get_wakeup_device_flags(device);
2186 +
2187 +@@ -1517,9 +1527,11 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
2188 + return -ENODEV;
2189 +
2190 + *type = ACPI_BUS_TYPE_DEVICE;
2191 +- status = acpi_bus_get_status_handle(handle, sta);
2192 +- if (ACPI_FAILURE(status))
2193 +- *sta = 0;
2194 ++ /*
2195 ++ * acpi_add_single_object updates this once we have an acpi_device
2196 ++ * so that acpi_bus_get_status' quirk handling can be used.
2197 ++ */
2198 ++ *sta = 0;
2199 + break;
2200 + case ACPI_TYPE_PROCESSOR:
2201 + *type = ACPI_BUS_TYPE_PROCESSOR;
2202 +@@ -1621,6 +1633,8 @@ static void acpi_device_dep_initialize(struct acpi_device *adev)
2203 + acpi_status status;
2204 + int i;
2205 +
2206 ++ adev->dep_unmet = 0;
2207 ++
2208 + if (!acpi_has_method(adev->handle, "_DEP"))
2209 + return;
2210 +
2211 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
2212 +index 4fe3ec122bf0..0e2c0ac5792d 100644
2213 +--- a/drivers/ata/libata-core.c
2214 ++++ b/drivers/ata/libata-core.c
2215 +@@ -4366,6 +4366,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
2216 + /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
2217 + { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
2218 +
2219 ++ /* Some SanDisk SSDs lock up hard with NCQ enabled. Reported on
2220 ++ SD7SN6S256G and SD8SN8U256G */
2221 ++ { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, },
2222 ++
2223 + /* devices which puke on READ_NATIVE_MAX */
2224 + { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
2225 + { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
2226 +@@ -4426,6 +4430,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
2227 + { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
2228 +
2229 + /* devices that don't properly handle queued TRIM commands */
2230 ++ { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
2231 ++ ATA_HORKAGE_ZERO_AFTER_TRIM, },
2232 + { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
2233 + ATA_HORKAGE_ZERO_AFTER_TRIM, },
2234 + { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
2235 +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
2236 +index 9babbc845750..fb2c00fce8f9 100644
2237 +--- a/drivers/ata/libata-scsi.c
2238 ++++ b/drivers/ata/libata-scsi.c
2239 +@@ -4156,7 +4156,7 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap,
2240 + #ifdef ATA_DEBUG
2241 + struct scsi_device *scsidev = cmd->device;
2242 +
2243 +- DPRINTK("CDB (%u:%d,%d,%d) %9ph\n",
2244 ++ DPRINTK("CDB (%u:%d,%d,%lld) %9ph\n",
2245 + ap->print_id,
2246 + scsidev->channel, scsidev->id, scsidev->lun,
2247 + cmd->cmnd);
2248 +diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
2249 +index a7b0fc7cb468..69c84fddfe8a 100644
2250 +--- a/drivers/base/regmap/regmap.c
2251 ++++ b/drivers/base/regmap/regmap.c
2252 +@@ -98,7 +98,7 @@ bool regmap_cached(struct regmap *map, unsigned int reg)
2253 + int ret;
2254 + unsigned int val;
2255 +
2256 +- if (map->cache == REGCACHE_NONE)
2257 ++ if (map->cache_type == REGCACHE_NONE)
2258 + return false;
2259 +
2260 + if (!map->cache_ops)
2261 +diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
2262 +index 93362362aa55..8474a1b0740f 100644
2263 +--- a/drivers/block/paride/pcd.c
2264 ++++ b/drivers/block/paride/pcd.c
2265 +@@ -230,6 +230,8 @@ static int pcd_block_open(struct block_device *bdev, fmode_t mode)
2266 + struct pcd_unit *cd = bdev->bd_disk->private_data;
2267 + int ret;
2268 +
2269 ++ check_disk_change(bdev);
2270 ++
2271 + mutex_lock(&pcd_mutex);
2272 + ret = cdrom_open(&cd->info, bdev, mode);
2273 + mutex_unlock(&pcd_mutex);
2274 +diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
2275 +index 128ebd439221..07b77fb102a1 100644
2276 +--- a/drivers/cdrom/cdrom.c
2277 ++++ b/drivers/cdrom/cdrom.c
2278 +@@ -1154,9 +1154,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
2279 +
2280 + cd_dbg(CD_OPEN, "entering cdrom_open\n");
2281 +
2282 +- /* open is event synchronization point, check events first */
2283 +- check_disk_change(bdev);
2284 +-
2285 + /* if this was a O_NONBLOCK open and we should honor the flags,
2286 + * do a quick open without drive/disc integrity checks. */
2287 + cdi->use_count++;
2288 +diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
2289 +index 584bc3126403..e2808fefbb78 100644
2290 +--- a/drivers/cdrom/gdrom.c
2291 ++++ b/drivers/cdrom/gdrom.c
2292 +@@ -497,6 +497,9 @@ static struct cdrom_device_ops gdrom_ops = {
2293 + static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
2294 + {
2295 + int ret;
2296 ++
2297 ++ check_disk_change(bdev);
2298 ++
2299 + mutex_lock(&gdrom_mutex);
2300 + ret = cdrom_open(gd.cd_info, bdev, mode);
2301 + mutex_unlock(&gdrom_mutex);
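
The pcd, cdrom and gdrom hunks above all move media-change detection out of
cdrom_open() and into each block_device open method, so it runs before the
driver takes its own locks. The resulting open method has this shape (a
skeleton with hypothetical foo_* names, mirroring the pcd hunk):

static int foo_block_open(struct block_device *bdev, fmode_t mode)
{
	struct foo_unit *cd = bdev->bd_disk->private_data;
	int ret;

	check_disk_change(bdev);      /* revalidate media first */

	mutex_lock(&foo_mutex);
	ret = cdrom_open(&cd->info, bdev, mode);
	mutex_unlock(&foo_mutex);
	return ret;
}
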
2302 +diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
2303 +index 63d84e6f1891..83c695938a2d 100644
2304 +--- a/drivers/char/hw_random/stm32-rng.c
2305 ++++ b/drivers/char/hw_random/stm32-rng.c
2306 +@@ -21,6 +21,7 @@
2307 + #include <linux/of_address.h>
2308 + #include <linux/of_platform.h>
2309 + #include <linux/pm_runtime.h>
2310 ++#include <linux/reset.h>
2311 + #include <linux/slab.h>
2312 +
2313 + #define RNG_CR 0x00
2314 +@@ -46,6 +47,7 @@ struct stm32_rng_private {
2315 + struct hwrng rng;
2316 + void __iomem *base;
2317 + struct clk *clk;
2318 ++ struct reset_control *rst;
2319 + };
2320 +
2321 + static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
2322 +@@ -140,6 +142,13 @@ static int stm32_rng_probe(struct platform_device *ofdev)
2323 + if (IS_ERR(priv->clk))
2324 + return PTR_ERR(priv->clk);
2325 +
2326 ++ priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
2327 ++ if (!IS_ERR(priv->rst)) {
2328 ++ reset_control_assert(priv->rst);
2329 ++ udelay(2);
2330 ++ reset_control_deassert(priv->rst);
2331 ++ }
2332 ++
2333 + dev_set_drvdata(dev, priv);
2334 +
2335 + priv->rng.name = dev_driver_string(dev),
2336 +diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
2337 +index 6e658aa114f1..a70518a4fcec 100644
2338 +--- a/drivers/char/ipmi/ipmi_powernv.c
2339 ++++ b/drivers/char/ipmi/ipmi_powernv.c
2340 +@@ -251,8 +251,9 @@ static int ipmi_powernv_probe(struct platform_device *pdev)
2341 + ipmi->irq = opal_event_request(prop);
2342 + }
2343 +
2344 +- if (request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
2345 +- "opal-ipmi", ipmi)) {
2346 ++ rc = request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
2347 ++ "opal-ipmi", ipmi);
2348 ++ if (rc) {
2349 + dev_warn(dev, "Unable to request irq\n");
2350 + goto err_dispose;
2351 + }
2352 +diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
2353 +index f11c1c7e84c6..121319198478 100644
2354 +--- a/drivers/char/ipmi/ipmi_ssif.c
2355 ++++ b/drivers/char/ipmi/ipmi_ssif.c
2356 +@@ -761,7 +761,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
2357 + ssif_info->ssif_state = SSIF_NORMAL;
2358 + ipmi_ssif_unlock_cond(ssif_info, flags);
2359 + pr_warn(PFX "Error getting flags: %d %d, %x\n",
2360 +- result, len, data[2]);
2361 ++ result, len, (len >= 3) ? data[2] : 0);
2362 + } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
2363 + || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
2364 + /*
2365 +@@ -783,7 +783,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
2366 + if ((result < 0) || (len < 3) || (data[2] != 0)) {
2367 + /* Error clearing flags */
2368 + pr_warn(PFX "Error clearing flags: %d %d, %x\n",
2369 +- result, len, data[2]);
2370 ++ result, len, (len >= 3) ? data[2] : 0);
2371 + } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
2372 + || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) {
2373 + pr_warn(PFX "Invalid response clearing flags: %x %x\n",
2374 +diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
2375 +index 738515b89073..a22c1d704901 100644
2376 +--- a/drivers/clocksource/fsl_ftm_timer.c
2377 ++++ b/drivers/clocksource/fsl_ftm_timer.c
2378 +@@ -281,7 +281,7 @@ static int __init __ftm_clk_init(struct device_node *np, char *cnt_name,
2379 +
2380 + static unsigned long __init ftm_clk_init(struct device_node *np)
2381 + {
2382 +- unsigned long freq;
2383 ++ long freq;
2384 +
2385 + freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt");
2386 + if (freq <= 0)
2387 +diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
2388 +index 4852d9efe74e..9f09752169ea 100644
2389 +--- a/drivers/cpufreq/cppc_cpufreq.c
2390 ++++ b/drivers/cpufreq/cppc_cpufreq.c
2391 +@@ -151,9 +151,19 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
2392 + policy->cpuinfo.transition_latency = cppc_get_transition_latency(cpu_num);
2393 + policy->shared_type = cpu->shared_type;
2394 +
2395 +- if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
2396 ++ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
2397 ++ int i;
2398 ++
2399 + cpumask_copy(policy->cpus, cpu->shared_cpu_map);
2400 +- else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
2401 ++
2402 ++ for_each_cpu(i, policy->cpus) {
2403 ++ if (unlikely(i == policy->cpu))
2404 ++ continue;
2405 ++
2406 ++ memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps,
2407 ++ sizeof(cpu->perf_caps));
2408 ++ }
2409 ++ } else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
2410 + /* Support only SW_ANY for now. */
2411 + pr_debug("Unsupported CPU co-ord type\n");
2412 + return -EFAULT;
2413 +@@ -218,8 +228,13 @@ static int __init cppc_cpufreq_init(void)
2414 + return ret;
2415 +
2416 + out:
2417 +- for_each_possible_cpu(i)
2418 +- kfree(all_cpu_data[i]);
2419 ++ for_each_possible_cpu(i) {
2420 ++ cpu = all_cpu_data[i];
2421 ++ if (!cpu)
2422 ++ break;
2423 ++ free_cpumask_var(cpu->shared_cpu_map);
2424 ++ kfree(cpu);
2425 ++ }
2426 +
2427 + kfree(all_cpu_data);
2428 + return -ENODEV;
2429 +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
2430 +index 35e34c0e0429..7523929becdc 100644
2431 +--- a/drivers/cpufreq/cpufreq.c
2432 ++++ b/drivers/cpufreq/cpufreq.c
2433 +@@ -1288,14 +1288,14 @@ static int cpufreq_online(unsigned int cpu)
2434 + return 0;
2435 +
2436 + out_exit_policy:
2437 ++ for_each_cpu(j, policy->real_cpus)
2438 ++ remove_cpu_dev_symlink(policy, get_cpu_device(j));
2439 ++
2440 + up_write(&policy->rwsem);
2441 +
2442 + if (cpufreq_driver->exit)
2443 + cpufreq_driver->exit(policy);
2444 +
2445 +- for_each_cpu(j, policy->real_cpus)
2446 +- remove_cpu_dev_symlink(policy, get_cpu_device(j));
2447 +-
2448 + out_free_policy:
2449 + cpufreq_policy_free(policy, !new_policy);
2450 + return ret;
2451 +diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
2452 +index f3e211f8f6c5..71866646ffef 100644
2453 +--- a/drivers/dma/mv_xor_v2.c
2454 ++++ b/drivers/dma/mv_xor_v2.c
2455 +@@ -152,6 +152,7 @@ struct mv_xor_v2_device {
2456 + void __iomem *dma_base;
2457 + void __iomem *glob_base;
2458 + struct clk *clk;
2459 ++ struct clk *reg_clk;
2460 + struct tasklet_struct irq_tasklet;
2461 + struct list_head free_sw_desc;
2462 + struct dma_device dmadev;
2463 +@@ -697,13 +698,26 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
2464 + if (ret)
2465 + return ret;
2466 +
2467 ++ xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
2468 ++ if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) {
2469 ++ if (!IS_ERR(xor_dev->reg_clk)) {
2470 ++ ret = clk_prepare_enable(xor_dev->reg_clk);
2471 ++ if (ret)
2472 ++ return ret;
2473 ++ } else {
2474 ++ return PTR_ERR(xor_dev->reg_clk);
2475 ++ }
2476 ++ }
2477 ++
2478 + xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
2479 +- if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
2480 +- return -EPROBE_DEFER;
2481 ++ if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
2482 ++ ret = -EPROBE_DEFER;
2483 ++ goto disable_reg_clk;
2484 ++ }
2485 + if (!IS_ERR(xor_dev->clk)) {
2486 + ret = clk_prepare_enable(xor_dev->clk);
2487 + if (ret)
2488 +- return ret;
2489 ++ goto disable_reg_clk;
2490 + }
2491 +
2492 + ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
2493 +@@ -812,8 +826,9 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
2494 + free_msi_irqs:
2495 + platform_msi_domain_free_irqs(&pdev->dev);
2496 + disable_clk:
2497 +- if (!IS_ERR(xor_dev->clk))
2498 +- clk_disable_unprepare(xor_dev->clk);
2499 ++ clk_disable_unprepare(xor_dev->clk);
2500 ++disable_reg_clk:
2501 ++ clk_disable_unprepare(xor_dev->reg_clk);
2502 + return ret;
2503 + }
2504 +
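
The mv_xor_v2 probe above treats the "reg" clock as optional: -ENOENT from
devm_clk_get() means the clock simply is not described, while anything else
(including -EPROBE_DEFER) is a real error. That idiom, isolated (a sketch; the
helper name is hypothetical, and later kernels grew devm_clk_get_optional() for
exactly this):

static struct clk *get_optional_clk(struct device *dev, const char *id)
{
	struct clk *clk = devm_clk_get(dev, id);

	/* an absent clock is fine for an optional one */
	if (PTR_ERR(clk) == -ENOENT)
		return NULL;

	/* a valid clock, or a real error the caller must propagate */
	return clk;
}
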
2505 +diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
2506 +index fb2e7476d96b..2c449bdacb91 100644
2507 +--- a/drivers/dma/pl330.c
2508 ++++ b/drivers/dma/pl330.c
2509 +@@ -1570,7 +1570,7 @@ static void pl330_dotask(unsigned long data)
2510 + /* Returns 1 if state was updated, 0 otherwise */
2511 + static int pl330_update(struct pl330_dmac *pl330)
2512 + {
2513 +- struct dma_pl330_desc *descdone, *tmp;
2514 ++ struct dma_pl330_desc *descdone;
2515 + unsigned long flags;
2516 + void __iomem *regs;
2517 + u32 val;
2518 +@@ -1648,7 +1648,9 @@ static int pl330_update(struct pl330_dmac *pl330)
2519 + }
2520 +
2521 + /* Now that we are in no hurry, do the callbacks */
2522 +- list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) {
2523 ++ while (!list_empty(&pl330->req_done)) {
2524 ++ descdone = list_first_entry(&pl330->req_done,
2525 ++ struct dma_pl330_desc, rqd);
2526 + list_del(&descdone->rqd);
2527 + spin_unlock_irqrestore(&pl330->lock, flags);
2528 + dma_pl330_rqcb(descdone, PL330_ERR_NONE);
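
The pl330 change above replaces list_for_each_entry_safe() with a
pop-one-at-a-time loop because the lock is dropped around each callback: the
"safe" iterator only guards against the current node being deleted, not against
other CPUs touching the saved next pointer while the lock is released. A
standalone model of the safe shape (plain singly linked list, illustrative
names):

#include <stdio.h>
#include <stdlib.h>

struct req { struct req *next; int id; };

static struct req *head;

static struct req *pop_first(void)  /* list_first_entry + list_del */
{
	struct req *r = head;

	if (r)
		head = r->next;
	return r;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct req *r = malloc(sizeof(*r));

		r->id = i;
		r->next = head;
		head = r;
	}
	while (head) {                 /* while (!list_empty(...)) */
		struct req *r = pop_first();

		/* lock would be dropped here; the list may mutate */
		printf("complete %d\n", r->id);
		free(r);
	}
	return 0;
}
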
2529 +diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
2530 +index 03c4eb3fd314..6497f5283e3b 100644
2531 +--- a/drivers/dma/qcom/bam_dma.c
2532 ++++ b/drivers/dma/qcom/bam_dma.c
2533 +@@ -387,6 +387,7 @@ struct bam_device {
2534 + struct device_dma_parameters dma_parms;
2535 + struct bam_chan *channels;
2536 + u32 num_channels;
2537 ++ u32 num_ees;
2538 +
2539 + /* execution environment ID, from DT */
2540 + u32 ee;
2541 +@@ -1076,15 +1077,19 @@ static int bam_init(struct bam_device *bdev)
2542 + u32 val;
2543 +
2544 + /* read revision and configuration information */
2545 +- val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT;
2546 +- val &= NUM_EES_MASK;
2547 ++ if (!bdev->num_ees) {
2548 ++ val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
2549 ++ bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
2550 ++ }
2551 +
2552 + /* check that configured EE is within range */
2553 +- if (bdev->ee >= val)