Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Wed, 30 May 2018 11:43:04
Message-Id: 1527680570.ab22674da00d00aff48540c57c2979d195b2d87f.mpagano@gentoo
commit: ab22674da00d00aff48540c57c2979d195b2d87f
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 30 11:42:50 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 30 11:42:50 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ab22674d

Linux patches 4.14.45 and 4.14.46

0000_README | 8 +
1044_linux-4.14.45.patch | 16573 +++++++++++++++++++++++++++++++++++++++++++++
1045_linux-4.14.46.patch | 850 +++
3 files changed, 17431 insertions(+)

diff --git a/0000_README b/0000_README
index f2b1b86..63dde0e 100644
--- a/0000_README
+++ b/0000_README
@@ -219,6 +219,14 @@ Patch: 1043_linux-4.14.44.patch
From: http://www.kernel.org
Desc: Linux 4.14.44

+Patch: 1044_linux-4.14.45.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.45
+
+Patch: 1045_linux-4.14.46.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.46
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1044_linux-4.14.45.patch b/1044_linux-4.14.45.patch
new file mode 100644
index 0000000..878e473
--- /dev/null
+++ b/1044_linux-4.14.45.patch
@@ -0,0 +1,16573 @@
+diff --git a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
+index 7eda08eb8a1e..a2b6a8a565a7 100644
+--- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
++++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
+@@ -20,6 +20,7 @@ Required properties :
+ - "allwinner,sun50i-a64-ccu"
+ - "allwinner,sun50i-a64-r-ccu"
+ - "allwinner,sun50i-h5-ccu"
++ - "allwinner,sun50i-h6-ccu"
+ - "nextthing,gr8-ccu"
+
+ - reg: Must contain the registers base address and length
+@@ -31,6 +32,9 @@ Required properties :
+ - #clock-cells : must contain 1
+ - #reset-cells : must contain 1
+
++For the main CCU on H6, one more clock is needed:
++- "iosc": the SoC's internal frequency oscillator
++
+ For the PRCM CCUs on A83T/H3/A64, two more clocks are needed:
+ - "pll-periph": the SoC's peripheral PLL from the main CCU
+ - "iosc": the SoC's internal frequency oscillator
+diff --git a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
+index 217a90eaabe7..9c38bbe7e6d7 100644
+--- a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
++++ b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
+@@ -11,7 +11,11 @@ Required properties:
+ interrupts.
+
+ Optional properties:
+- clocks: Optional reference to the clock used by the XOR engine.
++- clocks: Optional reference to the clocks used by the XOR engine.
++- clock-names: mandatory if there is a second clock, in this case the
++ name must be "core" for the first clock and "reg" for the second
++ one
++
+
+ Example:
+
+diff --git a/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt
+index 47284f85ec80..c3f9826692bc 100644
+--- a/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt
++++ b/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt
+@@ -20,7 +20,8 @@ Required subnode-properties:
+ gpio: cpuclkoutgrp0, udlclkoutgrp0, i2c1grp0, i2c2grp0,
+ i2c3grp0, i2s0grp0, i2s1grp0, i2srefclkgrp0, spi0grp0,
+ spi1grp0, pciedebuggrp0, uart0grp0, uart0grp1, uart1grp0,
+- uart2grp0, uart2grp1, uart3grp0, uart4grp0, uart5grp0
++ uart2grp0, uart2grp1, uart3grp0, uart4grp0, uart5grp0,
++ uart5nocts
+ cpuclkout: cpuclkoutgrp0
+ udlclkout: udlclkoutgrp0
+ i2c1: i2c1grp0
+@@ -37,7 +38,7 @@ Required subnode-properties:
+ uart2: uart2grp0, uart2grp1
+ uart3: uart3grp0
+ uart4: uart4grp0
+- uart5: uart5grp0
++ uart5: uart5grp0, uart5nocts
+ nand: nandgrp0
+ sdio0: sdio0grp0
+ sdio1: sdio1grp0
+diff --git a/Makefile b/Makefile
+index 787cf6605209..f3ea74e7a516 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 44
++SUBLEVEL = 45
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
+index 68dfb3cb7145..02a7c2fa6106 100644
+--- a/arch/alpha/include/asm/xchg.h
++++ b/arch/alpha/include/asm/xchg.h
+@@ -12,6 +12,10 @@
+ * Atomic exchange.
+ * Since it can be used to implement critical sections
+ * it must clobber "memory" (also for interrupts in UP).
++ *
++ * The leading and the trailing memory barriers guarantee that these
++ * operations are fully ordered.
++ *
+ */
+
+ static inline unsigned long
+@@ -19,6 +23,7 @@ ____xchg(_u8, volatile char *m, unsigned long val)
+ {
+ unsigned long ret, tmp, addr64;
+
++ smp_mb();
+ __asm__ __volatile__(
+ " andnot %4,7,%3\n"
+ " insbl %1,%4,%1\n"
+@@ -43,6 +48,7 @@ ____xchg(_u16, volatile short *m, unsigned long val)
+ {
+ unsigned long ret, tmp, addr64;
+
++ smp_mb();
+ __asm__ __volatile__(
+ " andnot %4,7,%3\n"
+ " inswl %1,%4,%1\n"
+@@ -67,6 +73,7 @@ ____xchg(_u32, volatile int *m, unsigned long val)
+ {
+ unsigned long dummy;
+
++ smp_mb();
+ __asm__ __volatile__(
+ "1: ldl_l %0,%4\n"
+ " bis $31,%3,%1\n"
+@@ -87,6 +94,7 @@ ____xchg(_u64, volatile long *m, unsigned long val)
+ {
+ unsigned long dummy;
+
++ smp_mb();
+ __asm__ __volatile__(
+ "1: ldq_l %0,%4\n"
+ " bis $31,%3,%1\n"
+@@ -128,10 +136,12 @@ ____xchg(, volatile void *ptr, unsigned long x, int size)
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ *
+- * The memory barrier should be placed in SMP only when we actually
+- * make the change. If we don't change anything (so if the returned
+- * prev is equal to old) then we aren't acquiring anything new and
+- * we don't need any memory barrier as far I can tell.
++ * The leading and the trailing memory barriers guarantee that these
++ * operations are fully ordered.
++ *
++ * The trailing memory barrier is placed in SMP unconditionally, in
++ * order to guarantee that dependency ordering is preserved when a
++ * dependency is headed by an unsuccessful operation.
+ */
+
+ static inline unsigned long
+@@ -139,6 +149,7 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
+ {
+ unsigned long prev, tmp, cmp, addr64;
+
++ smp_mb();
+ __asm__ __volatile__(
+ " andnot %5,7,%4\n"
+ " insbl %1,%5,%1\n"
+@@ -150,8 +161,8 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%4)\n"
+ " beq %2,3f\n"
+- __ASM__MB
+ "2:\n"
++ __ASM__MB
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+@@ -166,6 +177,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
+ {
+ unsigned long prev, tmp, cmp, addr64;
+
++ smp_mb();
+ __asm__ __volatile__(
+ " andnot %5,7,%4\n"
+ " inswl %1,%5,%1\n"
+@@ -177,8 +189,8 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%4)\n"
+ " beq %2,3f\n"
+- __ASM__MB
+ "2:\n"
++ __ASM__MB
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+@@ -193,6 +205,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
+ {
+ unsigned long prev, cmp;
+
++ smp_mb();
+ __asm__ __volatile__(
+ "1: ldl_l %0,%5\n"
+ " cmpeq %0,%3,%1\n"
+@@ -200,8 +213,8 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
+ " mov %4,%1\n"
+ " stl_c %1,%2\n"
+ " beq %1,3f\n"
+- __ASM__MB
+ "2:\n"
++ __ASM__MB
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+@@ -216,6 +229,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
+ {
+ unsigned long prev, cmp;
+
++ smp_mb();
+ __asm__ __volatile__(
+ "1: ldq_l %0,%5\n"
+ " cmpeq %0,%3,%1\n"
+@@ -223,8 +237,8 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
+ " mov %4,%1\n"
+ " stq_c %1,%2\n"
+ " beq %1,3f\n"
+- __ASM__MB
+ "2:\n"
++ __ASM__MB
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
+index c84e67fdea09..4383313b064a 100644
+--- a/arch/arc/Kconfig
++++ b/arch/arc/Kconfig
+@@ -487,7 +487,6 @@ config ARC_CURR_IN_REG
+
+ config ARC_EMUL_UNALIGNED
+ bool "Emulate unaligned memory access (userspace only)"
+- default N
+ select SYSCTL_ARCH_UNALIGN_NO_WARN
+ select SYSCTL_ARCH_UNALIGN_ALLOW
+ depends on ISA_ARCOMPACT
+diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
+index ea022d47896c..21ec82466d62 100644
+--- a/arch/arc/include/asm/bug.h
++++ b/arch/arc/include/asm/bug.h
+@@ -23,7 +23,8 @@ void die(const char *str, struct pt_regs *regs, unsigned long address);
+
+ #define BUG() do { \
+ pr_warn("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
+- dump_stack(); \
++ barrier_before_unreachable(); \
++ __builtin_trap(); \
+ } while (0)
+
+ #define HAVE_ARCH_BUG
+diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
+index f61a52b01625..5fe84e481654 100644
+--- a/arch/arc/kernel/mcip.c
++++ b/arch/arc/kernel/mcip.c
+@@ -22,10 +22,79 @@ static DEFINE_RAW_SPINLOCK(mcip_lock);
+
+ static char smp_cpuinfo_buf[128];
+
++/*
++ * Set mask to halt GFRC if any online core in SMP cluster is halted.
++ * Only works for ARC HS v3.0+, on earlier versions has no effect.
++ */
++static void mcip_update_gfrc_halt_mask(int cpu)
++{
++ struct bcr_generic gfrc;
++ unsigned long flags;
++ u32 gfrc_halt_mask;
++
++ READ_BCR(ARC_REG_GFRC_BUILD, gfrc);
++
++ /*
++ * CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added in
++ * GFRC 0x3 version.
++ */
++ if (gfrc.ver < 0x3)
++ return;
++
++ raw_spin_lock_irqsave(&mcip_lock, flags);
++
++ __mcip_cmd(CMD_GFRC_READ_CORE, 0);
++ gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
++ gfrc_halt_mask |= BIT(cpu);
++ __mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);
++
++ raw_spin_unlock_irqrestore(&mcip_lock, flags);
++}
++
++static void mcip_update_debug_halt_mask(int cpu)
++{
++ u32 mcip_mask = 0;
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&mcip_lock, flags);
++
++ /*
++ * mcip_mask is same for CMD_DEBUG_SET_SELECT and CMD_DEBUG_SET_MASK
++ * commands. So read it once instead of reading both CMD_DEBUG_READ_MASK
++ * and CMD_DEBUG_READ_SELECT.
++ */
++ __mcip_cmd(CMD_DEBUG_READ_SELECT, 0);
++ mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
++
++ mcip_mask |= BIT(cpu);
++
++ __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask);
++ /*
++ * Parameter specified halt cause:
++ * STATUS32[H]/actionpoint/breakpoint/self-halt
++ * We choose all of them (0xF).
++ */
++ __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask);
++
++ raw_spin_unlock_irqrestore(&mcip_lock, flags);
++}
++
+ static void mcip_setup_per_cpu(int cpu)
+ {
++ struct mcip_bcr mp;
++
++ READ_BCR(ARC_REG_MCIP_BCR, mp);
++
+ smp_ipi_irq_setup(cpu, IPI_IRQ);
+ smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
++
++ /* Update GFRC halt mask as new CPU came online */
++ if (mp.gfrc)
++ mcip_update_gfrc_halt_mask(cpu);
++
++ /* Update MCIP debug mask as new CPU came online */
++ if (mp.dbg)
++ mcip_update_debug_halt_mask(cpu);
+ }
+
+ static void mcip_ipi_send(int cpu)
+@@ -101,11 +170,6 @@ static void mcip_probe_n_setup(void)
+ IS_AVAIL1(mp.gfrc, "GFRC"));
+
+ cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
+-
+- if (mp.dbg) {
+- __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
+- __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
+- }
+ }
+
+ struct plat_smp_ops plat_smp_ops = {
+diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
+index 6df9d94a9537..115eecc0d9a4 100644
+--- a/arch/arc/kernel/smp.c
++++ b/arch/arc/kernel/smp.c
+@@ -24,6 +24,7 @@
+ #include <linux/reboot.h>
+ #include <linux/irqdomain.h>
+ #include <linux/export.h>
++#include <linux/of_fdt.h>
+
+ #include <asm/processor.h>
+ #include <asm/setup.h>
+@@ -47,6 +48,42 @@ void __init smp_prepare_boot_cpu(void)
+ {
+ }
+
++static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask)
++{
++ unsigned long dt_root = of_get_flat_dt_root();
++ const char *buf;
++
++ buf = of_get_flat_dt_prop(dt_root, name, NULL);
++ if (!buf)
++ return -EINVAL;
++
++ if (cpulist_parse(buf, cpumask))
++ return -EINVAL;
++
++ return 0;
++}
++
++/*
++ * Read from DeviceTree and setup cpu possible mask. If there is no
++ * "possible-cpus" property in DeviceTree pretend all [0..NR_CPUS-1] exist.
++ */
++static void __init arc_init_cpu_possible(void)
++{
++ struct cpumask cpumask;
++
++ if (arc_get_cpu_map("possible-cpus", &cpumask)) {
++ pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n",
++ NR_CPUS);
++
++ cpumask_setall(&cpumask);
++ }
++
++ if (!cpumask_test_cpu(0, &cpumask))
++ panic("Master cpu (cpu[0]) is missed in cpu possible mask!");
++
++ init_cpu_possible(&cpumask);
++}
++
+ /*
+ * Called from setup_arch() before calling setup_processor()
+ *
+@@ -58,10 +95,7 @@ void __init smp_prepare_boot_cpu(void)
+ */
+ void __init smp_init_cpus(void)
+ {
+- unsigned int i;
+-
+- for (i = 0; i < NR_CPUS; i++)
+- set_cpu_possible(i, true);
++ arc_init_cpu_possible();
+
+ if (plat_smp_ops.init_early_smp)
+ plat_smp_ops.init_early_smp();
+@@ -70,16 +104,12 @@ void __init smp_init_cpus(void)
+ /* called from init ( ) => process 1 */
+ void __init smp_prepare_cpus(unsigned int max_cpus)
+ {
+- int i;
+-
+ /*
+ * if platform didn't set the present map already, do it now
+ * boot cpu is set to present already by init/main.c
+ */
+- if (num_present_cpus() <= 1) {
+- for (i = 0; i < max_cpus; i++)
+- set_cpu_present(i, true);
+- }
++ if (num_present_cpus() <= 1)
++ init_cpu_present(cpu_possible_mask);
+ }
+
+ void __init smp_cpus_done(unsigned int max_cpus)
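
[Editor's note: for context, the new "possible-cpus" property carries a cpulist string such as "0-1,3". A simplified standalone sketch of the parsing that cpulist_parse() performs above — hypothetical helper; the real parser also handles strides like "0-7:2" and arbitrary-width cpumasks:

    #include <stdio.h>
    #include <stdlib.h>

    /* "0-1,3" -> 0xb; simplified: single word, no stride syntax */
    static unsigned long parse_cpulist(const char *s)
    {
        unsigned long mask = 0;
        while (*s) {
            char *end;
            unsigned long lo = strtoul(s, &end, 10), hi = lo;
            if (end == s)
                break;              /* malformed input */
            if (*end == '-')
                hi = strtoul(end + 1, &end, 10);
            while (lo <= hi)
                mask |= 1UL << lo++;
            s = (*end == ',') ? end + 1 : end;
        }
        return mask;
    }

    int main(void)
    {
        printf("0x%lx\n", parse_cpulist("0-1,3"));   /* prints 0xb */
        return 0;
    }
]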
+diff --git a/arch/arm/boot/dts/at91-tse850-3.dts b/arch/arm/boot/dts/at91-tse850-3.dts
+index 5f29010cdbd8..4ef80a703eda 100644
+--- a/arch/arm/boot/dts/at91-tse850-3.dts
++++ b/arch/arm/boot/dts/at91-tse850-3.dts
+@@ -245,7 +245,7 @@
+ };
+
+ eeprom@50 {
+- compatible = "nxp,24c02", "atmel,24c02";
++ compatible = "nxp,se97b", "atmel,24c02";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+diff --git a/arch/arm/boot/dts/bcm2836.dtsi b/arch/arm/boot/dts/bcm2836.dtsi
+index 61e158003509..168c002f0ca0 100644
+--- a/arch/arm/boot/dts/bcm2836.dtsi
++++ b/arch/arm/boot/dts/bcm2836.dtsi
+@@ -9,7 +9,7 @@
+ <0x40000000 0x40000000 0x00001000>;
+ dma-ranges = <0xc0000000 0x00000000 0x3f000000>;
+
+- local_intc: local_intc {
++ local_intc: local_intc@40000000 {
+ compatible = "brcm,bcm2836-l1-intc";
+ reg = <0x40000000 0x100>;
+ interrupt-controller;
+diff --git a/arch/arm/boot/dts/bcm2837.dtsi b/arch/arm/boot/dts/bcm2837.dtsi
+index bc1cca5cf43c..d5d058a568c3 100644
+--- a/arch/arm/boot/dts/bcm2837.dtsi
++++ b/arch/arm/boot/dts/bcm2837.dtsi
+@@ -8,7 +8,7 @@
+ <0x40000000 0x40000000 0x00001000>;
+ dma-ranges = <0xc0000000 0x00000000 0x3f000000>;
+
+- local_intc: local_intc {
++ local_intc: local_intc@40000000 {
+ compatible = "brcm,bcm2836-l1-intc";
+ reg = <0x40000000 0x100>;
+ interrupt-controller;
+diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
+index 013431e3d7c3..4745e3c7806b 100644
+--- a/arch/arm/boot/dts/bcm283x.dtsi
++++ b/arch/arm/boot/dts/bcm283x.dtsi
+@@ -251,7 +251,7 @@
+
+ jtag_gpio4: jtag_gpio4 {
+ brcm,pins = <4 5 6 12 13>;
+- brcm,function = <BCM2835_FSEL_ALT4>;
++ brcm,function = <BCM2835_FSEL_ALT5>;
+ };
+ jtag_gpio22: jtag_gpio22 {
+ brcm,pins = <22 23 24 25 26 27>;
+@@ -396,8 +396,8 @@
+
+ i2s: i2s@7e203000 {
+ compatible = "brcm,bcm2835-i2s";
+- reg = <0x7e203000 0x20>,
+- <0x7e101098 0x02>;
++ reg = <0x7e203000 0x24>;
++ clocks = <&clocks BCM2835_CLOCK_PCM>;
+
+ dmas = <&dma 2>,
+ <&dma 3>;
+diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
+index 6a44b8021702..f0e2008f7490 100644
+--- a/arch/arm/boot/dts/bcm958625hr.dts
++++ b/arch/arm/boot/dts/bcm958625hr.dts
+@@ -49,7 +49,7 @@
+
+ memory {
+ device_type = "memory";
+- reg = <0x60000000 0x80000000>;
++ reg = <0x60000000 0x20000000>;
+ };
+
+ gpio-restart {
+diff --git a/arch/arm/boot/dts/dra71-evm.dts b/arch/arm/boot/dts/dra71-evm.dts
+index 41c9132eb550..64363f75c01a 100644
+--- a/arch/arm/boot/dts/dra71-evm.dts
++++ b/arch/arm/boot/dts/dra71-evm.dts
+@@ -24,13 +24,13 @@
+
+ regulator-name = "vddshv8";
+ regulator-min-microvolt = <1800000>;
+- regulator-max-microvolt = <3000000>;
++ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ vin-supply = <&evm_5v0>;
+
+ gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>;
+ states = <1800000 0x0
+- 3000000 0x1>;
++ 3300000 0x1>;
+ };
+
+ evm_1v8_sw: fixedregulator-evm_1v8 {
+diff --git a/arch/arm/boot/dts/imx6dl-icore-rqs.dts b/arch/arm/boot/dts/imx6dl-icore-rqs.dts
+index cf42c2f5cdc7..1281bc39b7ab 100644
+--- a/arch/arm/boot/dts/imx6dl-icore-rqs.dts
++++ b/arch/arm/boot/dts/imx6dl-icore-rqs.dts
+@@ -42,7 +42,7 @@
+
+ /dts-v1/;
+
+-#include "imx6q.dtsi"
++#include "imx6dl.dtsi"
+ #include "imx6qdl-icore-rqs.dtsi"
+
+ / {
+diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
+index ae45af1ad062..3cc1fb9ce441 100644
+--- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
++++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
+@@ -213,37 +213,37 @@
+ &iomuxc {
+ pinctrl_enet1: enet1grp {
+ fsl,pins = <
+- MX7D_PAD_SD2_CD_B__ENET1_MDIO 0x3
+- MX7D_PAD_SD2_WP__ENET1_MDC 0x3
+- MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC 0x1
+- MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0 0x1
+- MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1 0x1
+- MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2 0x1
+- MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3 0x1
+- MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL 0x1
+- MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC 0x1
+- MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x1
+- MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x1
+- MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2 0x1
+- MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3 0x1
+- MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x1
++ MX7D_PAD_SD2_CD_B__ENET1_MDIO 0x30
++ MX7D_PAD_SD2_WP__ENET1_MDC 0x30
++ MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC 0x11
++ MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0 0x11
++ MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1 0x11
++ MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2 0x11
++ MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3 0x11
++ MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL 0x11
++ MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC 0x11
++ MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x11
++ MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x11
++ MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2 0x11
++ MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3 0x11
++ MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x11
+ >;
+ };
+
+ pinctrl_enet2: enet2grp {
+ fsl,pins = <
+- MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC 0x1
+- MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0 0x1
+- MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1 0x1
+- MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2 0x1
+- MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3 0x1
+- MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL 0x1
+- MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC 0x1
+- MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0 0x1
+- MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1 0x1
+- MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2 0x1
+- MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3 0x1
+- MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL 0x1
++ MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC 0x11
++ MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0 0x11
++ MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1 0x11
++ MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2 0x11
++ MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3 0x11
++ MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL 0x11
++ MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC 0x11
++ MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0 0x11
++ MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1 0x11
++ MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2 0x11
++ MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3 0x11
++ MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL 0x11
+ >;
+ };
+
+diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
+index 95da5cb9d37a..b6ebe79261c6 100644
+--- a/arch/arm/boot/dts/r8a7791-porter.dts
++++ b/arch/arm/boot/dts/r8a7791-porter.dts
+@@ -427,7 +427,7 @@
+ "dclkin.0", "dclkin.1";
+
+ ports {
+- port@1 {
++ port@0 {
+ endpoint {
+ remote-endpoint = <&adv7511_in>;
+ };
+diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi
+index 4916c65e0ace..5c0a76493d22 100644
+--- a/arch/arm/boot/dts/rk3036.dtsi
++++ b/arch/arm/boot/dts/rk3036.dtsi
+@@ -261,7 +261,7 @@
+ max-frequency = <37500000>;
+ clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
+ <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
+- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
++ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
+ interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&cru SRST_SDIO>;
+@@ -279,7 +279,7 @@
+ max-frequency = <37500000>;
+ clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
+ <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
+- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
++ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ default-sample-phase = <158>;
+ disable-wp;
+ dmas = <&pdma 12>;
+diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi
+index 06814421eed2..f59f7cc62be6 100644
+--- a/arch/arm/boot/dts/rk322x.dtsi
++++ b/arch/arm/boot/dts/rk322x.dtsi
+@@ -600,7 +600,7 @@
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
+ <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
+- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
++ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>;
+@@ -613,7 +613,7 @@
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
+ <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
+- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
++ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdio_clk &sdio_cmd &sdio_bus4>;
+@@ -628,7 +628,7 @@
+ max-frequency = <37500000>;
+ clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
+ <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
+- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
++ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ bus-width = <8>;
+ default-sample-phase = <158>;
+ fifo-depth = <0x100>;
+diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
+index 356ed1e62452..f7a951afd281 100644
+--- a/arch/arm/boot/dts/rk3288.dtsi
++++ b/arch/arm/boot/dts/rk3288.dtsi
+@@ -927,6 +927,7 @@
+ i2s: i2s@ff890000 {
+ compatible = "rockchip,rk3288-i2s", "rockchip,rk3066-i2s";
+ reg = <0x0 0xff890000 0x0 0x10000>;
++ #sound-dai-cells = <0>;
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+@@ -1122,6 +1123,7 @@
+ compatible = "rockchip,rk3288-dw-hdmi";
+ reg = <0x0 0xff980000 0x0 0x20000>;
+ reg-io-width = <4>;
++ #sound-dai-cells = <0>;
+ rockchip,grf = <&grf>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_HDCP>;
+diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
+index 7e24dc8e82d4..8d9f42a422cb 100644
+--- a/arch/arm/boot/dts/socfpga.dtsi
++++ b/arch/arm/boot/dts/socfpga.dtsi
+@@ -827,7 +827,7 @@
+ timer@fffec600 {
+ compatible = "arm,cortex-a9-twd-timer";
+ reg = <0xfffec600 0x100>;
+- interrupts = <1 13 0xf04>;
++ interrupts = <1 13 0xf01>;
+ clocks = <&mpu_periph_clk>;
+ };
+
+diff --git a/arch/arm/include/asm/vdso.h b/arch/arm/include/asm/vdso.h
+index 9c99e817535e..5b85889f82ee 100644
+--- a/arch/arm/include/asm/vdso.h
++++ b/arch/arm/include/asm/vdso.h
+@@ -12,8 +12,6 @@ struct mm_struct;
+
+ void arm_install_vdso(struct mm_struct *mm, unsigned long addr);
+
+-extern char vdso_start, vdso_end;
+-
+ extern unsigned int vdso_total_pages;
+
+ #else /* CONFIG_VDSO */
+diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
+index a4d6dc0f2427..f4dd7f9663c1 100644
+--- a/arch/arm/kernel/vdso.c
++++ b/arch/arm/kernel/vdso.c
+@@ -39,6 +39,8 @@
+
+ static struct page **vdso_text_pagelist;
+
++extern char vdso_start[], vdso_end[];
++
+ /* Total number of pages needed for the data and text portions of the VDSO. */
+ unsigned int vdso_total_pages __ro_after_init;
+
+@@ -197,13 +199,13 @@ static int __init vdso_init(void)
+ unsigned int text_pages;
+ int i;
+
+- if (memcmp(&vdso_start, "\177ELF", 4)) {
++ if (memcmp(vdso_start, "\177ELF", 4)) {
+ pr_err("VDSO is not a valid ELF object!\n");
+ return -ENOEXEC;
+ }
+
+- text_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
+- pr_debug("vdso: %i text pages at base %p\n", text_pages, &vdso_start);
++ text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
++ pr_debug("vdso: %i text pages at base %p\n", text_pages, vdso_start);
+
+ /* Allocate the VDSO text pagelist */
+ vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
+@@ -218,7 +220,7 @@ static int __init vdso_init(void)
+ for (i = 0; i < text_pages; i++) {
+ struct page *page;
+
+- page = virt_to_page(&vdso_start + i * PAGE_SIZE);
++ page = virt_to_page(vdso_start + i * PAGE_SIZE);
+ vdso_text_pagelist[i] = page;
+ }
+
+@@ -229,7 +231,7 @@ static int __init vdso_init(void)
+
+ cntvct_ok = cntvct_functional();
+
+- patch_vdso(&vdso_start);
++ patch_vdso(vdso_start);
+
+ return 0;
+ }
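
[Editor's note: the vdso change is a pure C declaration fix — a linker-script symbol has no storage of its own, and declaring it as an incomplete array lets it be used as a pointer directly instead of through &vdso_start. A runnable stand-in (blob replaces the real linker-provided image):

    #include <stdio.h>

    static char blob[4096];                 /* stand-in for the vdso image */
    static char *const vdso_start = blob;   /* kernel: extern char vdso_start[]; */
    static char *const vdso_end = blob + sizeof(blob);

    int main(void)
    {
        /* Array-style declarations make the pointer arithmetic natural
         * and avoid the &vdso_end - &vdso_start spelling the patch drops. */
        printf("vdso spans %ld bytes\n", (long)(vdso_end - vdso_start));
        return 0;
    }
]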
+diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
+index a3e78074be70..62eb7d668890 100644
+--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
++++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
+@@ -127,8 +127,8 @@ static struct gpiod_lookup_table mmc_gpios_table = {
+ .dev_id = "da830-mmc.0",
+ .table = {
+ /* CD: gpio3_12: gpio60: chip 1 contains gpio range 32-63*/
+- GPIO_LOOKUP("davinci_gpio.1", 28, "cd", GPIO_ACTIVE_LOW),
+- GPIO_LOOKUP("davinci_gpio.1", 29, "wp", GPIO_ACTIVE_LOW),
++ GPIO_LOOKUP("davinci_gpio.0", 28, "cd", GPIO_ACTIVE_LOW),
++ GPIO_LOOKUP("davinci_gpio.0", 29, "wp", GPIO_ACTIVE_LOW),
+ },
+ };
+
+diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
+index 43e3e188f521..fa512413a471 100644
+--- a/arch/arm/mach-omap1/clock.c
++++ b/arch/arm/mach-omap1/clock.c
+@@ -1011,17 +1011,17 @@ static int clk_debugfs_register_one(struct clk *c)
+ return -ENOMEM;
+ c->dent = d;
+
+- d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
++ d = debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount);
+ if (!d) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+- d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
++ d = debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate);
+ if (!d) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+- d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
++ d = debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags);
+ if (!d) {
+ err = -ENOMEM;
+ goto err_out;
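
[Editor's note: the omap1 clock change swaps casts for width-correct debugfs helpers — usecount and flags are u8 and rate is an unsigned long, so exporting them as u32 reads the wrong number of bytes. The hazard in miniature (deliberately broken, illustrative only):

    #include <stdio.h>

    int main(void)
    {
        unsigned char flags = 0x5a;   /* like struct clk's u8 flags field */

        /* The old debugfs_create_x32(..., (u32 *)&c->flags) pattern reads
         * four bytes through a one-byte object: out-of-bounds, and on
         * big-endian machines the value lands in the wrong byte anyway. */
        unsigned int bogus = *(unsigned int *)&flags;  /* undefined behaviour */
        printf("u8 0x%02x misread as u32 0x%08x\n", flags, bogus);
        return 0;
    }
]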
+diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
+index 4bb6751864a5..fc5fb776a710 100644
+--- a/arch/arm/mach-omap2/omap-wakeupgen.c
++++ b/arch/arm/mach-omap2/omap-wakeupgen.c
+@@ -299,8 +299,6 @@ static void irq_save_context(void)
+ if (soc_is_dra7xx())
+ return;
+
+- if (!sar_base)
+- sar_base = omap4_get_sar_ram_base();
+ if (wakeupgen_ops && wakeupgen_ops->save_context)
+ wakeupgen_ops->save_context();
+ }
+@@ -598,6 +596,8 @@ static int __init wakeupgen_init(struct device_node *node,
+ irq_hotplug_init();
+ irq_pm_init();
+
++ sar_base = omap4_get_sar_ram_base();
++
+ return 0;
+ }
+ IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init);
+diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
+index 366158a54fcd..6f68576e5695 100644
+--- a/arch/arm/mach-omap2/pm.c
++++ b/arch/arm/mach-omap2/pm.c
+@@ -186,7 +186,7 @@ static void omap_pm_end(void)
+ cpu_idle_poll_ctrl(false);
+ }
+
+-static void omap_pm_finish(void)
++static void omap_pm_wake(void)
+ {
+ if (soc_is_omap34xx())
+ omap_prcm_irq_complete();
+@@ -196,7 +196,7 @@ static const struct platform_suspend_ops omap_pm_ops = {
+ .begin = omap_pm_begin,
+ .end = omap_pm_end,
+ .enter = omap_pm_enter,
+- .finish = omap_pm_finish,
++ .wake = omap_pm_wake,
+ .valid = suspend_valid_only_mem,
+ };
+
+diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
+index ece09c9461f7..d61fbd7a2840 100644
+--- a/arch/arm/mach-omap2/timer.c
++++ b/arch/arm/mach-omap2/timer.c
+@@ -156,12 +156,6 @@ static struct clock_event_device clockevent_gpt = {
+ .tick_resume = omap2_gp_timer_shutdown,
+ };
+
+-static struct property device_disabled = {
+- .name = "status",
+- .length = sizeof("disabled"),
+- .value = "disabled",
+-};
+-
+ static const struct of_device_id omap_timer_match[] __initconst = {
+ { .compatible = "ti,omap2420-timer", },
+ { .compatible = "ti,omap3430-timer", },
+@@ -203,8 +197,17 @@ static struct device_node * __init omap_get_timer_dt(const struct of_device_id *
+ of_get_property(np, "ti,timer-secure", NULL)))
+ continue;
+
+- if (!of_device_is_compatible(np, "ti,omap-counter32k"))
+- of_add_property(np, &device_disabled);
++ if (!of_device_is_compatible(np, "ti,omap-counter32k")) {
++ struct property *prop;
++
++ prop = kzalloc(sizeof(*prop), GFP_KERNEL);
++ if (!prop)
++ return NULL;
++ prop->name = "status";
++ prop->value = "disabled";
++ prop->length = strlen(prop->value);
++ of_add_property(np, prop);
++ }
+ return np;
+ }
+
+diff --git a/arch/arm/mach-orion5x/Kconfig b/arch/arm/mach-orion5x/Kconfig
+index 2a7bb6ccdcb7..a810f4dd34b1 100644
+--- a/arch/arm/mach-orion5x/Kconfig
++++ b/arch/arm/mach-orion5x/Kconfig
+@@ -58,7 +58,6 @@ config MACH_KUROBOX_PRO
+
+ config MACH_DNS323
+ bool "D-Link DNS-323"
+- select GENERIC_NET_UTILS
+ select I2C_BOARDINFO if I2C
+ help
+ Say 'Y' here if you want your kernel to support the
+@@ -66,7 +65,6 @@ config MACH_DNS323
+
+ config MACH_TS209
+ bool "QNAP TS-109/TS-209"
+- select GENERIC_NET_UTILS
+ help
+ Say 'Y' here if you want your kernel to support the
+ QNAP TS-109/TS-209 platform.
+@@ -101,7 +99,6 @@ config MACH_LINKSTATION_LS_HGL
+
+ config MACH_TS409
+ bool "QNAP TS-409"
+- select GENERIC_NET_UTILS
+ help
+ Say 'Y' here if you want your kernel to support the
+ QNAP TS-409 platform.
+diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
+index cd483bfb5ca8..d13344b2ddcd 100644
+--- a/arch/arm/mach-orion5x/dns323-setup.c
++++ b/arch/arm/mach-orion5x/dns323-setup.c
+@@ -173,10 +173,42 @@ static struct mv643xx_eth_platform_data dns323_eth_data = {
+ .phy_addr = MV643XX_ETH_PHY_ADDR(8),
+ };
+
++/* dns323_parse_hex_*() taken from tsx09-common.c; should a common copy of these
++ * functions be kept somewhere?
++ */
++static int __init dns323_parse_hex_nibble(char n)
++{
++ if (n >= '0' && n <= '9')
++ return n - '0';
++
++ if (n >= 'A' && n <= 'F')
++ return n - 'A' + 10;
++
++ if (n >= 'a' && n <= 'f')
++ return n - 'a' + 10;
++
++ return -1;
++}
++
++static int __init dns323_parse_hex_byte(const char *b)
++{
++ int hi;
++ int lo;
++
++ hi = dns323_parse_hex_nibble(b[0]);
++ lo = dns323_parse_hex_nibble(b[1]);
++
++ if (hi < 0 || lo < 0)
++ return -1;
++
++ return (hi << 4) | lo;
++}
++
+ static int __init dns323_read_mac_addr(void)
+ {
+ u_int8_t addr[6];
+- void __iomem *mac_page;
++ int i;
++ char *mac_page;
+
+ /* MAC address is stored as a regular ol' string in /dev/mtdblock4
+ * (0x007d0000-0x00800000) starting at offset 196480 (0x2ff80).
+@@ -185,8 +217,23 @@ static int __init dns323_read_mac_addr(void)
+ if (!mac_page)
+ return -ENOMEM;
+
+- if (!mac_pton((__force const char *) mac_page, addr))
+- goto error_fail;
++ /* Sanity check the string we're looking at */
++ for (i = 0; i < 5; i++) {
++ if (*(mac_page + (i * 3) + 2) != ':') {
++ goto error_fail;
++ }
++ }
++
++ for (i = 0; i < 6; i++) {
++ int byte;
++
++ byte = dns323_parse_hex_byte(mac_page + (i * 3));
++ if (byte < 0) {
++ goto error_fail;
++ }
++
++ addr[i] = byte;
++ }
+
+ iounmap(mac_page);
+ printk("DNS-323: Found ethernet MAC address: %pM\n", addr);
+diff --git a/arch/arm/mach-orion5x/tsx09-common.c b/arch/arm/mach-orion5x/tsx09-common.c
+index 89774985d380..905d4f2dd0b8 100644
+--- a/arch/arm/mach-orion5x/tsx09-common.c
++++ b/arch/arm/mach-orion5x/tsx09-common.c
+@@ -53,12 +53,53 @@ struct mv643xx_eth_platform_data qnap_tsx09_eth_data = {
+ .phy_addr = MV643XX_ETH_PHY_ADDR(8),
+ };
+
++static int __init qnap_tsx09_parse_hex_nibble(char n)
++{
++ if (n >= '0' && n <= '9')
++ return n - '0';
++
++ if (n >= 'A' && n <= 'F')
++ return n - 'A' + 10;
++
++ if (n >= 'a' && n <= 'f')
++ return n - 'a' + 10;
++
++ return -1;
++}
++
++static int __init qnap_tsx09_parse_hex_byte(const char *b)
++{
++ int hi;
++ int lo;
++
++ hi = qnap_tsx09_parse_hex_nibble(b[0]);
++ lo = qnap_tsx09_parse_hex_nibble(b[1]);
++
++ if (hi < 0 || lo < 0)
++ return -1;
++
++ return (hi << 4) | lo;
++}
++
+ static int __init qnap_tsx09_check_mac_addr(const char *addr_str)
+ {
+ u_int8_t addr[6];
++ int i;
+
+- if (!mac_pton(addr_str, addr))
+- return -1;
++ for (i = 0; i < 6; i++) {
++ int byte;
++
++ /*
++ * Enforce "xx:xx:xx:xx:xx:xx\n" format.
++ */
++ if (addr_str[(i * 3) + 2] != ((i < 5) ? ':' : '\n'))
++ return -1;
++
++ byte = qnap_tsx09_parse_hex_byte(addr_str + (i * 3));
++ if (byte < 0)
++ return -1;
++ addr[i] = byte;
++ }
+
+ printk(KERN_INFO "tsx09: found ethernet mac address %pM\n", addr);
+
+@@ -77,12 +118,12 @@ void __init qnap_tsx09_find_mac_addr(u32 mem_base, u32 size)
+ unsigned long addr;
+
+ for (addr = mem_base; addr < (mem_base + size); addr += 1024) {
+- void __iomem *nor_page;
++ char *nor_page;
+ int ret = 0;
+
+ nor_page = ioremap(addr, 1024);
+ if (nor_page != NULL) {
+- ret = qnap_tsx09_check_mac_addr((__force const char *)nor_page);
++ ret = qnap_tsx09_check_mac_addr(nor_page);
+ iounmap(nor_page);
+ }
+
+diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
+index 7a327bd32521..ebef8aacea83 100644
+--- a/arch/arm/plat-omap/dmtimer.c
++++ b/arch/arm/plat-omap/dmtimer.c
+@@ -890,11 +890,8 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
+ timer->irq = irq->start;
+ timer->pdev = pdev;
+
+- /* Skip pm_runtime_enable for OMAP1 */
+- if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) {
+- pm_runtime_enable(dev);
+- pm_runtime_irq_safe(dev);
+- }
++ pm_runtime_enable(dev);
++ pm_runtime_irq_safe(dev);
+
+ if (!timer->reserved) {
+ ret = pm_runtime_get_sync(dev);
+diff --git a/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi
+index 4220fbdcb24a..ff5c4c47b22b 100644
+--- a/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi
++++ b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi
+@@ -98,7 +98,7 @@
+ clock-output-names = "clk125mhz";
+ };
+
+- pci {
++ pcie@30000000 {
+ compatible = "pci-host-ecam-generic";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+@@ -118,6 +118,7 @@
+ ranges =
+ <0x02000000 0 0x40000000 0 0x40000000 0 0x20000000
+ 0x43000000 0x40 0x00000000 0x40 0x00000000 0x20 0x00000000>;
++ bus-range = <0 0xff>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map =
+ /* addr pin ic icaddr icintr */
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index 887b61c872dd..ab00be277c6f 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -484,8 +484,8 @@
+ blsp2_spi5: spi@075ba000{
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x075ba000 0x600>;
+- interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&gcc GCC_BLSP2_QUP5_SPI_APPS_CLK>,
++ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&gcc GCC_BLSP2_QUP6_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ pinctrl-names = "default", "sleep";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+index d4f80786e7c2..28257724a56e 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+@@ -136,11 +136,12 @@
+ phy-mode = "rgmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmiim1_pins>;
++ snps,force_thresh_dma_mode;
+ snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>;
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 10000 50000>;
+- tx_delay = <0x26>;
+- rx_delay = <0x11>;
++ tx_delay = <0x24>;
++ rx_delay = <0x18>;
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+index 41d61840fb99..d70e409e2b0c 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+@@ -683,7 +683,7 @@
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
+ <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
+- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
++ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
+ status = "disabled";
+ };
+@@ -694,7 +694,7 @@
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
+ <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
+- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
++ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
+ status = "disabled";
+ };
+@@ -705,7 +705,7 @@
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
+ <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
+- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
++ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
+ status = "disabled";
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+index 1070c8264c13..2313aea0e69e 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+@@ -257,7 +257,7 @@
+ max-frequency = <150000000>;
+ clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>,
+ <&cru SCLK_SDIO0_DRV>, <&cru SCLK_SDIO0_SAMPLE>;
+- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
++ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&cru SRST_SDIO0>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+index 199a5118b20d..264a6bb60c53 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+@@ -406,8 +406,9 @@
+ wlan_pd_n: wlan-pd-n {
+ compatible = "regulator-fixed";
+ regulator-name = "wlan_pd_n";
++ pinctrl-names = "default";
++ pinctrl-0 = <&wlan_module_reset_l>;
+
+- /* Note the wlan_module_reset_l pinctrl */
+ enable-active-high;
+ gpio = <&gpio1 11 GPIO_ACTIVE_HIGH>;
+
+@@ -940,12 +941,6 @@ ap_i2c_audio: &i2c8 {
+ pinctrl-0 = <
+ &ap_pwroff /* AP will auto-assert this when in S3 */
+ &clk_32k /* This pin is always 32k on gru boards */
+-
+- /*
+- * We want this driven low ASAP; firmware should help us, but
+- * we can help ourselves too.
+- */
+- &wlan_module_reset_l
+ >;
+
+ pcfg_output_low: pcfg-output-low {
+@@ -1125,12 +1120,7 @@ ap_i2c_audio: &i2c8 {
+ };
+
+ wlan_module_reset_l: wlan-module-reset-l {
+- /*
+- * We want this driven low ASAP (As {Soon,Strongly} As
+- * Possible), to avoid leakage through the powered-down
+- * WiFi.
+- */
+- rockchip,pins = <1 11 RK_FUNC_GPIO &pcfg_output_low>;
++ rockchip,pins = <1 11 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_host_wake_l: bt-host-wake-l {
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
+index 0f873c897d0d..ce592a4c0c4c 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
+@@ -457,7 +457,7 @@
+ assigned-clocks = <&cru SCLK_PCIEPHY_REF>;
+ assigned-clock-parents = <&cru SCLK_PCIEPHY_REF100M>;
+ assigned-clock-rates = <100000000>;
+- ep-gpios = <&gpio3 RK_PB5 GPIO_ACTIVE_HIGH>;
++ ep-gpios = <&gpio2 RK_PA4 GPIO_ACTIVE_HIGH>;
+ num-lanes = <4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_clkreqn_cpm>;
+diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
+index 9ef0797380cb..f9b0b09153e0 100644
+--- a/arch/arm64/include/asm/atomic_lse.h
++++ b/arch/arm64/include/asm/atomic_lse.h
+@@ -117,7 +117,7 @@ static inline void atomic_and(int i, atomic_t *v)
+ /* LSE atomics */
+ " mvn %w[i], %w[i]\n"
+ " stclr %w[i], %[v]")
+- : [i] "+r" (w0), [v] "+Q" (v->counter)
++ : [i] "+&r" (w0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : __LL_SC_CLOBBERS);
+ }
+@@ -135,7 +135,7 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v) \
+ /* LSE atomics */ \
+ " mvn %w[i], %w[i]\n" \
+ " ldclr" #mb " %w[i], %w[i], %[v]") \
+- : [i] "+r" (w0), [v] "+Q" (v->counter) \
++ : [i] "+&r" (w0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+@@ -161,7 +161,7 @@ static inline void atomic_sub(int i, atomic_t *v)
+ /* LSE atomics */
+ " neg %w[i], %w[i]\n"
+ " stadd %w[i], %[v]")
+- : [i] "+r" (w0), [v] "+Q" (v->counter)
++ : [i] "+&r" (w0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : __LL_SC_CLOBBERS);
+ }
+@@ -180,7 +180,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \
+ " neg %w[i], %w[i]\n" \
+ " ldadd" #mb " %w[i], w30, %[v]\n" \
+ " add %w[i], %w[i], w30") \
+- : [i] "+r" (w0), [v] "+Q" (v->counter) \
++ : [i] "+&r" (w0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS , ##cl); \
+ \
+@@ -207,7 +207,7 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
+ /* LSE atomics */ \
+ " neg %w[i], %w[i]\n" \
+ " ldadd" #mb " %w[i], %w[i], %[v]") \
+- : [i] "+r" (w0), [v] "+Q" (v->counter) \
++ : [i] "+&r" (w0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+@@ -314,7 +314,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
+ /* LSE atomics */
+ " mvn %[i], %[i]\n"
+ " stclr %[i], %[v]")
+- : [i] "+r" (x0), [v] "+Q" (v->counter)
++ : [i] "+&r" (x0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : __LL_SC_CLOBBERS);
+ }
+@@ -332,7 +332,7 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
+ /* LSE atomics */ \
+ " mvn %[i], %[i]\n" \
+ " ldclr" #mb " %[i], %[i], %[v]") \
+- : [i] "+r" (x0), [v] "+Q" (v->counter) \
++ : [i] "+&r" (x0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+@@ -358,7 +358,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
+ /* LSE atomics */
+ " neg %[i], %[i]\n"
+ " stadd %[i], %[v]")
+- : [i] "+r" (x0), [v] "+Q" (v->counter)
++ : [i] "+&r" (x0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : __LL_SC_CLOBBERS);
+ }
+@@ -377,7 +377,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
+ " neg %[i], %[i]\n" \
+ " ldadd" #mb " %[i], x30, %[v]\n" \
+ " add %[i], %[i], x30") \
+- : [i] "+r" (x0), [v] "+Q" (v->counter) \
++ : [i] "+&r" (x0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+@@ -404,7 +404,7 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
+ /* LSE atomics */ \
+ " neg %[i], %[i]\n" \
+ " ldadd" #mb " %[i], %[i], %[v]") \
+- : [i] "+r" (x0), [v] "+Q" (v->counter) \
++ : [i] "+&r" (x0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+@@ -435,7 +435,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
+ " sub x30, x30, %[ret]\n"
+ " cbnz x30, 1b\n"
+ "2:")
+- : [ret] "+r" (x0), [v] "+Q" (v->counter)
+- :
++ : [ret] "+&r" (x0), [v] "+Q" (v->counter)
+ :
+ : __LL_SC_CLOBBERS, "cc", "memory");
+
+@@ -516,7 +516,7 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
+ " eor %[old1], %[old1], %[oldval1]\n" \
+ " eor %[old2], %[old2], %[oldval2]\n" \
+ " orr %[old1], %[old1], %[old2]") \
+- : [old1] "+r" (x0), [old2] "+r" (x1), \
++ : [old1] "+&r" (x0), [old2] "+&r" (x1), \
+ [v] "+Q" (*(unsigned long *)ptr) \
+ : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
+ [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
+diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
+index 6ad30776e984..99390755c0c4 100644
+--- a/arch/arm64/include/asm/stacktrace.h
++++ b/arch/arm64/include/asm/stacktrace.h
+@@ -27,7 +27,7 @@ struct stackframe {
+ unsigned long fp;
+ unsigned long pc;
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+- unsigned int graph;
++ int graph;
+ #endif
+ };
+
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 52f15cd896e1..b5a28336c077 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -178,7 +178,7 @@ static int enable_smccc_arch_workaround_1(void *data)
+ case PSCI_CONDUIT_HVC:
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+- if (res.a0)
++ if ((int)res.a0 < 0)
+ return 0;
+ cb = call_hvc_arch_workaround_1;
+ smccc_start = __smccc_workaround_1_hvc_start;
+@@ -188,7 +188,7 @@ static int enable_smccc_arch_workaround_1(void *data)
+ case PSCI_CONDUIT_SMC:
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+- if (res.a0)
++ if ((int)res.a0 < 0)
+ return 0;
+ cb = call_smc_arch_workaround_1;
+ smccc_start = __smccc_workaround_1_smc_start;
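
[Editor's note: the cpu_errata fix matters because SMCCC firmware calls return negative error codes (NOT_SUPPORTED is -1) in an unsigned register, while zero or positive values mean the workaround is available; the old 'if (res.a0)' test rejected both. In plain C:

    #include <stdio.h>

    static void probe(unsigned long a0)
    {
        if (a0)              /* old test: bails on *any* nonzero value,
                                including positive "supported" replies */
            printf("old check drops a0=%ld\n", (long)a0);
        if ((int)a0 < 0)     /* new test: bails only on real errors */
            printf("new check drops a0=%d\n", (int)a0);
    }

    int main(void)
    {
        probe((unsigned long)-1); /* NOT_SUPPORTED: both checks drop it */
        probe(1);                 /* supported: only the old check drops it */
        return 0;
    }
]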
+diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
+index 9eaef51f83ff..1984e739f155 100644
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -914,9 +914,9 @@ static void __armv8pmu_probe_pmu(void *info)
+ int pmuver;
+
+ dfr0 = read_sysreg(id_aa64dfr0_el1);
+- pmuver = cpuid_feature_extract_signed_field(dfr0,
++ pmuver = cpuid_feature_extract_unsigned_field(dfr0,
+ ID_AA64DFR0_PMUVER_SHIFT);
+- if (pmuver < 1)
++ if (pmuver == 0xf || pmuver == 0)
+ return;
+
+ probe->present = true;
+diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
+index 76809ccd309c..d5718a060672 100644
+--- a/arch/arm64/kernel/stacktrace.c
++++ b/arch/arm64/kernel/stacktrace.c
+@@ -59,6 +59,11 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if (tsk->ret_stack &&
+ (frame->pc == (unsigned long)return_to_handler)) {
++ if (WARN_ON_ONCE(frame->graph == -1))
++ return -EINVAL;
++ if (frame->graph < -1)
++ frame->graph += FTRACE_NOTRACE_DEPTH;
++
+ /*
+ * This is a case where function graph tracer has
+ * modified a return address (LR) in a stack frame
+diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
+index a4391280fba9..f258636273c9 100644
+--- a/arch/arm64/kernel/time.c
++++ b/arch/arm64/kernel/time.c
+@@ -52,7 +52,7 @@ unsigned long profile_pc(struct pt_regs *regs)
+ frame.fp = regs->regs[29];
+ frame.pc = regs->pc;
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+- frame.graph = -1; /* no task info */
++ frame.graph = current->curr_ret_stack;
+ #endif
+ do {
+ int ret = unwind_frame(NULL, &frame);
1459 +diff --git a/arch/cris/include/arch-v10/arch/bug.h b/arch/cris/include/arch-v10/arch/bug.h
1460 +index 905afeacfedf..06da9d49152a 100644
1461 +--- a/arch/cris/include/arch-v10/arch/bug.h
1462 ++++ b/arch/cris/include/arch-v10/arch/bug.h
1463 +@@ -44,18 +44,25 @@ struct bug_frame {
1464 + * not be used like this with newer versions of gcc.
1465 + */
1466 + #define BUG() \
1467 ++do { \
1468 + __asm__ __volatile__ ("clear.d [" __stringify(BUG_MAGIC) "]\n\t"\
1469 + "movu.w " __stringify(__LINE__) ",$r0\n\t"\
1470 + "jump 0f\n\t" \
1471 + ".section .rodata\n" \
1472 + "0:\t.string \"" __FILE__ "\"\n\t" \
1473 +- ".previous")
1474 ++ ".previous"); \
1475 ++ unreachable(); \
1476 ++} while (0)
1477 + #endif
1478 +
1479 + #else
1480 +
1481 + /* This just causes an oops. */
1482 +-#define BUG() (*(int *)0 = 0)
1483 ++#define BUG() \
1484 ++do { \
1485 ++ barrier_before_unreachable(); \
1486 ++ __builtin_trap(); \
1487 ++} while (0)
1488 +
1489 + #endif
1490 +
1491 +diff --git a/arch/ia64/include/asm/bug.h b/arch/ia64/include/asm/bug.h
1492 +index bd3eeb8d1cfa..66b37a532765 100644
1493 +--- a/arch/ia64/include/asm/bug.h
1494 ++++ b/arch/ia64/include/asm/bug.h
1495 +@@ -4,7 +4,11 @@
1496 +
1497 + #ifdef CONFIG_BUG
1498 + #define ia64_abort() __builtin_trap()
1499 +-#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0)
1500 ++#define BUG() do { \
1501 ++ printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
1502 ++ barrier_before_unreachable(); \
1503 ++ ia64_abort(); \
1504 ++} while (0)
1505 +
1506 + /* should this BUG be made generic? */
1507 + #define HAVE_ARCH_BUG
1508 +diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
1509 +index 85bba43e7d5d..658a8e06a69b 100644
1510 +--- a/arch/ia64/kernel/err_inject.c
1511 ++++ b/arch/ia64/kernel/err_inject.c
1512 +@@ -142,7 +142,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
1513 + u64 virt_addr=simple_strtoull(buf, NULL, 16);
1514 + int ret;
1515 +
1516 +- ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL);
1517 ++ ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
1518 + if (ret<=0) {
1519 + #ifdef ERR_INJ_DEBUG
1520 + printk("Virtual address %lx is not existing.\n",virt_addr);
1521 +diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c
1522 +index 84938fdbbada..908d58347790 100644
1523 +--- a/arch/m68k/coldfire/device.c
1524 ++++ b/arch/m68k/coldfire/device.c
1525 +@@ -135,7 +135,11 @@ static struct platform_device mcf_fec0 = {
1526 + .id = 0,
1527 + .num_resources = ARRAY_SIZE(mcf_fec0_resources),
1528 + .resource = mcf_fec0_resources,
1529 +- .dev.platform_data = FEC_PDATA,
1530 ++ .dev = {
1531 ++ .dma_mask = &mcf_fec0.dev.coherent_dma_mask,
1532 ++ .coherent_dma_mask = DMA_BIT_MASK(32),
1533 ++ .platform_data = FEC_PDATA,
1534 ++ }
1535 + };
1536 +
1537 + #ifdef MCFFEC_BASE1
1538 +@@ -167,7 +171,11 @@ static struct platform_device mcf_fec1 = {
1539 + .id = 1,
1540 + .num_resources = ARRAY_SIZE(mcf_fec1_resources),
1541 + .resource = mcf_fec1_resources,
1542 +- .dev.platform_data = FEC_PDATA,
1543 ++ .dev = {
1544 ++ .dma_mask = &mcf_fec1.dev.coherent_dma_mask,
1545 ++ .coherent_dma_mask = DMA_BIT_MASK(32),
1546 ++ .platform_data = FEC_PDATA,
1547 ++ }
1548 + };
1549 + #endif /* MCFFEC_BASE1 */
1550 + #endif /* CONFIG_FEC */
1551 +diff --git a/arch/m68k/include/asm/bug.h b/arch/m68k/include/asm/bug.h
1552 +index b7e2bf1ba4a6..275dca1435bf 100644
1553 +--- a/arch/m68k/include/asm/bug.h
1554 ++++ b/arch/m68k/include/asm/bug.h
1555 +@@ -8,16 +8,19 @@
1556 + #ifndef CONFIG_SUN3
1557 + #define BUG() do { \
1558 + pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
1559 ++ barrier_before_unreachable(); \
1560 + __builtin_trap(); \
1561 + } while (0)
1562 + #else
1563 + #define BUG() do { \
1564 + pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
1565 ++ barrier_before_unreachable(); \
1566 + panic("BUG!"); \
1567 + } while (0)
1568 + #endif
1569 + #else
1570 + #define BUG() do { \
1571 ++ barrier_before_unreachable(); \
1572 + __builtin_trap(); \
1573 + } while (0)
1574 + #endif
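
[All of the BUG() changes above share two idioms: the body is wrapped in do { ... } while (0) so the macro expands to exactly one statement, and the compiler is told that control never continues past the trap (barrier_before_unreachable() / unreachable()), so it cannot fold or reorder following code into the trapping path. A minimal user-space sketch of the pattern; barrier_before_unreachable() is a kernel-internal helper, approximated here with an asm clobber:]

	#include <stdio.h>

	/* Stand-in for the kernel's barrier_before_unreachable(): stops the
	 * compiler from treating code after the trap as reachable and
	 * merging or reordering across it. */
	#define barrier_before_unreachable() __asm__ volatile("" ::: "memory")

	#define MY_BUG() \
	do { \
		fprintf(stderr, "bug at %s:%d!\n", __FILE__, __LINE__); \
		barrier_before_unreachable(); \
		__builtin_trap(); \
	} while (0)

	int main(void)
	{
		if (1)
			MY_BUG();	/* do-while(0): safe as an if-branch */
		return 0;
	}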
1575 +diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
1576 +index d99f5242169e..b3aec101a65d 100644
1577 +--- a/arch/mips/cavium-octeon/octeon-irq.c
1578 ++++ b/arch/mips/cavium-octeon/octeon-irq.c
1579 +@@ -2271,7 +2271,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
1580 +
1581 + parent_irq = irq_of_parse_and_map(ciu_node, 0);
1582 + if (!parent_irq) {
1583 +- pr_err("ERROR: Couldn't acquire parent_irq for %s\n.",
1584 ++ pr_err("ERROR: Couldn't acquire parent_irq for %s\n",
1585 + ciu_node->name);
1586 + return -EINVAL;
1587 + }
1588 +@@ -2283,7 +2283,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
1589 +
1590 + addr = of_get_address(ciu_node, 0, NULL, NULL);
1591 + if (!addr) {
1592 +- pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name);
1593 ++ pr_err("ERROR: Couldn't acquire reg(0) %s\n", ciu_node->name);
1594 + return -EINVAL;
1595 + }
1596 + host_data->raw_reg = (u64)phys_to_virt(
1597 +@@ -2291,7 +2291,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
1598 +
1599 + addr = of_get_address(ciu_node, 1, NULL, NULL);
1600 + if (!addr) {
1601 +- pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name);
1602 ++ pr_err("ERROR: Couldn't acquire reg(1) %s\n", ciu_node->name);
1603 + return -EINVAL;
1604 + }
1605 + host_data->en_reg = (u64)phys_to_virt(
1606 +@@ -2299,7 +2299,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
1607 +
1608 + r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
1609 + if (r) {
1610 +- pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.",
1611 ++ pr_err("ERROR: Couldn't read cavium,max-bits from %s\n",
1612 + ciu_node->name);
1613 + return r;
1614 + }
1615 +@@ -2309,7 +2309,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
1616 + &octeon_irq_domain_cib_ops,
1617 + host_data);
1618 + if (!cib_domain) {
1619 +- pr_err("ERROR: Couldn't irq_domain_add_linear()\n.");
1620 ++ pr_err("ERROR: Couldn't irq_domain_add_linear()\n");
1621 + return -ENOMEM;
1622 + }
1623 +
1624 +diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
1625 +index aa3800c82332..d99ca862dae3 100644
1626 +--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
1627 ++++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
1628 +@@ -167,7 +167,7 @@
1629 + #define AR71XX_AHB_DIV_MASK 0x7
1630 +
1631 + #define AR724X_PLL_REG_CPU_CONFIG 0x00
1632 +-#define AR724X_PLL_REG_PCIE_CONFIG 0x18
1633 ++#define AR724X_PLL_REG_PCIE_CONFIG 0x10
1634 +
1635 + #define AR724X_PLL_FB_SHIFT 0
1636 + #define AR724X_PLL_FB_MASK 0x3ff
1637 +diff --git a/arch/mips/include/asm/machine.h b/arch/mips/include/asm/machine.h
1638 +index e0d9b373d415..f83879dadd1e 100644
1639 +--- a/arch/mips/include/asm/machine.h
1640 ++++ b/arch/mips/include/asm/machine.h
1641 +@@ -52,7 +52,7 @@ mips_machine_is_compatible(const struct mips_machine *mach, const void *fdt)
1642 + if (!mach->matches)
1643 + return NULL;
1644 +
1645 +- for (match = mach->matches; match->compatible; match++) {
1646 ++ for (match = mach->matches; match->compatible[0]; match++) {
1647 + if (fdt_node_check_compatible(fdt, 0, match->compatible) == 0)
1648 + return match;
1649 + }
1650 +diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
1651 +index c552c20237d4..006105fb12fe 100644
1652 +--- a/arch/mips/kernel/ptrace.c
1653 ++++ b/arch/mips/kernel/ptrace.c
1654 +@@ -454,7 +454,7 @@ static int fpr_get_msa(struct task_struct *target,
1655 + /*
1656 + * Copy the floating-point context to the supplied NT_PRFPREG buffer.
1657 + * Choose the appropriate helper for general registers, and then copy
1658 +- * the FCSR register separately.
1659 ++ * the FCSR and FIR registers separately.
1660 + */
1661 + static int fpr_get(struct task_struct *target,
1662 + const struct user_regset *regset,
1663 +@@ -462,6 +462,7 @@ static int fpr_get(struct task_struct *target,
1664 + void *kbuf, void __user *ubuf)
1665 + {
1666 + const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
1667 ++ const int fir_pos = fcr31_pos + sizeof(u32);
1668 + int err;
1669 +
1670 + if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
1671 +@@ -474,6 +475,12 @@ static int fpr_get(struct task_struct *target,
1672 + err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1673 + &target->thread.fpu.fcr31,
1674 + fcr31_pos, fcr31_pos + sizeof(u32));
1675 ++ if (err)
1676 ++ return err;
1677 ++
1678 ++ err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1679 ++ &boot_cpu_data.fpu_id,
1680 ++ fir_pos, fir_pos + sizeof(u32));
1681 +
1682 + return err;
1683 + }
1684 +@@ -522,7 +529,8 @@ static int fpr_set_msa(struct task_struct *target,
1685 + /*
1686 + * Copy the supplied NT_PRFPREG buffer to the floating-point context.
1687 + * Choose the appropriate helper for general registers, and then copy
1688 +- * the FCSR register separately.
1689 ++ * the FCSR register separately. Ignore the incoming FIR register
1690 ++ * contents though, as the register is read-only.
1691 + *
1692 + * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
1693 + * which is supposed to have been guaranteed by the kernel before
1694 +@@ -536,6 +544,7 @@ static int fpr_set(struct task_struct *target,
1695 + const void *kbuf, const void __user *ubuf)
1696 + {
1697 + const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
1698 ++ const int fir_pos = fcr31_pos + sizeof(u32);
1699 + u32 fcr31;
1700 + int err;
1701 +
1702 +@@ -563,6 +572,11 @@ static int fpr_set(struct task_struct *target,
1703 + ptrace_setfcr31(target, fcr31);
1704 + }
1705 +
1706 ++ if (count > 0)
1707 ++ err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
1708 ++ fir_pos,
1709 ++ fir_pos + sizeof(u32));
1710 ++
1711 + return err;
1712 + }
1713 +
1714 +@@ -784,7 +798,7 @@ long arch_ptrace(struct task_struct *child, long request,
1715 + fregs = get_fpu_regs(child);
1716 +
1717 + #ifdef CONFIG_32BIT
1718 +- if (test_thread_flag(TIF_32BIT_FPREGS)) {
1719 ++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
1720 + /*
1721 + * The odd registers are actually the high
1722 + * order bits of the values stored in the even
1723 +@@ -873,7 +887,7 @@ long arch_ptrace(struct task_struct *child, long request,
1724 +
1725 + init_fp_ctx(child);
1726 + #ifdef CONFIG_32BIT
1727 +- if (test_thread_flag(TIF_32BIT_FPREGS)) {
1728 ++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
1729 + /*
1730 + * The odd registers are actually the high
1731 + * order bits of the values stored in the even
1732 +diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
1733 +index 40e212d6b26b..4a157d3249ac 100644
1734 +--- a/arch/mips/kernel/ptrace32.c
1735 ++++ b/arch/mips/kernel/ptrace32.c
1736 +@@ -98,7 +98,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1737 + break;
1738 + }
1739 + fregs = get_fpu_regs(child);
1740 +- if (test_thread_flag(TIF_32BIT_FPREGS)) {
1741 ++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
1742 + /*
1743 + * The odd registers are actually the high
1744 + * order bits of the values stored in the even
1745 +@@ -205,7 +205,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1746 + sizeof(child->thread.fpu));
1747 + child->thread.fpu.fcr31 = 0;
1748 + }
1749 +- if (test_thread_flag(TIF_32BIT_FPREGS)) {
1750 ++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
1751 + /*
1752 + * The odd registers are actually the high
1753 + * order bits of the values stored in the even
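
[The recurring test_thread_flag() -> test_tsk_thread_flag() substitution in the two ptrace files matters because these handlers run in the tracer's context: test_thread_flag() inspects current (the process that called ptrace()), while the FP register layout depends on the traced child. A short sketch of the distinction, using the real helpers; TIF_32BIT_FPREGS is the MIPS flag from the hunks above:]

	#include <linux/sched.h>

	static bool child_uses_32bit_fpregs(struct task_struct *child)
	{
		/* test_thread_flag(TIF_32BIT_FPREGS) would test the tracer,
		 * i.e. current; it is the tracee's flag that decides how its
		 * FP registers are laid out. */
		return test_tsk_thread_flag(child, TIF_32BIT_FPREGS);
	}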
1754 +diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
1755 +index 75fdeaa8c62f..9730ba734afe 100644
1756 +--- a/arch/mips/kvm/mips.c
1757 ++++ b/arch/mips/kvm/mips.c
1758 +@@ -45,7 +45,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
1759 + { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
1760 + { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
1761 + { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
1762 +- { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
1763 ++ { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
1764 + { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
1765 + { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
1766 + { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
1767 +diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
1768 +index 6f534b209971..e12dfa48b478 100644
1769 +--- a/arch/mips/mm/c-r4k.c
1770 ++++ b/arch/mips/mm/c-r4k.c
1771 +@@ -851,9 +851,12 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
1772 + /*
1773 + * Either no secondary cache or the available caches don't have the
1774 + * subset property so we have to flush the primary caches
1775 +- * explicitly
1776 ++ * explicitly.
1777 ++ * If we would need IPI to perform an INDEX-type operation, then
1778 ++ * we have to use the HIT-type alternative as IPI cannot be used
1779 ++ * here due to interrupts possibly being disabled.
1780 + */
1781 +- if (size >= dcache_size) {
1782 ++ if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
1783 + r4k_blast_dcache();
1784 + } else {
1785 + R4600_HIT_CACHEOP_WAR_IMPL;
1786 +@@ -890,7 +893,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
1787 + return;
1788 + }
1789 +
1790 +- if (size >= dcache_size) {
1791 ++ if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
1792 + r4k_blast_dcache();
1793 + } else {
1794 + R4600_HIT_CACHEOP_WAR_IMPL;
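
[The two c-r4k.c hunks encode the constraint spelled out in the new comment: an INDEX-type blast of the whole D-cache may need IPIs to reach other cores, and these DMA maintenance paths can run with interrupts disabled, so in that case the HIT-type (by-address) loop must be used even for large buffers. Roughly, with the helpers already used in that file:]

	/* Sketch of the guard added above. */
	if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size)
		r4k_blast_dcache();		/* INDEX: whole cache, cheap */
	else
		blast_dcache_range(addr, addr + size);	/* HIT: by address */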
1795 +diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
1796 +index 651974192c4d..b479926f0167 100644
1797 +--- a/arch/powerpc/boot/Makefile
1798 ++++ b/arch/powerpc/boot/Makefile
1799 +@@ -101,7 +101,8 @@ $(addprefix $(obj)/,$(zlib-y)): \
1800 + libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
1801 + libfdtheader := fdt.h libfdt.h libfdt_internal.h
1802 +
1803 +-$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o): \
1804 ++$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o \
1805 ++ treeboot-akebono.o treeboot-currituck.o treeboot-iss4xx.o): \
1806 + $(addprefix $(obj)/,$(libfdtheader))
1807 +
1808 + src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \
1809 +diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
1810 +index ccf10c2f8899..c3bdd2d8ec90 100644
1811 +--- a/arch/powerpc/include/asm/exception-64s.h
1812 ++++ b/arch/powerpc/include/asm/exception-64s.h
1813 +@@ -69,6 +69,27 @@
1814 + */
1815 + #define EX_R3 EX_DAR
1816 +
1817 ++#define STF_ENTRY_BARRIER_SLOT \
1818 ++ STF_ENTRY_BARRIER_FIXUP_SECTION; \
1819 ++ nop; \
1820 ++ nop; \
1821 ++ nop
1822 ++
1823 ++#define STF_EXIT_BARRIER_SLOT \
1824 ++ STF_EXIT_BARRIER_FIXUP_SECTION; \
1825 ++ nop; \
1826 ++ nop; \
1827 ++ nop; \
1828 ++ nop; \
1829 ++ nop; \
1830 ++ nop
1831 ++
1832 ++/*
1833 ++ * r10 must be free to use, r13 must be paca
1834 ++ */
1835 ++#define INTERRUPT_TO_KERNEL \
1836 ++ STF_ENTRY_BARRIER_SLOT
1837 ++
1838 + /*
1839 + * Macros for annotating the expected destination of (h)rfid
1840 + *
1841 +@@ -85,16 +106,19 @@
1842 + rfid
1843 +
1844 + #define RFI_TO_USER \
1845 ++ STF_EXIT_BARRIER_SLOT; \
1846 + RFI_FLUSH_SLOT; \
1847 + rfid; \
1848 + b rfi_flush_fallback
1849 +
1850 + #define RFI_TO_USER_OR_KERNEL \
1851 ++ STF_EXIT_BARRIER_SLOT; \
1852 + RFI_FLUSH_SLOT; \
1853 + rfid; \
1854 + b rfi_flush_fallback
1855 +
1856 + #define RFI_TO_GUEST \
1857 ++ STF_EXIT_BARRIER_SLOT; \
1858 + RFI_FLUSH_SLOT; \
1859 + rfid; \
1860 + b rfi_flush_fallback
1861 +@@ -103,21 +127,25 @@
1862 + hrfid
1863 +
1864 + #define HRFI_TO_USER \
1865 ++ STF_EXIT_BARRIER_SLOT; \
1866 + RFI_FLUSH_SLOT; \
1867 + hrfid; \
1868 + b hrfi_flush_fallback
1869 +
1870 + #define HRFI_TO_USER_OR_KERNEL \
1871 ++ STF_EXIT_BARRIER_SLOT; \
1872 + RFI_FLUSH_SLOT; \
1873 + hrfid; \
1874 + b hrfi_flush_fallback
1875 +
1876 + #define HRFI_TO_GUEST \
1877 ++ STF_EXIT_BARRIER_SLOT; \
1878 + RFI_FLUSH_SLOT; \
1879 + hrfid; \
1880 + b hrfi_flush_fallback
1881 +
1882 + #define HRFI_TO_UNKNOWN \
1883 ++ STF_EXIT_BARRIER_SLOT; \
1884 + RFI_FLUSH_SLOT; \
1885 + hrfid; \
1886 + b hrfi_flush_fallback
1887 +@@ -249,6 +277,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
1888 + #define __EXCEPTION_PROLOG_1(area, extra, vec) \
1889 + OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \
1890 + OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \
1891 ++ INTERRUPT_TO_KERNEL; \
1892 + SAVE_CTR(r10, area); \
1893 + mfcr r9; \
1894 + extra(vec); \
1895 +diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
1896 +index 1e82eb3caabd..a9b64df34e2a 100644
1897 +--- a/arch/powerpc/include/asm/feature-fixups.h
1898 ++++ b/arch/powerpc/include/asm/feature-fixups.h
1899 +@@ -187,6 +187,22 @@ label##3: \
1900 + FTR_ENTRY_OFFSET label##1b-label##3b; \
1901 + .popsection;
1902 +
1903 ++#define STF_ENTRY_BARRIER_FIXUP_SECTION \
1904 ++953: \
1905 ++ .pushsection __stf_entry_barrier_fixup,"a"; \
1906 ++ .align 2; \
1907 ++954: \
1908 ++ FTR_ENTRY_OFFSET 953b-954b; \
1909 ++ .popsection;
1910 ++
1911 ++#define STF_EXIT_BARRIER_FIXUP_SECTION \
1912 ++955: \
1913 ++ .pushsection __stf_exit_barrier_fixup,"a"; \
1914 ++ .align 2; \
1915 ++956: \
1916 ++ FTR_ENTRY_OFFSET 955b-956b; \
1917 ++ .popsection;
1918 ++
1919 + #define RFI_FLUSH_FIXUP_SECTION \
1920 + 951: \
1921 + .pushsection __rfi_flush_fixup,"a"; \
1922 +@@ -199,6 +215,9 @@ label##3: \
1923 + #ifndef __ASSEMBLY__
1924 + #include <linux/types.h>
1925 +
1926 ++extern long stf_barrier_fallback;
1927 ++extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
1928 ++extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
1929 + extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
1930 +
1931 + void apply_feature_fixups(void);
1932 +diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
1933 +index eca3f9c68907..5a740feb7bd7 100644
1934 +--- a/arch/powerpc/include/asm/hvcall.h
1935 ++++ b/arch/powerpc/include/asm/hvcall.h
1936 +@@ -337,6 +337,9 @@
1937 + #define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2
1938 + #define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3
1939 + #define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4
1940 ++#define H_CPU_CHAR_BRANCH_HINTS_HONORED (1ull << 58) // IBM bit 5
1941 ++#define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6
1942 ++#define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7
1943 +
1944 + #define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
1945 + #define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
1946 +diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
1947 +index c6d3078bd8c3..b8b0be8f1a07 100644
1948 +--- a/arch/powerpc/include/asm/irq_work.h
1949 ++++ b/arch/powerpc/include/asm/irq_work.h
1950 +@@ -6,5 +6,6 @@ static inline bool arch_irq_work_has_interrupt(void)
1951 + {
1952 + return true;
1953 + }
1954 ++extern void arch_irq_work_raise(void);
1955 +
1956 + #endif /* _ASM_POWERPC_IRQ_WORK_H */
1957 +diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
1958 +index b8366df50d19..e6bd59353e40 100644
1959 +--- a/arch/powerpc/include/asm/paca.h
1960 ++++ b/arch/powerpc/include/asm/paca.h
1961 +@@ -238,8 +238,7 @@ struct paca_struct {
1962 + */
1963 + u64 exrfi[EX_SIZE] __aligned(0x80);
1964 + void *rfi_flush_fallback_area;
1965 +- u64 l1d_flush_congruence;
1966 +- u64 l1d_flush_sets;
1967 ++ u64 l1d_flush_size;
1968 + #endif
1969 + };
1970 +
1971 +diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
1972 +new file mode 100644
1973 +index 000000000000..44989b22383c
1974 +--- /dev/null
1975 ++++ b/arch/powerpc/include/asm/security_features.h
1976 +@@ -0,0 +1,85 @@
1977 ++/* SPDX-License-Identifier: GPL-2.0+ */
1978 ++/*
1979 ++ * Security related feature bit definitions.
1980 ++ *
1981 ++ * Copyright 2018, Michael Ellerman, IBM Corporation.
1982 ++ */
1983 ++
1984 ++#ifndef _ASM_POWERPC_SECURITY_FEATURES_H
1985 ++#define _ASM_POWERPC_SECURITY_FEATURES_H
1986 ++
1987 ++
1988 ++extern unsigned long powerpc_security_features;
1989 ++extern bool rfi_flush;
1990 ++
1991 ++/* These are bit flags */
1992 ++enum stf_barrier_type {
1993 ++ STF_BARRIER_NONE = 0x1,
1994 ++ STF_BARRIER_FALLBACK = 0x2,
1995 ++ STF_BARRIER_EIEIO = 0x4,
1996 ++ STF_BARRIER_SYNC_ORI = 0x8,
1997 ++};
1998 ++
1999 ++void setup_stf_barrier(void);
2000 ++void do_stf_barrier_fixups(enum stf_barrier_type types);
2001 ++
2002 ++static inline void security_ftr_set(unsigned long feature)
2003 ++{
2004 ++ powerpc_security_features |= feature;
2005 ++}
2006 ++
2007 ++static inline void security_ftr_clear(unsigned long feature)
2008 ++{
2009 ++ powerpc_security_features &= ~feature;
2010 ++}
2011 ++
2012 ++static inline bool security_ftr_enabled(unsigned long feature)
2013 ++{
2014 ++ return !!(powerpc_security_features & feature);
2015 ++}
2016 ++
2017 ++
2018 ++// Features indicating support for Spectre/Meltdown mitigations
2019 ++
2020 ++// The L1-D cache can be flushed with ori r30,r30,0
2021 ++#define SEC_FTR_L1D_FLUSH_ORI30 0x0000000000000001ull
2022 ++
2023 ++// The L1-D cache can be flushed with mtspr 882,r0 (aka SPRN_TRIG2)
2024 ++#define SEC_FTR_L1D_FLUSH_TRIG2 0x0000000000000002ull
2025 ++
2026 ++// ori r31,r31,0 acts as a speculation barrier
2027 ++#define SEC_FTR_SPEC_BAR_ORI31 0x0000000000000004ull
2028 ++
2029 ++// Speculation past bctr is disabled
2030 ++#define SEC_FTR_BCCTRL_SERIALISED 0x0000000000000008ull
2031 ++
2032 ++// Entries in L1-D are private to a SMT thread
2033 ++#define SEC_FTR_L1D_THREAD_PRIV 0x0000000000000010ull
2034 ++
2035 ++// Indirect branch prediction cache disabled
2036 ++#define SEC_FTR_COUNT_CACHE_DISABLED 0x0000000000000020ull
2037 ++
2038 ++
2039 ++// Features indicating need for Spectre/Meltdown mitigations
2040 ++
2041 ++// The L1-D cache should be flushed on MSR[HV] 1->0 transition (hypervisor to guest)
2042 ++#define SEC_FTR_L1D_FLUSH_HV 0x0000000000000040ull
2043 ++
2044 ++// The L1-D cache should be flushed on MSR[PR] 0->1 transition (kernel to userspace)
2045 ++#define SEC_FTR_L1D_FLUSH_PR 0x0000000000000080ull
2046 ++
2047 ++// A speculation barrier should be used for bounds checks (Spectre variant 1)
2048 ++#define SEC_FTR_BNDS_CHK_SPEC_BAR 0x0000000000000100ull
2049 ++
2050 ++// Firmware configuration indicates user favours security over performance
2051 ++#define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull
2052 ++
2053 ++
2054 ++// Features enabled by default
2055 ++#define SEC_FTR_DEFAULT \
2056 ++ (SEC_FTR_L1D_FLUSH_HV | \
2057 ++ SEC_FTR_L1D_FLUSH_PR | \
2058 ++ SEC_FTR_BNDS_CHK_SPEC_BAR | \
2059 ++ SEC_FTR_FAVOUR_SECURITY)
2060 ++
2061 ++#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
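
[The new header is a simple global bitmask with set/clear/test helpers: platform code translates firmware information (e.g. the H_CPU_CHAR_* / H_CPU_BEHAV_* bits added earlier) into SEC_FTR_* flags, and the mitigation code then keys off them. A sketch of the intended usage, with a hypothetical firmware-probe function:]

	#include <asm/security_features.h>

	/* Hypothetical example of how platform setup code is expected to
	 * use the helpers defined in the new header. */
	static void example_probe_security(bool fw_says_l1d_thread_private)
	{
		if (fw_says_l1d_thread_private)
			security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);
		else
			security_ftr_clear(SEC_FTR_L1D_THREAD_PRIV);

		if (security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY))
			setup_stf_barrier();
	}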
2062 +diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
2063 +index 469b7fdc9be4..bbcdf929be54 100644
2064 +--- a/arch/powerpc/include/asm/setup.h
2065 ++++ b/arch/powerpc/include/asm/setup.h
2066 +@@ -49,7 +49,7 @@ enum l1d_flush_type {
2067 + L1D_FLUSH_MTTRIG = 0x8,
2068 + };
2069 +
2070 +-void __init setup_rfi_flush(enum l1d_flush_type, bool enable);
2071 ++void setup_rfi_flush(enum l1d_flush_type, bool enable);
2072 + void do_rfi_flush_fixups(enum l1d_flush_type types);
2073 +
2074 + #endif /* !__ASSEMBLY__ */
2075 +diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
2076 +index 6c6cce937dd8..1479c61e29c5 100644
2077 +--- a/arch/powerpc/kernel/Makefile
2078 ++++ b/arch/powerpc/kernel/Makefile
2079 +@@ -42,7 +42,7 @@ obj-$(CONFIG_VDSO32) += vdso32/
2080 + obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o
2081 + obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
2082 + obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
2083 +-obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
2084 ++obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o security.o
2085 + obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
2086 + obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
2087 + obj-$(CONFIG_PPC64) += vdso64/
2088 +diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
2089 +index 748cdc4bb89a..2e5ea300258a 100644
2090 +--- a/arch/powerpc/kernel/asm-offsets.c
2091 ++++ b/arch/powerpc/kernel/asm-offsets.c
2092 +@@ -239,8 +239,7 @@ int main(void)
2093 + OFFSET(PACA_IN_NMI, paca_struct, in_nmi);
2094 + OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area);
2095 + OFFSET(PACA_EXRFI, paca_struct, exrfi);
2096 +- OFFSET(PACA_L1D_FLUSH_CONGRUENCE, paca_struct, l1d_flush_congruence);
2097 +- OFFSET(PACA_L1D_FLUSH_SETS, paca_struct, l1d_flush_sets);
2098 ++ OFFSET(PACA_L1D_FLUSH_SIZE, paca_struct, l1d_flush_size);
2099 +
2100 + #endif
2101 + OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
2102 +diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
2103 +index 679bbe714e85..9daede99c131 100644
2104 +--- a/arch/powerpc/kernel/cpu_setup_power.S
2105 ++++ b/arch/powerpc/kernel/cpu_setup_power.S
2106 +@@ -28,6 +28,7 @@ _GLOBAL(__setup_cpu_power7)
2107 + beqlr
2108 + li r0,0
2109 + mtspr SPRN_LPID,r0
2110 ++ mtspr SPRN_PCR,r0
2111 + mfspr r3,SPRN_LPCR
2112 + li r4,(LPCR_LPES1 >> LPCR_LPES_SH)
2113 + bl __init_LPCR_ISA206
2114 +@@ -42,6 +43,7 @@ _GLOBAL(__restore_cpu_power7)
2115 + beqlr
2116 + li r0,0
2117 + mtspr SPRN_LPID,r0
2118 ++ mtspr SPRN_PCR,r0
2119 + mfspr r3,SPRN_LPCR
2120 + li r4,(LPCR_LPES1 >> LPCR_LPES_SH)
2121 + bl __init_LPCR_ISA206
2122 +@@ -59,6 +61,7 @@ _GLOBAL(__setup_cpu_power8)
2123 + beqlr
2124 + li r0,0
2125 + mtspr SPRN_LPID,r0
2126 ++ mtspr SPRN_PCR,r0
2127 + mfspr r3,SPRN_LPCR
2128 + ori r3, r3, LPCR_PECEDH
2129 + li r4,0 /* LPES = 0 */
2130 +@@ -81,6 +84,7 @@ _GLOBAL(__restore_cpu_power8)
2131 + beqlr
2132 + li r0,0
2133 + mtspr SPRN_LPID,r0
2134 ++ mtspr SPRN_PCR,r0
2135 + mfspr r3,SPRN_LPCR
2136 + ori r3, r3, LPCR_PECEDH
2137 + li r4,0 /* LPES = 0 */
2138 +@@ -103,6 +107,7 @@ _GLOBAL(__setup_cpu_power9)
2139 + mtspr SPRN_PSSCR,r0
2140 + mtspr SPRN_LPID,r0
2141 + mtspr SPRN_PID,r0
2142 ++ mtspr SPRN_PCR,r0
2143 + mfspr r3,SPRN_LPCR
2144 + LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
2145 + or r3, r3, r4
2146 +@@ -128,6 +133,7 @@ _GLOBAL(__restore_cpu_power9)
2147 + mtspr SPRN_PSSCR,r0
2148 + mtspr SPRN_LPID,r0
2149 + mtspr SPRN_PID,r0
2150 ++ mtspr SPRN_PCR,r0
2151 + mfspr r3,SPRN_LPCR
2152 + LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
2153 + or r3, r3, r4
2154 +diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
2155 +index f047ae1b6271..2dba206b065a 100644
2156 +--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
2157 ++++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
2158 +@@ -137,6 +137,7 @@ static void __restore_cpu_cpufeatures(void)
2159 + if (hv_mode) {
2160 + mtspr(SPRN_LPID, 0);
2161 + mtspr(SPRN_HFSCR, system_registers.hfscr);
2162 ++ mtspr(SPRN_PCR, 0);
2163 + }
2164 + mtspr(SPRN_FSCR, system_registers.fscr);
2165 +
2166 +diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
2167 +index f9ca4bb3d48e..c09f0a6f8495 100644
2168 +--- a/arch/powerpc/kernel/exceptions-64s.S
2169 ++++ b/arch/powerpc/kernel/exceptions-64s.S
2170 +@@ -825,7 +825,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
2171 + #endif
2172 +
2173 +
2174 +-EXC_REAL_MASKABLE(decrementer, 0x900, 0x80)
2175 ++EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80)
2176 + EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900)
2177 + TRAMP_KVM(PACA_EXGEN, 0x900)
2178 + EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
2179 +@@ -901,6 +901,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
2180 + mtctr r13; \
2181 + GET_PACA(r13); \
2182 + std r10,PACA_EXGEN+EX_R10(r13); \
2183 ++ INTERRUPT_TO_KERNEL; \
2184 + KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \
2185 + HMT_MEDIUM; \
2186 + mfctr r9;
2187 +@@ -909,7 +910,8 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
2188 + #define SYSCALL_KVMTEST \
2189 + HMT_MEDIUM; \
2190 + mr r9,r13; \
2191 +- GET_PACA(r13);
2192 ++ GET_PACA(r13); \
2193 ++ INTERRUPT_TO_KERNEL;
2194 + #endif
2195 +
2196 + #define LOAD_SYSCALL_HANDLER(reg) \
2197 +@@ -1434,45 +1436,56 @@ masked_##_H##interrupt: \
2198 + b .; \
2199 + MASKED_DEC_HANDLER(_H)
2200 +
2201 ++TRAMP_REAL_BEGIN(stf_barrier_fallback)
2202 ++ std r9,PACA_EXRFI+EX_R9(r13)
2203 ++ std r10,PACA_EXRFI+EX_R10(r13)
2204 ++ sync
2205 ++ ld r9,PACA_EXRFI+EX_R9(r13)
2206 ++ ld r10,PACA_EXRFI+EX_R10(r13)
2207 ++ ori 31,31,0
2208 ++ .rept 14
2209 ++ b 1f
2210 ++1:
2211 ++ .endr
2212 ++ blr
2213 ++
2214 + TRAMP_REAL_BEGIN(rfi_flush_fallback)
2215 + SET_SCRATCH0(r13);
2216 + GET_PACA(r13);
2217 + std r9,PACA_EXRFI+EX_R9(r13)
2218 + std r10,PACA_EXRFI+EX_R10(r13)
2219 + std r11,PACA_EXRFI+EX_R11(r13)
2220 +- std r12,PACA_EXRFI+EX_R12(r13)
2221 +- std r8,PACA_EXRFI+EX_R13(r13)
2222 + mfctr r9
2223 + ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
2224 +- ld r11,PACA_L1D_FLUSH_SETS(r13)
2225 +- ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
2226 +- /*
2227 +- * The load adresses are at staggered offsets within cachelines,
2228 +- * which suits some pipelines better (on others it should not
2229 +- * hurt).
2230 +- */
2231 +- addi r12,r12,8
2232 ++ ld r11,PACA_L1D_FLUSH_SIZE(r13)
2233 ++ srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
2234 + mtctr r11
2235 + DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
2236 +
2237 + /* order ld/st prior to dcbt stop all streams with flushing */
2238 + sync
2239 +-1: li r8,0
2240 +- .rept 8 /* 8-way set associative */
2241 +- ldx r11,r10,r8
2242 +- add r8,r8,r12
2243 +- xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not
2244 +- add r8,r8,r11 // Add 0, this creates a dependency on the ldx
2245 +- .endr
2246 +- addi r10,r10,128 /* 128 byte cache line */
2247 ++
2248 ++ /*
2249 ++ * The load addresses are at staggered offsets within cachelines,
2250 ++ * which suits some pipelines better (on others it should not
2251 ++ * hurt).
2252 ++ */
2253 ++1:
2254 ++ ld r11,(0x80 + 8)*0(r10)
2255 ++ ld r11,(0x80 + 8)*1(r10)
2256 ++ ld r11,(0x80 + 8)*2(r10)
2257 ++ ld r11,(0x80 + 8)*3(r10)
2258 ++ ld r11,(0x80 + 8)*4(r10)
2259 ++ ld r11,(0x80 + 8)*5(r10)
2260 ++ ld r11,(0x80 + 8)*6(r10)
2261 ++ ld r11,(0x80 + 8)*7(r10)
2262 ++ addi r10,r10,0x80*8
2263 + bdnz 1b
2264 +
2265 + mtctr r9
2266 + ld r9,PACA_EXRFI+EX_R9(r13)
2267 + ld r10,PACA_EXRFI+EX_R10(r13)
2268 + ld r11,PACA_EXRFI+EX_R11(r13)
2269 +- ld r12,PACA_EXRFI+EX_R12(r13)
2270 +- ld r8,PACA_EXRFI+EX_R13(r13)
2271 + GET_SCRATCH0(r13);
2272 + rfid
2273 +
2274 +@@ -1482,39 +1495,37 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
2275 + std r9,PACA_EXRFI+EX_R9(r13)
2276 + std r10,PACA_EXRFI+EX_R10(r13)
2277 + std r11,PACA_EXRFI+EX_R11(r13)
2278 +- std r12,PACA_EXRFI+EX_R12(r13)
2279 +- std r8,PACA_EXRFI+EX_R13(r13)
2280 + mfctr r9
2281 + ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
2282 +- ld r11,PACA_L1D_FLUSH_SETS(r13)
2283 +- ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
2284 +- /*
2285 +- * The load adresses are at staggered offsets within cachelines,
2286 +- * which suits some pipelines better (on others it should not
2287 +- * hurt).
2288 +- */
2289 +- addi r12,r12,8
2290 ++ ld r11,PACA_L1D_FLUSH_SIZE(r13)
2291 ++ srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
2292 + mtctr r11
2293 + DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
2294 +
2295 + /* order ld/st prior to dcbt stop all streams with flushing */
2296 + sync
2297 +-1: li r8,0
2298 +- .rept 8 /* 8-way set associative */
2299 +- ldx r11,r10,r8
2300 +- add r8,r8,r12
2301 +- xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not
2302 +- add r8,r8,r11 // Add 0, this creates a dependency on the ldx
2303 +- .endr
2304 +- addi r10,r10,128 /* 128 byte cache line */
2305 ++
2306 ++ /*
2307 ++ * The load addresses are at staggered offsets within cachelines,
2308 ++ * which suits some pipelines better (on others it should not
2309 ++ * hurt).
2310 ++ */
2311 ++1:
2312 ++ ld r11,(0x80 + 8)*0(r10)
2313 ++ ld r11,(0x80 + 8)*1(r10)
2314 ++ ld r11,(0x80 + 8)*2(r10)
2315 ++ ld r11,(0x80 + 8)*3(r10)
2316 ++ ld r11,(0x80 + 8)*4(r10)
2317 ++ ld r11,(0x80 + 8)*5(r10)
2318 ++ ld r11,(0x80 + 8)*6(r10)
2319 ++ ld r11,(0x80 + 8)*7(r10)
2320 ++ addi r10,r10,0x80*8
2321 + bdnz 1b
2322 +
2323 + mtctr r9
2324 + ld r9,PACA_EXRFI+EX_R9(r13)
2325 + ld r10,PACA_EXRFI+EX_R10(r13)
2326 + ld r11,PACA_EXRFI+EX_R11(r13)
2327 +- ld r12,PACA_EXRFI+EX_R12(r13)
2328 +- ld r8,PACA_EXRFI+EX_R13(r13)
2329 + GET_SCRATCH0(r13);
2330 + hrfid
2331 +
2332 +diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
2333 +index 1125c9be9e06..e35cebd45c35 100644
2334 +--- a/arch/powerpc/kernel/idle_book3s.S
2335 ++++ b/arch/powerpc/kernel/idle_book3s.S
2336 +@@ -838,6 +838,8 @@ BEGIN_FTR_SECTION
2337 + mtspr SPRN_PTCR,r4
2338 + ld r4,_RPR(r1)
2339 + mtspr SPRN_RPR,r4
2340 ++ ld r4,_AMOR(r1)
2341 ++ mtspr SPRN_AMOR,r4
2342 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2343 +
2344 + ld r4,_TSCR(r1)
2345 +diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
2346 +new file mode 100644
2347 +index 000000000000..b98a722da915
2348 +--- /dev/null
2349 ++++ b/arch/powerpc/kernel/security.c
2350 +@@ -0,0 +1,237 @@
2351 ++// SPDX-License-Identifier: GPL-2.0+
2352 ++//
2353 ++// Security related flags and so on.
2354 ++//
2355 ++// Copyright 2018, Michael Ellerman, IBM Corporation.
2356 ++
2357 ++#include <linux/kernel.h>
2358 ++#include <linux/device.h>
2359 ++#include <linux/seq_buf.h>
2360 ++
2361 ++#include <asm/debugfs.h>
2362 ++#include <asm/security_features.h>
2363 ++
2364 ++
2365 ++unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
2366 ++
2367 ++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
2368 ++{
2369 ++ bool thread_priv;
2370 ++
2371 ++ thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);
2372 ++
2373 ++ if (rfi_flush || thread_priv) {
2374 ++ struct seq_buf s;
2375 ++ seq_buf_init(&s, buf, PAGE_SIZE - 1);
2376 ++
2377 ++ seq_buf_printf(&s, "Mitigation: ");
2378 ++
2379 ++ if (rfi_flush)
2380 ++ seq_buf_printf(&s, "RFI Flush");
2381 ++
2382 ++ if (rfi_flush && thread_priv)
2383 ++ seq_buf_printf(&s, ", ");
2384 ++
2385 ++ if (thread_priv)
2386 ++ seq_buf_printf(&s, "L1D private per thread");
2387 ++
2388 ++ seq_buf_printf(&s, "\n");
2389 ++
2390 ++ return s.len;
2391 ++ }
2392 ++
2393 ++ if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
2394 ++ !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
2395 ++ return sprintf(buf, "Not affected\n");
2396 ++
2397 ++ return sprintf(buf, "Vulnerable\n");
2398 ++}
2399 ++
2400 ++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
2401 ++{
2402 ++ if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR))
2403 ++ return sprintf(buf, "Not affected\n");
2404 ++
2405 ++ return sprintf(buf, "Vulnerable\n");
2406 ++}
2407 ++
2408 ++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
2409 ++{
2410 ++ bool bcs, ccd, ori;
2411 ++ struct seq_buf s;
2412 ++
2413 ++ seq_buf_init(&s, buf, PAGE_SIZE - 1);
2414 ++
2415 ++ bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
2416 ++ ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
2417 ++ ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31);
2418 ++
2419 ++ if (bcs || ccd) {
2420 ++ seq_buf_printf(&s, "Mitigation: ");
2421 ++
2422 ++ if (bcs)
2423 ++ seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
2424 ++
2425 ++ if (bcs && ccd)
2426 ++ seq_buf_printf(&s, ", ");
2427 ++
2428 ++ if (ccd)
2429 ++ seq_buf_printf(&s, "Indirect branch cache disabled");
2430 ++ } else
2431 ++ seq_buf_printf(&s, "Vulnerable");
2432 ++
2433 ++ if (ori)
2434 ++ seq_buf_printf(&s, ", ori31 speculation barrier enabled");
2435 ++
2436 ++ seq_buf_printf(&s, "\n");
2437 ++
2438 ++ return s.len;
2439 ++}
2440 ++
2441 ++/*
2442 ++ * Store-forwarding barrier support.
2443 ++ */
2444 ++
2445 ++static enum stf_barrier_type stf_enabled_flush_types;
2446 ++static bool no_stf_barrier;
2447 ++bool stf_barrier;
2448 ++
2449 ++static int __init handle_no_stf_barrier(char *p)
2450 ++{
2451 ++ pr_info("stf-barrier: disabled on command line.");
2452 ++ no_stf_barrier = true;
2453 ++ return 0;
2454 ++}
2455 ++
2456 ++early_param("no_stf_barrier", handle_no_stf_barrier);
2457 ++
2458 ++/* This is the generic flag used by other architectures */
2459 ++static int __init handle_ssbd(char *p)
2460 ++{
2461 ++ if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) {
2462 ++ /* Until firmware tells us, we have the barrier with auto */
2463 ++ return 0;
2464 ++ } else if (strncmp(p, "off", 3) == 0) {
2465 ++ handle_no_stf_barrier(NULL);
2466 ++ return 0;
2467 ++ } else
2468 ++ return 1;
2469 ++
2470 ++ return 0;
2471 ++}
2472 ++early_param("spec_store_bypass_disable", handle_ssbd);
2473 ++
2474 ++/* This is the generic flag used by other architectures */
2475 ++static int __init handle_no_ssbd(char *p)
2476 ++{
2477 ++ handle_no_stf_barrier(NULL);
2478 ++ return 0;
2479 ++}
2480 ++early_param("nospec_store_bypass_disable", handle_no_ssbd);
2481 ++
2482 ++static void stf_barrier_enable(bool enable)
2483 ++{
2484 ++ if (enable)
2485 ++ do_stf_barrier_fixups(stf_enabled_flush_types);
2486 ++ else
2487 ++ do_stf_barrier_fixups(STF_BARRIER_NONE);
2488 ++
2489 ++ stf_barrier = enable;
2490 ++}
2491 ++
2492 ++void setup_stf_barrier(void)
2493 ++{
2494 ++ enum stf_barrier_type type;
2495 ++ bool enable, hv;
2496 ++
2497 ++ hv = cpu_has_feature(CPU_FTR_HVMODE);
2498 ++
2499 ++ /* Default to fallback in case fw-features are not available */
2500 ++ if (cpu_has_feature(CPU_FTR_ARCH_300))
2501 ++ type = STF_BARRIER_EIEIO;
2502 ++ else if (cpu_has_feature(CPU_FTR_ARCH_207S))
2503 ++ type = STF_BARRIER_SYNC_ORI;
2504 ++ else if (cpu_has_feature(CPU_FTR_ARCH_206))
2505 ++ type = STF_BARRIER_FALLBACK;
2506 ++ else
2507 ++ type = STF_BARRIER_NONE;
2508 ++
2509 ++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
2510 ++ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
2511 ++ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));
2512 ++
2513 ++ if (type == STF_BARRIER_FALLBACK) {
2514 ++ pr_info("stf-barrier: fallback barrier available\n");
2515 ++ } else if (type == STF_BARRIER_SYNC_ORI) {
2516 ++ pr_info("stf-barrier: hwsync barrier available\n");
2517 ++ } else if (type == STF_BARRIER_EIEIO) {
2518 ++ pr_info("stf-barrier: eieio barrier available\n");
2519 ++ }
2520 ++
2521 ++ stf_enabled_flush_types = type;
2522 ++
2523 ++ if (!no_stf_barrier)
2524 ++ stf_barrier_enable(enable);
2525 ++}
2526 ++
2527 ++ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
2528 ++{
2529 ++ if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
2530 ++ const char *type;
2531 ++ switch (stf_enabled_flush_types) {
2532 ++ case STF_BARRIER_EIEIO:
2533 ++ type = "eieio";
2534 ++ break;
2535 ++ case STF_BARRIER_SYNC_ORI:
2536 ++ type = "hwsync";
2537 ++ break;
2538 ++ case STF_BARRIER_FALLBACK:
2539 ++ type = "fallback";
2540 ++ break;
2541 ++ default:
2542 ++ type = "unknown";
2543 ++ }
2544 ++ return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
2545 ++ }
2546 ++
2547 ++ if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
2548 ++ !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
2549 ++ return sprintf(buf, "Not affected\n");
2550 ++
2551 ++ return sprintf(buf, "Vulnerable\n");
2552 ++}
2553 ++
2554 ++#ifdef CONFIG_DEBUG_FS
2555 ++static int stf_barrier_set(void *data, u64 val)
2556 ++{
2557 ++ bool enable;
2558 ++
2559 ++ if (val == 1)
2560 ++ enable = true;
2561 ++ else if (val == 0)
2562 ++ enable = false;
2563 ++ else
2564 ++ return -EINVAL;
2565 ++
2566 ++ /* Only do anything if we're changing state */
2567 ++ if (enable != stf_barrier)
2568 ++ stf_barrier_enable(enable);
2569 ++
2570 ++ return 0;
2571 ++}
2572 ++
2573 ++static int stf_barrier_get(void *data, u64 *val)
2574 ++{
2575 ++ *val = stf_barrier ? 1 : 0;
2576 ++ return 0;
2577 ++}
2578 ++
2579 ++DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");
2580 ++
2581 ++static __init int stf_barrier_debugfs_init(void)
2582 ++{
2583 ++ debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
2584 ++ return 0;
2585 ++}
2586 ++device_initcall(stf_barrier_debugfs_init);
2587 ++#endif /* CONFIG_DEBUG_FS */
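
[The debugfs hook at the end of the new file is the standard DEFINE_SIMPLE_ATTRIBUTE pattern: a get/set callback pair over a u64, exposed as a single file so the barrier can be flipped at runtime under CONFIG_DEBUG_FS. A sketch of the same pattern for some other boolean knob; all names here are hypothetical:]

	#include <linux/debugfs.h>
	#include <linux/fs.h>
	#include <linux/init.h>

	static bool my_knob;

	static int my_knob_set(void *data, u64 val)
	{
		if (val > 1)
			return -EINVAL;
		my_knob = !!val;
		return 0;
	}

	static int my_knob_get(void *data, u64 *val)
	{
		*val = my_knob ? 1 : 0;
		return 0;
	}

	DEFINE_SIMPLE_ATTRIBUTE(fops_my_knob, my_knob_get, my_knob_set, "%llu\n");

	static int __init my_knob_debugfs_init(void)
	{
		/* NULL parent places the file at the debugfs root. */
		debugfs_create_file("my_knob", 0600, NULL, NULL, &fops_my_knob);
		return 0;
	}
	device_initcall(my_knob_debugfs_init);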
2588 +diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
2589 +index 9527a4c6cbc2..0618aa61b26a 100644
2590 +--- a/arch/powerpc/kernel/setup_64.c
2591 ++++ b/arch/powerpc/kernel/setup_64.c
2592 +@@ -822,9 +822,6 @@ static void do_nothing(void *unused)
2593 +
2594 + void rfi_flush_enable(bool enable)
2595 + {
2596 +- if (rfi_flush == enable)
2597 +- return;
2598 +-
2599 + if (enable) {
2600 + do_rfi_flush_fixups(enabled_flush_types);
2601 + on_each_cpu(do_nothing, NULL, 1);
2602 +@@ -834,11 +831,15 @@ void rfi_flush_enable(bool enable)
2603 + rfi_flush = enable;
2604 + }
2605 +
2606 +-static void init_fallback_flush(void)
2607 ++static void __ref init_fallback_flush(void)
2608 + {
2609 + u64 l1d_size, limit;
2610 + int cpu;
2611 +
2612 ++ /* Only allocate the fallback flush area once (at boot time). */
2613 ++ if (l1d_flush_fallback_area)
2614 ++ return;
2615 ++
2616 + l1d_size = ppc64_caches.l1d.size;
2617 + limit = min(safe_stack_limit(), ppc64_rma_size);
2618 +
2619 +@@ -851,34 +852,23 @@ static void init_fallback_flush(void)
2620 + memset(l1d_flush_fallback_area, 0, l1d_size * 2);
2621 +
2622 + for_each_possible_cpu(cpu) {
2623 +- /*
2624 +- * The fallback flush is currently coded for 8-way
2625 +- * associativity. Different associativity is possible, but it
2626 +- * will be treated as 8-way and may not evict the lines as
2627 +- * effectively.
2628 +- *
2629 +- * 128 byte lines are mandatory.
2630 +- */
2631 +- u64 c = l1d_size / 8;
2632 +-
2633 + paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
2634 +- paca[cpu].l1d_flush_congruence = c;
2635 +- paca[cpu].l1d_flush_sets = c / 128;
2636 ++ paca[cpu].l1d_flush_size = l1d_size;
2637 + }
2638 + }
2639 +
2640 +-void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
2641 ++void setup_rfi_flush(enum l1d_flush_type types, bool enable)
2642 + {
2643 + if (types & L1D_FLUSH_FALLBACK) {
2644 +- pr_info("rfi-flush: Using fallback displacement flush\n");
2645 ++ pr_info("rfi-flush: fallback displacement flush available\n");
2646 + init_fallback_flush();
2647 + }
2648 +
2649 + if (types & L1D_FLUSH_ORI)
2650 +- pr_info("rfi-flush: Using ori type flush\n");
2651 ++ pr_info("rfi-flush: ori type flush available\n");
2652 +
2653 + if (types & L1D_FLUSH_MTTRIG)
2654 +- pr_info("rfi-flush: Using mttrig type flush\n");
2655 ++ pr_info("rfi-flush: mttrig type flush available\n");
2656 +
2657 + enabled_flush_types = types;
2658 +
2659 +@@ -889,13 +879,19 @@ void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
2660 + #ifdef CONFIG_DEBUG_FS
2661 + static int rfi_flush_set(void *data, u64 val)
2662 + {
2663 ++ bool enable;
2664 ++
2665 + if (val == 1)
2666 +- rfi_flush_enable(true);
2667 ++ enable = true;
2668 + else if (val == 0)
2669 +- rfi_flush_enable(false);
2670 ++ enable = false;
2671 + else
2672 + return -EINVAL;
2673 +
2674 ++ /* Only do anything if we're changing state */
2675 ++ if (enable != rfi_flush)
2676 ++ rfi_flush_enable(enable);
2677 ++
2678 + return 0;
2679 + }
2680 +
2681 +@@ -914,12 +910,4 @@ static __init int rfi_flush_debugfs_init(void)
2682 + }
2683 + device_initcall(rfi_flush_debugfs_init);
2684 + #endif
2685 +-
2686 +-ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
2687 +-{
2688 +- if (rfi_flush)
2689 +- return sprintf(buf, "Mitigation: RFI Flush\n");
2690 +-
2691 +- return sprintf(buf, "Vulnerable\n");
2692 +-}
2693 + #endif /* CONFIG_PPC_BOOK3S_64 */
2694 +diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
2695 +index d17007451f62..ac2e5e56a9f0 100644
2696 +--- a/arch/powerpc/kernel/traps.c
2697 ++++ b/arch/powerpc/kernel/traps.c
2698 +@@ -182,6 +182,12 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
2699 + }
2700 + raw_local_irq_restore(flags);
2701 +
2702 ++ /*
2703 ++ * system_reset_exception handles debugger, crash dump, panic, for 0x100
2704 ++ */
2705 ++ if (TRAP(regs) == 0x100)
2706 ++ return;
2707 ++
2708 + crash_fadump(regs, "die oops");
2709 +
2710 + if (kexec_should_crash(current))
2711 +@@ -246,8 +252,13 @@ void die(const char *str, struct pt_regs *regs, long err)
2712 + {
2713 + unsigned long flags;
2714 +
2715 +- if (debugger(regs))
2716 +- return;
2717 ++ /*
2718 ++ * system_reset_exception handles debugger, crash dump, panic, for 0x100
2719 ++ */
2720 ++ if (TRAP(regs) != 0x100) {
2721 ++ if (debugger(regs))
2722 ++ return;
2723 ++ }
2724 +
2725 + flags = oops_begin(regs);
2726 + if (__die(str, regs, err))
2727 +@@ -1379,6 +1390,22 @@ void facility_unavailable_exception(struct pt_regs *regs)
2728 + value = mfspr(SPRN_FSCR);
2729 +
2730 + status = value >> 56;
2731 ++ if ((hv || status >= 2) &&
2732 ++ (status < ARRAY_SIZE(facility_strings)) &&
2733 ++ facility_strings[status])
2734 ++ facility = facility_strings[status];
2735 ++
2736 ++ /* We should not have taken this interrupt in kernel */
2737 ++ if (!user_mode(regs)) {
2738 ++ pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n",
2739 ++ facility, status, regs->nip);
2740 ++ die("Unexpected facility unavailable exception", regs, SIGABRT);
2741 ++ }
2742 ++
2743 ++ /* We restore the interrupt state now */
2744 ++ if (!arch_irq_disabled_regs(regs))
2745 ++ local_irq_enable();
2746 ++
2747 + if (status == FSCR_DSCR_LG) {
2748 + /*
2749 + * User is accessing the DSCR register using the problem
2750 +@@ -1445,25 +1472,11 @@ void facility_unavailable_exception(struct pt_regs *regs)
2751 + return;
2752 + }
2753 +
2754 +- if ((hv || status >= 2) &&
2755 +- (status < ARRAY_SIZE(facility_strings)) &&
2756 +- facility_strings[status])
2757 +- facility = facility_strings[status];
2758 +-
2759 +- /* We restore the interrupt state now */
2760 +- if (!arch_irq_disabled_regs(regs))
2761 +- local_irq_enable();
2762 +-
2763 + pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
2764 + hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);
2765 +
2766 + out:
2767 +- if (user_mode(regs)) {
2768 +- _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
2769 +- return;
2770 +- }
2771 +-
2772 +- die("Unexpected facility unavailable exception", regs, SIGABRT);
2773 ++ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
2774 + }
2775 + #endif
2776 +
2777 +diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
2778 +index 307843d23682..c89ffb88fa3b 100644
2779 +--- a/arch/powerpc/kernel/vmlinux.lds.S
2780 ++++ b/arch/powerpc/kernel/vmlinux.lds.S
2781 +@@ -133,6 +133,20 @@ SECTIONS
2782 + RO_DATA(PAGE_SIZE)
2783 +
2784 + #ifdef CONFIG_PPC64
2785 ++ . = ALIGN(8);
2786 ++ __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
2787 ++ __start___stf_entry_barrier_fixup = .;
2788 ++ *(__stf_entry_barrier_fixup)
2789 ++ __stop___stf_entry_barrier_fixup = .;
2790 ++ }
2791 ++
2792 ++ . = ALIGN(8);
2793 ++ __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
2794 ++ __start___stf_exit_barrier_fixup = .;
2795 ++ *(__stf_exit_barrier_fixup)
2796 ++ __stop___stf_exit_barrier_fixup = .;
2797 ++ }
2798 ++
2799 + . = ALIGN(8);
2800 + __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
2801 + __start___rfi_flush_fixup = .;
2802 +diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
2803 +index d0c0b8443dcf..762a899e85a4 100644
2804 +--- a/arch/powerpc/lib/feature-fixups.c
2805 ++++ b/arch/powerpc/lib/feature-fixups.c
2806 +@@ -23,6 +23,7 @@
2807 + #include <asm/page.h>
2808 + #include <asm/sections.h>
2809 + #include <asm/setup.h>
2810 ++#include <asm/security_features.h>
2811 + #include <asm/firmware.h>
2812 +
2813 + struct fixup_entry {
2814 +@@ -117,6 +118,120 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
2815 + }
2816 +
2817 + #ifdef CONFIG_PPC_BOOK3S_64
2818 ++void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
2819 ++{
2820 ++ unsigned int instrs[3], *dest;
2821 ++ long *start, *end;
2822 ++ int i;
2823 ++
2824 ++ start = PTRRELOC(&__start___stf_entry_barrier_fixup),
2825 ++ end = PTRRELOC(&__stop___stf_entry_barrier_fixup);
2826 ++
2827 ++ instrs[0] = 0x60000000; /* nop */
2828 ++ instrs[1] = 0x60000000; /* nop */
2829 ++ instrs[2] = 0x60000000; /* nop */
2830 ++
2831 ++ i = 0;
2832 ++ if (types & STF_BARRIER_FALLBACK) {
2833 ++ instrs[i++] = 0x7d4802a6; /* mflr r10 */
2834 ++ instrs[i++] = 0x60000000; /* branch patched below */
2835 ++ instrs[i++] = 0x7d4803a6; /* mtlr r10 */
2836 ++ } else if (types & STF_BARRIER_EIEIO) {
2837 ++ instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
2838 ++ } else if (types & STF_BARRIER_SYNC_ORI) {
2839 ++ instrs[i++] = 0x7c0004ac; /* hwsync */
2840 ++ instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */
2841 ++ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
2842 ++ }
2843 ++
2844 ++ for (i = 0; start < end; start++, i++) {
2845 ++ dest = (void *)start + *start;
2846 ++
2847 ++ pr_devel("patching dest %lx\n", (unsigned long)dest);
2848 ++
2849 ++ patch_instruction(dest, instrs[0]);
2850 ++
2851 ++ if (types & STF_BARRIER_FALLBACK)
2852 ++ patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback,
2853 ++ BRANCH_SET_LINK);
2854 ++ else
2855 ++ patch_instruction(dest + 1, instrs[1]);
2856 ++
2857 ++ patch_instruction(dest + 2, instrs[2]);
2858 ++ }
2859 ++
2860 ++ printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
2861 ++ (types == STF_BARRIER_NONE) ? "no" :
2862 ++ (types == STF_BARRIER_FALLBACK) ? "fallback" :
2863 ++ (types == STF_BARRIER_EIEIO) ? "eieio" :
2864 ++ (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
2865 ++ : "unknown");
2866 ++}
2867 ++
2868 ++void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
2869 ++{
2870 ++ unsigned int instrs[6], *dest;
2871 ++ long *start, *end;
2872 ++ int i;
2873 ++
2874 ++ start = PTRRELOC(&__start___stf_exit_barrier_fixup),
2875 ++ end = PTRRELOC(&__stop___stf_exit_barrier_fixup);
2876 ++
2877 ++ instrs[0] = 0x60000000; /* nop */
2878 ++ instrs[1] = 0x60000000; /* nop */
2879 ++ instrs[2] = 0x60000000; /* nop */
2880 ++ instrs[3] = 0x60000000; /* nop */
2881 ++ instrs[4] = 0x60000000; /* nop */
2882 ++ instrs[5] = 0x60000000; /* nop */
2883 ++
2884 ++ i = 0;
2885 ++ if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
2886 ++ if (cpu_has_feature(CPU_FTR_HVMODE)) {
2887 ++ instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
2888 ++ instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
2889 ++ } else {
2890 ++ instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */
2891 ++ instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */
2892 ++ }
2893 ++ instrs[i++] = 0x7c0004ac; /* hwsync */
2894 ++ instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */
2895 ++ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
2896 ++ if (cpu_has_feature(CPU_FTR_HVMODE)) {
2897 ++ instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
2898 ++ } else {
2899 ++ instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
2900 ++ }
2901 ++ } else if (types & STF_BARRIER_EIEIO) {
2902 ++ instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
2903 ++ }
2904 ++
2905 ++ for (i = 0; start < end; start++, i++) {
2906 ++ dest = (void *)start + *start;
2907 ++
2908 ++ pr_devel("patching dest %lx\n", (unsigned long)dest);
2909 ++
2910 ++ patch_instruction(dest, instrs[0]);
2911 ++ patch_instruction(dest + 1, instrs[1]);
2912 ++ patch_instruction(dest + 2, instrs[2]);
2913 ++ patch_instruction(dest + 3, instrs[3]);
2914 ++ patch_instruction(dest + 4, instrs[4]);
2915 ++ patch_instruction(dest + 5, instrs[5]);
2916 ++ }
2917 ++ printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
2918 ++ (types == STF_BARRIER_NONE) ? "no" :
2919 ++ (types == STF_BARRIER_FALLBACK) ? "fallback" :
2920 ++ (types == STF_BARRIER_EIEIO) ? "eieio" :
2921 ++ (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
2922 ++ : "unknown");
2923 ++}
2924 ++
2925 ++
2926 ++void do_stf_barrier_fixups(enum stf_barrier_type types)
2927 ++{
2928 ++ do_stf_entry_barrier_fixups(types);
2929 ++ do_stf_exit_barrier_fixups(types);
2930 ++}
2931 ++
2932 + void do_rfi_flush_fixups(enum l1d_flush_type types)
2933 + {
2934 + unsigned int instrs[3], *dest;
2935 +@@ -153,7 +268,14 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
2936 + patch_instruction(dest + 2, instrs[2]);
2937 + }
2938 +
2939 +- printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i);
2940 ++ printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
2941 ++ (types == L1D_FLUSH_NONE) ? "no" :
2942 ++ (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
2943 ++ (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG)
2944 ++ ? "ori+mttrig type"
2945 ++ : "ori type" :
2946 ++ (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
2947 ++ : "unknown");
2948 + }
2949 + #endif /* CONFIG_PPC_BOOK3S_64 */
2950 +
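
[Both the stf-barrier and rfi-flush fixups follow the same scheme: assembly sites emit nop slots plus a self-relative offset into a dedicated __stf_*/__rfi_flush_fixup linker section, and at boot the kernel walks that table and patches real instructions in with patch_instruction(). A condensed sketch of the walk, with the types and patch_instruction() as in the file above:]

	/* Each table entry is a self-relative offset to a patch site. */
	static void apply_fixup_table(long *start, long *end,
				      const unsigned int *instrs, int n)
	{
		unsigned int *dest;
		long *entry;
		int i;

		for (entry = start; entry < end; entry++) {
			dest = (void *)entry + *entry; /* resolve offset */
			for (i = 0; i < n; i++)
				patch_instruction(dest + i, instrs[i]);
		}
	}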
2951 +diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
2952 +index f9941b3b5770..f760494ecd66 100644
2953 +--- a/arch/powerpc/net/bpf_jit_comp.c
2954 ++++ b/arch/powerpc/net/bpf_jit_comp.c
2955 +@@ -329,6 +329,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
2956 + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
2957 + PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
2958 + break;
2959 ++ case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
2960 ++ PPC_LWZ_OFFS(r_A, r_skb, K);
2961 ++ break;
2962 + case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
2963 + PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
2964 + break;
2965 +diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
2966 +index fce545774d50..b7a6044161e8 100644
2967 +--- a/arch/powerpc/perf/core-book3s.c
2968 ++++ b/arch/powerpc/perf/core-book3s.c
2969 +@@ -457,6 +457,16 @@ static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
2970 + /* invalid entry */
2971 + continue;
2972 +
2973 ++ /*
2974 ++ * BHRB rolling buffer could very much contain the kernel
2975 ++ * addresses at this point. Check the privileges before
2976 ++ * exporting it to userspace (avoid exposure of regions
2977 ++ * where we could have speculative execution)
2978 ++ */
2979 ++ if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) &&
2980 ++ is_kernel_addr(addr))
2981 ++ continue;
2982 ++
2983 + /* Branches are read most recent first (ie. mfbhrb 0 is
2984 + * the most recent branch).
2985 + * There are two types of valid entries:
2986 +@@ -1226,6 +1236,7 @@ static void power_pmu_disable(struct pmu *pmu)
2987 + */
2988 + write_mmcr0(cpuhw, val);
2989 + mb();
2990 ++ isync();
2991 +
2992 + /*
2993 + * Disable instruction sampling if it was enabled
2994 +@@ -1234,12 +1245,26 @@ static void power_pmu_disable(struct pmu *pmu)
2995 + mtspr(SPRN_MMCRA,
2996 + cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
2997 + mb();
2998 ++ isync();
2999 + }
3000 +
3001 + cpuhw->disabled = 1;
3002 + cpuhw->n_added = 0;
3003 +
3004 + ebb_switch_out(mmcr0);
3005 ++
3006 ++#ifdef CONFIG_PPC64
3007 ++ /*
3008 ++ * These are readable by userspace, may contain kernel
3009 ++ * addresses and are not switched by context switch, so clear
3010 ++ * them now to avoid leaking anything to userspace in general
3011 ++ * including to another process.
3012 ++ */
3013 ++ if (ppmu->flags & PPMU_ARCH_207S) {
3014 ++ mtspr(SPRN_SDAR, 0);
3015 ++ mtspr(SPRN_SIAR, 0);
3016 ++ }
3017 ++#endif
3018 + }
3019 +
3020 + local_irq_restore(flags);
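
[Two independent hardenings in the perf hunk above: BHRB entries holding kernel addresses are skipped for unprivileged users, and SIAR/SDAR are scrubbed on PMU disable since they are user-readable and not context-switched. The BHRB check boils down to a predicate like the following; perf_paranoid_kernel() is the generic perf helper of this era and is_kernel_addr() the powerpc one:]

	#include <linux/capability.h>
	#include <linux/perf_event.h>

	/* Sketch: may this sampled branch address be exposed to the
	 * current perf user? */
	static bool bhrb_addr_allowed(u64 addr)
	{
		return !(perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) &&
			 is_kernel_addr(addr));
	}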
3021 +diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
3022 +index 4043109f4051..63f007f2de7e 100644
3023 +--- a/arch/powerpc/platforms/powernv/npu-dma.c
3024 ++++ b/arch/powerpc/platforms/powernv/npu-dma.c
3025 +@@ -413,6 +413,11 @@ struct npu_context {
3026 + void *priv;
3027 + };
3028 +
3029 ++struct mmio_atsd_reg {
3030 ++ struct npu *npu;
3031 ++ int reg;
3032 ++};
3033 ++
3034 + /*
3035 + * Find a free MMIO ATSD register and mark it in use. Return -ENOSPC
3036 + * if none are available.
3037 +@@ -422,7 +427,7 @@ static int get_mmio_atsd_reg(struct npu *npu)
3038 + int i;
3039 +
3040 + for (i = 0; i < npu->mmio_atsd_count; i++) {
3041 +- if (!test_and_set_bit(i, &npu->mmio_atsd_usage))
3042 ++ if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
3043 + return i;
3044 + }
3045 +
3046 +@@ -431,86 +436,90 @@ static int get_mmio_atsd_reg(struct npu *npu)
3047 +
3048 + static void put_mmio_atsd_reg(struct npu *npu, int reg)
3049 + {
3050 +- clear_bit(reg, &npu->mmio_atsd_usage);
3051 ++ clear_bit_unlock(reg, &npu->mmio_atsd_usage);
3052 + }
3053 +
3054 + /* MMIO ATSD register offsets */
3055 + #define XTS_ATSD_AVA 1
3056 + #define XTS_ATSD_STAT 2
3057 +
3058 +-static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
3059 +- unsigned long va)
3060 ++static void mmio_launch_invalidate(struct mmio_atsd_reg *mmio_atsd_reg,
3061 ++ unsigned long launch, unsigned long va)
3062 + {
3063 +- int mmio_atsd_reg;
3064 +-
3065 +- do {
3066 +- mmio_atsd_reg = get_mmio_atsd_reg(npu);
3067 +- cpu_relax();
3068 +- } while (mmio_atsd_reg < 0);
3069 ++ struct npu *npu = mmio_atsd_reg->npu;
3070 ++ int reg = mmio_atsd_reg->reg;
3071 +
3072 + __raw_writeq(cpu_to_be64(va),
3073 +- npu->mmio_atsd_regs[mmio_atsd_reg] + XTS_ATSD_AVA);
3074 ++ npu->mmio_atsd_regs[reg] + XTS_ATSD_AVA);
3075 + eieio();
3076 +- __raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[mmio_atsd_reg]);
3077 +-
3078 +- return mmio_atsd_reg;
3079 ++ __raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[reg]);
3080 + }
3081 +
3082 +-static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
3083 ++static void mmio_invalidate_pid(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS],
3084 ++ unsigned long pid, bool flush)
3085 + {
3086 ++ int i;
3087 + unsigned long launch;
3088 +
3089 +- /* IS set to invalidate matching PID */
3090 +- launch = PPC_BIT(12);
3091 ++ for (i = 0; i <= max_npu2_index; i++) {
3092 ++ if (mmio_atsd_reg[i].reg < 0)
3093 ++ continue;
3094 ++
3095 ++ /* IS set to invalidate matching PID */
3096 ++ launch = PPC_BIT(12);
3097 +
3098 +- /* PRS set to process-scoped */
3099 +- launch |= PPC_BIT(13);
3100 ++ /* PRS set to process-scoped */
3101 ++ launch |= PPC_BIT(13);
3102 +
3103 +- /* AP */
3104 +- launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);
3105 ++ /* AP */
3106 ++ launch |= (u64)
3107 ++ mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);
3108 +
3109 +- /* PID */
3110 +- launch |= pid << PPC_BITLSHIFT(38);
3111 ++ /* PID */
3112 ++ launch |= pid << PPC_BITLSHIFT(38);
3113 +
3114 +- /* No flush */
3115 +- launch |= !flush << PPC_BITLSHIFT(39);
3116 ++ /* No flush */
3117 ++ launch |= !flush << PPC_BITLSHIFT(39);
3118 +
3119 +- /* Invalidating the entire process doesn't use a va */
3120 +- return mmio_launch_invalidate(npu, launch, 0);
3121 ++ /* Invalidating the entire process doesn't use a va */
3122 ++ mmio_launch_invalidate(&mmio_atsd_reg[i], launch, 0);
3123 ++ }
3124 + }
3125 +
3126 +-static int mmio_invalidate_va(struct npu *npu, unsigned long va,
3127 +- unsigned long pid, bool flush)
3128 ++static void mmio_invalidate_va(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS],
3129 ++ unsigned long va, unsigned long pid, bool flush)
3130 + {
3131 ++ int i;
3132 + unsigned long launch;
3133 +
3134 +- /* IS set to invalidate target VA */
3135 +- launch = 0;
3136 ++ for (i = 0; i <= max_npu2_index; i++) {
3137 ++ if (mmio_atsd_reg[i].reg < 0)
3138 ++ continue;
3139 ++
3140 ++ /* IS set to invalidate target VA */
3141 ++ launch = 0;
3142 +
3143 +- /* PRS set to process scoped */
3144 +- launch |= PPC_BIT(13);
3145 ++ /* PRS set to process scoped */
3146 ++ launch |= PPC_BIT(13);
3147 +
3148 +- /* AP */
3149 +- launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);
3150 ++ /* AP */
3151 ++ launch |= (u64)
3152 ++ mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);
3153 +
3154 +- /* PID */
3155 +- launch |= pid << PPC_BITLSHIFT(38);
3156 ++ /* PID */
3157 ++ launch |= pid << PPC_BITLSHIFT(38);
3158 +
3159 +- /* No flush */
3160 +- launch |= !flush << PPC_BITLSHIFT(39);
3161 ++ /* No flush */
3162 ++ launch |= !flush << PPC_BITLSHIFT(39);
3163 +
3164 +- return mmio_launch_invalidate(npu, launch, va);
3165 ++ mmio_launch_invalidate(&mmio_atsd_reg[i], launch, va);
3166 ++ }
3167 + }
3168 +
3169 + #define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
3170 +
3171 +-struct mmio_atsd_reg {
3172 +- struct npu *npu;
3173 +- int reg;
3174 +-};
3175 +-
3176 + static void mmio_invalidate_wait(
3177 +- struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
3178 ++ struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
3179 + {
3180 + struct npu *npu;
3181 + int i, reg;
3182 +@@ -525,16 +534,67 @@ static void mmio_invalidate_wait(
3183 + reg = mmio_atsd_reg[i].reg;
3184 + while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
3185 + cpu_relax();
3186 ++ }
3187 ++}
3188 ++
3189 ++/*
3190 ++ * Acquires all the address translation shootdown (ATSD) registers required to
3191 ++ * launch an ATSD on all links this npu_context is active on.
3192 ++ */
3193 ++static void acquire_atsd_reg(struct npu_context *npu_context,
3194 ++ struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
3195 ++{
3196 ++ int i, j;
3197 ++ struct npu *npu;
3198 ++ struct pci_dev *npdev;
3199 ++ struct pnv_phb *nphb;
3200 +
3201 +- put_mmio_atsd_reg(npu, reg);
3202 ++ for (i = 0; i <= max_npu2_index; i++) {
3203 ++ mmio_atsd_reg[i].reg = -1;
3204 ++ for (j = 0; j < NV_MAX_LINKS; j++) {
3205 ++ /*
3206 ++ * There are no ordering requirements with respect to
3207 ++ * the setup of struct npu_context, but to ensure
3208 ++			 * consistent behaviour, npdev[][] must be
3209 ++			 * read only once.
3210 ++ */
3211 ++ npdev = READ_ONCE(npu_context->npdev[i][j]);
3212 ++ if (!npdev)
3213 ++ continue;
3214 +
3215 ++ nphb = pci_bus_to_host(npdev->bus)->private_data;
3216 ++ npu = &nphb->npu;
3217 ++ mmio_atsd_reg[i].npu = npu;
3218 ++ mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
3219 ++ while (mmio_atsd_reg[i].reg < 0) {
3220 ++ mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
3221 ++ cpu_relax();
3222 ++ }
3223 ++ break;
3224 ++ }
3225 ++ }
3226 ++}
3227 ++
3228 ++/*
3229 ++ * Release previously acquired ATSD registers. To avoid deadlocks the registers
3230 ++ * must be released in the same order they were acquired above in
3231 ++ * acquire_atsd_reg.
3232 ++ */
3233 ++static void release_atsd_reg(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
3234 ++{
3235 ++ int i;
3236 ++
3237 ++ for (i = 0; i <= max_npu2_index; i++) {
3238 + /*
3239 +- * The GPU requires two flush ATSDs to ensure all entries have
3240 +- * been flushed. We use PID 0 as it will never be used for a
3241 +- * process on the GPU.
3242 ++ * We can't rely on npu_context->npdev[][] being the same here
3243 ++ * as when acquire_atsd_reg() was called, hence we use the
3244 ++ * values stored in mmio_atsd_reg during the acquire phase
3245 ++ * rather than re-reading npdev[][].
3246 + */
3247 +- if (flush)
3248 +- mmio_invalidate_pid(npu, 0, true);
3249 ++ if (mmio_atsd_reg[i].reg < 0)
3250 ++ continue;
3251 ++
3252 ++ put_mmio_atsd_reg(mmio_atsd_reg[i].npu, mmio_atsd_reg[i].reg);
3253 + }
3254 + }
3255 +
3256 +@@ -545,10 +605,6 @@ static void mmio_invalidate_wait(
3257 + static void mmio_invalidate(struct npu_context *npu_context, int va,
3258 + unsigned long address, bool flush)
3259 + {
3260 +- int i, j;
3261 +- struct npu *npu;
3262 +- struct pnv_phb *nphb;
3263 +- struct pci_dev *npdev;
3264 + struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
3265 + unsigned long pid = npu_context->mm->context.id;
3266 +
3267 +@@ -562,37 +618,25 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
3268 + * Loop over all the NPUs this process is active on and launch
3269 + * an invalidate.
3270 + */
3271 +- for (i = 0; i <= max_npu2_index; i++) {
3272 +- mmio_atsd_reg[i].reg = -1;
3273 +- for (j = 0; j < NV_MAX_LINKS; j++) {
3274 +- npdev = npu_context->npdev[i][j];
3275 +- if (!npdev)
3276 +- continue;
3277 +-
3278 +- nphb = pci_bus_to_host(npdev->bus)->private_data;
3279 +- npu = &nphb->npu;
3280 +- mmio_atsd_reg[i].npu = npu;
3281 +-
3282 +- if (va)
3283 +- mmio_atsd_reg[i].reg =
3284 +- mmio_invalidate_va(npu, address, pid,
3285 +- flush);
3286 +- else
3287 +- mmio_atsd_reg[i].reg =
3288 +- mmio_invalidate_pid(npu, pid, flush);
3289 +-
3290 +- /*
3291 +- * The NPU hardware forwards the shootdown to all GPUs
3292 +- * so we only have to launch one shootdown per NPU.
3293 +- */
3294 +- break;
3295 +- }
3296 ++ acquire_atsd_reg(npu_context, mmio_atsd_reg);
3297 ++ if (va)
3298 ++ mmio_invalidate_va(mmio_atsd_reg, address, pid, flush);
3299 ++ else
3300 ++ mmio_invalidate_pid(mmio_atsd_reg, pid, flush);
3301 ++
3302 ++ mmio_invalidate_wait(mmio_atsd_reg);
3303 ++ if (flush) {
3304 ++ /*
3305 ++ * The GPU requires two flush ATSDs to ensure all entries have
3306 ++ * been flushed. We use PID 0 as it will never be used for a
3307 ++ * process on the GPU.
3308 ++ */
3309 ++ mmio_invalidate_pid(mmio_atsd_reg, 0, true);
3310 ++ mmio_invalidate_wait(mmio_atsd_reg);
3311 ++ mmio_invalidate_pid(mmio_atsd_reg, 0, true);
3312 ++ mmio_invalidate_wait(mmio_atsd_reg);
3313 + }
3314 +-
3315 +- mmio_invalidate_wait(mmio_atsd_reg, flush);
3316 +- if (flush)
3317 +- /* Wait for the flush to complete */
3318 +- mmio_invalidate_wait(mmio_atsd_reg, false);
3319 ++ release_atsd_reg(mmio_atsd_reg);
3320 + }
3321 +
3322 + static void pnv_npu2_mn_release(struct mmu_notifier *mn,
3323 +@@ -735,7 +779,16 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
3324 + if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
3325 + &nvlink_index)))
3326 + return ERR_PTR(-ENODEV);
3327 +- npu_context->npdev[npu->index][nvlink_index] = npdev;
3328 ++
3329 ++ /*
3330 ++	 * npdev is a pci_dev pointer set up by the PCI code. We assign it to
3331 ++ * npdev[][] to indicate to the mmu notifiers that an invalidation
3332 ++ * should also be sent over this nvlink. The notifiers don't use any
3333 ++ * other fields in npu_context, so we just need to ensure that when they
3334 ++ * deference npu_context->npdev[][] it is either a valid pointer or
3335 ++ * NULL.
3336 ++ */
3337 ++ WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], npdev);
3338 +
3339 + return npu_context;
3340 + }
3341 +@@ -774,7 +827,7 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
3342 + if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
3343 + &nvlink_index)))
3344 + return;
3345 +- npu_context->npdev[npu->index][nvlink_index] = NULL;
3346 ++ WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL);
3347 + opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
3348 + PCI_DEVID(gpdev->bus->number, gpdev->devfn));
3349 + kref_put(&npu_context->kref, pnv_npu2_release_context);
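
The acquire_atsd_reg()/release_atsd_reg() split above replaces the old
launch-time register grab with ordered acquisition: each caller claims one
register per NPU in ascending NPU index and drops them in the same order, so
two concurrent invalidations can never hold-and-wait on each other's
registers. A minimal stand-alone sketch of that pattern in plain C, with
atomic bitmaps standing in for the per-NPU MMIO register pools (all names
here are illustrative, not the kernel's):

    #include <stdatomic.h>

    #define N_NPUS  4
    #define NREGS   8

    /* One bitmap per NPU stands in for the per-NPU register pools. */
    static atomic_uint regs_in_use[N_NPUS];

    static int acquire_reg(int npu)             /* cf. get_mmio_atsd_reg() */
    {
        for (;;) {
            for (int i = 0; i < NREGS; i++) {
                unsigned int bit = 1u << i;
                if (!(atomic_fetch_or(&regs_in_use[npu], bit) & bit))
                    return i;                   /* bit was clear: ours */
            }
            /* all busy: keep spinning (the kernel uses cpu_relax()) */
        }
    }

    static void release_reg(int npu, int i)
    {
        atomic_fetch_and(&regs_in_use[npu], ~(1u << i));
    }

    /* Claim one register per NPU in ascending NPU index... */
    static void acquire_all(int reg[N_NPUS])
    {
        for (int npu = 0; npu < N_NPUS; npu++)
            reg[npu] = acquire_reg(npu);
    }

    /* ...and release in the same order, as release_atsd_reg() requires. */
    static void release_all(int reg[N_NPUS])
    {
        for (int npu = 0; npu < N_NPUS; npu++)
            release_reg(npu, reg[npu]);
    }
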
3350 +diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
3351 +index 7966a314d93a..fd143c934768 100644
3352 +--- a/arch/powerpc/platforms/powernv/setup.c
3353 ++++ b/arch/powerpc/platforms/powernv/setup.c
3354 +@@ -37,53 +37,92 @@
3355 + #include <asm/kexec.h>
3356 + #include <asm/smp.h>
3357 + #include <asm/setup.h>
3358 ++#include <asm/security_features.h>
3359 +
3360 + #include "powernv.h"
3361 +
3362 ++
3363 ++static bool fw_feature_is(const char *state, const char *name,
3364 ++ struct device_node *fw_features)
3365 ++{
3366 ++ struct device_node *np;
3367 ++ bool rc = false;
3368 ++
3369 ++ np = of_get_child_by_name(fw_features, name);
3370 ++ if (np) {
3371 ++ rc = of_property_read_bool(np, state);
3372 ++ of_node_put(np);
3373 ++ }
3374 ++
3375 ++ return rc;
3376 ++}
3377 ++
3378 ++static void init_fw_feat_flags(struct device_node *np)
3379 ++{
3380 ++ if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np))
3381 ++ security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);
3382 ++
3383 ++ if (fw_feature_is("enabled", "fw-bcctrl-serialized", np))
3384 ++ security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);
3385 ++
3386 ++ if (fw_feature_is("enabled", "inst-l1d-flush-ori30,30,0", np))
3387 ++ security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);
3388 ++
3389 ++ if (fw_feature_is("enabled", "inst-l1d-flush-trig2", np))
3390 ++ security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);
3391 ++
3392 ++ if (fw_feature_is("enabled", "fw-l1d-thread-split", np))
3393 ++ security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);
3394 ++
3395 ++ if (fw_feature_is("enabled", "fw-count-cache-disabled", np))
3396 ++ security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
3397 ++
3398 ++ /*
3399 ++ * The features below are enabled by default, so we instead look to see
3400 ++ * if firmware has *disabled* them, and clear them if so.
3401 ++ */
3402 ++ if (fw_feature_is("disabled", "speculation-policy-favor-security", np))
3403 ++ security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
3404 ++
3405 ++ if (fw_feature_is("disabled", "needs-l1d-flush-msr-pr-0-to-1", np))
3406 ++ security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
3407 ++
3408 ++ if (fw_feature_is("disabled", "needs-l1d-flush-msr-hv-1-to-0", np))
3409 ++ security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);
3410 ++
3411 ++ if (fw_feature_is("disabled", "needs-spec-barrier-for-bound-checks", np))
3412 ++ security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
3413 ++}
3414 ++
3415 + static void pnv_setup_rfi_flush(void)
3416 + {
3417 + struct device_node *np, *fw_features;
3418 + enum l1d_flush_type type;
3419 +- int enable;
3420 ++ bool enable;
3421 +
3422 + /* Default to fallback in case fw-features are not available */
3423 + type = L1D_FLUSH_FALLBACK;
3424 +- enable = 1;
3425 +
3426 + np = of_find_node_by_name(NULL, "ibm,opal");
3427 + fw_features = of_get_child_by_name(np, "fw-features");
3428 + of_node_put(np);
3429 +
3430 + if (fw_features) {
3431 +- np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
3432 +- if (np && of_property_read_bool(np, "enabled"))
3433 +- type = L1D_FLUSH_MTTRIG;
3434 ++ init_fw_feat_flags(fw_features);
3435 ++ of_node_put(fw_features);
3436 +
3437 +- of_node_put(np);
3438 ++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
3439 ++ type = L1D_FLUSH_MTTRIG;
3440 +
3441 +- np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0");
3442 +- if (np && of_property_read_bool(np, "enabled"))
3443 ++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
3444 + type = L1D_FLUSH_ORI;
3445 +-
3446 +- of_node_put(np);
3447 +-
3448 +- /* Enable unless firmware says NOT to */
3449 +- enable = 2;
3450 +- np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0");
3451 +- if (np && of_property_read_bool(np, "disabled"))
3452 +- enable--;
3453 +-
3454 +- of_node_put(np);
3455 +-
3456 +- np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1");
3457 +- if (np && of_property_read_bool(np, "disabled"))
3458 +- enable--;
3459 +-
3460 +- of_node_put(np);
3461 +- of_node_put(fw_features);
3462 + }
3463 +
3464 +- setup_rfi_flush(type, enable > 0);
3465 ++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
3466 ++ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \
3467 ++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
3468 ++
3469 ++ setup_rfi_flush(type, enable);
3470 + }
3471 +
3472 + static void __init pnv_setup_arch(void)
3473 +@@ -91,6 +130,7 @@ static void __init pnv_setup_arch(void)
3474 + set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
3475 +
3476 + pnv_setup_rfi_flush();
3477 ++ setup_stf_barrier();
3478 +
3479 + /* Initialize SMP */
3480 + pnv_smp_init();
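
Both this powernv path and the pseries path further down reduce the
firmware-reported feature bits to a single enable decision for
setup_rfi_flush(): flush only when firmware favours security and at least one
of the PR/HV transitions needs an L1D flush. A freestanding sketch of that
reduction (the bit values are invented for illustration; the real ones live
in asm/security_features.h):

    #include <stdbool.h>
    #include <stdint.h>

    #define SEC_FTR_FAVOUR_SECURITY  (1ULL << 0)   /* illustrative values */
    #define SEC_FTR_L1D_FLUSH_PR     (1ULL << 1)
    #define SEC_FTR_L1D_FLUSH_HV     (1ULL << 2)

    static uint64_t powerpc_security_features;

    static bool security_ftr_enabled(uint64_t f)
    {
        return (powerpc_security_features & f) == f;
    }

    static bool rfi_flush_enable(void)
    {
        return security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
               (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
                security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
    }
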
3481 +diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
3482 +index f7042ad492ba..fbea7db043fa 100644
3483 +--- a/arch/powerpc/platforms/pseries/mobility.c
3484 ++++ b/arch/powerpc/platforms/pseries/mobility.c
3485 +@@ -348,6 +348,9 @@ void post_mobility_fixup(void)
3486 + printk(KERN_ERR "Post-mobility device tree update "
3487 + "failed: %d\n", rc);
3488 +
3489 ++ /* Possibly switch to a new RFI flush type */
3490 ++ pseries_setup_rfi_flush();
3491 ++
3492 + return;
3493 + }
3494 +
3495 +diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
3496 +index 1ae1d9f4dbe9..27cdcb69fd18 100644
3497 +--- a/arch/powerpc/platforms/pseries/pseries.h
3498 ++++ b/arch/powerpc/platforms/pseries/pseries.h
3499 +@@ -100,4 +100,6 @@ static inline unsigned long cmo_get_page_size(void)
3500 +
3501 + int dlpar_workqueue_init(void);
3502 +
3503 ++void pseries_setup_rfi_flush(void);
3504 ++
3505 + #endif /* _PSERIES_PSERIES_H */
3506 +diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
3507 +index ae4f596273b5..45f814041448 100644
3508 +--- a/arch/powerpc/platforms/pseries/setup.c
3509 ++++ b/arch/powerpc/platforms/pseries/setup.c
3510 +@@ -68,6 +68,7 @@
3511 + #include <asm/plpar_wrappers.h>
3512 + #include <asm/kexec.h>
3513 + #include <asm/isa-bridge.h>
3514 ++#include <asm/security_features.h>
3515 +
3516 + #include "pseries.h"
3517 +
3518 +@@ -459,35 +460,78 @@ static void __init find_and_init_phbs(void)
3519 + of_pci_check_probe_only();
3520 + }
3521 +
3522 +-static void pseries_setup_rfi_flush(void)
3523 ++static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
3524 ++{
3525 ++ /*
3526 ++ * The features below are disabled by default, so we instead look to see
3527 ++ * if firmware has *enabled* them, and set them if so.
3528 ++ */
3529 ++ if (result->character & H_CPU_CHAR_SPEC_BAR_ORI31)
3530 ++ security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);
3531 ++
3532 ++ if (result->character & H_CPU_CHAR_BCCTRL_SERIALISED)
3533 ++ security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);
3534 ++
3535 ++ if (result->character & H_CPU_CHAR_L1D_FLUSH_ORI30)
3536 ++ security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);
3537 ++
3538 ++ if (result->character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
3539 ++ security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);
3540 ++
3541 ++ if (result->character & H_CPU_CHAR_L1D_THREAD_PRIV)
3542 ++ security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);
3543 ++
3544 ++ if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED)
3545 ++ security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
3546 ++
3547 ++ /*
3548 ++ * The features below are enabled by default, so we instead look to see
3549 ++ * if firmware has *disabled* them, and clear them if so.
3550 ++ */
3551 ++ if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
3552 ++ security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
3553 ++
3554 ++ if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
3555 ++ security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
3556 ++
3557 ++ if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
3558 ++ security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
3559 ++}
3560 ++
3561 ++void pseries_setup_rfi_flush(void)
3562 + {
3563 + struct h_cpu_char_result result;
3564 + enum l1d_flush_type types;
3565 + bool enable;
3566 + long rc;
3567 +
3568 +- /* Enable by default */
3569 +- enable = true;
3570 ++ /*
3571 ++ * Set features to the defaults assumed by init_cpu_char_feature_flags()
3572 ++ * so it can set/clear again any features that might have changed after
3573 ++ * migration, and in case the hypercall fails and it is not even called.
3574 ++ */
3575 ++ powerpc_security_features = SEC_FTR_DEFAULT;
3576 +
3577 + rc = plpar_get_cpu_characteristics(&result);
3578 +- if (rc == H_SUCCESS) {
3579 +- types = L1D_FLUSH_NONE;
3580 ++ if (rc == H_SUCCESS)
3581 ++ init_cpu_char_feature_flags(&result);
3582 +
3583 +- if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
3584 +- types |= L1D_FLUSH_MTTRIG;
3585 +- if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
3586 +- types |= L1D_FLUSH_ORI;
3587 ++ /*
3588 ++ * We're the guest so this doesn't apply to us, clear it to simplify
3589 ++ * handling of it elsewhere.
3590 ++ */
3591 ++ security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);
3592 +
3593 +- /* Use fallback if nothing set in hcall */
3594 +- if (types == L1D_FLUSH_NONE)
3595 +- types = L1D_FLUSH_FALLBACK;
3596 ++ types = L1D_FLUSH_FALLBACK;
3597 +
3598 +- if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
3599 +- enable = false;
3600 +- } else {
3601 +- /* Default to fallback if case hcall is not available */
3602 +- types = L1D_FLUSH_FALLBACK;
3603 +- }
3604 ++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
3605 ++ types |= L1D_FLUSH_MTTRIG;
3606 ++
3607 ++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
3608 ++ types |= L1D_FLUSH_ORI;
3609 ++
3610 ++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
3611 ++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
3612 +
3613 + setup_rfi_flush(types, enable);
3614 + }
3615 +@@ -510,6 +554,7 @@ static void __init pSeries_setup_arch(void)
3616 + fwnmi_init();
3617 +
3618 + pseries_setup_rfi_flush();
3619 ++ setup_stf_barrier();
3620 +
3621 + /* By default, only probe PCI (can be overridden by rtas_pci) */
3622 + pci_add_flags(PCI_PROBE_ONLY);
3623 +diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
3624 +index ead3e2549ebf..205dec18d6b5 100644
3625 +--- a/arch/powerpc/sysdev/mpic.c
3626 ++++ b/arch/powerpc/sysdev/mpic.c
3627 +@@ -626,7 +626,7 @@ static inline u32 mpic_physmask(u32 cpumask)
3628 + int i;
3629 + u32 mask = 0;
3630 +
3631 +- for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1)
3632 ++ for (i = 0; i < min(32, NR_CPUS) && cpu_possible(i); ++i, cpumask >>= 1)
3633 + mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
3634 + return mask;
3635 + }
3636 +diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
3637 +index 2c8b325591cc..a5938fadd031 100644
3638 +--- a/arch/powerpc/xmon/xmon.c
3639 ++++ b/arch/powerpc/xmon/xmon.c
3640 +@@ -2348,6 +2348,8 @@ static void dump_one_paca(int cpu)
3641 + DUMP(p, slb_cache_ptr, "x");
3642 + for (i = 0; i < SLB_CACHE_ENTRIES; i++)
3643 + printf(" slb_cache[%d]: = 0x%016lx\n", i, p->slb_cache[i]);
3644 ++
3645 ++ DUMP(p, rfi_flush_fallback_area, "px");
3646 + #endif
3647 + DUMP(p, dscr_default, "llx");
3648 + #ifdef CONFIG_PPC_BOOK3E
3649 +diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
3650 +index eb7b530d1783..4f1f5fc8139d 100644
3651 +--- a/arch/s390/kvm/vsie.c
3652 ++++ b/arch/s390/kvm/vsie.c
3653 +@@ -590,7 +590,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
3654 +
3655 + gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
3656 + if (gpa && (scb_s->ecb & ECB_TE)) {
3657 +- if (!(gpa & ~0x1fffU)) {
3658 ++ if (!(gpa & ~0x1fffUL)) {
3659 + rc = set_validity_icpt(scb_s, 0x0080U);
3660 + goto unpin;
3661 + }
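
The one-character s390 fix above is a classic integer-width bug: ~0x1fffU is
a 32-bit mask, so when it meets the 64-bit gpa it zero-extends and silently
drops the upper 32 bits of the address from the check. A small userspace
demonstration, assuming an LP64 target where unsigned long is 64 bits:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t gpa = 0x100000000ULL;       /* 4 GiB: only high bits set */

        /* 32-bit mask zero-extends to 0x00000000FFFFE000, so the high
         * half of gpa is ignored and the address wrongly looks like it
         * lies below 0x2000. */
        printf("%d\n", !(gpa & ~0x1fffU));   /* prints 1 */

        /* 64-bit mask keeps every bit in play. */
        printf("%d\n", !(gpa & ~0x1fffUL));  /* prints 0 */
        return 0;
    }
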
3662 +diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
3663 +index c001f782c5f1..28cc61216b64 100644
3664 +--- a/arch/sh/kernel/entry-common.S
3665 ++++ b/arch/sh/kernel/entry-common.S
3666 +@@ -255,7 +255,7 @@ debug_trap:
3667 + mov.l @r8, r8
3668 + jsr @r8
3669 + nop
3670 +- bra __restore_all
3671 ++ bra ret_from_exception
3672 + nop
3673 + CFI_ENDPROC
3674 +
3675 +diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
3676 +index abad97edf736..28db058d471b 100644
3677 +--- a/arch/sparc/include/asm/atomic_64.h
3678 ++++ b/arch/sparc/include/asm/atomic_64.h
3679 +@@ -83,7 +83,11 @@ ATOMIC_OPS(xor)
3680 + #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
3681 +
3682 + #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3683 +-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3684 ++
3685 ++static inline int atomic_xchg(atomic_t *v, int new)
3686 ++{
3687 ++ return xchg(&v->counter, new);
3688 ++}
3689 +
3690 + static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3691 + {
3692 +diff --git a/arch/sparc/include/asm/bug.h b/arch/sparc/include/asm/bug.h
3693 +index 6f17528356b2..ea53e418f6c0 100644
3694 +--- a/arch/sparc/include/asm/bug.h
3695 ++++ b/arch/sparc/include/asm/bug.h
3696 +@@ -9,10 +9,14 @@
3697 + void do_BUG(const char *file, int line);
3698 + #define BUG() do { \
3699 + do_BUG(__FILE__, __LINE__); \
3700 ++ barrier_before_unreachable(); \
3701 + __builtin_trap(); \
3702 + } while (0)
3703 + #else
3704 +-#define BUG() __builtin_trap()
3705 ++#define BUG() do { \
3706 ++ barrier_before_unreachable(); \
3707 ++ __builtin_trap(); \
3708 ++} while (0)
3709 + #endif
3710 +
3711 + #define HAVE_ARCH_BUG
3712 +diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
3713 +index 011a47b4587c..717c9219d00e 100644
3714 +--- a/arch/x86/events/core.c
3715 ++++ b/arch/x86/events/core.c
3716 +@@ -1162,16 +1162,13 @@ int x86_perf_event_set_period(struct perf_event *event)
3717 +
3718 + per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
3719 +
3720 +- if (!(hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) ||
3721 +- local64_read(&hwc->prev_count) != (u64)-left) {
3722 +- /*
3723 +- * The hw event starts counting from this event offset,
3724 +- * mark it to be able to extra future deltas:
3725 +- */
3726 +- local64_set(&hwc->prev_count, (u64)-left);
3727 ++ /*
3728 ++ * The hw event starts counting from this event offset,
3729 ++	 * mark it to be able to extract future deltas:
3730 ++ */
3731 ++ local64_set(&hwc->prev_count, (u64)-left);
3732 +
3733 +- wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
3734 +- }
3735 ++ wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
3736 +
3737 + /*
3738 + * Due to erratum on certan cpu we need
3739 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
3740 +index 9b18a227fff7..6965ee8c4b8a 100644
3741 +--- a/arch/x86/events/intel/core.c
3742 ++++ b/arch/x86/events/intel/core.c
3743 +@@ -2201,9 +2201,15 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
3744 + int bit, loops;
3745 + u64 status;
3746 + int handled;
3747 ++ int pmu_enabled;
3748 +
3749 + cpuc = this_cpu_ptr(&cpu_hw_events);
3750 +
3751 ++ /*
3752 ++ * Save the PMU state.
3753 ++ * It needs to be restored when leaving the handler.
3754 ++ */
3755 ++ pmu_enabled = cpuc->enabled;
3756 + /*
3757 + * No known reason to not always do late ACK,
3758 + * but just in case do it opt-in.
3759 +@@ -2211,6 +2217,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
3760 + if (!x86_pmu.late_ack)
3761 + apic_write(APIC_LVTPC, APIC_DM_NMI);
3762 + intel_bts_disable_local();
3763 ++ cpuc->enabled = 0;
3764 + __intel_pmu_disable_all();
3765 + handled = intel_pmu_drain_bts_buffer();
3766 + handled += intel_bts_interrupt();
3767 +@@ -2320,7 +2327,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
3768 +
3769 + done:
3770 + /* Only restore PMU state when it's active. See x86_pmu_disable(). */
3771 +- if (cpuc->enabled)
3772 ++ cpuc->enabled = pmu_enabled;
3773 ++ if (pmu_enabled)
3774 + __intel_pmu_enable_all(0, true);
3775 + intel_bts_enable_local();
3776 +
3777 +@@ -3188,7 +3196,7 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3778 + * Therefore the effective (average) period matches the requested period,
3779 + * despite coarser hardware granularity.
3780 + */
3781 +-static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
3782 ++static u64 bdw_limit_period(struct perf_event *event, u64 left)
3783 + {
3784 + if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
3785 + X86_CONFIG(.event=0xc0, .umask=0x01)) {
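
The intel_pmu_handle_irq() hunk above is a save/flag/restore shape worth
calling out: record the software view of the PMU state before forcing it off,
then restore the recorded value rather than trusting the flag the handler
itself just clobbered. A generic sketch under invented names (the stubs stand
in for the real global-control MSR writes):

    static int pmu_enabled_flag;   /* software mirror of the PMU state */

    static void pmu_disable_all(void) { /* would clear the control MSR */ }
    static void pmu_enable_all(void)  { /* would restore the control MSR */ }

    static int handle_pmu_irq(void)
    {
        int handled = 0;
        int was_enabled = pmu_enabled_flag;  /* save before clobbering */

        pmu_enabled_flag = 0;                /* keep the mirror honest */
        pmu_disable_all();

        /* ... drain counters, ack the interrupt, bump handled ... */

        pmu_enabled_flag = was_enabled;      /* restore the mirror */
        if (was_enabled)                     /* re-enable only if it was on */
            pmu_enable_all();
        return handled;
    }
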
3786 +diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
3787 +index 8156e47da7ba..10b39d44981c 100644
3788 +--- a/arch/x86/events/intel/ds.c
3789 ++++ b/arch/x86/events/intel/ds.c
3790 +@@ -1150,6 +1150,7 @@ static void setup_pebs_sample_data(struct perf_event *event,
3791 + if (pebs == NULL)
3792 + return;
3793 +
3794 ++ regs->flags &= ~PERF_EFLAGS_EXACT;
3795 + sample_type = event->attr.sample_type;
3796 + dsrc = sample_type & PERF_SAMPLE_DATA_SRC;
3797 +
3798 +@@ -1194,7 +1195,6 @@ static void setup_pebs_sample_data(struct perf_event *event,
3799 + */
3800 + *regs = *iregs;
3801 + regs->flags = pebs->flags;
3802 +- set_linear_ip(regs, pebs->ip);
3803 +
3804 + if (sample_type & PERF_SAMPLE_REGS_INTR) {
3805 + regs->ax = pebs->ax;
3806 +@@ -1230,13 +1230,22 @@ static void setup_pebs_sample_data(struct perf_event *event,
3807 + #endif
3808 + }
3809 +
3810 +- if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) {
3811 +- regs->ip = pebs->real_ip;
3812 +- regs->flags |= PERF_EFLAGS_EXACT;
3813 +- } else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(regs))
3814 +- regs->flags |= PERF_EFLAGS_EXACT;
3815 +- else
3816 +- regs->flags &= ~PERF_EFLAGS_EXACT;
3817 ++ if (event->attr.precise_ip > 1) {
3818 ++ /* Haswell and later have the eventing IP, so use it: */
3819 ++ if (x86_pmu.intel_cap.pebs_format >= 2) {
3820 ++ set_linear_ip(regs, pebs->real_ip);
3821 ++ regs->flags |= PERF_EFLAGS_EXACT;
3822 ++ } else {
3823 ++ /* Otherwise use PEBS off-by-1 IP: */
3824 ++ set_linear_ip(regs, pebs->ip);
3825 ++
3826 ++ /* ... and try to fix it up using the LBR entries: */
3827 ++ if (intel_pmu_pebs_fixup_ip(regs))
3828 ++ regs->flags |= PERF_EFLAGS_EXACT;
3829 ++ }
3830 ++ } else
3831 ++ set_linear_ip(regs, pebs->ip);
3832 ++
3833 +
3834 + if ((sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) &&
3835 + x86_pmu.intel_cap.pebs_format >= 1)
3836 +@@ -1303,17 +1312,84 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit)
3837 + return NULL;
3838 + }
3839 +
3840 ++/*
3841 ++ * Special variant of intel_pmu_save_and_restart() for auto-reload.
3842 ++ */
3843 ++static int
3844 ++intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
3845 ++{
3846 ++ struct hw_perf_event *hwc = &event->hw;
3847 ++ int shift = 64 - x86_pmu.cntval_bits;
3848 ++ u64 period = hwc->sample_period;
3849 ++ u64 prev_raw_count, new_raw_count;
3850 ++ s64 new, old;
3851 ++
3852 ++ WARN_ON(!period);
3853 ++
3854 ++ /*
3855 ++ * drain_pebs() only happens when the PMU is disabled.
3856 ++ */
3857 ++ WARN_ON(this_cpu_read(cpu_hw_events.enabled));
3858 ++
3859 ++ prev_raw_count = local64_read(&hwc->prev_count);
3860 ++ rdpmcl(hwc->event_base_rdpmc, new_raw_count);
3861 ++ local64_set(&hwc->prev_count, new_raw_count);
3862 ++
3863 ++ /*
3864 ++ * Since the counter increments a negative counter value and
3865 ++ * overflows on the sign switch, giving the interval:
3866 ++ *
3867 ++ * [-period, 0]
3868 ++ *
3869 ++	 * the difference between two consecutive reads is:
3870 ++ *
3871 ++ * A) value2 - value1;
3872 ++ * when no overflows have happened in between,
3873 ++ *
3874 ++ * B) (0 - value1) + (value2 - (-period));
3875 ++ * when one overflow happened in between,
3876 ++ *
3877 ++ * C) (0 - value1) + (n - 1) * (period) + (value2 - (-period));
3878 ++ * when @n overflows happened in between.
3879 ++ *
3880 ++ * Here A) is the obvious difference, B) is the extension to the
3881 ++ * discrete interval, where the first term is to the top of the
3882 ++ * interval and the second term is from the bottom of the next
3883 ++ * interval and C) the extension to multiple intervals, where the
3884 ++ * middle term is the whole intervals covered.
3885 ++ *
3886 ++ * An equivalent of C, by reduction, is:
3887 ++ *
3888 ++ * value2 - value1 + n * period
3889 ++ */
3890 ++ new = ((s64)(new_raw_count << shift) >> shift);
3891 ++ old = ((s64)(prev_raw_count << shift) >> shift);
3892 ++ local64_add(new - old + count * period, &event->count);
3893 ++
3894 ++ perf_event_update_userpage(event);
3895 ++
3896 ++ return 0;
3897 ++}
3898 ++
3899 + static void __intel_pmu_pebs_event(struct perf_event *event,
3900 + struct pt_regs *iregs,
3901 + void *base, void *top,
3902 + int bit, int count)
3903 + {
3904 ++ struct hw_perf_event *hwc = &event->hw;
3905 + struct perf_sample_data data;
3906 + struct pt_regs regs;
3907 + void *at = get_next_pebs_record_by_bit(base, top, bit);
3908 +
3909 +- if (!intel_pmu_save_and_restart(event) &&
3910 +- !(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD))
3911 ++ if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
3912 ++ /*
3913 ++ * Now, auto-reload is only enabled in fixed period mode.
3914 ++ * The reload value is always hwc->sample_period.
3915 ++		 * This may need to change if auto-reload is later
3916 ++		 * enabled in freq mode.
3917 ++ */
3918 ++ intel_pmu_save_and_restart_reload(event, count);
3919 ++ } else if (!intel_pmu_save_and_restart(event))
3920 + return;
3921 +
3922 + while (count > 1) {
3923 +@@ -1365,8 +1441,11 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
3924 + return;
3925 +
3926 + n = top - at;
3927 +- if (n <= 0)
3928 ++ if (n <= 0) {
3929 ++ if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
3930 ++ intel_pmu_save_and_restart_reload(event, 0);
3931 + return;
3932 ++ }
3933 +
3934 + __intel_pmu_pebs_event(event, iregs, at, top, 0, n);
3935 + }
3936 +@@ -1389,8 +1468,22 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
3937 +
3938 + ds->pebs_index = ds->pebs_buffer_base;
3939 +
3940 +- if (unlikely(base >= top))
3941 ++ if (unlikely(base >= top)) {
3942 ++ /*
3943 ++		 * drain_pebs() can be called twice in a short period
3944 ++		 * for an auto-reload event in pmu::read(), with no
3945 ++		 * overflows having happened in between. In that case
3946 ++		 * intel_pmu_save_and_restart_reload() must still be
3947 ++		 * called to update event->count.
3948 ++ */
3949 ++ for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled,
3950 ++ x86_pmu.max_pebs_events) {
3951 ++ event = cpuc->events[bit];
3952 ++ if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
3953 ++ intel_pmu_save_and_restart_reload(event, 0);
3954 ++ }
3955 + return;
3956 ++ }
3957 +
3958 + for (at = base; at < top; at += x86_pmu.pebs_record_size) {
3959 + struct pebs_record_nhm *p = at;
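
The interval arithmetic in intel_pmu_save_and_restart_reload() reduces to
value2 - value1 + n * period. A quick numerical check of that identity
(numbers chosen arbitrarily: period 100, two overflows between the reads):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t period = 100;
        int64_t value1 = -30;     /* counter runs in [-period, 0] */
        int64_t value2 = -80;
        int64_t n = 2;            /* overflows between the two reads */

        /* Case C from the comment, written out term by term: */
        int64_t events = (0 - value1)            /* top of interval 1 */
                       + (n - 1) * period        /* whole intervals   */
                       + (value2 - (-period));   /* bottom of last    */

        /* Reduced form used by the code: */
        int64_t reduced = value2 - value1 + n * period;

        printf("%lld == %lld\n", (long long)events,
               (long long)reduced);              /* 150 == 150 */
        return 0;
    }
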
3960 +diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
3961 +index 8e4ea143ed96..dc4728eccfd8 100644
3962 +--- a/arch/x86/events/perf_event.h
3963 ++++ b/arch/x86/events/perf_event.h
3964 +@@ -556,7 +556,7 @@ struct x86_pmu {
3965 + struct x86_pmu_quirk *quirks;
3966 + int perfctr_second_write;
3967 + bool late_ack;
3968 +- unsigned (*limit_period)(struct perf_event *event, unsigned l);
3969 ++ u64 (*limit_period)(struct perf_event *event, u64 l);
3970 +
3971 + /*
3972 + * sysfs attrs
3973 +diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
3974 +index cf5961ca8677..4cd6a3b71824 100644
3975 +--- a/arch/x86/include/asm/alternative.h
3976 ++++ b/arch/x86/include/asm/alternative.h
3977 +@@ -218,13 +218,11 @@ static inline int alternatives_text_reserved(void *start, void *end)
3978 + */
3979 + #define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \
3980 + output, input...) \
3981 +-{ \
3982 + asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
3983 + "call %P[new2]", feature2) \
3984 + : output, ASM_CALL_CONSTRAINT \
3985 + : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
3986 +- [new2] "i" (newfunc2), ## input); \
3987 +-}
3988 ++ [new2] "i" (newfunc2), ## input)
3989 +
3990 + /*
3991 + * use this macro(s) if you need more than one output parameter
3992 +diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
3993 +index 704f31315dde..875ca99b82ee 100644
3994 +--- a/arch/x86/include/asm/tlbflush.h
3995 ++++ b/arch/x86/include/asm/tlbflush.h
3996 +@@ -131,7 +131,12 @@ static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
3997 + static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
3998 + {
3999 + VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
4000 +- VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID));
4001 ++ /*
4002 ++ * Use boot_cpu_has() instead of this_cpu_has() as this function
4003 ++ * might be called during early boot. This should work even after
4004 ++	 * boot because all CPUs have the same capabilities:
4005 ++ */
4006 ++ VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID));
4007 + return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
4008 + }
4009 +
4010 +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
4011 +index 5942aa5f569b..ebdcc368a2d3 100644
4012 +--- a/arch/x86/kernel/apic/apic.c
4013 ++++ b/arch/x86/kernel/apic/apic.c
4014 +@@ -1481,7 +1481,7 @@ void setup_local_APIC(void)
4015 + * TODO: set up through-local-APIC from through-I/O-APIC? --macro
4016 + */
4017 + value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
4018 +- if (!cpu && (pic_mode || !value)) {
4019 ++ if (!cpu && (pic_mode || !value || skip_ioapic_setup)) {
4020 + value = APIC_DM_EXTINT;
4021 + apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
4022 + } else {
4023 +diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
4024 +index 7be35b600299..2dae1b3c42fc 100644
4025 +--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
4026 ++++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
4027 +@@ -1657,6 +1657,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
4028 + if (ret < 0)
4029 + goto out_common_fail;
4030 + closid = ret;
4031 ++ ret = 0;
4032 +
4033 + rdtgrp->closid = closid;
4034 + list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
4035 +diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
4036 +index 76e07698e6d1..7fa0855e4b9a 100644
4037 +--- a/arch/x86/kernel/devicetree.c
4038 ++++ b/arch/x86/kernel/devicetree.c
4039 +@@ -12,6 +12,7 @@
4040 + #include <linux/of_address.h>
4041 + #include <linux/of_platform.h>
4042 + #include <linux/of_irq.h>
4043 ++#include <linux/libfdt.h>
4044 + #include <linux/slab.h>
4045 + #include <linux/pci.h>
4046 + #include <linux/of_pci.h>
4047 +@@ -200,19 +201,22 @@ static struct of_ioapic_type of_ioapic_type[] =
4048 + static int dt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
4049 + unsigned int nr_irqs, void *arg)
4050 + {
4051 +- struct of_phandle_args *irq_data = (void *)arg;
4052 ++ struct irq_fwspec *fwspec = (struct irq_fwspec *)arg;
4053 + struct of_ioapic_type *it;
4054 + struct irq_alloc_info tmp;
4055 ++ int type_index;
4056 +
4057 +- if (WARN_ON(irq_data->args_count < 2))
4058 ++ if (WARN_ON(fwspec->param_count < 2))
4059 + return -EINVAL;
4060 +- if (irq_data->args[1] >= ARRAY_SIZE(of_ioapic_type))
4061 ++
4062 ++ type_index = fwspec->param[1];
4063 ++ if (type_index >= ARRAY_SIZE(of_ioapic_type))
4064 + return -EINVAL;
4065 +
4066 +- it = &of_ioapic_type[irq_data->args[1]];
4067 ++ it = &of_ioapic_type[type_index];
4068 + ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->trigger, it->polarity);
4069 + tmp.ioapic_id = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain));
4070 +- tmp.ioapic_pin = irq_data->args[0];
4071 ++ tmp.ioapic_pin = fwspec->param[0];
4072 +
4073 + return mp_irqdomain_alloc(domain, virq, nr_irqs, &tmp);
4074 + }
4075 +@@ -276,14 +280,15 @@ static void __init x86_flattree_get_config(void)
4076 +
4077 + map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), (u64)128);
4078 +
4079 +- initial_boot_params = dt = early_memremap(initial_dtb, map_len);
4080 +- size = of_get_flat_dt_size();
4081 ++ dt = early_memremap(initial_dtb, map_len);
4082 ++ size = fdt_totalsize(dt);
4083 + if (map_len < size) {
4084 + early_memunmap(dt, map_len);
4085 +- initial_boot_params = dt = early_memremap(initial_dtb, size);
4086 ++ dt = early_memremap(initial_dtb, size);
4087 + map_len = size;
4088 + }
4089 +
4090 ++ early_init_dt_verify(dt);
4091 + unflatten_and_copy_device_tree();
4092 + early_memunmap(dt, map_len);
4093 + }
4094 +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
4095 +index 4a96aa004390..344d3c160f8d 100644
4096 +--- a/arch/x86/kernel/smpboot.c
4097 ++++ b/arch/x86/kernel/smpboot.c
4098 +@@ -1521,6 +1521,7 @@ static void remove_siblinginfo(int cpu)
4099 + cpumask_clear(topology_core_cpumask(cpu));
4100 + c->phys_proc_id = 0;
4101 + c->cpu_core_id = 0;
4102 ++ c->booted_cores = 0;
4103 + cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
4104 + recompute_smt_state();
4105 + }
4106 +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
4107 +index d67e3b31f3db..d1f5c744142b 100644
4108 +--- a/arch/x86/kvm/cpuid.c
4109 ++++ b/arch/x86/kvm/cpuid.c
4110 +@@ -394,8 +394,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
4111 +
4112 + /* cpuid 7.0.edx*/
4113 + const u32 kvm_cpuid_7_0_edx_x86_features =
4114 +- F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SSBD) |
4115 +- F(ARCH_CAPABILITIES);
4116 ++ F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
4117 ++ F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);
4118 +
4119 + /* all calls to cpuid_count() should be made on the same cpu */
4120 + get_cpu();
4121 +@@ -481,6 +481,11 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
4122 + entry->ecx &= ~F(PKU);
4123 + entry->edx &= kvm_cpuid_7_0_edx_x86_features;
4124 + cpuid_mask(&entry->edx, CPUID_7_EDX);
4125 ++ /*
4126 ++ * We emulate ARCH_CAPABILITIES in software even
4127 ++ * if the host doesn't support it.
4128 ++ */
4129 ++ entry->edx |= F(ARCH_CAPABILITIES);
4130 + } else {
4131 + entry->ebx = 0;
4132 + entry->ecx = 0;
4133 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
4134 +index ab8993fe58cc..6d0fbff71d7a 100644
4135 +--- a/arch/x86/kvm/lapic.c
4136 ++++ b/arch/x86/kvm/lapic.c
4137 +@@ -321,8 +321,16 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu)
4138 + if (!lapic_in_kernel(vcpu))
4139 + return;
4140 +
4141 ++ /*
4142 ++	 * KVM's in-kernel IOAPIC emulates the 82093AA, which has no EOI
4143 ++	 * register. Some buggy OSes (e.g. Windows with the Hyper-V role)
4144 ++	 * disable EOI broadcast in the lapic without checking the IOAPIC
4145 ++	 * version first, so level-triggered interrupts never get EOIed in
4146 ++	 * the IOAPIC.
4147 ++ */
4148 + feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
4149 +- if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
4150 ++ if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))) &&
4151 ++ !ioapic_in_kernel(vcpu->kvm))
4152 + v |= APIC_LVR_DIRECTED_EOI;
4153 + kvm_lapic_set_reg(apic, APIC_LVR, v);
4154 + }
4155 +@@ -1467,11 +1475,23 @@ static bool set_target_expiration(struct kvm_lapic *apic)
4156 +
4157 + static void advance_periodic_target_expiration(struct kvm_lapic *apic)
4158 + {
4159 +- apic->lapic_timer.tscdeadline +=
4160 +- nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
4161 ++ ktime_t now = ktime_get();
4162 ++ u64 tscl = rdtsc();
4163 ++ ktime_t delta;
4164 ++
4165 ++ /*
4166 ++ * Synchronize both deadlines to the same time source or
4167 ++ * differences in the periods (caused by differences in the
4168 ++ * underlying clocks or numerical approximation errors) will
4169 ++ * cause the two to drift apart over time as the errors
4170 ++ * accumulate.
4171 ++ */
4172 + apic->lapic_timer.target_expiration =
4173 + ktime_add_ns(apic->lapic_timer.target_expiration,
4174 + apic->lapic_timer.period);
4175 ++ delta = ktime_sub(apic->lapic_timer.target_expiration, now);
4176 ++ apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
4177 ++ nsec_to_cycles(apic->vcpu, delta);
4178 + }
4179 +
4180 + static void start_sw_period(struct kvm_lapic *apic)
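
The lapic change above recomputes the TSC deadline from the hrtimer target
each period instead of adding a precomputed TSC increment, so rounding error
between the two time sources cannot accumulate. A sketch of the two
approaches, with a fixed ns-to-TSC ratio standing in for the per-vcpu scaled
conversion done by kvm_read_l1_tsc()/nsec_to_cycles():

    #include <stdint.h>

    static uint64_t ns_to_tsc(uint64_t ns) { return ns * 3; } /* illustrative */

    struct ptimer {
        uint64_t target_ns;      /* hrtimer view of the next expiry */
        uint64_t tscdeadline;    /* TSC view of the same instant    */
        uint64_t period_ns;
    };

    /* Drift-prone: each clock advances by its own notion of the period,
     * so any conversion error compounds every cycle. */
    static void advance_old(struct ptimer *t)
    {
        t->target_ns   += t->period_ns;
        t->tscdeadline += ns_to_tsc(t->period_ns);
    }

    /* Drift-free (what the patch does): advance one time source, then
     * derive the other from the distance to "now". */
    static void advance_new(struct ptimer *t, uint64_t now_ns,
                            uint64_t now_tsc)
    {
        t->target_ns  += t->period_ns;
        t->tscdeadline = now_tsc + ns_to_tsc(t->target_ns - now_ns);
    }
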
4181 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
4182 +index 2e63edf8312c..4c88572d2b81 100644
4183 +--- a/arch/x86/kvm/vmx.c
4184 ++++ b/arch/x86/kvm/vmx.c
4185 +@@ -2583,6 +2583,8 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu)
4186 + return;
4187 + }
4188 +
4189 ++ WARN_ON_ONCE(vmx->emulation_required);
4190 ++
4191 + if (kvm_exception_is_soft(nr)) {
4192 + vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
4193 + vmx->vcpu.arch.event_exit_inst_len);
4194 +@@ -6829,12 +6831,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
4195 + goto out;
4196 + }
4197 +
4198 +- if (err != EMULATE_DONE) {
4199 +- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4200 +- vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
4201 +- vcpu->run->internal.ndata = 0;
4202 +- return 0;
4203 +- }
4204 ++ if (err != EMULATE_DONE)
4205 ++ goto emulation_error;
4206 ++
4207 ++ if (vmx->emulation_required && !vmx->rmode.vm86_active &&
4208 ++ vcpu->arch.exception.pending)
4209 ++ goto emulation_error;
4210 +
4211 + if (vcpu->arch.halt_request) {
4212 + vcpu->arch.halt_request = 0;
4213 +@@ -6850,6 +6852,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
4214 +
4215 + out:
4216 + return ret;
4217 ++
4218 ++emulation_error:
4219 ++ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4220 ++ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
4221 ++ vcpu->run->internal.ndata = 0;
4222 ++ return 0;
4223 + }
4224 +
4225 + static int __grow_ple_window(int val)
4226 +@@ -11174,7 +11182,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
4227 + if (ret)
4228 + return ret;
4229 +
4230 +- if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
4231 ++ /*
4232 ++ * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken
4233 ++ * by event injection, halt vcpu.
4234 ++ */
4235 ++ if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
4236 ++ !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK))
4237 + return kvm_vcpu_halt(vcpu);
4238 +
4239 + vmx->nested.nested_run_pending = 1;
4240 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
4241 +index 649f476039de..adac01d0181a 100644
4242 +--- a/arch/x86/kvm/x86.c
4243 ++++ b/arch/x86/kvm/x86.c
4244 +@@ -7505,6 +7505,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4245 + {
4246 + struct msr_data apic_base_msr;
4247 + int mmu_reset_needed = 0;
4248 ++ int cpuid_update_needed = 0;
4249 + int pending_vec, max_bits, idx;
4250 + struct desc_ptr dt;
4251 +
4252 +@@ -7542,8 +7543,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4253 + vcpu->arch.cr0 = sregs->cr0;
4254 +
4255 + mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
4256 ++ cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) &
4257 ++ (X86_CR4_OSXSAVE | X86_CR4_PKE));
4258 + kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
4259 +- if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE))
4260 ++ if (cpuid_update_needed)
4261 + kvm_update_cpuid(vcpu);
4262 +
4263 + idx = srcu_read_lock(&vcpu->kvm->srcu);
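
The sregs hunk above is the usual changed-bits idiom: XOR the old and new
register values and mask for the interesting bits, so the relatively
expensive kvm_update_cpuid() only runs on an actual OSXSAVE/PKE transition
rather than whenever either bit merely happens to be set. In miniature:

    #include <stdbool.h>
    #include <stdint.h>

    #define CR4_OSXSAVE (1ULL << 18)
    #define CR4_PKE     (1ULL << 22)

    /* True only when one of the watched bits actually flips. */
    static bool cpuid_update_needed(uint64_t old_cr4, uint64_t new_cr4)
    {
        return ((old_cr4 ^ new_cr4) & (CR4_OSXSAVE | CR4_PKE)) != 0;
    }

With the old check, setting sregs with OSXSAVE already on re-ran the update
every time; here cpuid_update_needed(CR4_OSXSAVE, CR4_OSXSAVE) is false.
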
4264 +diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
4265 +index 3ed9a08885c5..4085897fef64 100644
4266 +--- a/arch/x86/mm/pageattr.c
4267 ++++ b/arch/x86/mm/pageattr.c
4268 +@@ -298,9 +298,11 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
4269 +
4270 + /*
4271 + * The .rodata section needs to be read-only. Using the pfn
4272 +- * catches all aliases.
4273 ++ * catches all aliases. This also includes __ro_after_init,
4274 ++ * so do not enforce until kernel_set_to_readonly is true.
4275 + */
4276 +- if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
4277 ++ if (kernel_set_to_readonly &&
4278 ++ within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
4279 + __pa_symbol(__end_rodata) >> PAGE_SHIFT))
4280 + pgprot_val(forbidden) |= _PAGE_RW;
4281 +
4282 +diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
4283 +index 34cda7e0551b..c03c85e4fb6a 100644
4284 +--- a/arch/x86/mm/pgtable.c
4285 ++++ b/arch/x86/mm/pgtable.c
4286 +@@ -1,6 +1,7 @@
4287 + // SPDX-License-Identifier: GPL-2.0
4288 + #include <linux/mm.h>
4289 + #include <linux/gfp.h>
4290 ++#include <linux/hugetlb.h>
4291 + #include <asm/pgalloc.h>
4292 + #include <asm/pgtable.h>
4293 + #include <asm/tlb.h>
4294 +@@ -636,6 +637,10 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
4295 + (mtrr != MTRR_TYPE_WRBACK))
4296 + return 0;
4297 +
4298 ++	/* Bail out if we are on a populated non-leaf entry: */
4299 ++ if (pud_present(*pud) && !pud_huge(*pud))
4300 ++ return 0;
4301 ++
4302 + prot = pgprot_4k_2_large(prot);
4303 +
4304 + set_pte((pte_t *)pud, pfn_pte(
4305 +@@ -664,6 +669,10 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
4306 + return 0;
4307 + }
4308 +
4309 ++	/* Bail out if we are on a populated non-leaf entry: */
4310 ++ if (pmd_present(*pmd) && !pmd_huge(*pmd))
4311 ++ return 0;
4312 ++
4313 + prot = pgprot_4k_2_large(prot);
4314 +
4315 + set_pte((pte_t *)pmd, pfn_pte(
4316 +diff --git a/block/partition-generic.c b/block/partition-generic.c
4317 +index 91622db9aedf..08dabcd8b6ae 100644
4318 +--- a/block/partition-generic.c
4319 ++++ b/block/partition-generic.c
4320 +@@ -51,6 +51,12 @@ const char *bdevname(struct block_device *bdev, char *buf)
4321 +
4322 + EXPORT_SYMBOL(bdevname);
4323 +
4324 ++const char *bio_devname(struct bio *bio, char *buf)
4325 ++{
4326 ++ return disk_name(bio->bi_disk, bio->bi_partno, buf);
4327 ++}
4328 ++EXPORT_SYMBOL(bio_devname);
4329 ++
4330 + /*
4331 + * There's very little reason to use this, you should really
4332 + * have a struct block_device just about everywhere and use
4333 +diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
4334 +index f6a009d88a33..52e5ea3b8e40 100644
4335 +--- a/crypto/asymmetric_keys/pkcs7_trust.c
4336 ++++ b/crypto/asymmetric_keys/pkcs7_trust.c
4337 +@@ -106,6 +106,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
4338 + pr_devel("sinfo %u: Direct signer is key %x\n",
4339 + sinfo->index, key_serial(key));
4340 + x509 = NULL;
4341 ++ sig = sinfo->sig;
4342 + goto matched;
4343 + }
4344 + if (PTR_ERR(key) != -ENOKEY)
4345 +diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
4346 +index 754431031282..552c1f725b6c 100644
4347 +--- a/drivers/acpi/acpi_pad.c
4348 ++++ b/drivers/acpi/acpi_pad.c
4349 +@@ -110,6 +110,7 @@ static void round_robin_cpu(unsigned int tsk_index)
4350 + cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
4351 + if (cpumask_empty(tmp)) {
4352 + mutex_unlock(&round_robin_lock);
4353 ++ free_cpumask_var(tmp);
4354 + return;
4355 + }
4356 + for_each_cpu(cpu, tmp) {
4357 +@@ -127,6 +128,8 @@ static void round_robin_cpu(unsigned int tsk_index)
4358 + mutex_unlock(&round_robin_lock);
4359 +
4360 + set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
4361 ++
4362 ++ free_cpumask_var(tmp);
4363 + }
4364 +
4365 + static void exit_round_robin(unsigned int tsk_index)
4366 +diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
4367 +index d3b6b314fa50..37b0b4c04220 100644
4368 +--- a/drivers/acpi/acpica/evevent.c
4369 ++++ b/drivers/acpi/acpica/evevent.c
4370 +@@ -204,6 +204,7 @@ u32 acpi_ev_fixed_event_detect(void)
4371 + u32 fixed_status;
4372 + u32 fixed_enable;
4373 + u32 i;
4374 ++ acpi_status status;
4375 +
4376 + ACPI_FUNCTION_NAME(ev_fixed_event_detect);
4377 +
4378 +@@ -211,8 +212,12 @@ u32 acpi_ev_fixed_event_detect(void)
4379 + * Read the fixed feature status and enable registers, as all the cases
4380 + * depend on their values. Ignore errors here.
4381 + */
4382 +- (void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
4383 +- (void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
4384 ++ status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
4385 ++ status |=
4386 ++ acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
4387 ++ if (ACPI_FAILURE(status)) {
4388 ++ return (int_status);
4389 ++ }
4390 +
4391 + ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
4392 + "Fixed Event Block: Enable %08X Status %08X\n",
4393 +diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
4394 +index d22167cbd0ca..f13d3cfa74e1 100644
4395 +--- a/drivers/acpi/acpica/nseval.c
4396 ++++ b/drivers/acpi/acpica/nseval.c
4397 +@@ -308,6 +308,14 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
4398 + /* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */
4399 +
4400 + status = AE_OK;
4401 ++ } else if (ACPI_FAILURE(status)) {
4402 ++
4403 ++ /* If return_object exists, delete it */
4404 ++
4405 ++ if (info->return_object) {
4406 ++ acpi_ut_remove_reference(info->return_object);
4407 ++ info->return_object = NULL;
4408 ++ }
4409 + }
4410 +
4411 + ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
4412 +diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
4413 +index eb9dfaca555f..11ce4e5d10e2 100644
4414 +--- a/drivers/acpi/acpica/psargs.c
4415 ++++ b/drivers/acpi/acpica/psargs.c
4416 +@@ -890,6 +890,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
4417 + ACPI_POSSIBLE_METHOD_CALL);
4418 +
4419 + if (arg->common.aml_opcode == AML_INT_METHODCALL_OP) {
4420 ++
4421 ++		/* Free method call op and corresponding namestring sub-object */
4422 ++
4423 ++ acpi_ps_free_op(arg->common.value.arg);
4424 + acpi_ps_free_op(arg);
4425 + arg = NULL;
4426 + walk_state->arg_count = 1;
4427 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
4428 +index 473f150d6b22..71008dbabe98 100644
4429 +--- a/drivers/ata/libata-core.c
4430 ++++ b/drivers/ata/libata-core.c
4431 +@@ -4483,6 +4483,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4432 + /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4433 + { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
4434 +
4435 ++ /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on
4436 ++ SD7SN6S256G and SD8SN8U256G */
4437 ++ { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, },
4438 ++
4439 + /* devices which puke on READ_NATIVE_MAX */
4440 + { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4441 + { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4442 +@@ -4543,6 +4547,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4443 + { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
4444 +
4445 + /* devices that don't properly handle queued TRIM commands */
4446 ++ { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4447 ++ ATA_HORKAGE_ZERO_AFTER_TRIM, },
4448 + { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4449 + ATA_HORKAGE_ZERO_AFTER_TRIM, },
4450 + { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4451 +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
4452 +index 4ff69f508e95..6b0440a12c51 100644
4453 +--- a/drivers/ata/libata-scsi.c
4454 ++++ b/drivers/ata/libata-scsi.c
4455 +@@ -4287,7 +4287,7 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap,
4456 + #ifdef ATA_DEBUG
4457 + struct scsi_device *scsidev = cmd->device;
4458 +
4459 +- DPRINTK("CDB (%u:%d,%d,%d) %9ph\n",
4460 ++ DPRINTK("CDB (%u:%d,%d,%lld) %9ph\n",
4461 + ap->print_id,
4462 + scsidev->channel, scsidev->id, scsidev->lun,
4463 + cmd->cmnd);
4464 +diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
4465 +index efdadd153abe..8fd08023c0f5 100644
4466 +--- a/drivers/base/regmap/regmap.c
4467 ++++ b/drivers/base/regmap/regmap.c
4468 +@@ -98,7 +98,7 @@ bool regmap_cached(struct regmap *map, unsigned int reg)
4469 + int ret;
4470 + unsigned int val;
4471 +
4472 +- if (map->cache == REGCACHE_NONE)
4473 ++ if (map->cache_type == REGCACHE_NONE)
4474 + return false;
4475 +
4476 + if (!map->cache_ops)
4477 +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
4478 +index 5f2a4240a204..86258b00a1d4 100644
4479 +--- a/drivers/block/nbd.c
4480 ++++ b/drivers/block/nbd.c
4481 +@@ -1591,7 +1591,7 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
4482 + if (new_index < 0) {
4483 + mutex_unlock(&nbd_index_mutex);
4484 + printk(KERN_ERR "nbd: failed to add new device\n");
4485 +- return ret;
4486 ++ return new_index;
4487 + }
4488 + nbd = idr_find(&nbd_index_idr, new_index);
4489 + }
4490 +diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
4491 +index 69dfa1d3f453..f01d4a8a783a 100644
4492 +--- a/drivers/block/null_blk.c
4493 ++++ b/drivers/block/null_blk.c
4494 +@@ -68,6 +68,7 @@ enum nullb_device_flags {
4495 + NULLB_DEV_FL_CACHE = 3,
4496 + };
4497 +
4498 ++#define MAP_SZ ((PAGE_SIZE >> SECTOR_SHIFT) + 2)
4499 + /*
4500 + * nullb_page is a page in memory for nullb devices.
4501 + *
4502 +@@ -82,10 +83,10 @@ enum nullb_device_flags {
4503 + */
4504 + struct nullb_page {
4505 + struct page *page;
4506 +- unsigned long bitmap;
4507 ++ DECLARE_BITMAP(bitmap, MAP_SZ);
4508 + };
4509 +-#define NULLB_PAGE_LOCK (sizeof(unsigned long) * 8 - 1)
4510 +-#define NULLB_PAGE_FREE (sizeof(unsigned long) * 8 - 2)
4511 ++#define NULLB_PAGE_LOCK (MAP_SZ - 1)
4512 ++#define NULLB_PAGE_FREE (MAP_SZ - 2)
4513 +
4514 + struct nullb_device {
4515 + struct nullb *nullb;
4516 +@@ -725,7 +726,7 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
4517 + if (!t_page->page)
4518 + goto out_freepage;
4519 +
4520 +- t_page->bitmap = 0;
4521 ++ memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
4522 + return t_page;
4523 + out_freepage:
4524 + kfree(t_page);
4525 +@@ -735,13 +736,20 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
4526 +
4527 + static void null_free_page(struct nullb_page *t_page)
4528 + {
4529 +- __set_bit(NULLB_PAGE_FREE, &t_page->bitmap);
4530 +- if (test_bit(NULLB_PAGE_LOCK, &t_page->bitmap))
4531 ++ __set_bit(NULLB_PAGE_FREE, t_page->bitmap);
4532 ++ if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
4533 + return;
4534 + __free_page(t_page->page);
4535 + kfree(t_page);
4536 + }
4537 +
4538 ++static bool null_page_empty(struct nullb_page *page)
4539 ++{
4540 ++ int size = MAP_SZ - 2;
4541 ++
4542 ++ return find_first_bit(page->bitmap, size) == size;
4543 ++}
4544 ++
4545 + static void null_free_sector(struct nullb *nullb, sector_t sector,
4546 + bool is_cache)
4547 + {
4548 +@@ -756,9 +764,9 @@ static void null_free_sector(struct nullb *nullb, sector_t sector,
4549 +
4550 + t_page = radix_tree_lookup(root, idx);
4551 + if (t_page) {
4552 +- __clear_bit(sector_bit, &t_page->bitmap);
4553 ++ __clear_bit(sector_bit, t_page->bitmap);
4554 +
4555 +- if (!t_page->bitmap) {
4556 ++ if (null_page_empty(t_page)) {
4557 + ret = radix_tree_delete_item(root, idx, t_page);
4558 + WARN_ON(ret != t_page);
4559 + null_free_page(ret);
4560 +@@ -829,7 +837,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb,
4561 + t_page = radix_tree_lookup(root, idx);
4562 + WARN_ON(t_page && t_page->page->index != idx);
4563 +
4564 +- if (t_page && (for_write || test_bit(sector_bit, &t_page->bitmap)))
4565 ++ if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
4566 + return t_page;
4567 +
4568 + return NULL;
4569 +@@ -892,10 +900,10 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
4570 +
4571 + t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
4572 +
4573 +- __clear_bit(NULLB_PAGE_LOCK, &c_page->bitmap);
4574 +- if (test_bit(NULLB_PAGE_FREE, &c_page->bitmap)) {
4575 ++ __clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
4576 ++ if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
4577 + null_free_page(c_page);
4578 +- if (t_page && t_page->bitmap == 0) {
4579 ++ if (t_page && null_page_empty(t_page)) {
4580 + ret = radix_tree_delete_item(&nullb->dev->data,
4581 + idx, t_page);
4582 + null_free_page(t_page);
4583 +@@ -911,11 +919,11 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
4584 +
4585 + for (i = 0; i < PAGE_SECTORS;
4586 + i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
4587 +- if (test_bit(i, &c_page->bitmap)) {
4588 ++ if (test_bit(i, c_page->bitmap)) {
4589 + offset = (i << SECTOR_SHIFT);
4590 + memcpy(dst + offset, src + offset,
4591 + nullb->dev->blocksize);
4592 +- __set_bit(i, &t_page->bitmap);
4593 ++ __set_bit(i, t_page->bitmap);
4594 + }
4595 + }
4596 +
4597 +@@ -952,10 +960,10 @@ static int null_make_cache_space(struct nullb *nullb, unsigned long n)
4598 + * We found the page which is being flushed to disk by other
4599 + * threads
4600 + */
4601 +- if (test_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap))
4602 ++ if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
4603 + c_pages[i] = NULL;
4604 + else
4605 +- __set_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap);
4606 ++ __set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
4607 + }
4608 +
4609 + one_round = 0;
4610 +@@ -1008,7 +1016,7 @@ static int copy_to_nullb(struct nullb *nullb, struct page *source,
4611 + kunmap_atomic(dst);
4612 + kunmap_atomic(src);
4613 +
4614 +- __set_bit(sector & SECTOR_MASK, &t_page->bitmap);
4615 ++ __set_bit(sector & SECTOR_MASK, t_page->bitmap);
4616 +
4617 + if (is_fua)
4618 + null_free_sector(nullb, sector, true);
4619 +@@ -1922,10 +1930,6 @@ static int __init null_init(void)
4620 + struct nullb *nullb;
4621 + struct nullb_device *dev;
4622 +
4623 +- /* check for nullb_page.bitmap */
4624 +- if (sizeof(unsigned long) * 8 - 2 < (PAGE_SIZE >> SECTOR_SHIFT))
4625 +- return -EINVAL;
4626 +-
4627 + if (g_bs > PAGE_SIZE) {
4628 + pr_warn("null_blk: invalid block size\n");
4629 + pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
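
The null_blk hunks above swap a bare unsigned long for DECLARE_BITMAP() so
the per-sector bits plus the two control bits (LOCK/FREE) always fit, which
is why the init-time size check could be deleted. A userspace-style sketch of
the layout, assuming 4 KiB pages and 512-byte sectors (helper names are
invented; the kernel uses set_bit()/test_bit()/find_first_bit()):

    #include <limits.h>

    #define PAGE_SECTORS 8                    /* 4096 >> 9 */
    #define MAP_SZ       (PAGE_SECTORS + 2)   /* sector bits + two flags */
    #define PAGE_FREE    (MAP_SZ - 2)
    #define PAGE_LOCK    (MAP_SZ - 1)

    #define WORD_BITS    (sizeof(unsigned long) * CHAR_BIT)
    #define MAP_LONGS    ((MAP_SZ + WORD_BITS - 1) / WORD_BITS)

    struct page_map { unsigned long bitmap[MAP_LONGS]; };

    static void map_set(struct page_map *m, unsigned int nr)
    {
        m->bitmap[nr / WORD_BITS] |= 1UL << (nr % WORD_BITS);
    }

    static int map_test(const struct page_map *m, unsigned int nr)
    {
        return (m->bitmap[nr / WORD_BITS] >> (nr % WORD_BITS)) & 1;
    }

    /* "Empty" ignores the two flag bits, mirroring null_page_empty()'s
     * find_first_bit(map, MAP_SZ - 2) == MAP_SZ - 2 test. */
    static int map_empty(const struct page_map *m)
    {
        for (unsigned int i = 0; i < PAGE_FREE; i++)
            if (map_test(m, i))
                return 0;
        return 1;
    }
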
4630 +diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
4631 +index 7b8c6368beb7..a026211afb51 100644
4632 +--- a/drivers/block/paride/pcd.c
4633 ++++ b/drivers/block/paride/pcd.c
4634 +@@ -230,6 +230,8 @@ static int pcd_block_open(struct block_device *bdev, fmode_t mode)
4635 + struct pcd_unit *cd = bdev->bd_disk->private_data;
4636 + int ret;
4637 +
4638 ++ check_disk_change(bdev);
4639 ++
4640 + mutex_lock(&pcd_mutex);
4641 + ret = cdrom_open(&cd->info, bdev, mode);
4642 + mutex_unlock(&pcd_mutex);
4643 +diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
4644 +index 891265acb10e..7d23225f79ed 100644
4645 +--- a/drivers/block/xen-blkfront.c
4646 ++++ b/drivers/block/xen-blkfront.c
4647 +@@ -262,6 +262,7 @@ static DEFINE_SPINLOCK(minor_lock);
4648 +
4649 + static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
4650 + static void blkfront_gather_backend_features(struct blkfront_info *info);
4651 ++static int negotiate_mq(struct blkfront_info *info);
4652 +
4653 + static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
4654 + {
4655 +@@ -1774,11 +1775,18 @@ static int talk_to_blkback(struct xenbus_device *dev,
4656 + unsigned int i, max_page_order;
4657 + unsigned int ring_page_order;
4658 +
4659 ++ if (!info)
4660 ++ return -ENODEV;
4661 ++
4662 + max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
4663 + "max-ring-page-order", 0);
4664 + ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
4665 + info->nr_ring_pages = 1 << ring_page_order;
4666 +
4667 ++ err = negotiate_mq(info);
4668 ++ if (err)
4669 ++ goto destroy_blkring;
4670 ++
4671 + for (i = 0; i < info->nr_rings; i++) {
4672 + struct blkfront_ring_info *rinfo = &info->rinfo[i];
4673 +
4674 +@@ -1978,11 +1986,6 @@ static int blkfront_probe(struct xenbus_device *dev,
4675 + }
4676 +
4677 + info->xbdev = dev;
4678 +- err = negotiate_mq(info);
4679 +- if (err) {
4680 +- kfree(info);
4681 +- return err;
4682 +- }
4683 +
4684 + mutex_init(&info->mutex);
4685 + info->vdevice = vdevice;
4686 +@@ -2099,10 +2102,6 @@ static int blkfront_resume(struct xenbus_device *dev)
4687 +
4688 + blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
4689 +
4690 +- err = negotiate_mq(info);
4691 +- if (err)
4692 +- return err;
4693 +-
4694 + err = talk_to_blkback(dev, info);
4695 + if (!err)
4696 + blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
4697 +diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
4698 +index 5f7d86509f2f..bfc566d3f31a 100644
4699 +--- a/drivers/cdrom/cdrom.c
4700 ++++ b/drivers/cdrom/cdrom.c
4701 +@@ -1152,9 +1152,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
4702 +
4703 + cd_dbg(CD_OPEN, "entering cdrom_open\n");
4704 +
4705 +- /* open is event synchronization point, check events first */
4706 +- check_disk_change(bdev);
4707 +-
4708 + /* if this was a O_NONBLOCK open and we should honor the flags,
4709 + * do a quick open without drive/disc integrity checks. */
4710 + cdi->use_count++;
4711 +diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
4712 +index 6495b03f576c..ae3a7537cf0f 100644
4713 +--- a/drivers/cdrom/gdrom.c
4714 ++++ b/drivers/cdrom/gdrom.c
4715 +@@ -497,6 +497,9 @@ static const struct cdrom_device_ops gdrom_ops = {
4716 + static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
4717 + {
4718 + int ret;
4719 ++
4720 ++ check_disk_change(bdev);
4721 ++
4722 + mutex_lock(&gdrom_mutex);
4723 + ret = cdrom_open(gd.cd_info, bdev, mode);
4724 + mutex_unlock(&gdrom_mutex);
4725 +diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
4726 +index 63d84e6f1891..83c695938a2d 100644
4727 +--- a/drivers/char/hw_random/stm32-rng.c
4728 ++++ b/drivers/char/hw_random/stm32-rng.c
4729 +@@ -21,6 +21,7 @@
4730 + #include <linux/of_address.h>
4731 + #include <linux/of_platform.h>
4732 + #include <linux/pm_runtime.h>
4733 ++#include <linux/reset.h>
4734 + #include <linux/slab.h>
4735 +
4736 + #define RNG_CR 0x00
4737 +@@ -46,6 +47,7 @@ struct stm32_rng_private {
4738 + struct hwrng rng;
4739 + void __iomem *base;
4740 + struct clk *clk;
4741 ++ struct reset_control *rst;
4742 + };
4743 +
4744 + static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
4745 +@@ -140,6 +142,13 @@ static int stm32_rng_probe(struct platform_device *ofdev)
4746 + if (IS_ERR(priv->clk))
4747 + return PTR_ERR(priv->clk);
4748 +
4749 ++ priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
4750 ++ if (!IS_ERR(priv->rst)) {
4751 ++ reset_control_assert(priv->rst);
4752 ++ udelay(2);
4753 ++ reset_control_deassert(priv->rst);
4754 ++ }
4755 ++
4756 + dev_set_drvdata(dev, priv);
4757 +
4758 + priv->rng.name = dev_driver_string(dev),
4759 +diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
4760 +index 0aea3bcb6158..6f2eaba1cd6a 100644
4761 +--- a/drivers/char/ipmi/ipmi_ssif.c
4762 ++++ b/drivers/char/ipmi/ipmi_ssif.c
4763 +@@ -763,7 +763,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
4764 + ssif_info->ssif_state = SSIF_NORMAL;
4765 + ipmi_ssif_unlock_cond(ssif_info, flags);
4766 + pr_warn(PFX "Error getting flags: %d %d, %x\n",
4767 +- result, len, data[2]);
4768 ++ result, len, (len >= 3) ? data[2] : 0);
4769 + } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
4770 + || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
4771 + /*
4772 +@@ -785,7 +785,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
4773 + if ((result < 0) || (len < 3) || (data[2] != 0)) {
4774 + /* Error clearing flags */
4775 + pr_warn(PFX "Error clearing flags: %d %d, %x\n",
4776 +- result, len, data[2]);
4777 ++ result, len, (len >= 3) ? data[2] : 0);
4778 + } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
4779 + || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) {
4780 + pr_warn(PFX "Invalid response clearing flags: %x %x\n",
4781 +diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
4782 +index 3ee7e6fea621..846d18daf893 100644
4783 +--- a/drivers/clocksource/fsl_ftm_timer.c
4784 ++++ b/drivers/clocksource/fsl_ftm_timer.c
4785 +@@ -281,7 +281,7 @@ static int __init __ftm_clk_init(struct device_node *np, char *cnt_name,
4786 +
4787 + static unsigned long __init ftm_clk_init(struct device_node *np)
4788 + {
4789 +- unsigned long freq;
4790 ++ long freq;
4791 +
4792 + freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt");
4793 + if (freq <= 0)
4794 +diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
4795 +index ae3167c28b12..a07f51231e33 100644
4796 +--- a/drivers/clocksource/mips-gic-timer.c
4797 ++++ b/drivers/clocksource/mips-gic-timer.c
4798 +@@ -164,7 +164,7 @@ static int __init __gic_clocksource_init(void)
4799 +
4800 + /* Set clocksource mask. */
4801 + count_width = read_gic_config() & GIC_CONFIG_COUNTBITS;
4802 +- count_width >>= __fls(GIC_CONFIG_COUNTBITS);
4803 ++ count_width >>= __ffs(GIC_CONFIG_COUNTBITS);
4804 + count_width *= 4;
4805 + count_width += 32;
4806 + gic_clocksource.mask = CLOCKSOURCE_MASK(count_width);
4807 +diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
4808 +index dcb1cb9a4572..8b432d6e846d 100644
4809 +--- a/drivers/cpufreq/cppc_cpufreq.c
4810 ++++ b/drivers/cpufreq/cppc_cpufreq.c
4811 +@@ -167,9 +167,19 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
4812 + NSEC_PER_USEC;
4813 + policy->shared_type = cpu->shared_type;
4814 +
4815 +- if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
4816 ++ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
4817 ++ int i;
4818 ++
4819 + cpumask_copy(policy->cpus, cpu->shared_cpu_map);
4820 +- else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
4821 ++
4822 ++ for_each_cpu(i, policy->cpus) {
4823 ++ if (unlikely(i == policy->cpu))
4824 ++ continue;
4825 ++
4826 ++ memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps,
4827 ++ sizeof(cpu->perf_caps));
4828 ++ }
4829 ++ } else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
4830 + /* Support only SW_ANY for now. */
4831 + pr_debug("Unsupported CPU co-ord type\n");
4832 + return -EFAULT;
4833 +@@ -233,8 +243,13 @@ static int __init cppc_cpufreq_init(void)
4834 + return ret;
4835 +
4836 + out:
4837 +- for_each_possible_cpu(i)
4838 +- kfree(all_cpu_data[i]);
4839 ++ for_each_possible_cpu(i) {
4840 ++ cpu = all_cpu_data[i];
4841 ++ if (!cpu)
4842 ++ break;
4843 ++ free_cpumask_var(cpu->shared_cpu_map);
4844 ++ kfree(cpu);
4845 ++ }
4846 +
4847 + kfree(all_cpu_data);
4848 + return -ENODEV;
4849 +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
4850 +index ea43b147a7fe..789fc3a8289f 100644
4851 +--- a/drivers/cpufreq/cpufreq.c
4852 ++++ b/drivers/cpufreq/cpufreq.c
4853 +@@ -1315,14 +1315,14 @@ static int cpufreq_online(unsigned int cpu)
4854 + return 0;
4855 +
4856 + out_exit_policy:
4857 ++ for_each_cpu(j, policy->real_cpus)
4858 ++ remove_cpu_dev_symlink(policy, get_cpu_device(j));
4859 ++
4860 + up_write(&policy->rwsem);
4861 +
4862 + if (cpufreq_driver->exit)
4863 + cpufreq_driver->exit(policy);
4864 +
4865 +- for_each_cpu(j, policy->real_cpus)
4866 +- remove_cpu_dev_symlink(policy, get_cpu_device(j));
4867 +-
4868 + out_free_policy:
4869 + cpufreq_policy_free(policy);
4870 + return ret;
4871 +diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
4872 +index f652a0e0f5a2..3548caa9e933 100644
4873 +--- a/drivers/dma/mv_xor_v2.c
4874 ++++ b/drivers/dma/mv_xor_v2.c
4875 +@@ -163,6 +163,7 @@ struct mv_xor_v2_device {
4876 + void __iomem *dma_base;
4877 + void __iomem *glob_base;
4878 + struct clk *clk;
4879 ++ struct clk *reg_clk;
4880 + struct tasklet_struct irq_tasklet;
4881 + struct list_head free_sw_desc;
4882 + struct dma_device dmadev;
4883 +@@ -749,13 +750,26 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
4884 + if (ret)
4885 + return ret;
4886 +
4887 ++ xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
4888 ++ if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) {
4889 ++ if (!IS_ERR(xor_dev->reg_clk)) {
4890 ++ ret = clk_prepare_enable(xor_dev->reg_clk);
4891 ++ if (ret)
4892 ++ return ret;
4893 ++ } else {
4894 ++ return PTR_ERR(xor_dev->reg_clk);
4895 ++ }
4896 ++ }
4897 ++
4898 + xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
4899 +- if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
4900 +- return -EPROBE_DEFER;
4901 ++ if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
4902 ++ ret = -EPROBE_DEFER;
4903 ++ goto disable_reg_clk;
4904 ++ }
4905 + if (!IS_ERR(xor_dev->clk)) {
4906 + ret = clk_prepare_enable(xor_dev->clk);
4907 + if (ret)
4908 +- return ret;
4909 ++ goto disable_reg_clk;
4910 + }
4911 +
4912 + ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
4913 +@@ -866,8 +880,9 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
4914 + free_msi_irqs:
4915 + platform_msi_domain_free_irqs(&pdev->dev);
4916 + disable_clk:
4917 +- if (!IS_ERR(xor_dev->clk))
4918 +- clk_disable_unprepare(xor_dev->clk);
4919 ++ clk_disable_unprepare(xor_dev->clk);
4920 ++disable_reg_clk:
4921 ++ clk_disable_unprepare(xor_dev->reg_clk);
4922 + return ret;
4923 + }
4924 +
4925 +diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
4926 +index f122c2a7b9f0..7432c8894e32 100644
4927 +--- a/drivers/dma/pl330.c
4928 ++++ b/drivers/dma/pl330.c
4929 +@@ -1510,7 +1510,7 @@ static void pl330_dotask(unsigned long data)
4930 + /* Returns 1 if state was updated, 0 otherwise */
4931 + static int pl330_update(struct pl330_dmac *pl330)
4932 + {
4933 +- struct dma_pl330_desc *descdone, *tmp;
4934 ++ struct dma_pl330_desc *descdone;
4935 + unsigned long flags;
4936 + void __iomem *regs;
4937 + u32 val;
4938 +@@ -1588,7 +1588,9 @@ static int pl330_update(struct pl330_dmac *pl330)
4939 + }
4940 +
4941 + /* Now that we are in no hurry, do the callbacks */
4942 +- list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) {
4943 ++ while (!list_empty(&pl330->req_done)) {
4944 ++ descdone = list_first_entry(&pl330->req_done,
4945 ++ struct dma_pl330_desc, rqd);
4946 + list_del(&descdone->rqd);
4947 + spin_unlock_irqrestore(&pl330->lock, flags);
4948 + dma_pl330_rqcb(descdone, PL330_ERR_NONE);
4949 +diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
4950 +index 6d89fb6a6a92..8fbf175fdcc7 100644
4951 +--- a/drivers/dma/qcom/bam_dma.c
4952 ++++ b/drivers/dma/qcom/bam_dma.c
4953 +@@ -388,6 +388,7 @@ struct bam_device {
4954 + struct device_dma_parameters dma_parms;
4955 + struct bam_chan *channels;
4956 + u32 num_channels;
4957 ++ u32 num_ees;
4958 +
4959 + /* execution environment ID, from DT */
4960 + u32 ee;
4961 +@@ -1080,15 +1081,19 @@ static int bam_init(struct bam_device *bdev)
4962 + u32 val;
4963 +
4964 + /* read revision and configuration information */
4965 +- val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT;
4966 +- val &= NUM_EES_MASK;
4967 ++ if (!bdev->num_ees) {
4968 ++ val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
4969 ++ bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
4970 ++ }
4971 +
4972 + /* check that configured EE is within range */
4973 +- if (bdev->ee >= val)
4974 ++ if (bdev->ee >= bdev->num_ees)
4975 + return -EINVAL;
4976 +
4977 +- val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
4978 +- bdev->num_channels = val & BAM_NUM_PIPES_MASK;
4979 ++ if (!bdev->num_channels) {
4980 ++ val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
4981 ++ bdev->num_channels = val & BAM_NUM_PIPES_MASK;
4982 ++ }
4983 +
4984 + if (bdev->controlled_remotely)
4985 + return 0;
4986 +@@ -1183,6 +1188,18 @@ static int bam_dma_probe(struct platform_device *pdev)
4987 + bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
4988 + "qcom,controlled-remotely");
4989 +
4990 ++ if (bdev->controlled_remotely) {
4991 ++ ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
4992 ++ &bdev->num_channels);
4993 ++ if (ret)
4994 ++ dev_err(bdev->dev, "num-channels unspecified in dt\n");
4995 ++
4996 ++ ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees",
4997 ++ &bdev->num_ees);
4998 ++ if (ret)
4999 ++ dev_err(bdev->dev, "num-ees unspecified in dt\n");
5000 ++ }
5001 ++
5002 + bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
5003 + if (IS_ERR(bdev->bamclk))
5004 + return PTR_ERR(bdev->bamclk);
5005 +diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
5006 +index 2b2c7db3e480..9d6ce5051d8f 100644
5007 +--- a/drivers/dma/sh/rcar-dmac.c
5008 ++++ b/drivers/dma/sh/rcar-dmac.c
5009 +@@ -880,7 +880,7 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
5010 +
5011 + rcar_dmac_chan_configure_desc(chan, desc);
5012 +
5013 +- max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;
5014 ++ max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;
5015 +
5016 + /*
5017 + * Allocate and fill the transfer chunk descriptors. We own the only
5018 +@@ -1264,8 +1264,17 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
5019 + * If the cookie doesn't correspond to the currently running transfer
5020 + * then the descriptor hasn't been processed yet, and the residue is
5021 + * equal to the full descriptor size.
5022 ++ * Also, a client driver may call this function before
5023 ++ * rcar_dmac_isr_channel_thread() runs. In that case, "desc.running"
5024 ++ * will point to the next descriptor and the completed one will be on
5025 ++ * the done list. So, if the argument cookie matches a done list
5026 ++ * entry's cookie, we can assume the residue is zero.
5027 + */
5028 + if (cookie != desc->async_tx.cookie) {
5029 ++ list_for_each_entry(desc, &chan->desc.done, node) {
5030 ++ if (cookie == desc->async_tx.cookie)
5031 ++ return 0;
5032 ++ }
5033 + list_for_each_entry(desc, &chan->desc.pending, node) {
5034 + if (cookie == desc->async_tx.cookie)
5035 + return desc->size;
5036 +diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
5037 +index e8db9659a36b..fe0d30340e96 100644
5038 +--- a/drivers/firmware/dmi_scan.c
5039 ++++ b/drivers/firmware/dmi_scan.c
5040 +@@ -191,7 +191,7 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
5041 + char *s;
5042 + int is_ff = 1, is_00 = 1, i;
5043 +
5044 +- if (dmi_ident[slot] || dm->length <= index + 16)
5045 ++ if (dmi_ident[slot] || dm->length < index + 16)
5046 + return;
5047 +
5048 + d = (u8 *) dm + index;
5049 +diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
5050 +index 1cc41c3d6315..86a1ad17a32e 100644
5051 +--- a/drivers/firmware/efi/arm-runtime.c
5052 ++++ b/drivers/firmware/efi/arm-runtime.c
5053 +@@ -54,6 +54,9 @@ static struct ptdump_info efi_ptdump_info = {
5054 +
5055 + static int __init ptdump_init(void)
5056 + {
5057 ++ if (!efi_enabled(EFI_RUNTIME_SERVICES))
5058 ++ return 0;
5059 ++
5060 + return ptdump_debugfs_register(&efi_ptdump_info, "efi_page_tables");
5061 + }
5062 + device_initcall(ptdump_init);
5063 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
5064 +index 8d689ab7e429..1ef486b5d54b 100644
5065 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
5066 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
5067 +@@ -26,6 +26,7 @@
5068 + #define AMDGPU_AMDKFD_H_INCLUDED
5069 +
5070 + #include <linux/types.h>
5071 ++#include <linux/mm.h>
5072 + #include <linux/mmu_context.h>
5073 + #include <kgd_kfd_interface.h>
5074 +
5075 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
5076 +index 659997bfff30..cd84bd0b1eaf 100644
5077 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
5078 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
5079 +@@ -322,14 +322,45 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
5080 + {
5081 + unsigned i;
5082 + int r, ret = 0;
5083 ++ long tmo_gfx, tmo_mm;
5084 ++
5085 ++ tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
5086 ++ if (amdgpu_sriov_vf(adev)) {
5087 ++ /* MM engines on the hypervisor side are not scheduled together
5088 ++ * with the CP and SDMA engines, so even in exclusive mode an MM
5089 ++ * engine could still be running on another VF; the IB test timeout
5090 ++ * for MM engines under SR-IOV should therefore be long. 8 seconds
5091 ++ * should be enough for the MM engine to come back to this VF.
5092 ++ */
5093 ++ tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
5094 ++ }
5095 ++
5096 ++ if (amdgpu_sriov_runtime(adev)) {
5097 ++ /* The CP & SDMA engines are scheduled together, so the timeout
5098 ++ * needs to be wide enough to cover the time spent waiting for
5099 ++ * them to come back under RUNTIME only.
5100 ++ */
5101 ++ tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
5102 ++ }
5103 +
5104 + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5105 + struct amdgpu_ring *ring = adev->rings[i];
5106 ++ long tmo;
5107 +
5108 + if (!ring || !ring->ready)
5109 + continue;
5110 +
5111 +- r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT);
5112 ++ /* MM engine need more time */
5113 ++ if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
5114 ++ ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
5115 ++ ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
5116 ++ ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
5117 ++ ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
5118 ++ tmo = tmo_mm;
5119 ++ else
5120 ++ tmo = tmo_gfx;
5121 ++
5122 ++ r = amdgpu_ring_test_ib(ring, tmo);
5123 + if (r) {
5124 + ring->ready = false;
5125 +
5126 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
5127 +index 69182eeca264..1a30c54a0889 100644
5128 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
5129 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
5130 +@@ -2889,7 +2889,13 @@ static int gfx_v9_0_hw_fini(void *handle)
5131 + amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
5132 + amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
5133 + if (amdgpu_sriov_vf(adev)) {
5134 +- pr_debug("For SRIOV client, shouldn't do anything.\n");
5135 ++ gfx_v9_0_cp_gfx_enable(adev, false);
5136 ++ /* Polling must be disabled for SR-IOV once the hw is finished;
5137 ++ * otherwise the CPC engine may keep fetching a WB address that is
5138 ++ * already invalid after the sw has finished, triggering a DMAR
5139 ++ * read error on the hypervisor side.
5140 ++ */
5141 ++ WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
5142 + return 0;
5143 + }
5144 + gfx_v9_0_cp_enable(adev, false);
5145 +diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
5146 +index 6dc0f6e346e7..a1d71429fb72 100644
5147 +--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
5148 ++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
5149 +@@ -456,7 +456,10 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
5150 + adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
5151 + if (!adev->mc.vram_width) {
5152 + /* hbm memory channel size */
5153 +- chansize = 128;
5154 ++ if (adev->flags & AMD_IS_APU)
5155 ++ chansize = 64;
5156 ++ else
5157 ++ chansize = 128;
5158 +
5159 + tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
5160 + tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
5161 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
5162 +index 1d312603de9f..308571b09c6b 100644
5163 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
5164 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
5165 +@@ -166,8 +166,7 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
5166 + packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
5167 + packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
5168 +
5169 +- /* TODO: scratch support */
5170 +- packet->sh_hidden_private_base_vmid = 0;
5171 ++ packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base;
5172 +
5173 + packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
5174 + packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
5175 +diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
5176 +index 5f4c2e833a65..d665dd5af5dd 100644
5177 +--- a/drivers/gpu/drm/ast/ast_tables.h
5178 ++++ b/drivers/gpu/drm/ast/ast_tables.h
5179 +@@ -97,7 +97,7 @@ static const struct ast_vbios_dclk_info dclk_table[] = {
5180 + {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
5181 + {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
5182 + {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
5183 +- {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */
5184 ++ {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
5185 + {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
5186 + {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
5187 + {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
5188 +@@ -127,7 +127,7 @@ static const struct ast_vbios_dclk_info dclk_table_ast2500[] = {
5189 + {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
5190 + {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
5191 + {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
5192 +- {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */
5193 ++ {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
5194 + {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
5195 + {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
5196 + {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
5197 +diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
5198 +index b1ab4ab09532..60373d7eb220 100644
5199 +--- a/drivers/gpu/drm/bridge/sii902x.c
5200 ++++ b/drivers/gpu/drm/bridge/sii902x.c
5201 +@@ -137,7 +137,9 @@ static int sii902x_get_modes(struct drm_connector *connector)
5202 + struct sii902x *sii902x = connector_to_sii902x(connector);
5203 + struct regmap *regmap = sii902x->regmap;
5204 + u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
5205 ++ struct device *dev = &sii902x->i2c->dev;
5206 + unsigned long timeout;
5207 ++ unsigned int retries;
5208 + unsigned int status;
5209 + struct edid *edid;
5210 + int num = 0;
5211 +@@ -159,7 +161,7 @@ static int sii902x_get_modes(struct drm_connector *connector)
5212 + time_before(jiffies, timeout));
5213 +
5214 + if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
5215 +- dev_err(&sii902x->i2c->dev, "failed to acquire the i2c bus\n");
5216 ++ dev_err(dev, "failed to acquire the i2c bus\n");
5217 + return -ETIMEDOUT;
5218 + }
5219 +
5220 +@@ -179,9 +181,19 @@ static int sii902x_get_modes(struct drm_connector *connector)
5221 + if (ret)
5222 + return ret;
5223 +
5224 +- ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
5225 ++ /*
5226 ++ * Sometimes the I2C bus can stall after failure to use the
5227 ++ * EDID channel. Retry a few times to see if things clear
5228 ++ * up, else continue anyway.
5229 ++ */
5230 ++ retries = 5;
5231 ++ do {
5232 ++ ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA,
5233 ++ &status);
5234 ++ retries--;
5235 ++ } while (ret && retries);
5236 + if (ret)
5237 +- return ret;
5238 ++ dev_err(dev, "failed to read status (%d)\n", ret);
5239 +
5240 + ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
5241 + SII902X_SYS_CTRL_DDC_BUS_REQ |
5242 +@@ -201,7 +213,7 @@ static int sii902x_get_modes(struct drm_connector *connector)
5243 +
5244 + if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
5245 + SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
5246 +- dev_err(&sii902x->i2c->dev, "failed to release the i2c bus\n");
5247 ++ dev_err(dev, "failed to release the i2c bus\n");
5248 + return -ETIMEDOUT;
5249 + }
5250 +
5251 +diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
5252 +index 2b8bf2dd6387..9effe40f5fa5 100644
5253 +--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
5254 ++++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
5255 +@@ -926,7 +926,7 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
5256 + struct drm_device *drm_dev = g2d->subdrv.drm_dev;
5257 + struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
5258 + struct drm_exynos_pending_g2d_event *e;
5259 +- struct timeval now;
5260 ++ struct timespec64 now;
5261 +
5262 + if (list_empty(&runqueue_node->event_list))
5263 + return;
5264 +@@ -934,9 +934,9 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
5265 + e = list_first_entry(&runqueue_node->event_list,
5266 + struct drm_exynos_pending_g2d_event, base.link);
5267 +
5268 +- do_gettimeofday(&now);
5269 ++ ktime_get_ts64(&now);
5270 + e->event.tv_sec = now.tv_sec;
5271 +- e->event.tv_usec = now.tv_usec;
5272 ++ e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
5273 + e->event.cmdlist_no = cmdlist_no;
5274 +
5275 + drm_send_event(drm_dev, &e->base);
5276 +diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
5277 +index 30496134a3d0..d7cbe53c4c01 100644
5278 +--- a/drivers/gpu/drm/exynos/regs-fimc.h
5279 ++++ b/drivers/gpu/drm/exynos/regs-fimc.h
5280 +@@ -569,7 +569,7 @@
5281 + #define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26)
5282 + #define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
5283 + #define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26)
5284 +-#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff < 13) | (0xff < 0))
5285 ++#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | (0xff << 0))
5286 +
5287 + /* Real input DMA size register */
5288 + #define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31)
5289 +diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
5290 +index 53e0b24beda6..d976391dfa31 100644
5291 +--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
5292 ++++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
5293 +@@ -225,7 +225,11 @@ static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
5294 + struct drm_crtc_state *old_crtc_state)
5295 + {
5296 + drm_crtc_vblank_on(crtc);
5297 ++}
5298 +
5299 ++static void ipu_crtc_atomic_flush(struct drm_crtc *crtc,
5300 ++ struct drm_crtc_state *old_crtc_state)
5301 ++{
5302 + spin_lock_irq(&crtc->dev->event_lock);
5303 + if (crtc->state->event) {
5304 + WARN_ON(drm_crtc_vblank_get(crtc));
5305 +@@ -293,6 +297,7 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
5306 + .mode_set_nofb = ipu_crtc_mode_set_nofb,
5307 + .atomic_check = ipu_crtc_atomic_check,
5308 + .atomic_begin = ipu_crtc_atomic_begin,
5309 ++ .atomic_flush = ipu_crtc_atomic_flush,
5310 + .atomic_disable = ipu_crtc_atomic_disable,
5311 + .atomic_enable = ipu_crtc_atomic_enable,
5312 + };
5313 +diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
5314 +index 5155f0179b61..05520202c967 100644
5315 +--- a/drivers/gpu/drm/meson/meson_crtc.c
5316 ++++ b/drivers/gpu/drm/meson/meson_crtc.c
5317 +@@ -36,6 +36,7 @@
5318 + #include "meson_venc.h"
5319 + #include "meson_vpp.h"
5320 + #include "meson_viu.h"
5321 ++#include "meson_canvas.h"
5322 + #include "meson_registers.h"
5323 +
5324 + /* CRTC definition */
5325 +@@ -192,6 +193,11 @@ void meson_crtc_irq(struct meson_drm *priv)
5326 + } else
5327 + meson_vpp_disable_interlace_vscaler_osd1(priv);
5328 +
5329 ++ meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
5330 ++ priv->viu.osd1_addr, priv->viu.osd1_stride,
5331 ++ priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
5332 ++ MESON_CANVAS_BLKMODE_LINEAR);
5333 ++
5334 + /* Enable OSD1 */
5335 + writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,
5336 + priv->io_base + _REG(VPP_MISC));
5337 +diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
5338 +index 7742c7d81ed8..4ad8223c60ea 100644
5339 +--- a/drivers/gpu/drm/meson/meson_drv.c
5340 ++++ b/drivers/gpu/drm/meson/meson_drv.c
5341 +@@ -180,40 +180,51 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
5342 +
5343 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu");
5344 + regs = devm_ioremap_resource(dev, res);
5345 +- if (IS_ERR(regs))
5346 +- return PTR_ERR(regs);
5347 ++ if (IS_ERR(regs)) {
5348 ++ ret = PTR_ERR(regs);
5349 ++ goto free_drm;
5350 ++ }
5351 +
5352 + priv->io_base = regs;
5353 +
5354 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi");
5355 + /* Simply ioremap since it may be a shared register zone */
5356 + regs = devm_ioremap(dev, res->start, resource_size(res));
5357 +- if (!regs)
5358 +- return -EADDRNOTAVAIL;
5359 ++ if (!regs) {
5360 ++ ret = -EADDRNOTAVAIL;
5361 ++ goto free_drm;
5362 ++ }
5363 +
5364 + priv->hhi = devm_regmap_init_mmio(dev, regs,
5365 + &meson_regmap_config);
5366 + if (IS_ERR(priv->hhi)) {
5367 + dev_err(&pdev->dev, "Couldn't create the HHI regmap\n");
5368 +- return PTR_ERR(priv->hhi);
5369 ++ ret = PTR_ERR(priv->hhi);
5370 ++ goto free_drm;
5371 + }
5372 +
5373 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
5374 + /* Simply ioremap since it may be a shared register zone */
5375 + regs = devm_ioremap(dev, res->start, resource_size(res));
5376 +- if (!regs)
5377 +- return -EADDRNOTAVAIL;
5378 ++ if (!regs) {
5379 ++ ret = -EADDRNOTAVAIL;
5380 ++ goto free_drm;
5381 ++ }
5382 +
5383 + priv->dmc = devm_regmap_init_mmio(dev, regs,
5384 + &meson_regmap_config);
5385 + if (IS_ERR(priv->dmc)) {
5386 + dev_err(&pdev->dev, "Couldn't create the DMC regmap\n");
5387 +- return PTR_ERR(priv->dmc);
5388 ++ ret = PTR_ERR(priv->dmc);
5389 ++ goto free_drm;
5390 + }
5391 +
5392 + priv->vsync_irq = platform_get_irq(pdev, 0);
5393 +
5394 +- drm_vblank_init(drm, 1);
5395 ++ ret = drm_vblank_init(drm, 1);
5396 ++ if (ret)
5397 ++ goto free_drm;
5398 ++
5399 + drm_mode_config_init(drm);
5400 + drm->mode_config.max_width = 3840;
5401 + drm->mode_config.max_height = 2160;
5402 +diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
5403 +index 5e8b392b9d1f..8450d6ac8c9b 100644
5404 +--- a/drivers/gpu/drm/meson/meson_drv.h
5405 ++++ b/drivers/gpu/drm/meson/meson_drv.h
5406 +@@ -43,6 +43,9 @@ struct meson_drm {
5407 + bool osd1_commit;
5408 + uint32_t osd1_ctrl_stat;
5409 + uint32_t osd1_blk0_cfg[5];
5410 ++ uint32_t osd1_addr;
5411 ++ uint32_t osd1_stride;
5412 ++ uint32_t osd1_height;
5413 + } viu;
5414 +
5415 + struct {
5416 +diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
5417 +index 17e96fa47868..0b6011b8d632 100644
5418 +--- a/drivers/gpu/drm/meson/meson_plane.c
5419 ++++ b/drivers/gpu/drm/meson/meson_plane.c
5420 +@@ -164,10 +164,9 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
5421 + /* Update Canvas with buffer address */
5422 + gem = drm_fb_cma_get_gem_obj(fb, 0);
5423 +
5424 +- meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
5425 +- gem->paddr, fb->pitches[0],
5426 +- fb->height, MESON_CANVAS_WRAP_NONE,
5427 +- MESON_CANVAS_BLKMODE_LINEAR);
5428 ++ priv->viu.osd1_addr = gem->paddr;
5429 ++ priv->viu.osd1_stride = fb->pitches[0];
5430 ++ priv->viu.osd1_height = fb->height;
5431 +
5432 + spin_unlock_irqrestore(&priv->drm->event_lock, flags);
5433 + }
5434 +diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
5435 +index f56f60f695e1..debbbf0fd4bd 100644
5436 +--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
5437 ++++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
5438 +@@ -134,7 +134,7 @@ nv50_get_intensity(struct backlight_device *bd)
5439 + struct nouveau_encoder *nv_encoder = bl_get_data(bd);
5440 + struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
5441 + struct nvif_object *device = &drm->client.device.object;
5442 +- int or = nv_encoder->or;
5443 ++ int or = ffs(nv_encoder->dcb->or) - 1;
5444 + u32 div = 1025;
5445 + u32 val;
5446 +
5447 +@@ -149,7 +149,7 @@ nv50_set_intensity(struct backlight_device *bd)
5448 + struct nouveau_encoder *nv_encoder = bl_get_data(bd);
5449 + struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
5450 + struct nvif_object *device = &drm->client.device.object;
5451 +- int or = nv_encoder->or;
5452 ++ int or = ffs(nv_encoder->dcb->or) - 1;
5453 + u32 div = 1025;
5454 + u32 val = (bd->props.brightness * div) / 100;
5455 +
5456 +@@ -170,7 +170,7 @@ nva3_get_intensity(struct backlight_device *bd)
5457 + struct nouveau_encoder *nv_encoder = bl_get_data(bd);
5458 + struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
5459 + struct nvif_object *device = &drm->client.device.object;
5460 +- int or = nv_encoder->or;
5461 ++ int or = ffs(nv_encoder->dcb->or) - 1;
5462 + u32 div, val;
5463 +
5464 + div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
5465 +@@ -188,7 +188,7 @@ nva3_set_intensity(struct backlight_device *bd)
5466 + struct nouveau_encoder *nv_encoder = bl_get_data(bd);
5467 + struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
5468 + struct nvif_object *device = &drm->client.device.object;
5469 +- int or = nv_encoder->or;
5470 ++ int or = ffs(nv_encoder->dcb->or) - 1;
5471 + u32 div, val;
5472 +
5473 + div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
5474 +@@ -228,7 +228,7 @@ nv50_backlight_init(struct drm_connector *connector)
5475 + return -ENODEV;
5476 + }
5477 +
5478 +- if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
5479 ++ if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)))
5480 + return 0;
5481 +
5482 + if (drm->client.device.info.chipset <= 0xa0 ||
5483 +diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
5484 +index d1755f12236b..41ebb37aaa79 100644
5485 +--- a/drivers/gpu/drm/omapdrm/dss/dss.c
5486 ++++ b/drivers/gpu/drm/omapdrm/dss/dss.c
5487 +@@ -1299,88 +1299,18 @@ static const struct soc_device_attribute dss_soc_devices[] = {
5488 +
5489 + static int dss_bind(struct device *dev)
5490 + {
5491 +- struct platform_device *pdev = to_platform_device(dev);
5492 +- struct resource *dss_mem;
5493 +- u32 rev;
5494 + int r;
5495 +
5496 +- dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
5497 +- dss.base = devm_ioremap_resource(&pdev->dev, dss_mem);
5498 +- if (IS_ERR(dss.base))
5499 +- return PTR_ERR(dss.base);
5500 +-
5501 +- r = dss_get_clocks();
5502 ++ r = component_bind_all(dev, NULL);
5503 + if (r)
5504 + return r;
5505 +
5506 +- r = dss_setup_default_clock();
5507 +- if (r)
5508 +- goto err_setup_clocks;
5509 +-
5510 +- r = dss_video_pll_probe(pdev);
5511 +- if (r)
5512 +- goto err_pll_init;
5513 +-
5514 +- r = dss_init_ports(pdev);
5515 +- if (r)
5516 +- goto err_init_ports;
5517 +-
5518 +- pm_runtime_enable(&pdev->dev);
5519 +-
5520 +- r = dss_runtime_get();
5521 +- if (r)
5522 +- goto err_runtime_get;
5523 +-
5524 +- dss.dss_clk_rate = clk_get_rate(dss.dss_clk);
5525 +-
5526 +- /* Select DPLL */
5527 +- REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
5528 +-
5529 +- dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
5530 +-
5531 +-#ifdef CONFIG_OMAP2_DSS_VENC
5532 +- REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
5533 +- REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
5534 +- REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
5535 +-#endif
5536 +- dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK;
5537 +- dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK;
5538 +- dss.dispc_clk_source = DSS_CLK_SRC_FCK;
5539 +- dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK;
5540 +- dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK;
5541 +-
5542 +- rev = dss_read_reg(DSS_REVISION);
5543 +- pr_info("OMAP DSS rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
5544 +-
5545 +- dss_runtime_put();
5546 +-
5547 +- r = component_bind_all(&pdev->dev, NULL);
5548 +- if (r)
5549 +- goto err_component;
5550 +-
5551 +- dss_debugfs_create_file("dss", dss_dump_regs);
5552 +-
5553 + pm_set_vt_switch(0);
5554 +
5555 + omapdss_gather_components(dev);
5556 + omapdss_set_is_initialized(true);
5557 +
5558 + return 0;
5559 +-
5560 +-err_component:
5561 +-err_runtime_get:
5562 +- pm_runtime_disable(&pdev->dev);
5563 +- dss_uninit_ports(pdev);
5564 +-err_init_ports:
5565 +- if (dss.video1_pll)
5566 +- dss_video_pll_uninit(dss.video1_pll);
5567 +-
5568 +- if (dss.video2_pll)
5569 +- dss_video_pll_uninit(dss.video2_pll);
5570 +-err_pll_init:
5571 +-err_setup_clocks:
5572 +- dss_put_clocks();
5573 +- return r;
5574 + }
5575 +
5576 + static void dss_unbind(struct device *dev)
5577 +@@ -1390,18 +1320,6 @@ static void dss_unbind(struct device *dev)
5578 + omapdss_set_is_initialized(false);
5579 +
5580 + component_unbind_all(&pdev->dev, NULL);
5581 +-
5582 +- if (dss.video1_pll)
5583 +- dss_video_pll_uninit(dss.video1_pll);
5584 +-
5585 +- if (dss.video2_pll)
5586 +- dss_video_pll_uninit(dss.video2_pll);
5587 +-
5588 +- dss_uninit_ports(pdev);
5589 +-
5590 +- pm_runtime_disable(&pdev->dev);
5591 +-
5592 +- dss_put_clocks();
5593 + }
5594 +
5595 + static const struct component_master_ops dss_component_ops = {
5596 +@@ -1433,10 +1351,46 @@ static int dss_add_child_component(struct device *dev, void *data)
5597 + return 0;
5598 + }
5599 +
5600 ++static int dss_probe_hardware(void)
5601 ++{
5602 ++ u32 rev;
5603 ++ int r;
5604 ++
5605 ++ r = dss_runtime_get();
5606 ++ if (r)
5607 ++ return r;
5608 ++
5609 ++ dss.dss_clk_rate = clk_get_rate(dss.dss_clk);
5610 ++
5611 ++ /* Select DPLL */
5612 ++ REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
5613 ++
5614 ++ dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
5615 ++
5616 ++#ifdef CONFIG_OMAP2_DSS_VENC
5617 ++ REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
5618 ++ REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
5619 ++ REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
5620 ++#endif
5621 ++ dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK;
5622 ++ dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK;
5623 ++ dss.dispc_clk_source = DSS_CLK_SRC_FCK;
5624 ++ dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK;
5625 ++ dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK;
5626 ++
5627 ++ rev = dss_read_reg(DSS_REVISION);
5628 ++ pr_info("OMAP DSS rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
5629 ++
5630 ++ dss_runtime_put();
5631 ++
5632 ++ return 0;
5633 ++}
5634 ++
5635 + static int dss_probe(struct platform_device *pdev)
5636 + {
5637 + const struct soc_device_attribute *soc;
5638 + struct component_match *match = NULL;
5639 ++ struct resource *dss_mem;
5640 + int r;
5641 +
5642 + dss.pdev = pdev;
5643 +@@ -1451,20 +1405,69 @@ static int dss_probe(struct platform_device *pdev)
5644 + else
5645 + dss.feat = of_match_device(dss_of_match, &pdev->dev)->data;
5646 +
5647 +- r = dss_initialize_debugfs();
5648 ++ /* Map I/O registers, get and setup clocks. */
5649 ++ dss_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5650 ++ dss.base = devm_ioremap_resource(&pdev->dev, dss_mem);
5651 ++ if (IS_ERR(dss.base))
5652 ++ return PTR_ERR(dss.base);
5653 ++
5654 ++ r = dss_get_clocks();
5655 + if (r)
5656 + return r;
5657 +
5658 +- /* add all the child devices as components */
5659 ++ r = dss_setup_default_clock();
5660 ++ if (r)
5661 ++ goto err_put_clocks;
5662 ++
5663 ++ /* Setup the video PLLs and the DPI and SDI ports. */
5664 ++ r = dss_video_pll_probe(pdev);
5665 ++ if (r)
5666 ++ goto err_put_clocks;
5667 ++
5668 ++ r = dss_init_ports(pdev);
5669 ++ if (r)
5670 ++ goto err_uninit_plls;
5671 ++
5672 ++ /* Enable runtime PM and probe the hardware. */
5673 ++ pm_runtime_enable(&pdev->dev);
5674 ++
5675 ++ r = dss_probe_hardware();
5676 ++ if (r)
5677 ++ goto err_pm_runtime_disable;
5678 ++
5679 ++ /* Initialize debugfs. */
5680 ++ r = dss_initialize_debugfs();
5681 ++ if (r)
5682 ++ goto err_pm_runtime_disable;
5683 ++
5684 ++ dss_debugfs_create_file("dss", dss_dump_regs);
5685 ++
5686 ++ /* Add all the child devices as components. */
5687 + device_for_each_child(&pdev->dev, &match, dss_add_child_component);
5688 +
5689 + r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
5690 +- if (r) {
5691 +- dss_uninitialize_debugfs();
5692 +- return r;
5693 +- }
5694 ++ if (r)
5695 ++ goto err_uninit_debugfs;
5696 +
5697 + return 0;
5698 ++
5699 ++err_uninit_debugfs:
5700 ++ dss_uninitialize_debugfs();
5701 ++
5702 ++err_pm_runtime_disable:
5703 ++ pm_runtime_disable(&pdev->dev);
5704 ++ dss_uninit_ports(pdev);
5705 ++
5706 ++err_uninit_plls:
5707 ++ if (dss.video1_pll)
5708 ++ dss_video_pll_uninit(dss.video1_pll);
5709 ++ if (dss.video2_pll)
5710 ++ dss_video_pll_uninit(dss.video2_pll);
5711 ++
5712 ++err_put_clocks:
5713 ++ dss_put_clocks();
5714 ++
5715 ++ return r;
5716 + }
5717 +
5718 + static int dss_remove(struct platform_device *pdev)
5719 +@@ -1473,6 +1476,18 @@ static int dss_remove(struct platform_device *pdev)
5720 +
5721 + dss_uninitialize_debugfs();
5722 +
5723 ++ pm_runtime_disable(&pdev->dev);
5724 ++
5725 ++ dss_uninit_ports(pdev);
5726 ++
5727 ++ if (dss.video1_pll)
5728 ++ dss_video_pll_uninit(dss.video1_pll);
5729 ++
5730 ++ if (dss.video2_pll)
5731 ++ dss_video_pll_uninit(dss.video2_pll);
5732 ++
5733 ++ dss_put_clocks();
5734 ++
5735 + return 0;
5736 + }
5737 +
5738 +diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
5739 +index 234af81fb3d0..fc56d033febe 100644
5740 +--- a/drivers/gpu/drm/panel/panel-simple.c
5741 ++++ b/drivers/gpu/drm/panel/panel-simple.c
5742 +@@ -1561,7 +1561,7 @@ static const struct panel_desc ontat_yx700wv03 = {
5743 + .width = 154,
5744 + .height = 83,
5745 + },
5746 +- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
5747 ++ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
5748 + };
5749 +
5750 + static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
5751 +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
5752 +index 12d22f3db1af..6a4b8c98a719 100644
5753 +--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
5754 ++++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
5755 +@@ -59,11 +59,8 @@ static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds,
5756 +
5757 + rcar_lvds_write(lvds, LVDPLLCR, pllcr);
5758 +
5759 +- /*
5760 +- * Select the input, hardcode mode 0, enable LVDS operation and turn
5761 +- * bias circuitry on.
5762 +- */
5763 +- lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_BEN | LVDCR0_LVEN;
5764 ++ /* Select the input and set the LVDS mode. */
5765 ++ lvdcr0 = lvds->mode << LVDCR0_LVMD_SHIFT;
5766 + if (rcrtc->index == 2)
5767 + lvdcr0 |= LVDCR0_DUSEL;
5768 + rcar_lvds_write(lvds, LVDCR0, lvdcr0);
5769 +@@ -74,6 +71,10 @@ static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds,
5770 + LVDCR1_CHSTBY_GEN2(1) | LVDCR1_CHSTBY_GEN2(0) |
5771 + LVDCR1_CLKSTBY_GEN2);
5772 +
5773 ++ /* Enable LVDS operation and turn bias circuitry on. */
5774 ++ lvdcr0 |= LVDCR0_BEN | LVDCR0_LVEN;
5775 ++ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
5776 ++
5777 + /*
5778 + * Turn the PLL on, wait for the startup delay, and turn the output
5779 + * on.
5780 +@@ -95,7 +96,7 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
5781 + u32 lvdcr0;
5782 + u32 pllcr;
5783 +
5784 +- /* PLL clock configuration */
5785 ++ /* Set the PLL clock configuration and LVDS mode. */
5786 + if (freq < 42000)
5787 + pllcr = LVDPLLCR_PLLDIVCNT_42M;
5788 + else if (freq < 85000)
5789 +@@ -107,6 +108,9 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
5790 +
5791 + rcar_lvds_write(lvds, LVDPLLCR, pllcr);
5792 +
5793 ++ lvdcr0 = lvds->mode << LVDCR0_LVMD_SHIFT;
5794 ++ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
5795 ++
5796 + /* Turn all the channels on. */
5797 + rcar_lvds_write(lvds, LVDCR1,
5798 + LVDCR1_CHSTBY_GEN3(3) | LVDCR1_CHSTBY_GEN3(2) |
5799 +@@ -117,7 +121,7 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
5800 + * Turn the PLL on, set it to LVDS normal mode, wait for the startup
5801 + * delay and turn the output on.
5802 + */
5803 +- lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_PLLON;
5804 ++ lvdcr0 |= LVDCR0_PLLON;
5805 + rcar_lvds_write(lvds, LVDCR0, lvdcr0);
5806 +
5807 + lvdcr0 |= LVDCR0_PWD;
5808 +diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
5809 +index 1869c8bb76c8..bde65186a3c3 100644
5810 +--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
5811 ++++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
5812 +@@ -262,7 +262,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
5813 + * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
5814 + */
5815 + vma->vm_flags &= ~VM_PFNMAP;
5816 +- vma->vm_pgoff = 0;
5817 +
5818 + if (rk_obj->pages)
5819 + ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
5820 +@@ -297,6 +296,12 @@ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
5821 + if (ret)
5822 + return ret;
5823 +
5824 ++ /*
5825 ++ * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
5826 ++ * whole buffer from the start.
5827 ++ */
5828 ++ vma->vm_pgoff = 0;
5829 ++
5830 + obj = vma->vm_private_data;
5831 +
5832 + return rockchip_drm_gem_object_mmap(obj, vma);
5833 +diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
5834 +index d401156490f3..4460ca46a350 100644
5835 +--- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c
5836 ++++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
5837 +@@ -129,10 +129,13 @@ static int sun4i_dclk_get_phase(struct clk_hw *hw)
5838 + static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees)
5839 + {
5840 + struct sun4i_dclk *dclk = hw_to_dclk(hw);
5841 ++ u32 val = degrees / 120;
5842 ++
5843 ++ val <<= 28;
5844 +
5845 + regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG,
5846 + GENMASK(29, 28),
5847 +- degrees / 120);
5848 ++ val);
5849 +
5850 + return 0;
5851 + }
5852 +diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
5853 +index 597d563d636a..0598b4c18c25 100644
5854 +--- a/drivers/gpu/drm/tegra/drm.c
5855 ++++ b/drivers/gpu/drm/tegra/drm.c
5856 +@@ -250,6 +250,7 @@ static void tegra_drm_unload(struct drm_device *drm)
5857 +
5858 + drm_kms_helper_poll_fini(drm);
5859 + tegra_drm_fb_exit(drm);
5860 ++ drm_atomic_helper_shutdown(drm);
5861 + drm_mode_config_cleanup(drm);
5862 +
5863 + err = host1x_device_exit(device);
5864 +diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
5865 +index b94bd5440e57..ed9c443bb8a1 100644
5866 +--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
5867 ++++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
5868 +@@ -196,6 +196,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
5869 + case VIRTGPU_PARAM_3D_FEATURES:
5870 + value = vgdev->has_virgl_3d == true ? 1 : 0;
5871 + break;
5872 ++ case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
5873 ++ value = 1;
5874 ++ break;
5875 + default:
5876 + return -EINVAL;
5877 + }
5878 +@@ -471,7 +474,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
5879 + {
5880 + struct virtio_gpu_device *vgdev = dev->dev_private;
5881 + struct drm_virtgpu_get_caps *args = data;
5882 +- int size;
5883 ++ unsigned size, host_caps_size;
5884 + int i;
5885 + int found_valid = -1;
5886 + int ret;
5887 +@@ -480,6 +483,10 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
5888 + if (vgdev->num_capsets == 0)
5889 + return -ENOSYS;
5890 +
5891 ++ /* don't allow userspace to pass 0 */
5892 ++ if (args->size == 0)
5893 ++ return -EINVAL;
5894 ++
5895 + spin_lock(&vgdev->display_info_lock);
5896 + for (i = 0; i < vgdev->num_capsets; i++) {
5897 + if (vgdev->capsets[i].id == args->cap_set_id) {
5898 +@@ -495,11 +502,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
5899 + return -EINVAL;
5900 + }
5901 +
5902 +- size = vgdev->capsets[found_valid].max_size;
5903 +- if (args->size > size) {
5904 +- spin_unlock(&vgdev->display_info_lock);
5905 +- return -EINVAL;
5906 +- }
5907 ++ host_caps_size = vgdev->capsets[found_valid].max_size;
5908 ++ /* only copy to user the minimum of the host caps size or the guest caps size */
5909 ++ size = min(args->size, host_caps_size);
5910 +
5911 + list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
5912 + if (cache_ent->id == args->cap_set_id &&
5913 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
5914 +index 557a033fb610..8545488aa0cf 100644
5915 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
5916 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
5917 +@@ -135,17 +135,24 @@
5918 +
5919 + #else
5920 +
5921 +-/* In the 32-bit version of this macro, we use "m" because there is no
5922 +- * more register left for bp
5923 ++/*
5924 ++ * In the 32-bit version of this macro, we store bp in a memory location
5925 ++ * because we've run out of registers.
5926 ++ * We can't reference that memory location once we've modified
5927 ++ * %esp or %ebp, so we first push it on the stack, just before we push
5928 ++ * %ebp, and then when we need it we read it from the stack where we
5929 ++ * just pushed it.
5930 + */
5931 + #define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
5932 + port_num, magic, bp, \
5933 + eax, ebx, ecx, edx, si, di) \
5934 + ({ \
5935 +- asm volatile ("push %%ebp;" \
5936 +- "mov %12, %%ebp;" \
5937 ++ asm volatile ("push %12;" \
5938 ++ "push %%ebp;" \
5939 ++ "mov 0x04(%%esp), %%ebp;" \
5940 + "rep outsb;" \
5941 +- "pop %%ebp;" : \
5942 ++ "pop %%ebp;" \
5943 ++ "add $0x04, %%esp;" : \
5944 + "=a"(eax), \
5945 + "=b"(ebx), \
5946 + "=c"(ecx), \
5947 +@@ -167,10 +174,12 @@
5948 + port_num, magic, bp, \
5949 + eax, ebx, ecx, edx, si, di) \
5950 + ({ \
5951 +- asm volatile ("push %%ebp;" \
5952 +- "mov %12, %%ebp;" \
5953 ++ asm volatile ("push %12;" \
5954 ++ "push %%ebp;" \
5955 ++ "mov 0x04(%%esp), %%ebp;" \
5956 + "rep insb;" \
5957 +- "pop %%ebp" : \
5958 ++ "pop %%ebp;" \
5959 ++ "add $0x04, %%esp;" : \
5960 + "=a"(eax), \
5961 + "=b"(ebx), \
5962 + "=c"(ecx), \
5963 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
5964 +index aacce4753a62..205a5f4b58f3 100644
5965 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
5966 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
5967 +@@ -453,7 +453,11 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
5968 + struct drm_plane_state *old_state)
5969 + {
5970 + struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
5971 ++ struct drm_crtc *crtc = plane->state->crtc ?
5972 ++ plane->state->crtc : old_state->crtc;
5973 +
5974 ++ if (vps->dmabuf)
5975 ++ vmw_dmabuf_unpin(vmw_priv(crtc->dev), vps->dmabuf, false);
5976 + vmw_dmabuf_unreference(&vps->dmabuf);
5977 + vps->dmabuf_size = 0;
5978 +
5979 +@@ -491,10 +495,17 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
5980 + }
5981 +
5982 + size = new_state->crtc_w * new_state->crtc_h * 4;
5983 ++ dev_priv = vmw_priv(crtc->dev);
5984 +
5985 + if (vps->dmabuf) {
5986 +- if (vps->dmabuf_size == size)
5987 +- return 0;
5988 ++ if (vps->dmabuf_size == size) {
5989 ++ /*
5990 ++ * Note that this might temporarily up the pin-count
5991 ++ * to 2, until cleanup_fb() is called.
5992 ++ */
5993 ++ return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf,
5994 ++ true);
5995 ++ }
5996 +
5997 + vmw_dmabuf_unreference(&vps->dmabuf);
5998 + vps->dmabuf_size = 0;
5999 +@@ -504,7 +515,6 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
6000 + if (!vps->dmabuf)
6001 + return -ENOMEM;
6002 +
6003 +- dev_priv = vmw_priv(crtc->dev);
6004 + vmw_svga_enable(dev_priv);
6005 +
6006 + /* After we have alloced the backing store might not be able to
6007 +@@ -515,13 +525,18 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
6008 + &vmw_vram_ne_placement,
6009 + false, &vmw_dmabuf_bo_free);
6010 + vmw_overlay_resume_all(dev_priv);
6011 +-
6012 +- if (ret != 0)
6013 ++ if (ret) {
6014 + vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
6015 +- else
6016 +- vps->dmabuf_size = size;
6017 ++ return ret;
6018 ++ }
6019 +
6020 +- return ret;
6021 ++ vps->dmabuf_size = size;
6022 ++
6023 ++ /*
6024 ++ * TTM already thinks the buffer is pinned, but make sure the
6025 ++ * pin_count is actually incremented.
6026 ++ */
6027 ++ return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, true);
6028 + }
6029 +
6030 +
6031 +diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
6032 +index c860a7997cb5..1d1612e28854 100644
6033 +--- a/drivers/gpu/ipu-v3/ipu-pre.c
6034 ++++ b/drivers/gpu/ipu-v3/ipu-pre.c
6035 +@@ -125,11 +125,14 @@ ipu_pre_lookup_by_phandle(struct device *dev, const char *name, int index)
6036 + if (pre_node == pre->dev->of_node) {
6037 + mutex_unlock(&ipu_pre_list_mutex);
6038 + device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE);
6039 ++ of_node_put(pre_node);
6040 + return pre;
6041 + }
6042 + }
6043 + mutex_unlock(&ipu_pre_list_mutex);
6044 +
6045 ++ of_node_put(pre_node);
6046 ++
6047 + return NULL;
6048 + }
6049 +
6050 +diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
6051 +index 0013ca9f72c8..1c36fa3a90e2 100644
6052 +--- a/drivers/gpu/ipu-v3/ipu-prg.c
6053 ++++ b/drivers/gpu/ipu-v3/ipu-prg.c
6054 +@@ -101,11 +101,14 @@ ipu_prg_lookup_by_phandle(struct device *dev, const char *name, int ipu_id)
6055 + mutex_unlock(&ipu_prg_list_mutex);
6056 + device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE);
6057 + prg->id = ipu_id;
6058 ++ of_node_put(prg_node);
6059 + return prg;
6060 + }
6061 + }
6062 + mutex_unlock(&ipu_prg_list_mutex);
6063 +
6064 ++ of_node_put(prg_node);
6065 ++
6066 + return NULL;
6067 + }
6068 +
6069 +@@ -249,10 +252,14 @@ void ipu_prg_channel_disable(struct ipuv3_channel *ipu_chan)
6070 + {
6071 + int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
6072 + struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
6073 +- struct ipu_prg_channel *chan = &prg->chan[prg_chan];
6074 ++ struct ipu_prg_channel *chan;
6075 + u32 val;
6076 +
6077 +- if (!chan->enabled || prg_chan < 0)
6078 ++ if (prg_chan < 0)
6079 ++ return;
6080 ++
6081 ++ chan = &prg->chan[prg_chan];
6082 ++ if (!chan->enabled)
6083 + return;
6084 +
6085 + clk_prepare_enable(prg->clk_ipg);
6086 +@@ -279,13 +286,15 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
6087 + {
6088 + int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
6089 + struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
6090 +- struct ipu_prg_channel *chan = &prg->chan[prg_chan];
6091 ++ struct ipu_prg_channel *chan;
6092 + u32 val;
6093 + int ret;
6094 +
6095 + if (prg_chan < 0)
6096 + return prg_chan;
6097 +
6098 ++ chan = &prg->chan[prg_chan];
6099 ++
6100 + if (chan->enabled) {
6101 + ipu_pre_update(prg->pres[chan->used_pre], *eba);
6102 + return 0;
6103 +diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
6104 +index c219e43b8f02..f5f3f8cf57ea 100644
6105 +--- a/drivers/hwmon/nct6775.c
6106 ++++ b/drivers/hwmon/nct6775.c
6107 +@@ -1469,7 +1469,7 @@ static void nct6775_update_pwm(struct device *dev)
6108 + duty_is_dc = data->REG_PWM_MODE[i] &&
6109 + (nct6775_read_value(data, data->REG_PWM_MODE[i])
6110 + & data->PWM_MODE_MASK[i]);
6111 +- data->pwm_mode[i] = duty_is_dc;
6112 ++ data->pwm_mode[i] = !duty_is_dc;
6113 +
6114 + fanmodecfg = nct6775_read_value(data, data->REG_FAN_MODE[i]);
6115 + for (j = 0; j < ARRAY_SIZE(data->REG_PWM); j++) {
6116 +@@ -2350,7 +2350,7 @@ show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf)
6117 + struct nct6775_data *data = nct6775_update_device(dev);
6118 + struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
6119 +
6120 +- return sprintf(buf, "%d\n", !data->pwm_mode[sattr->index]);
6121 ++ return sprintf(buf, "%d\n", data->pwm_mode[sattr->index]);
6122 + }
6123 +
6124 + static ssize_t
6125 +@@ -2371,9 +2371,9 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
6126 + if (val > 1)
6127 + return -EINVAL;
6128 +
6129 +- /* Setting DC mode is not supported for all chips/channels */
6130 ++ /* Setting DC mode (0) is not supported for all chips/channels */
6131 + if (data->REG_PWM_MODE[nr] == 0) {
6132 +- if (val)
6133 ++ if (!val)
6134 + return -EINVAL;
6135 + return count;
6136 + }
6137 +@@ -2382,7 +2382,7 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
6138 + data->pwm_mode[nr] = val;
6139 + reg = nct6775_read_value(data, data->REG_PWM_MODE[nr]);
6140 + reg &= ~data->PWM_MODE_MASK[nr];
6141 +- if (val)
6142 ++ if (!val)
6143 + reg |= data->PWM_MODE_MASK[nr];
6144 + nct6775_write_value(data, data->REG_PWM_MODE[nr], reg);
6145 + mutex_unlock(&data->update_lock);
6146 +diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
6147 +index 00d6995af4c2..8a44e94d5679 100644
6148 +--- a/drivers/hwmon/pmbus/adm1275.c
6149 ++++ b/drivers/hwmon/pmbus/adm1275.c
6150 +@@ -154,7 +154,7 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
6151 + const struct adm1275_data *data = to_adm1275_data(info);
6152 + int ret = 0;
6153 +
6154 +- if (page)
6155 ++ if (page > 0)
6156 + return -ENXIO;
6157 +
6158 + switch (reg) {
6159 +@@ -240,7 +240,7 @@ static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
6160 + const struct adm1275_data *data = to_adm1275_data(info);
6161 + int ret;
6162 +
6163 +- if (page)
6164 ++ if (page > 0)
6165 + return -ENXIO;
6166 +
6167 + switch (reg) {
6168 +diff --git a/drivers/hwmon/pmbus/max8688.c b/drivers/hwmon/pmbus/max8688.c
6169 +index dd4883a19045..e951f9b87abb 100644
6170 +--- a/drivers/hwmon/pmbus/max8688.c
6171 ++++ b/drivers/hwmon/pmbus/max8688.c
6172 +@@ -45,7 +45,7 @@ static int max8688_read_word_data(struct i2c_client *client, int page, int reg)
6173 + {
6174 + int ret;
6175 +
6176 +- if (page)
6177 ++ if (page > 0)
6178 + return -ENXIO;
6179 +
6180 + switch (reg) {
6181 +diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
6182 +index 6ea62c62ff27..9cdb3fbc8c1f 100644
6183 +--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
6184 ++++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
6185 +@@ -315,7 +315,7 @@ static void debug_dump_regs(struct debug_drvdata *drvdata)
6186 + }
6187 +
6188 + pc = debug_adjust_pc(drvdata);
6189 +- dev_emerg(dev, " EDPCSR: [<%p>] %pS\n", (void *)pc, (void *)pc);
6190 ++ dev_emerg(dev, " EDPCSR: [<%px>] %pS\n", (void *)pc, (void *)pc);
6191 +
6192 + if (drvdata->edcidsr_present)
6193 + dev_emerg(dev, " EDCIDSR: %08x\n", drvdata->edcidsr);
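
The %p-to-%px change above matters because recent kernels hash %p output to avoid leaking addresses, while %px prints the raw value; for dumping EDPCSR the real program counter is the point. A kernel-style illustration (assuming a valid dev pointer; not part of the patch):

    static void show_pc(struct device *dev, unsigned long pc)
    {
            dev_emerg(dev, "hashed: [<%p>]\n",  (void *)pc);  /* obfuscated */
            dev_emerg(dev, "raw:    [<%px>]\n", (void *)pc);  /* real address */
            dev_emerg(dev, "symbol: %pS\n",     (void *)pc);  /* name+offset */
    }
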
6194 +diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
6195 +index 1a023e30488c..c1793313bb08 100644
6196 +--- a/drivers/hwtracing/intel_th/core.c
6197 ++++ b/drivers/hwtracing/intel_th/core.c
6198 +@@ -935,7 +935,7 @@ EXPORT_SYMBOL_GPL(intel_th_trace_disable);
6199 + int intel_th_set_output(struct intel_th_device *thdev,
6200 + unsigned int master)
6201 + {
6202 +- struct intel_th_device *hub = to_intel_th_device(thdev->dev.parent);
6203 ++ struct intel_th_device *hub = to_intel_th_hub(thdev);
6204 + struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver);
6205 +
6206 + if (!hubdrv->set_output)
6207 +diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
6208 +index a832c45276a4..b0fb97823d6a 100644
6209 +--- a/drivers/i2c/busses/i2c-mv64xxx.c
6210 ++++ b/drivers/i2c/busses/i2c-mv64xxx.c
6211 +@@ -844,12 +844,16 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
6212 + */
6213 + if (of_device_is_compatible(np, "marvell,mv78230-i2c")) {
6214 + drv_data->offload_enabled = true;
6215 +- drv_data->errata_delay = true;
6216 ++ /* The delay is only needed in standard mode (100kHz) */
6217 ++ if (bus_freq <= 100000)
6218 ++ drv_data->errata_delay = true;
6219 + }
6220 +
6221 + if (of_device_is_compatible(np, "marvell,mv78230-a0-i2c")) {
6222 + drv_data->offload_enabled = false;
6223 +- drv_data->errata_delay = true;
6224 ++ /* The delay is only needed in standard mode (100kHz) */
6225 ++ if (bus_freq <= 100000)
6226 ++ drv_data->errata_delay = true;
6227 + }
6228 +
6229 + if (of_device_is_compatible(np, "allwinner,sun6i-a31-i2c"))
6230 +diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
6231 +index 6ff0be8cbdc9..4de45db76756 100644
6232 +--- a/drivers/ide/ide-cd.c
6233 ++++ b/drivers/ide/ide-cd.c
6234 +@@ -1614,6 +1614,8 @@ static int idecd_open(struct block_device *bdev, fmode_t mode)
6235 + struct cdrom_info *info;
6236 + int rc = -ENXIO;
6237 +
6238 ++ check_disk_change(bdev);
6239 ++
6240 + mutex_lock(&ide_cd_mutex);
6241 + info = ide_cd_get(bdev->bd_disk);
6242 + if (!info)
6243 +diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
6244 +index 45f2f095f793..4eb72ff539fc 100644
6245 +--- a/drivers/infiniband/core/multicast.c
6246 ++++ b/drivers/infiniband/core/multicast.c
6247 +@@ -724,21 +724,19 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
6248 + {
6249 + int ret;
6250 + u16 gid_index;
6251 +- u8 p;
6252 +-
6253 +- if (rdma_protocol_roce(device, port_num)) {
6254 +- ret = ib_find_cached_gid_by_port(device, &rec->port_gid,
6255 +- gid_type, port_num,
6256 +- ndev,
6257 +- &gid_index);
6258 +- } else if (rdma_protocol_ib(device, port_num)) {
6259 +- ret = ib_find_cached_gid(device, &rec->port_gid,
6260 +- IB_GID_TYPE_IB, NULL, &p,
6261 +- &gid_index);
6262 +- } else {
6263 +- ret = -EINVAL;
6264 +- }
6265 +
6266 ++ /* GID table is not based on the netdevice for IB link layer,
6267 ++ * so ignore ndev during search.
6268 ++ */
6269 ++ if (rdma_protocol_ib(device, port_num))
6270 ++ ndev = NULL;
6271 ++ else if (!rdma_protocol_roce(device, port_num))
6272 ++ return -EINVAL;
6273 ++
6274 ++ ret = ib_find_cached_gid_by_port(device, &rec->port_gid,
6275 ++ gid_type, port_num,
6276 ++ ndev,
6277 ++ &gid_index);
6278 + if (ret)
6279 + return ret;
6280 +
6281 +diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
6282 +index 9cb801d1fe54..1984d6cee3e0 100644
6283 +--- a/drivers/infiniband/core/rdma_core.c
6284 ++++ b/drivers/infiniband/core/rdma_core.c
6285 +@@ -486,12 +486,13 @@ int rdma_explicit_destroy(struct ib_uobject *uobject)
6286 + ret = uobject->type->type_class->remove_commit(uobject,
6287 + RDMA_REMOVE_DESTROY);
6288 + if (ret)
6289 +- return ret;
6290 ++ goto out;
6291 +
6292 + uobject->type = &null_obj_type;
6293 +
6294 ++out:
6295 + up_read(&ucontext->cleanup_rwsem);
6296 +- return 0;
6297 ++ return ret;
6298 + }
6299 +
6300 + static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
6301 +diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
6302 +index ab5e1024fea9..b81d2597f563 100644
6303 +--- a/drivers/infiniband/core/sa_query.c
6304 ++++ b/drivers/infiniband/core/sa_query.c
6305 +@@ -1291,10 +1291,9 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
6306 +
6307 + resolved_dev = dev_get_by_index(dev_addr.net,
6308 + dev_addr.bound_dev_if);
6309 +- if (resolved_dev->flags & IFF_LOOPBACK) {
6310 +- dev_put(resolved_dev);
6311 +- resolved_dev = idev;
6312 +- dev_hold(resolved_dev);
6313 ++ if (!resolved_dev) {
6314 ++ dev_put(idev);
6315 ++ return -ENODEV;
6316 + }
6317 + ndev = ib_get_ndev_from_path(rec);
6318 + rcu_read_lock();
6319 +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
6320 +index c65f0e8ecbd6..e47baf0950e3 100644
6321 +--- a/drivers/infiniband/core/ucma.c
6322 ++++ b/drivers/infiniband/core/ucma.c
6323 +@@ -1315,7 +1315,7 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
6324 + if (IS_ERR(ctx))
6325 + return PTR_ERR(ctx);
6326 +
6327 +- if (unlikely(cmd.optval > KMALLOC_MAX_SIZE))
6328 ++ if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
6329 + return -EINVAL;
6330 +
6331 + optval = memdup_user((void __user *) (unsigned long) cmd.optval,
6332 +diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
6333 +index 9a4e899d94b3..2b6c9b516070 100644
6334 +--- a/drivers/infiniband/core/umem.c
6335 ++++ b/drivers/infiniband/core/umem.c
6336 +@@ -119,7 +119,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
6337 + umem->length = size;
6338 + umem->address = addr;
6339 + umem->page_shift = PAGE_SHIFT;
6340 +- umem->pid = get_task_pid(current, PIDTYPE_PID);
6341 + /*
6342 + * We ask for writable memory if any of the following
6343 + * access flags are set. "Local write" and "remote write"
6344 +@@ -132,7 +131,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
6345 + IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
6346 +
6347 + if (access & IB_ACCESS_ON_DEMAND) {
6348 +- put_pid(umem->pid);
6349 + ret = ib_umem_odp_get(context, umem, access);
6350 + if (ret) {
6351 + kfree(umem);
6352 +@@ -148,7 +146,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
6353 +
6354 + page_list = (struct page **) __get_free_page(GFP_KERNEL);
6355 + if (!page_list) {
6356 +- put_pid(umem->pid);
6357 + kfree(umem);
6358 + return ERR_PTR(-ENOMEM);
6359 + }
6360 +@@ -231,7 +228,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
6361 + if (ret < 0) {
6362 + if (need_release)
6363 + __ib_umem_release(context->device, umem, 0);
6364 +- put_pid(umem->pid);
6365 + kfree(umem);
6366 + } else
6367 + current->mm->pinned_vm = locked;
6368 +@@ -274,8 +270,7 @@ void ib_umem_release(struct ib_umem *umem)
6369 +
6370 + __ib_umem_release(umem->context->device, umem, 1);
6371 +
6372 +- task = get_pid_task(umem->pid, PIDTYPE_PID);
6373 +- put_pid(umem->pid);
6374 ++ task = get_pid_task(umem->context->tgid, PIDTYPE_PID);
6375 + if (!task)
6376 + goto out;
6377 + mm = get_task_mm(task);
6378 +diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
6379 +index 8f2dc79ad4ec..5e9f72ea4579 100644
6380 +--- a/drivers/infiniband/core/uverbs_ioctl.c
6381 ++++ b/drivers/infiniband/core/uverbs_ioctl.c
6382 +@@ -59,6 +59,9 @@ static int uverbs_process_attr(struct ib_device *ibdev,
6383 + return 0;
6384 + }
6385 +
6386 ++ if (test_bit(attr_id, attr_bundle_h->valid_bitmap))
6387 ++ return -EINVAL;
6388 ++
6389 + spec = &attr_spec_bucket->attrs[attr_id];
6390 + e = &elements[attr_id];
6391 + e->uattr = uattr_ptr;
6392 +diff --git a/drivers/infiniband/core/uverbs_ioctl_merge.c b/drivers/infiniband/core/uverbs_ioctl_merge.c
6393 +index 76ddb6564578..48a99dce976c 100644
6394 +--- a/drivers/infiniband/core/uverbs_ioctl_merge.c
6395 ++++ b/drivers/infiniband/core/uverbs_ioctl_merge.c
6396 +@@ -114,6 +114,7 @@ static size_t get_elements_above_id(const void **iters,
6397 + short min = SHRT_MAX;
6398 + const void *elem;
6399 + int i, j, last_stored = -1;
6400 ++ unsigned int equal_min = 0;
6401 +
6402 + for_each_element(elem, i, j, elements, num_elements, num_offset,
6403 + data_offset) {
6404 +@@ -136,6 +137,10 @@ static size_t get_elements_above_id(const void **iters,
6405 + */
6406 + iters[last_stored == i ? num_iters - 1 : num_iters++] = elem;
6407 + last_stored = i;
6408 ++ if (min == GET_ID(id))
6409 ++ equal_min++;
6410 ++ else
6411 ++ equal_min = 1;
6412 + min = GET_ID(id);
6413 + }
6414 +
6415 +@@ -146,15 +151,10 @@ static size_t get_elements_above_id(const void **iters,
6416 + * Therefore, we need to clean the beginning of the array to make sure
6417 + * all ids of final elements are equal to min.
6418 + */
6419 +- for (i = num_iters - 1; i >= 0 &&
6420 +- GET_ID(*(u16 *)(iters[i] + id_offset)) == min; i--)
6421 +- ;
6422 +-
6423 +- num_iters -= i + 1;
6424 +- memmove(iters, iters + i + 1, sizeof(*iters) * num_iters);
6425 ++ memmove(iters, iters + num_iters - equal_min, sizeof(*iters) * equal_min);
6426 +
6427 + *min_id = min;
6428 +- return num_iters;
6429 ++ return equal_min;
6430 + }
6431 +
6432 + #define find_max_element_entry_id(num_elements, elements, num_objects_fld, \
6433 +@@ -322,7 +322,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me
6434 + hash = kzalloc(sizeof(*hash) +
6435 + ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1),
6436 + sizeof(long)) +
6437 +- BITS_TO_LONGS(attr_max_bucket) * sizeof(long),
6438 ++ BITS_TO_LONGS(attr_max_bucket + 1) * sizeof(long),
6439 + GFP_KERNEL);
6440 + if (!hash) {
6441 + res = -ENOMEM;
6442 +@@ -509,7 +509,7 @@ static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_
6443 + * first handler which != NULL. This also defines the
6444 + * set of flags used for this handler.
6445 + */
6446 +- for (i = num_object_defs - 1;
6447 ++ for (i = num_method_defs - 1;
6448 + i >= 0 && !method_defs[i]->handler; i--)
6449 + ;
6450 + hash->methods[min_id++] = method;
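
The equal_min counter above replaces a backwards scan that miscounted how many collected entries carry the minimum id; the memmove then keeps exactly those trailing entries. A toy, self-contained demonstration of the intended compaction (plain C, not the uverbs code):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* ids gathered during the scan; the final minimum is 2 and the
             * three trailing entries carry it, so equal_min == 3 */
            int ids[] = { 3, 3, 2, 2, 2 };
            size_t num = sizeof(ids) / sizeof(ids[0]), equal_min = 3;

            memmove(ids, ids + num - equal_min, sizeof(*ids) * equal_min);
            for (size_t i = 0; i < equal_min; i++)
                    printf("%d ", ids[i]);          /* prints: 2 2 2 */
            printf("\n");
            return 0;
    }
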
6451 +diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
6452 +index b210495ff33c..ef9135aa392c 100644
6453 +--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
6454 ++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
6455 +@@ -1180,7 +1180,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
6456 + rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
6457 + if (rc) {
6458 + dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
6459 +- goto fail;
6460 ++ goto free_umem;
6461 + }
6462 + }
6463 +
6464 +@@ -1208,6 +1208,13 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
6465 + return &qp->ib_qp;
6466 + qp_destroy:
6467 + bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
6468 ++free_umem:
6469 ++ if (udata) {
6470 ++ if (qp->rumem)
6471 ++ ib_umem_release(qp->rumem);
6472 ++ if (qp->sumem)
6473 ++ ib_umem_release(qp->sumem);
6474 ++ }
6475 + fail:
6476 + kfree(qp);
6477 + return ERR_PTR(rc);
6478 +@@ -1956,10 +1963,13 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
6479 + wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
6480 + wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
6481 +
6482 ++ /* Need unconditional fence for local invalidate
6483 ++ * opcode to work as expected.
6484 ++ */
6485 ++ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
6486 ++
6487 + if (wr->send_flags & IB_SEND_SIGNALED)
6488 + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
6489 +- if (wr->send_flags & IB_SEND_FENCE)
6490 +- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
6491 + if (wr->send_flags & IB_SEND_SOLICITED)
6492 + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
6493 +
6494 +@@ -1980,8 +1990,12 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
6495 + wqe->frmr.levels = qplib_frpl->hwq.level + 1;
6496 + wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
6497 +
6498 +- if (wr->wr.send_flags & IB_SEND_FENCE)
6499 +- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
6500 ++ /* Need unconditional fence for reg_mr
6501 ++ * opcode to function as expected.
6502 ++ */
6503 ++
6504 ++ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
6505 ++
6506 + if (wr->wr.send_flags & IB_SEND_SIGNALED)
6507 + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
6508 +
6509 +diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
6510 +index e7450ea92aa9..bf811b23bc95 100644
6511 +--- a/drivers/infiniband/hw/bnxt_re/main.c
6512 ++++ b/drivers/infiniband/hw/bnxt_re/main.c
6513 +@@ -1240,9 +1240,12 @@ static void bnxt_re_task(struct work_struct *work)
6514 + switch (re_work->event) {
6515 + case NETDEV_REGISTER:
6516 + rc = bnxt_re_ib_reg(rdev);
6517 +- if (rc)
6518 ++ if (rc) {
6519 + dev_err(rdev_to_dev(rdev),
6520 + "Failed to register with IB: %#x", rc);
6521 ++ bnxt_re_remove_one(rdev);
6522 ++ bnxt_re_dev_unreg(rdev);
6523 ++ }
6524 + break;
6525 + case NETDEV_UP:
6526 + bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
6527 +@@ -1398,6 +1401,11 @@ static void __exit bnxt_re_mod_exit(void)
6528 +
6529 + list_for_each_entry(rdev, &to_be_deleted, list) {
6530 + dev_info(rdev_to_dev(rdev), "Unregistering Device");
6531 ++ /*
6532 ++ * Flush out any scheduled tasks before destroying the
6533 ++ * resources
6534 ++ */
6535 ++ flush_workqueue(bnxt_re_wq);
6536 + bnxt_re_dev_stop(rdev);
6537 + bnxt_re_ib_unreg(rdev, true);
6538 + bnxt_re_remove_one(rdev);
6539 +diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
6540 +index 2bdb1562bd21..8d91733009a4 100644
6541 +--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
6542 ++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
6543 +@@ -457,7 +457,11 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
6544 + int rc;
6545 +
6546 + RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
6547 +-
6548 ++ /* Supply (log-base-2-of-host-page-size - base-page-shift)
6549 ++ * to bono to adjust the doorbell page sizes.
6550 ++ */
6551 ++ req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
6552 ++ RCFW_DBR_BASE_PAGE_SHIFT);
6553 + /*
6554 + * VFs need not setup the HW context area, PF
6555 + * shall setup this area for VF. Skipping the
6556 +diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
6557 +index 85b16da287f9..7c85e3c4445b 100644
6558 +--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
6559 ++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
6560 +@@ -49,6 +49,7 @@
6561 + #define RCFW_COMM_SIZE 0x104
6562 +
6563 + #define RCFW_DBR_PCI_BAR_REGION 2
6564 ++#define RCFW_DBR_BASE_PAGE_SHIFT 12
6565 +
6566 + #define RCFW_CMD_PREP(req, CMD, cmd_flags) \
6567 + do { \
6568 +diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
6569 +index e277e54a05eb..9536de8c5fb8 100644
6570 +--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
6571 ++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
6572 +@@ -130,7 +130,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
6573 + attr->max_pkey = le32_to_cpu(sb->max_pkeys);
6574 +
6575 + attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
6576 +- attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE;
6577 ++ attr->l2_db_size = (sb->l2_db_space_size + 1) *
6578 ++ (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
6579 + attr->max_sgid = le32_to_cpu(sb->max_gid);
6580 +
6581 + strlcpy(attr->fw_ver, "20.6.28.0", sizeof(attr->fw_ver));
6582 +diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
6583 +index eeb55b2db57e..480f592e5b4b 100644
6584 +--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
6585 ++++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
6586 +@@ -1734,7 +1734,30 @@ struct cmdq_initialize_fw {
6587 + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4)
6588 + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4)
6589 + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4)
6590 +- __le16 reserved16;
6591 ++ /* This value is (log-base-2-of-DBR-page-size - 12).
6592 ++ * 0 for 4KB. HW supported values are enumerated below.
6593 ++ */
6594 ++ __le16 log2_dbr_pg_size;
6595 ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK 0xfUL
6596 ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT 0
6597 ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K 0x0UL
6598 ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K 0x1UL
6599 ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K 0x2UL
6600 ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K 0x3UL
6601 ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K 0x4UL
6602 ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K 0x5UL
6603 ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K 0x6UL
6604 ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K 0x7UL
6605 ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M 0x8UL
6606 ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M 0x9UL
6607 ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M 0xaUL
6608 ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M 0xbUL
6609 ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M 0xcUL
6610 ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M 0xdUL
6611 ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M 0xeUL
6612 ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M 0xfUL
6613 ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST \
6614 ++ CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M
6615 + __le64 qpc_page_dir;
6616 + __le64 mrw_page_dir;
6617 + __le64 srq_page_dir;
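
The log2_dbr_pg_size field above tells the firmware how much larger the host page is than the 4K doorbell base page, and l2_db_size is now scaled by that 4K base instead of PAGE_SIZE. A worked check of the arithmetic, assuming 4K and 64K host pages:

    #include <stdio.h>

    #define RCFW_DBR_BASE_PAGE_SHIFT 12     /* firmware base page: 4K */

    int main(void)
    {
            int shifts[] = { 12, 16 };      /* PAGE_SHIFT for 4K and 64K kernels */

            for (int i = 0; i < 2; i++) {
                    int log2_dbr = shifts[i] - RCFW_DBR_BASE_PAGE_SHIFT;

                    printf("PAGE_SHIFT=%d -> log2_dbr_pg_size=%d\n",
                           shifts[i], log2_dbr);
            }
            /* l2_db_size: (l2_db_space_size + 1) << RCFW_DBR_BASE_PAGE_SHIFT,
             * i.e. counted in 4K units regardless of the host page size */
            return 0;
    }
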
6618 +diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
6619 +index 82114ba86041..259562282668 100644
6620 +--- a/drivers/infiniband/hw/hfi1/chip.c
6621 ++++ b/drivers/infiniband/hw/hfi1/chip.c
6622 +@@ -5945,6 +5945,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
6623 + u64 status;
6624 + u32 sw_index;
6625 + int i = 0;
6626 ++ unsigned long irq_flags;
6627 +
6628 + sw_index = dd->hw_to_sw[hw_context];
6629 + if (sw_index >= dd->num_send_contexts) {
6630 +@@ -5954,10 +5955,12 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
6631 + return;
6632 + }
6633 + sci = &dd->send_contexts[sw_index];
6634 ++ spin_lock_irqsave(&dd->sc_lock, irq_flags);
6635 + sc = sci->sc;
6636 + if (!sc) {
6637 + dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
6638 + sw_index, hw_context);
6639 ++ spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
6640 + return;
6641 + }
6642 +
6643 +@@ -5979,6 +5982,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
6644 + */
6645 + if (sc->type != SC_USER)
6646 + queue_work(dd->pport->hfi1_wq, &sc->halt_work);
6647 ++ spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
6648 +
6649 + /*
6650 + * Update the counters for the corresponding status bits.
6651 +diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
6652 +index cab796341697..d92f639c287f 100644
6653 +--- a/drivers/infiniband/hw/mlx4/cq.c
6654 ++++ b/drivers/infiniband/hw/mlx4/cq.c
6655 +@@ -597,6 +597,7 @@ static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
6656 + wc->dlid_path_bits = 0;
6657 +
6658 + if (is_eth) {
6659 ++ wc->slid = 0;
6660 + wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
6661 + memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
6662 + memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
6663 +@@ -845,7 +846,6 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
6664 + }
6665 + }
6666 +
6667 +- wc->slid = be16_to_cpu(cqe->rlid);
6668 + g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
6669 + wc->src_qp = g_mlpath_rqpn & 0xffffff;
6670 + wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
6671 +@@ -854,6 +854,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
6672 + wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
6673 + cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
6674 + if (is_eth) {
6675 ++ wc->slid = 0;
6676 + wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
6677 + if (be32_to_cpu(cqe->vlan_my_qpn) &
6678 + MLX4_CQE_CVLAN_PRESENT_MASK) {
6679 +@@ -865,6 +866,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
6680 + memcpy(wc->smac, cqe->smac, ETH_ALEN);
6681 + wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
6682 + } else {
6683 ++ wc->slid = be16_to_cpu(cqe->rlid);
6684 + wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
6685 + wc->vlan_id = 0xffff;
6686 + }
6687 +diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
6688 +index 8c681a36e6c7..e2beb182d54c 100644
6689 +--- a/drivers/infiniband/hw/mlx4/main.c
6690 ++++ b/drivers/infiniband/hw/mlx4/main.c
6691 +@@ -219,8 +219,6 @@ static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
6692 + gid_tbl[i].version = 2;
6693 + if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
6694 + gid_tbl[i].type = 1;
6695 +- else
6696 +- memset(&gid_tbl[i].gid, 0, 12);
6697 + }
6698 + }
6699 +
6700 +@@ -366,8 +364,13 @@ static int mlx4_ib_del_gid(struct ib_device *device,
6701 + if (!gids) {
6702 + ret = -ENOMEM;
6703 + } else {
6704 +- for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
6705 +- memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
6706 ++ for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
6707 ++ memcpy(&gids[i].gid,
6708 ++ &port_gid_table->gids[i].gid,
6709 ++ sizeof(union ib_gid));
6710 ++ gids[i].gid_type =
6711 ++ port_gid_table->gids[i].gid_type;
6712 ++ }
6713 + }
6714 + }
6715 + spin_unlock_bh(&iboe->lock);
6716 +diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
6717 +index faedc080a5e6..d804880d637a 100644
6718 +--- a/drivers/infiniband/hw/mlx5/cq.c
6719 ++++ b/drivers/infiniband/hw/mlx5/cq.c
6720 +@@ -224,7 +224,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
6721 + wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
6722 + break;
6723 + }
6724 +- wc->slid = be16_to_cpu(cqe->slid);
6725 + wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
6726 + wc->dlid_path_bits = cqe->ml_path;
6727 + g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
6728 +@@ -239,10 +238,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
6729 + }
6730 +
6731 + if (ll != IB_LINK_LAYER_ETHERNET) {
6732 ++ wc->slid = be16_to_cpu(cqe->slid);
6733 + wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
6734 + return;
6735 + }
6736 +
6737 ++ wc->slid = 0;
6738 + vlan_present = cqe->l4_l3_hdr_type & 0x1;
6739 + roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
6740 + if (vlan_present) {
6741 +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
6742 +index fb5302ee57c7..ab70194a73db 100644
6743 +--- a/drivers/infiniband/hw/mlx5/main.c
6744 ++++ b/drivers/infiniband/hw/mlx5/main.c
6745 +@@ -270,6 +270,9 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
6746 + if (err)
6747 + return err;
6748 +
6749 ++ props->active_width = IB_WIDTH_4X;
6750 ++ props->active_speed = IB_SPEED_QDR;
6751 ++
6752 + translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
6753 + &props->active_width);
6754 +
6755 +diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
6756 +index 749fe906a5b6..ef9ee6c328a1 100644
6757 +--- a/drivers/infiniband/hw/mlx5/qp.c
6758 ++++ b/drivers/infiniband/hw/mlx5/qp.c
6759 +@@ -2881,8 +2881,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
6760 + goto out;
6761 +
6762 + if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
6763 +- !optab[mlx5_cur][mlx5_new])
6764 ++ !optab[mlx5_cur][mlx5_new]) {
6765 ++ err = -EINVAL;
6766 + goto out;
6767 ++ }
6768 +
6769 + op = optab[mlx5_cur][mlx5_new];
6770 + optpar = ib_mask_to_mlx5_opt(attr_mask);
6771 +diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
6772 +index 97d033f51dc9..ddb05b42e5e6 100644
6773 +--- a/drivers/infiniband/hw/qedr/main.c
6774 ++++ b/drivers/infiniband/hw/qedr/main.c
6775 +@@ -782,7 +782,8 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
6776 +
6777 + dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
6778 + if (!dev->num_cnq) {
6779 +- DP_ERR(dev, "not enough CNQ resources.\n");
6780 ++ DP_ERR(dev, "Failed. At least one CNQ is required.\n");
6781 ++ rc = -ENOMEM;
6782 + goto init_err;
6783 + }
6784 +
6785 +diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
6786 +index 769ac07c3c8e..7f4cc9336442 100644
6787 +--- a/drivers/infiniband/hw/qedr/verbs.c
6788 ++++ b/drivers/infiniband/hw/qedr/verbs.c
6789 +@@ -1663,14 +1663,15 @@ static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
6790 +
6791 + static int qedr_update_qp_state(struct qedr_dev *dev,
6792 + struct qedr_qp *qp,
6793 ++ enum qed_roce_qp_state cur_state,
6794 + enum qed_roce_qp_state new_state)
6795 + {
6796 + int status = 0;
6797 +
6798 +- if (new_state == qp->state)
6799 ++ if (new_state == cur_state)
6800 + return 0;
6801 +
6802 +- switch (qp->state) {
6803 ++ switch (cur_state) {
6804 + case QED_ROCE_QP_STATE_RESET:
6805 + switch (new_state) {
6806 + case QED_ROCE_QP_STATE_INIT:
6807 +@@ -1774,6 +1775,7 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
6808 + struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
6809 + const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
6810 + enum ib_qp_state old_qp_state, new_qp_state;
6811 ++ enum qed_roce_qp_state cur_state;
6812 + int rc = 0;
6813 +
6814 + DP_DEBUG(dev, QEDR_MSG_QP,
6815 +@@ -1903,18 +1905,23 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
6816 + SET_FIELD(qp_params.modify_flags,
6817 + QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
6818 +
6819 +- qp_params.ack_timeout = attr->timeout;
6820 +- if (attr->timeout) {
6821 +- u32 temp;
6822 +-
6823 +- temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
6824 +- /* FW requires [msec] */
6825 +- qp_params.ack_timeout = temp;
6826 +- } else {
6827 +- /* Infinite */
6828 ++ /* The received timeout value is an exponent used like this:
6829 ++ * "12.7.34 LOCAL ACK TIMEOUT
6830 ++ * Value representing the transport (ACK) timeout for use by
6831 ++ * the remote, expressed as: 4.096 * 2^timeout [usec]"
6832 ++ * The FW expects timeout in msec so we need to divide the usec
6833 ++ * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
6834 ++ * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
6835 ++ * The value of zero means infinite so we use a 'max_t' to make
6836 ++ * sure that sub 1 msec values will be configured as 1 msec.
6837 ++ */
6838 ++ if (attr->timeout)
6839 ++ qp_params.ack_timeout =
6840 ++ 1 << max_t(int, attr->timeout - 8, 0);
6841 ++ else
6842 + qp_params.ack_timeout = 0;
6843 +- }
6844 + }
6845 ++
6846 + if (attr_mask & IB_QP_RETRY_CNT) {
6847 + SET_FIELD(qp_params.modify_flags,
6848 + QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
6849 +@@ -1987,13 +1994,25 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
6850 + qp->dest_qp_num = attr->dest_qp_num;
6851 + }
6852 +
6853 ++ cur_state = qp->state;
6854 ++
6855 ++ /* Update the QP state before the actual ramrod to prevent a race with
6856 ++ * fast path. Modifying the QP state to error will cause the device to
6857 ++ * flush the CQEs; flushed CQEs seen while polling would then be
6858 ++ * considered a potential issue if the QP isn't in the error state.
6859 ++ */
6860 ++ if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
6861 ++ !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
6862 ++ qp->state = QED_ROCE_QP_STATE_ERR;
6863 ++
6864 + if (qp->qp_type != IB_QPT_GSI)
6865 + rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
6866 + qp->qed_qp, &qp_params);
6867 +
6868 + if (attr_mask & IB_QP_STATE) {
6869 + if ((qp->qp_type != IB_QPT_GSI) && (!udata))
6870 +- rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
6871 ++ rc = qedr_update_qp_state(dev, qp, cur_state,
6872 ++ qp_params.new_state);
6873 + qp->state = qp_params.new_state;
6874 + }
6875 +
6876 +@@ -2832,6 +2851,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
6877 +
6878 + switch (wr->opcode) {
6879 + case IB_WR_SEND_WITH_IMM:
6880 ++ if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
6881 ++ rc = -EINVAL;
6882 ++ *bad_wr = wr;
6883 ++ break;
6884 ++ }
6885 + wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
6886 + swqe = (struct rdma_sq_send_wqe_1st *)wqe;
6887 + swqe->wqe_size = 2;
6888 +@@ -2873,6 +2897,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
6889 + break;
6890 +
6891 + case IB_WR_RDMA_WRITE_WITH_IMM:
6892 ++ if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
6893 ++ rc = -EINVAL;
6894 ++ *bad_wr = wr;
6895 ++ break;
6896 ++ }
6897 + wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
6898 + rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
6899 +
6900 +@@ -3518,7 +3547,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
6901 + {
6902 + struct qedr_dev *dev = get_qedr_dev(ibcq->device);
6903 + struct qedr_cq *cq = get_qedr_cq(ibcq);
6904 +- union rdma_cqe *cqe = cq->latest_cqe;
6905 ++ union rdma_cqe *cqe;
6906 + u32 old_cons, new_cons;
6907 + unsigned long flags;
6908 + int update = 0;
6909 +@@ -3535,6 +3564,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
6910 + return qedr_gsi_poll_cq(ibcq, num_entries, wc);
6911 +
6912 + spin_lock_irqsave(&cq->cq_lock, flags);
6913 ++ cqe = cq->latest_cqe;
6914 + old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
6915 + while (num_entries && is_valid_cqe(cq, cqe)) {
6916 + struct qedr_qp *qp;
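
The ack-timeout hunk earlier in this file encodes the math spelled out in its comment: IB expresses the timeout as 4.096 us * 2^t, the firmware wants milliseconds, and 2^(t-8), clamped to at least 1, is a close power-of-two approximation. A quick self-contained check of how close it lands:

    #include <stdio.h>

    int main(void)
    {
            for (int t = 1; t <= 16; t += 5) {
                    double exact_ms = 4.096 * (double)(1ull << t) / 1000.0;
                    int shift = (t - 8 > 0) ? t - 8 : 0;  /* max_t(int, t-8, 0) */
                    unsigned int fw_ms = 1u << shift;

                    printf("t=%2d exact=%8.3f ms -> fw=%u ms\n",
                           t, exact_ms, fw_ms);
            }
            return 0;
    }
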
6917 +diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
6918 +index 906bacf365d4..1cbf4e407afa 100644
6919 +--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
6920 ++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
6921 +@@ -1206,7 +1206,7 @@ int rxe_register_device(struct rxe_dev *rxe)
6922 + rxe->ndev->dev_addr);
6923 + dev->dev.dma_ops = &dma_virt_ops;
6924 + dma_coerce_mask_and_coherent(&dev->dev,
6925 +- dma_get_required_mask(dev->dev.parent));
6926 ++ dma_get_required_mask(&dev->dev));
6927 +
6928 + dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
6929 + dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
6930 +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
6931 +index 99a2a57b6cfd..10190e361a13 100644
6932 +--- a/drivers/iommu/amd_iommu.c
6933 ++++ b/drivers/iommu/amd_iommu.c
6934 +@@ -311,6 +311,8 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
6935 +
6936 + if (dev_data == NULL) {
6937 + dev_data = alloc_dev_data(devid);
6938 ++ if (!dev_data)
6939 ++ return NULL;
6940 +
6941 + if (translation_pre_enabled(iommu))
6942 + dev_data->defer_attach = true;
6943 +diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
6944 +index 16d33ac19db0..c30f62700431 100644
6945 +--- a/drivers/iommu/mtk_iommu.c
6946 ++++ b/drivers/iommu/mtk_iommu.c
6947 +@@ -60,7 +60,7 @@
6948 + (((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data))
6949 +
6950 + #define REG_MMU_IVRP_PADDR 0x114
6951 +-#define F_MMU_IVRP_PA_SET(pa, ext) (((pa) >> 1) | ((!!(ext)) << 31))
6952 ++
6953 + #define REG_MMU_VLD_PA_RNG 0x118
6954 + #define F_MMU_VLD_PA_RNG(EA, SA) (((EA) << 8) | (SA))
6955 +
6956 +@@ -532,8 +532,13 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
6957 + F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
6958 + writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
6959 +
6960 +- writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
6961 +- data->base + REG_MMU_IVRP_PADDR);
6962 ++ if (data->m4u_plat == M4U_MT8173)
6963 ++ regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
6964 ++ else
6965 ++ regval = lower_32_bits(data->protect_base) |
6966 ++ upper_32_bits(data->protect_base);
6967 ++ writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);
6968 ++
6969 + if (data->enable_4GB && data->m4u_plat != M4U_MT8173) {
6970 + /*
6971 + * If 4GB mode is enabled, the valid PA range is from
6972 +@@ -688,6 +693,7 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev)
6973 + reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
6974 + reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
6975 + reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
6976 ++ reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
6977 + clk_disable_unprepare(data->bclk);
6978 + return 0;
6979 + }
6980 +@@ -710,8 +716,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
6981 + writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
6982 + writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
6983 + writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
6984 +- writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
6985 +- base + REG_MMU_IVRP_PADDR);
6986 ++ writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
6987 + if (data->m4u_dom)
6988 + writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
6989 + base + REG_MMU_PT_BASE_ADDR);
6990 +diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
6991 +index b4451a1c7c2f..778498b8633f 100644
6992 +--- a/drivers/iommu/mtk_iommu.h
6993 ++++ b/drivers/iommu/mtk_iommu.h
6994 +@@ -32,6 +32,7 @@ struct mtk_iommu_suspend_reg {
6995 + u32 ctrl_reg;
6996 + u32 int_control0;
6997 + u32 int_main_control;
6998 ++ u32 ivrp_paddr;
6999 + };
7000 +
7001 + enum mtk_iommu_plat {
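
The IVRP_PADDR rework above replaces one shared macro with two per-SoC encodings: MT8173 stores (pa >> 1) with the 4GB-mode flag in bit 31, while later SoCs fold the upper address bits into the low bits of the register. A sketch of both encodings with hypothetical protect-buffer addresses:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t ivrp_mt8173(uint64_t pa, int enable_4gb)
    {
            return (uint32_t)(pa >> 1) | ((uint32_t)!!enable_4gb << 31);
    }

    static uint32_t ivrp_generic(uint64_t pa)
    {
            /* lower_32_bits(pa) | upper_32_bits(pa) */
            return (uint32_t)pa | (uint32_t)(pa >> 32);
    }

    int main(void)
    {
            printf("mt8173:  0x%08x\n", ivrp_mt8173(0x47f00000ULL, 1));
            printf("generic: 0x%08x\n", ivrp_generic(0x100100000ULL));
            return 0;
    }
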
7002 +diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
7003 +index 910b5b6f96b1..eb65b6e78d57 100644
7004 +--- a/drivers/macintosh/rack-meter.c
7005 ++++ b/drivers/macintosh/rack-meter.c
7006 +@@ -154,8 +154,8 @@ static void rackmeter_do_pause(struct rackmeter *rm, int pause)
7007 + DBDMA_DO_STOP(rm->dma_regs);
7008 + return;
7009 + }
7010 +- memset(rdma->buf1, 0, ARRAY_SIZE(rdma->buf1));
7011 +- memset(rdma->buf2, 0, ARRAY_SIZE(rdma->buf2));
7012 ++ memset(rdma->buf1, 0, sizeof(rdma->buf1));
7013 ++ memset(rdma->buf2, 0, sizeof(rdma->buf2));
7014 +
7015 + rm->dma_buf_v->mark = 0;
7016 +
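
The rack-meter fix above is the classic ARRAY_SIZE/sizeof mix-up: ARRAY_SIZE() yields an element count while memset() takes a byte count, so for a multi-byte element type most of the buffer was left uncleared. A minimal reproduction in plain C:

    #include <stdio.h>
    #include <string.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
            unsigned int buf[8];

            memset(buf, 0xff, sizeof(buf));
            memset(buf, 0, ARRAY_SIZE(buf));        /* clears only 8 bytes */
            printf("buggy: buf[7] = 0x%x\n", buf[7]);

            memset(buf, 0xff, sizeof(buf));
            memset(buf, 0, sizeof(buf));            /* clears all 32 bytes */
            printf("fixed: buf[7] = 0x%x\n", buf[7]);
            return 0;
    }
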
7017 +diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
7018 +index f34ad8720756..5b63afff46d5 100644
7019 +--- a/drivers/md/bcache/request.c
7020 ++++ b/drivers/md/bcache/request.c
7021 +@@ -651,11 +651,11 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio)
7022 + static void search_free(struct closure *cl)
7023 + {
7024 + struct search *s = container_of(cl, struct search, cl);
7025 +- bio_complete(s);
7026 +
7027 + if (s->iop.bio)
7028 + bio_put(s->iop.bio);
7029 +
7030 ++ bio_complete(s);
7031 + closure_debug_destroy(cl);
7032 + mempool_free(s, s->d->c->search);
7033 + }
7034 +diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
7035 +index f046dedc59ab..930b00f6a3a2 100644
7036 +--- a/drivers/md/bcache/writeback.c
7037 ++++ b/drivers/md/bcache/writeback.c
7038 +@@ -421,9 +421,15 @@ static int bch_writeback_thread(void *arg)
7039 + while (!kthread_should_stop()) {
7040 + down_write(&dc->writeback_lock);
7041 + set_current_state(TASK_INTERRUPTIBLE);
7042 +- if (!atomic_read(&dc->has_dirty) ||
7043 +- (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
7044 +- !dc->writeback_running)) {
7045 ++ /*
7046 ++ * If the bcache device is detaching, skip here and continue
7047 ++ * to perform writeback. Otherwise, if there is no dirty data
7048 ++ * on the cache, or there is dirty data but writeback is
7049 ++ * disabled, the writeback thread should sleep here and wait
7050 ++ * for others to wake it up.
7051 ++ */
7052 ++ if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
7053 ++ (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
7054 + up_write(&dc->writeback_lock);
7055 +
7056 + if (kthread_should_stop()) {
7057 +@@ -444,6 +450,14 @@ static int bch_writeback_thread(void *arg)
7058 + cached_dev_put(dc);
7059 + SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
7060 + bch_write_bdev_super(dc, NULL);
7061 ++ /*
7062 ++ * If the bcache device is detaching via the sysfs interface,
7063 ++ * the writeback thread should stop once there is no dirty
7064 ++ * data on the cache. The BCACHE_DEV_DETACHING flag is set in
7065 ++ * bch_cached_dev_detach().
7066 ++ */
7067 ++ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
7068 ++ break;
7069 + }
7070 +
7071 + up_write(&dc->writeback_lock);
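
The rewritten sleep condition above changes one behavior: a detaching device must never put the writeback thread to sleep, even when no dirty data remains, so the detach path can finish. A toy model of the old and new predicates and the case they disagree on:

    #include <stdio.h>
    #include <stdbool.h>

    static bool sleep_old(bool detaching, bool has_dirty, bool wb_running)
    {
            return !has_dirty || (!detaching && !wb_running);
    }

    static bool sleep_new(bool detaching, bool has_dirty, bool wb_running)
    {
            return !detaching && (!has_dirty || !wb_running);
    }

    int main(void)
    {
            /* detaching with no dirty data left: old slept, new keeps going */
            printf("old=%d new=%d\n",
                   sleep_old(true, false, true),
                   sleep_new(true, false, true));
            return 0;
    }
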
7072 +diff --git a/drivers/md/md.c b/drivers/md/md.c
7073 +index e058c209bbcf..24e64b04424a 100644
7074 +--- a/drivers/md/md.c
7075 ++++ b/drivers/md/md.c
7076 +@@ -779,6 +779,9 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
7077 + struct bio *bio;
7078 + int ff = 0;
7079 +
7080 ++ if (!page)
7081 ++ return;
7082 ++
7083 + if (test_bit(Faulty, &rdev->flags))
7084 + return;
7085 +
7086 +@@ -5434,6 +5437,7 @@ int md_run(struct mddev *mddev)
7087 + * the only valid external interface is through the md
7088 + * device.
7089 + */
7090 ++ mddev->has_superblocks = false;
7091 + rdev_for_each(rdev, mddev) {
7092 + if (test_bit(Faulty, &rdev->flags))
7093 + continue;
7094 +@@ -5447,6 +5451,9 @@ int md_run(struct mddev *mddev)
7095 + set_disk_ro(mddev->gendisk, 1);
7096 + }
7097 +
7098 ++ if (rdev->sb_page)
7099 ++ mddev->has_superblocks = true;
7100 ++
7101 + /* perform some consistency tests on the device.
7102 + * We don't want the data to overlap the metadata,
7103 + * Internal Bitmap issues have been handled elsewhere.
7104 +@@ -5479,8 +5486,10 @@ int md_run(struct mddev *mddev)
7105 + }
7106 + if (mddev->sync_set == NULL) {
7107 + mddev->sync_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
7108 +- if (!mddev->sync_set)
7109 +- return -ENOMEM;
7110 ++ if (!mddev->sync_set) {
7111 ++ err = -ENOMEM;
7112 ++ goto abort;
7113 ++ }
7114 + }
7115 +
7116 + spin_lock(&pers_lock);
7117 +@@ -5493,7 +5502,8 @@ int md_run(struct mddev *mddev)
7118 + else
7119 + pr_warn("md: personality for level %s is not loaded!\n",
7120 + mddev->clevel);
7121 +- return -EINVAL;
7122 ++ err = -EINVAL;
7123 ++ goto abort;
7124 + }
7125 + spin_unlock(&pers_lock);
7126 + if (mddev->level != pers->level) {
7127 +@@ -5506,7 +5516,8 @@ int md_run(struct mddev *mddev)
7128 + pers->start_reshape == NULL) {
7129 + /* This personality cannot handle reshaping... */
7130 + module_put(pers->owner);
7131 +- return -EINVAL;
7132 ++ err = -EINVAL;
7133 ++ goto abort;
7134 + }
7135 +
7136 + if (pers->sync_request) {
7137 +@@ -5580,7 +5591,7 @@ int md_run(struct mddev *mddev)
7138 + mddev->private = NULL;
7139 + module_put(pers->owner);
7140 + bitmap_destroy(mddev);
7141 +- return err;
7142 ++ goto abort;
7143 + }
7144 + if (mddev->queue) {
7145 + bool nonrot = true;
7146 +@@ -5642,6 +5653,18 @@ int md_run(struct mddev *mddev)
7147 + sysfs_notify_dirent_safe(mddev->sysfs_action);
7148 + sysfs_notify(&mddev->kobj, NULL, "degraded");
7149 + return 0;
7150 ++
7151 ++abort:
7152 ++ if (mddev->bio_set) {
7153 ++ bioset_free(mddev->bio_set);
7154 ++ mddev->bio_set = NULL;
7155 ++ }
7156 ++ if (mddev->sync_set) {
7157 ++ bioset_free(mddev->sync_set);
7158 ++ mddev->sync_set = NULL;
7159 ++ }
7160 ++
7161 ++ return err;
7162 + }
7163 + EXPORT_SYMBOL_GPL(md_run);
7164 +
7165 +@@ -8006,6 +8029,7 @@ EXPORT_SYMBOL(md_done_sync);
7166 + bool md_write_start(struct mddev *mddev, struct bio *bi)
7167 + {
7168 + int did_change = 0;
7169 ++
7170 + if (bio_data_dir(bi) != WRITE)
7171 + return true;
7172 +
7173 +@@ -8038,6 +8062,8 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
7174 + rcu_read_unlock();
7175 + if (did_change)
7176 + sysfs_notify_dirent_safe(mddev->sysfs_state);
7177 ++ if (!mddev->has_superblocks)
7178 ++ return true;
7179 + wait_event(mddev->sb_wait,
7180 + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
7181 + mddev->suspended);
7182 +@@ -8496,6 +8522,19 @@ void md_do_sync(struct md_thread *thread)
7183 + set_mask_bits(&mddev->sb_flags, 0,
7184 + BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
7185 +
7186 ++ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
7187 ++ !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7188 ++ mddev->delta_disks > 0 &&
7189 ++ mddev->pers->finish_reshape &&
7190 ++ mddev->pers->size &&
7191 ++ mddev->queue) {
7192 ++ mddev_lock_nointr(mddev);
7193 ++ md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
7194 ++ mddev_unlock(mddev);
7195 ++ set_capacity(mddev->gendisk, mddev->array_sectors);
7196 ++ revalidate_disk(mddev->gendisk);
7197 ++ }
7198 ++
7199 + spin_lock(&mddev->lock);
7200 + if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
7201 + /* We completed so min/max setting can be forgotten if used. */
7202 +diff --git a/drivers/md/md.h b/drivers/md/md.h
7203 +index d8287d3cd1bf..9b0a896890ef 100644
7204 +--- a/drivers/md/md.h
7205 ++++ b/drivers/md/md.h
7206 +@@ -462,6 +462,8 @@ struct mddev {
7207 + void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
7208 + struct md_cluster_info *cluster_info;
7209 + unsigned int good_device_nr; /* good device num within cluster raid */
7210 ++
7211 ++ bool has_superblocks:1;
7212 + };
7213 +
7214 + enum recovery_flags {
7215 +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
7216 +index 788fc0800465..e4e01d3bab81 100644
7217 +--- a/drivers/md/raid1.c
7218 ++++ b/drivers/md/raid1.c
7219 +@@ -1813,6 +1813,17 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
7220 + struct md_rdev *repl =
7221 + conf->mirrors[conf->raid_disks + number].rdev;
7222 + freeze_array(conf, 0);
7223 ++ if (atomic_read(&repl->nr_pending)) {
7224 ++ /* This means some queued IO on retry_list still
7225 ++ * holds repl. Thus we cannot set the replacement
7226 ++ * to NULL, avoiding an rdev NULL pointer
7227 ++ * dereference in sync_request_write and
7228 ++ * handle_write_finished.
7229 ++ */
7230 ++ err = -EBUSY;
7231 ++ unfreeze_array(conf);
7232 ++ goto abort;
7233 ++ }
7234 + clear_bit(Replacement, &repl->flags);
7235 + p->rdev = repl;
7236 + conf->mirrors[conf->raid_disks + number].rdev = NULL;
7237 +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
7238 +index 0d18d3b95201..5fb31ef52945 100644
7239 +--- a/drivers/md/raid10.c
7240 ++++ b/drivers/md/raid10.c
7241 +@@ -2625,7 +2625,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
7242 + for (m = 0; m < conf->copies; m++) {
7243 + int dev = r10_bio->devs[m].devnum;
7244 + rdev = conf->mirrors[dev].rdev;
7245 +- if (r10_bio->devs[m].bio == NULL)
7246 ++ if (r10_bio->devs[m].bio == NULL ||
7247 ++ r10_bio->devs[m].bio->bi_end_io == NULL)
7248 + continue;
7249 + if (!r10_bio->devs[m].bio->bi_status) {
7250 + rdev_clear_badblocks(
7251 +@@ -2640,7 +2641,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
7252 + md_error(conf->mddev, rdev);
7253 + }
7254 + rdev = conf->mirrors[dev].replacement;
7255 +- if (r10_bio->devs[m].repl_bio == NULL)
7256 ++ if (r10_bio->devs[m].repl_bio == NULL ||
7257 ++ r10_bio->devs[m].repl_bio->bi_end_io == NULL)
7258 + continue;
7259 +
7260 + if (!r10_bio->devs[m].repl_bio->bi_status) {
7261 +@@ -4691,17 +4693,11 @@ static void raid10_finish_reshape(struct mddev *mddev)
7262 + return;
7263 +
7264 + if (mddev->delta_disks > 0) {
7265 +- sector_t size = raid10_size(mddev, 0, 0);
7266 +- md_set_array_sectors(mddev, size);
7267 + if (mddev->recovery_cp > mddev->resync_max_sectors) {
7268 + mddev->recovery_cp = mddev->resync_max_sectors;
7269 + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7270 + }
7271 +- mddev->resync_max_sectors = size;
7272 +- if (mddev->queue) {
7273 +- set_capacity(mddev->gendisk, mddev->array_sectors);
7274 +- revalidate_disk(mddev->gendisk);
7275 +- }
7276 ++ mddev->resync_max_sectors = mddev->array_sectors;
7277 + } else {
7278 + int d;
7279 + rcu_read_lock();
7280 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
7281 +index 7ec822ced80b..de1ef6264ee7 100644
7282 +--- a/drivers/md/raid5.c
7283 ++++ b/drivers/md/raid5.c
7284 +@@ -2197,15 +2197,16 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
7285 + static int grow_stripes(struct r5conf *conf, int num)
7286 + {
7287 + struct kmem_cache *sc;
7288 ++ size_t namelen = sizeof(conf->cache_name[0]);
7289 + int devs = max(conf->raid_disks, conf->previous_raid_disks);
7290 +
7291 + if (conf->mddev->gendisk)
7292 +- sprintf(conf->cache_name[0],
7293 ++ snprintf(conf->cache_name[0], namelen,
7294 + "raid%d-%s", conf->level, mdname(conf->mddev));
7295 + else
7296 +- sprintf(conf->cache_name[0],
7297 ++ snprintf(conf->cache_name[0], namelen,
7298 + "raid%d-%p", conf->level, conf->mddev);
7299 +- sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
7300 ++ snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]);
7301 +
7302 + conf->active_name = 0;
7303 + sc = kmem_cache_create(conf->cache_name[conf->active_name],
7304 +@@ -8000,13 +8001,7 @@ static void raid5_finish_reshape(struct mddev *mddev)
7305 +
7306 + if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
7307 +
7308 +- if (mddev->delta_disks > 0) {
7309 +- md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
7310 +- if (mddev->queue) {
7311 +- set_capacity(mddev->gendisk, mddev->array_sectors);
7312 +- revalidate_disk(mddev->gendisk);
7313 +- }
7314 +- } else {
7315 ++ if (mddev->delta_disks <= 0) {
7316 + int d;
7317 + spin_lock_irq(&conf->device_lock);
7318 + mddev->degraded = raid5_calc_degraded(conf);
7319 +diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
7320 +index b1afeccbb97f..c96dcda1111f 100644
7321 +--- a/drivers/misc/cxl/cxl.h
7322 ++++ b/drivers/misc/cxl/cxl.h
7323 +@@ -365,6 +365,9 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
7324 + #define CXL_PSL_TFC_An_AE (1ull << (63-30)) /* Restart PSL with address error */
7325 + #define CXL_PSL_TFC_An_R (1ull << (63-31)) /* Restart PSL transaction */
7326 +
7327 ++/****** CXL_PSL_DEBUG *****************************************************/
7328 ++#define CXL_PSL_DEBUG_CDC (1ull << (63-27)) /* Coherent Data cache support */
7329 ++
7330 + /****** CXL_XSL9_IERAT_ERAT - CAIA 2 **********************************/
7331 + #define CXL_XSL9_IERAT_MLPID (1ull << (63-0)) /* Match LPID */
7332 + #define CXL_XSL9_IERAT_MPID (1ull << (63-1)) /* Match PID */
7333 +@@ -659,6 +662,7 @@ struct cxl_native {
7334 + irq_hw_number_t err_hwirq;
7335 + unsigned int err_virq;
7336 + u64 ps_off;
7337 ++ bool no_data_cache; /* set if no data cache on the card */
7338 + const struct cxl_service_layer_ops *sl_ops;
7339 + };
7340 +
7341 +diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
7342 +index 4a82c313cf71..9c042b0b8c55 100644
7343 +--- a/drivers/misc/cxl/native.c
7344 ++++ b/drivers/misc/cxl/native.c
7345 +@@ -352,8 +352,17 @@ int cxl_data_cache_flush(struct cxl *adapter)
7346 + u64 reg;
7347 + unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
7348 +
7349 +- pr_devel("Flushing data cache\n");
7350 ++ /*
7351 ++ * Do a datacache flush only if the datacache is available.
7352 ++ * On PSL9D the datacache is absent, so a flush operation
7353 ++ * would time out.
7354 ++ */
7355 ++ if (adapter->native->no_data_cache) {
7356 ++ pr_devel("No PSL data cache. Ignoring cache flush req.\n");
7357 ++ return 0;
7358 ++ }
7359 +
7360 ++ pr_devel("Flushing data cache\n");
7361 + reg = cxl_p1_read(adapter, CXL_PSL_Control);
7362 + reg |= CXL_PSL_Control_Fr;
7363 + cxl_p1_write(adapter, CXL_PSL_Control, reg);
7364 +diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
7365 +index 81093f8157a9..2b3fd0a51701 100644
7366 +--- a/drivers/misc/cxl/pci.c
7367 ++++ b/drivers/misc/cxl/pci.c
7368 +@@ -457,6 +457,7 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter,
7369 + u64 chipid;
7370 + u32 phb_index;
7371 + u64 capp_unit_id;
7372 ++ u64 psl_debug;
7373 + int rc;
7374 +
7375 + rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
7376 +@@ -507,6 +508,16 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter,
7377 + if (cxl_is_power9_dd1())
7378 + cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0400000000000001ULL);
7379 +
7380 ++ /*
7381 ++ * Check if the PSL has a data-cache. We need to flush the adapter
7382 ++ * datacache when it is about to be removed.
7383 ++ */
7384 ++ psl_debug = cxl_p1_read(adapter, CXL_PSL9_DEBUG);
7385 ++ if (psl_debug & CXL_PSL_DEBUG_CDC) {
7386 ++ dev_dbg(&dev->dev, "No data-cache present\n");
7387 ++ adapter->native->no_data_cache = true;
7388 ++ }
7389 ++
7390 + return 0;
7391 + }
7392 +
7393 +@@ -1450,10 +1461,8 @@ int cxl_pci_reset(struct cxl *adapter)
7394 +
7395 + /*
7396 + * The adapter is about to be reset, so ignore errors.
7397 +- * Not supported on P9 DD1
7398 + */
7399 +- if ((cxl_is_power8()) || (!(cxl_is_power9_dd1())))
7400 +- cxl_data_cache_flush(adapter);
7401 ++ cxl_data_cache_flush(adapter);
7402 +
7403 + /* pcie_warm_reset requests a fundamental pci reset which includes a
7404 + * PERST assert/deassert. PERST triggers a loading of the image
7405 +@@ -1898,10 +1907,8 @@ static void cxl_pci_remove_adapter(struct cxl *adapter)
7406 +
7407 + /*
7408 + * Flush the adapter datacache as it is about to be removed.
7409 +- * Not supported on P9 DD1.
7410 + */
7411 +- if ((cxl_is_power8()) || (!(cxl_is_power9_dd1())))
7412 +- cxl_data_cache_flush(adapter);
7413 ++ cxl_data_cache_flush(adapter);
7414 +
7415 + cxl_deconfigure_adapter(adapter);
7416 +
7417 +diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
7418 +index 61666d269771..0cfbdb3ab68a 100644
7419 +--- a/drivers/mmc/host/sdhci-iproc.c
7420 ++++ b/drivers/mmc/host/sdhci-iproc.c
7421 +@@ -33,6 +33,8 @@ struct sdhci_iproc_host {
7422 + const struct sdhci_iproc_data *data;
7423 + u32 shadow_cmd;
7424 + u32 shadow_blk;
7425 ++ bool is_cmd_shadowed;
7426 ++ bool is_blk_shadowed;
7427 + };
7428 +
7429 + #define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)
7430 +@@ -48,8 +50,22 @@ static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg)
7431 +
7432 + static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg)
7433 + {
7434 +- u32 val = sdhci_iproc_readl(host, (reg & ~3));
7435 +- u16 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
7436 ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
7437 ++ struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host);
7438 ++ u32 val;
7439 ++ u16 word;
7440 ++
7441 ++ if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) {
7442 ++ /* Get the saved transfer mode */
7443 ++ val = iproc_host->shadow_cmd;
7444 ++ } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
7445 ++ iproc_host->is_blk_shadowed) {
7446 ++ /* Get the saved block info */
7447 ++ val = iproc_host->shadow_blk;
7448 ++ } else {
7449 ++ val = sdhci_iproc_readl(host, (reg & ~3));
7450 ++ }
7451 ++ word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
7452 + return word;
7453 + }
7454 +
7455 +@@ -105,13 +121,15 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
7456 +
7457 + if (reg == SDHCI_COMMAND) {
7458 + /* Write the block now as we are issuing a command */
7459 +- if (iproc_host->shadow_blk != 0) {
7460 ++ if (iproc_host->is_blk_shadowed) {
7461 + sdhci_iproc_writel(host, iproc_host->shadow_blk,
7462 + SDHCI_BLOCK_SIZE);
7463 +- iproc_host->shadow_blk = 0;
7464 ++ iproc_host->is_blk_shadowed = false;
7465 + }
7466 + oldval = iproc_host->shadow_cmd;
7467 +- } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
7468 ++ iproc_host->is_cmd_shadowed = false;
7469 ++ } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
7470 ++ iproc_host->is_blk_shadowed) {
7471 + /* Block size and count are stored in shadow reg */
7472 + oldval = iproc_host->shadow_blk;
7473 + } else {
7474 +@@ -123,9 +141,11 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
7475 + if (reg == SDHCI_TRANSFER_MODE) {
7476 + /* Save the transfer mode until the command is issued */
7477 + iproc_host->shadow_cmd = newval;
7478 ++ iproc_host->is_cmd_shadowed = true;
7479 + } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
7480 + /* Save the block info until the command is issued */
7481 + iproc_host->shadow_blk = newval;
7482 ++ iproc_host->is_blk_shadowed = true;
7483 + } else {
7484 + /* Command or other regular 32-bit write */
7485 + sdhci_iproc_writel(host, newval, reg & ~3);
7486 +@@ -166,7 +186,7 @@ static const struct sdhci_ops sdhci_iproc_32only_ops = {
7487 +
7488 + static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = {
7489 + .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
7490 +- .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
7491 ++ .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON,
7492 + .ops = &sdhci_iproc_32only_ops,
7493 + };
7494 +
7495 +@@ -206,7 +226,6 @@ static const struct sdhci_iproc_data iproc_data = {
7496 + .caps1 = SDHCI_DRIVER_TYPE_C |
7497 + SDHCI_DRIVER_TYPE_D |
7498 + SDHCI_SUPPORT_DDR50,
7499 +- .mmc_caps = MMC_CAP_1_8V_DDR,
7500 + };
7501 +
7502 + static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
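
The sdhci-iproc hunks above harden a shadow-register scheme: the controller only supports 32-bit MMIO, so 16-bit writes to the transfer-mode and block registers are staged in shadow words until the command write flushes them, and the new flags make "shadow valid" explicit instead of treating a value of 0 as "no shadow". A compact model of that staging (illustrative, not the driver):

    #include <stdint.h>
    #include <stdbool.h>

    struct shadow {
            uint32_t val;
            bool valid;
    };

    static void shadow_write16(struct shadow *s, uint32_t hw_val, int shift,
                               uint16_t v)
    {
            uint32_t base = s->valid ? s->val : hw_val;

            s->val = (base & ~(0xffffu << shift)) | ((uint32_t)v << shift);
            s->valid = true;        /* valid even when v == 0 */
    }

    static bool shadow_flush(struct shadow *s, uint32_t *reg)
    {
            if (!s->valid)
                    return false;
            *reg = s->val;          /* one 32-bit write reaches the hardware */
            s->valid = false;
            return true;
    }
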
7503 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
7504 +index f0aa57222f17..00245b73c224 100644
7505 +--- a/drivers/net/bonding/bond_main.c
7506 ++++ b/drivers/net/bonding/bond_main.c
7507 +@@ -1528,7 +1528,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
7508 + if (res) {
7509 + netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
7510 + slave_dev->name);
7511 +- goto err_close;
7512 ++ goto err_hwaddr_unsync;
7513 + }
7514 +
7515 + prev_slave = bond_last_slave(bond);
7516 +@@ -1769,6 +1769,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
7517 + synchronize_rcu();
7518 + slave_disable_netpoll(new_slave);
7519 +
7520 ++err_hwaddr_unsync:
7521 ++ if (!bond_uses_primary(bond))
7522 ++ bond_hw_addr_flush(bond_dev, slave_dev);
7523 ++
7524 + err_close:
7525 + slave_dev->priv_flags &= ~IFF_BONDING;
7526 + dev_close(slave_dev);
7527 +diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
7528 +index f4947a74b65f..5d4e61741476 100644
7529 +--- a/drivers/net/can/m_can/m_can.c
7530 ++++ b/drivers/net/can/m_can/m_can.c
7531 +@@ -25,6 +25,7 @@
7532 + #include <linux/platform_device.h>
7533 + #include <linux/iopoll.h>
7534 + #include <linux/can/dev.h>
7535 ++#include <linux/pinctrl/consumer.h>
7536 +
7537 + /* napi related */
7538 + #define M_CAN_NAPI_WEIGHT 64
7539 +@@ -246,7 +247,7 @@ enum m_can_mram_cfg {
7540 +
7541 + /* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
7542 + #define RXFC_FWM_SHIFT 24
7543 +-#define RXFC_FWM_MASK (0x7f < RXFC_FWM_SHIFT)
7544 ++#define RXFC_FWM_MASK (0x7f << RXFC_FWM_SHIFT)
7545 + #define RXFC_FS_SHIFT 16
7546 + #define RXFC_FS_MASK (0x7f << RXFC_FS_SHIFT)
7547 +
7548 +@@ -1682,6 +1683,8 @@ static __maybe_unused int m_can_suspend(struct device *dev)
7549 + m_can_clk_stop(priv);
7550 + }
7551 +
7552 ++ pinctrl_pm_select_sleep_state(dev);
7553 ++
7554 + priv->can.state = CAN_STATE_SLEEPING;
7555 +
7556 + return 0;
7557 +@@ -1692,6 +1695,8 @@ static __maybe_unused int m_can_resume(struct device *dev)
7558 + struct net_device *ndev = dev_get_drvdata(dev);
7559 + struct m_can_priv *priv = netdev_priv(ndev);
7560 +
7561 ++ pinctrl_pm_select_default_state(dev);
7562 ++
7563 + m_can_init_ram(priv);
7564 +
7565 + priv->can.state = CAN_STATE_ERROR_ACTIVE;
7566 +diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
7567 +index d040aeb45172..15c2a831edf1 100644
7568 +--- a/drivers/net/dsa/Makefile
7569 ++++ b/drivers/net/dsa/Makefile
7570 +@@ -1,7 +1,10 @@
7571 + # SPDX-License-Identifier: GPL-2.0
7572 + obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o
7573 + bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o
7574 +-obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o dsa_loop_bdinfo.o
7575 ++obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o
7576 ++ifdef CONFIG_NET_DSA_LOOP
7577 ++obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o
7578 ++endif
7579 + obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
7580 + obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
7581 + obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
7582 +diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
7583 +index c142b97add2c..3b073e152237 100644
7584 +--- a/drivers/net/dsa/mt7530.c
7585 ++++ b/drivers/net/dsa/mt7530.c
7586 +@@ -1122,6 +1122,7 @@ static const struct of_device_id mt7530_of_match[] = {
7587 + { .compatible = "mediatek,mt7530" },
7588 + { /* sentinel */ },
7589 + };
7590 ++MODULE_DEVICE_TABLE(of, mt7530_of_match);
7591 +
7592 + static struct mdio_driver mt7530_mdio_driver = {
7593 + .probe = mt7530_probe,
7594 +diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
7595 +index 48d672b204a4..a4080f18135c 100644
7596 +--- a/drivers/net/ethernet/broadcom/bgmac.c
7597 ++++ b/drivers/net/ethernet/broadcom/bgmac.c
7598 +@@ -532,7 +532,8 @@ static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
7599 + int i;
7600 +
7601 + for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
7602 +- int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN;
7603 ++ u32 ctl1 = le32_to_cpu(dma_desc[i].ctl1);
7604 ++ unsigned int len = ctl1 & BGMAC_DESC_CTL1_LEN;
7605 +
7606 + slot = &ring->slots[i];
7607 + dev_kfree_skb(slot->skb);
7608 +diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
7609 +index 4040d846da8e..40d02fec2747 100644
7610 +--- a/drivers/net/ethernet/broadcom/bgmac.h
7611 ++++ b/drivers/net/ethernet/broadcom/bgmac.h
7612 +@@ -479,9 +479,9 @@ struct bgmac_rx_header {
7613 + struct bgmac {
7614 + union {
7615 + struct {
7616 +- void *base;
7617 +- void *idm_base;
7618 +- void *nicpm_base;
7619 ++ void __iomem *base;
7620 ++ void __iomem *idm_base;
7621 ++ void __iomem *nicpm_base;
7622 + } plat;
7623 + struct {
7624 + struct bcma_device *core;
7625 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
7626 +index 807cf75f0a98..bfd2d0382f4c 100644
7627 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
7628 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
7629 +@@ -3808,6 +3808,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
7630 + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
7631 + struct hwrm_vnic_tpa_cfg_input req = {0};
7632 +
7633 ++ if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
7634 ++ return 0;
7635 ++
7636 + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
7637 +
7638 + if (tpa_flags) {
7639 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
7640 +index 92d9d795d874..44a0d04dd8a0 100644
7641 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
7642 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
7643 +@@ -815,8 +815,6 @@ static int setup_fw_sge_queues(struct adapter *adap)
7644 +
7645 + err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
7646 + adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
7647 +- if (err)
7648 +- t4_free_sge_resources(adap);
7649 + return err;
7650 + }
7651 +
7652 +@@ -4679,7 +4677,6 @@ static void dummy_setup(struct net_device *dev)
7653 + /* Initialize the device structure. */
7654 + dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
7655 + dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
7656 +- dev->needs_free_netdev = true;
7657 + }
7658 +
7659 + static int config_mgmt_dev(struct pci_dev *pdev)
7660 +@@ -5117,6 +5114,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7661 + if (err)
7662 + goto out_free_dev;
7663 +
7664 ++ err = setup_fw_sge_queues(adapter);
7665 ++ if (err) {
7666 ++ dev_err(adapter->pdev_dev,
7667 ++ "FW sge queue allocation failed, err %d", err);
7668 ++ goto out_free_dev;
7669 ++ }
7670 ++
7671 + /*
7672 + * The card is now ready to go. If any errors occur during device
7673 + * registration we do not fail the whole card but rather proceed only
7674 +@@ -5165,7 +5169,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7675 + cxgb4_ptp_init(adapter);
7676 +
7677 + print_adapter_info(adapter);
7678 +- setup_fw_sge_queues(adapter);
7679 + return 0;
7680 +
7681 + sriov:
7682 +@@ -5221,6 +5224,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7683 + #endif
7684 +
7685 + out_free_dev:
7686 ++ t4_free_sge_resources(adapter);
7687 + free_some_resources(adapter);
7688 + if (adapter->flags & USING_MSIX)
7689 + free_msix_info(adapter);
7690 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
7691 +index 71a315bc1409..99a9d5278369 100644
7692 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
7693 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
7694 +@@ -342,6 +342,7 @@ static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
7695 + {
7696 + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
7697 +
7698 ++ adap->sge.uld_rxq_info[uld_type] = NULL;
7699 + kfree(rxq_info->rspq_id);
7700 + kfree(rxq_info->uldrxq);
7701 + kfree(rxq_info);
7702 +diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
7703 +index d24ee1ad3be1..aef40f02c77f 100644
7704 +--- a/drivers/net/ethernet/cisco/enic/enic_main.c
7705 ++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
7706 +@@ -1897,6 +1897,8 @@ static int enic_open(struct net_device *netdev)
7707 + }
7708 +
7709 + for (i = 0; i < enic->rq_count; i++) {
7710 ++ /* enable rq before updating rq desc */
7711 ++ vnic_rq_enable(&enic->rq[i]);
7712 + vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
7713 + /* Need at least one buffer on ring to get going */
7714 + if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
7715 +@@ -1908,8 +1910,6 @@ static int enic_open(struct net_device *netdev)
7716 +
7717 + for (i = 0; i < enic->wq_count; i++)
7718 + vnic_wq_enable(&enic->wq[i]);
7719 +- for (i = 0; i < enic->rq_count; i++)
7720 +- vnic_rq_enable(&enic->rq[i]);
7721 +
7722 + if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
7723 + enic_dev_add_station_addr(enic);
7724 +@@ -1935,8 +1935,12 @@ static int enic_open(struct net_device *netdev)
7725 + return 0;
7726 +
7727 + err_out_free_rq:
7728 +- for (i = 0; i < enic->rq_count; i++)
7729 ++ for (i = 0; i < enic->rq_count; i++) {
7730 ++ err = vnic_rq_disable(&enic->rq[i]);
7731 ++ if (err)
7732 ++ return err;
7733 + vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
7734 ++ }
7735 + enic_dev_notify_unset(enic);
7736 + err_out_free_intr:
7737 + enic_unset_affinity_hint(enic);
7738 +diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
7739 +index 4f6e9d3470d5..5b4f05805006 100644
7740 +--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
7741 ++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
7742 +@@ -1930,8 +1930,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
7743 + goto csum_failed;
7744 + }
7745 +
7746 ++ /* SGT[0] is used by the linear part */
7747 + sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
7748 +- qm_sg_entry_set_len(&sgt[0], skb_headlen(skb));
7749 ++ frag_len = skb_headlen(skb);
7750 ++ qm_sg_entry_set_len(&sgt[0], frag_len);
7751 + sgt[0].bpid = FSL_DPAA_BPID_INV;
7752 + sgt[0].offset = 0;
7753 + addr = dma_map_single(dev, skb->data,
7754 +@@ -1944,9 +1946,9 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
7755 + qm_sg_entry_set64(&sgt[0], addr);
7756 +
7757 + /* populate the rest of SGT entries */
7758 +- frag = &skb_shinfo(skb)->frags[0];
7759 +- frag_len = frag->size;
7760 +- for (i = 1; i <= nr_frags; i++, frag++) {
7761 ++ for (i = 0; i < nr_frags; i++) {
7762 ++ frag = &skb_shinfo(skb)->frags[i];
7763 ++ frag_len = frag->size;
7764 + WARN_ON(!skb_frag_page(frag));
7765 + addr = skb_frag_dma_map(dev, frag, 0,
7766 + frag_len, dma_dir);
7767 +@@ -1956,15 +1958,16 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
7768 + goto sg_map_failed;
7769 + }
7770 +
7771 +- qm_sg_entry_set_len(&sgt[i], frag_len);
7772 +- sgt[i].bpid = FSL_DPAA_BPID_INV;
7773 +- sgt[i].offset = 0;
7774 ++ qm_sg_entry_set_len(&sgt[i + 1], frag_len);
7775 ++ sgt[i + 1].bpid = FSL_DPAA_BPID_INV;
7776 ++ sgt[i + 1].offset = 0;
7777 +
7778 + /* keep the offset in the address */
7779 +- qm_sg_entry_set64(&sgt[i], addr);
7780 +- frag_len = frag->size;
7781 ++ qm_sg_entry_set64(&sgt[i + 1], addr);
7782 + }
7783 +- qm_sg_entry_set_f(&sgt[i - 1], frag_len);
7784 ++
7785 ++ /* Set the final bit in the last used entry of the SGT */
7786 ++ qm_sg_entry_set_f(&sgt[nr_frags], frag_len);
7787 +
7788 + qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
7789 +
7790 +diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
7791 +index faea674094b9..85306d1b2acf 100644
7792 +--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
7793 ++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
7794 +@@ -211,7 +211,7 @@ static int dpaa_set_pauseparam(struct net_device *net_dev,
7795 + if (epause->rx_pause)
7796 + newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
7797 + if (epause->tx_pause)
7798 +- newadv |= ADVERTISED_Asym_Pause;
7799 ++ newadv ^= ADVERTISED_Asym_Pause;
7800 +
7801 + oldadv = phydev->advertising &
7802 + (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
7803 +diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
7804 +index ea43b4974149..7af31ddd093f 100644
7805 +--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
7806 ++++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
7807 +@@ -1100,7 +1100,7 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
7808 + set_bucket(dtsec->regs, bucket, true);
7809 +
7810 + /* Create element to be added to the driver hash table */
7811 +- hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
7812 ++ hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
7813 + if (!hash_entry)
7814 + return -ENOMEM;
7815 + hash_entry->addr = addr;
7816 +diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
7817 +index 3bdeb295514b..63daae120b2d 100644
7818 +--- a/drivers/net/ethernet/freescale/gianfar.c
7819 ++++ b/drivers/net/ethernet/freescale/gianfar.c
7820 +@@ -3072,9 +3072,6 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
7821 + if (ndev->features & NETIF_F_RXCSUM)
7822 + gfar_rx_checksum(skb, fcb);
7823 +
7824 +- /* Tell the skb what kind of packet this is */
7825 +- skb->protocol = eth_type_trans(skb, ndev);
7826 +-
7827 + /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
7828 + * Even if vlan rx accel is disabled, on some chips
7829 + * RXFCB_VLN is pseudo randomly set.
7830 +@@ -3145,13 +3142,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
7831 + continue;
7832 + }
7833 +
7834 ++ gfar_process_frame(ndev, skb);
7835 ++
7836 + /* Increment the number of packets */
7837 + total_pkts++;
7838 + total_bytes += skb->len;
7839 +
7840 + skb_record_rx_queue(skb, rx_queue->qindex);
7841 +
7842 +- gfar_process_frame(ndev, skb);
7843 ++ skb->protocol = eth_type_trans(skb, ndev);
7844 +
7845 + /* Send the packet up the stack */
7846 + napi_gro_receive(&rx_queue->grp->napi_rx, skb);
7847 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
7848 +index 3ae02b0620bc..98493be7b4af 100644
7849 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
7850 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
7851 +@@ -933,6 +933,35 @@ static int ibmvnic_open(struct net_device *netdev)
7852 + return rc;
7853 + }
7854 +
7855 ++static void clean_rx_pools(struct ibmvnic_adapter *adapter)
7856 ++{
7857 ++ struct ibmvnic_rx_pool *rx_pool;
7858 ++ u64 rx_entries;
7859 ++ int rx_scrqs;
7860 ++ int i, j;
7861 ++
7862 ++ if (!adapter->rx_pool)
7863 ++ return;
7864 ++
7865 ++ rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
7866 ++ rx_entries = adapter->req_rx_add_entries_per_subcrq;
7867 ++
7868 ++ /* Free any remaining skbs in the rx buffer pools */
7869 ++ for (i = 0; i < rx_scrqs; i++) {
7870 ++ rx_pool = &adapter->rx_pool[i];
7871 ++ if (!rx_pool)
7872 ++ continue;
7873 ++
7874 ++ netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
7875 ++ for (j = 0; j < rx_entries; j++) {
7876 ++ if (rx_pool->rx_buff[j].skb) {
7877 ++ dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
7878 ++ rx_pool->rx_buff[j].skb = NULL;
7879 ++ }
7880 ++ }
7881 ++ }
7882 ++}
7883 ++
7884 + static void clean_tx_pools(struct ibmvnic_adapter *adapter)
7885 + {
7886 + struct ibmvnic_tx_pool *tx_pool;
7887 +@@ -1010,7 +1039,7 @@ static int __ibmvnic_close(struct net_device *netdev)
7888 + }
7889 + }
7890 + }
7891 +-
7892 ++ clean_rx_pools(adapter);
7893 + clean_tx_pools(adapter);
7894 + adapter->state = VNIC_CLOSED;
7895 + return rc;
7896 +@@ -1460,8 +1489,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
7897 + return 0;
7898 + }
7899 +
7900 +- netif_carrier_on(netdev);
7901 +-
7902 + /* kick napi */
7903 + for (i = 0; i < adapter->req_rx_queues; i++)
7904 + napi_schedule(&adapter->napi[i]);
7905 +@@ -1469,6 +1496,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
7906 + if (adapter->reset_reason != VNIC_RESET_FAILOVER)
7907 + netdev_notify_peers(netdev);
7908 +
7909 ++ netif_carrier_on(netdev);
7910 ++
7911 + return 0;
7912 + }
7913 +
7914 +@@ -1636,6 +1665,12 @@ static int ibmvnic_poll(struct napi_struct *napi, int budget)
7915 + be16_to_cpu(next->rx_comp.rc));
7916 + /* free the entry */
7917 + next->rx_comp.first = 0;
7918 ++ dev_kfree_skb_any(rx_buff->skb);
7919 ++ remove_buff_from_pool(adapter, rx_buff);
7920 ++ continue;
7921 ++ } else if (!rx_buff->skb) {
7922 ++ /* free the entry */
7923 ++ next->rx_comp.first = 0;
7924 + remove_buff_from_pool(adapter, rx_buff);
7925 + continue;
7926 + }
7927 +@@ -1927,6 +1962,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
7928 + }
7929 +
7930 + memset(scrq->msgs, 0, 4 * PAGE_SIZE);
7931 ++ atomic_set(&scrq->used, 0);
7932 + scrq->cur = 0;
7933 +
7934 + rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
7935 +diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
7936 +index 31277d3bb7dc..ff308b05d68c 100644
7937 +--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
7938 ++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
7939 +@@ -1602,7 +1602,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
7940 + * we have already determined whether we have link or not.
7941 + */
7942 + if (!mac->autoneg)
7943 +- return -E1000_ERR_CONFIG;
7944 ++ return 1;
7945 +
7946 + /* Auto-Neg is enabled. Auto Speed Detection takes care
7947 + * of MAC speed/duplex configuration. So we only need to
7948 +diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
7949 +index f457c5703d0c..db735644b312 100644
7950 +--- a/drivers/net/ethernet/intel/e1000e/mac.c
7951 ++++ b/drivers/net/ethernet/intel/e1000e/mac.c
7952 +@@ -450,7 +450,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
7953 + * we have already determined whether we have link or not.
7954 + */
7955 + if (!mac->autoneg)
7956 +- return -E1000_ERR_CONFIG;
7957 ++ return 1;
7958 +
7959 + /* Auto-Neg is enabled. Auto Speed Detection takes care
7960 + * of MAC speed/duplex configuration. So we only need to
7961 +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
7962 +index 991c2a0dd67e..7a226537877b 100644
7963 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c
7964 ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
7965 +@@ -2329,8 +2329,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
7966 + {
7967 + struct pci_dev *pdev = adapter->pdev;
7968 +
7969 +- ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
7970 +- GFP_KERNEL);
7971 ++ ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma,
7972 ++ GFP_KERNEL);
7973 + if (!ring->desc)
7974 + return -ENOMEM;
7975 +
7976 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
7977 +index d36b799116e4..04dbf64fb1cb 100644
7978 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
7979 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
7980 +@@ -7196,6 +7196,17 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
7981 + }
7982 + i40e_get_oem_version(&pf->hw);
7983 +
7984 ++ if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
7985 ++ ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
7986 ++ hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
7987 ++ /* The following delay is necessary for 4.33 firmware and older
7988 ++ * to recover after EMP reset. 200 ms should suffice but we
7989 ++ * put here 300 ms to be sure that FW is ready to operate
7990 ++ * after reset.
7991 ++ */
7992 ++ mdelay(300);
7993 ++ }
7994 ++
7995 + /* re-verify the eeprom if we just had an EMP reset */
7996 + if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
7997 + i40e_verify_eeprom(pf);
7998 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
7999 +index 9e30cfeac04b..20a8018d41ef 100644
8000 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
8001 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
8002 +@@ -7658,7 +7658,8 @@ static void ixgbe_service_task(struct work_struct *work)
8003 +
8004 + if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
8005 + ixgbe_ptp_overflow_check(adapter);
8006 +- ixgbe_ptp_rx_hang(adapter);
8007 ++ if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)
8008 ++ ixgbe_ptp_rx_hang(adapter);
8009 + ixgbe_ptp_tx_hang(adapter);
8010 + }
8011 +
8012 +diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
8013 +index a539263cd79c..d28f873169a9 100644
8014 +--- a/drivers/net/ethernet/marvell/mvneta.c
8015 ++++ b/drivers/net/ethernet/marvell/mvneta.c
8016 +@@ -1112,6 +1112,7 @@ static void mvneta_port_up(struct mvneta_port *pp)
8017 + }
8018 + mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
8019 +
8020 ++ q_map = 0;
8021 + /* Enable all initialized RXQs. */
8022 + for (queue = 0; queue < rxq_number; queue++) {
8023 + struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
8024 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
8025 +index fdaef00465d7..576b61c119bb 100644
8026 +--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
8027 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
8028 +@@ -46,7 +46,7 @@ config MLX5_MPFS
8029 +
8030 + config MLX5_ESWITCH
8031 + bool "Mellanox Technologies MLX5 SRIOV E-Switch support"
8032 +- depends on MLX5_CORE_EN
8033 ++ depends on MLX5_CORE_EN && NET_SWITCHDEV
8034 + default y
8035 + ---help---
8036 + Mellanox Technologies Ethernet SRIOV E-Switch support in ConnectX NIC.
8037 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
8038 +index e9a1fbcc4adf..3efe45bc2471 100644
8039 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
8040 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
8041 +@@ -1802,7 +1802,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
8042 +
8043 + cmd->checksum_disabled = 1;
8044 + cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
8045 +- cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
8046 ++ cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
8047 +
8048 + cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
8049 + if (cmd->cmdif_rev > CMD_IF_REV) {
8050 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
8051 +index 225b2ad3e15f..337ce9423794 100644
8052 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
8053 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
8054 +@@ -4022,7 +4022,7 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
8055 + }
8056 + }
8057 +
8058 +-#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH)
8059 ++#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
8060 + static const struct switchdev_ops mlx5e_switchdev_ops = {
8061 + .switchdev_port_attr_get = mlx5e_attr_get,
8062 + };
8063 +@@ -4126,7 +4126,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
8064 +
8065 + mlx5e_set_netdev_dev_addr(netdev);
8066 +
8067 +-#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH)
8068 ++#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
8069 + if (MLX5_VPORT_MANAGER(mdev))
8070 + netdev->switchdev_ops = &mlx5e_switchdev_ops;
8071 + #endif
8072 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
8073 +index 5ffd1db4e797..4727e7390834 100644
8074 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
8075 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
8076 +@@ -825,9 +825,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
8077 +
8078 + netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
8079 +
8080 +-#ifdef CONFIG_NET_SWITCHDEV
8081 + netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
8082 +-#endif
8083 +
8084 + netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
8085 + netdev->hw_features |= NETIF_F_HW_TC;
8086 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
8087 +index ede66e6af786..e28f9dab9ceb 100644
8088 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
8089 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
8090 +@@ -2018,7 +2018,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
8091 + if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
8092 + attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
8093 + } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
8094 +- if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
8095 ++ if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
8096 ++ tcf_vlan_push_prio(a))
8097 + return -EOPNOTSUPP;
8098 +
8099 + attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
8100 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
8101 +index f6963b0b4a55..122506daa586 100644
8102 +--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
8103 ++++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
8104 +@@ -107,20 +107,20 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
8105 + MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
8106 + MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),
8107 + MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9),
8108 +- MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x14, 0, 8),
8109 +- MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x14, 9, 2),
8110 +- MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x14, 11, 6),
8111 +- MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32),
8112 +- MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32),
8113 +- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8),
8114 +- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8),
8115 +- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8),
8116 +- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8),
8117 + MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16),
8118 + MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16),
8119 ++ MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
8120 ++ MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
8121 ++ MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
8122 ++ MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32),
8123 ++ MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32),
8124 ++ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8),
8125 ++ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8),
8126 ++ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8),
8127 ++ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8),
8128 + };
8129 +
8130 +-#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38
8131 ++#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40
8132 +
8133 + struct mlxsw_afk_element_inst { /* element instance in actual block */
8134 + const struct mlxsw_afk_element_info *info;
8135 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
8136 +index 99bd6e88ebc7..8b48338b4a70 100644
8137 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
8138 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
8139 +@@ -1417,6 +1417,7 @@ mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
8140 + }
8141 +
8142 + mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
8143 ++ mlxsw_sp_port_vlan->ref_count = 1;
8144 + mlxsw_sp_port_vlan->vid = vid;
8145 + list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
8146 +
8147 +@@ -1444,8 +1445,10 @@ mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
8148 + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
8149 +
8150 + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
8151 +- if (mlxsw_sp_port_vlan)
8152 ++ if (mlxsw_sp_port_vlan) {
8153 ++ mlxsw_sp_port_vlan->ref_count++;
8154 + return mlxsw_sp_port_vlan;
8155 ++ }
8156 +
8157 + return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
8158 + }
8159 +@@ -1454,6 +1457,9 @@ void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8160 + {
8161 + struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
8162 +
8163 ++ if (--mlxsw_sp_port_vlan->ref_count != 0)
8164 ++ return;
8165 ++
8166 + if (mlxsw_sp_port_vlan->bridge_port)
8167 + mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
8168 + else if (fid)
8169 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
8170 +index 88892d47acae..8c4ce0a0cc82 100644
8171 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
8172 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
8173 +@@ -194,6 +194,7 @@ struct mlxsw_sp_port_vlan {
8174 + struct list_head list;
8175 + struct mlxsw_sp_port *mlxsw_sp_port;
8176 + struct mlxsw_sp_fid *fid;
8177 ++ unsigned int ref_count;
8178 + u16 vid;
8179 + struct mlxsw_sp_bridge_port *bridge_port;
8180 + struct list_head bridge_vlan_node;
8181 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
8182 +index bbd238e50f05..54262af4e98f 100644
8183 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
8184 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
8185 +@@ -112,11 +112,11 @@ static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
8186 + [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1,
8187 + [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1,
8188 + [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1,
8189 ++ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
8190 + };
8191 +
8192 + static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
8193 + [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1,
8194 +- [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
8195 + };
8196 +
8197 + static const int *mlxsw_sp_packet_type_sfgc_types[] = {
8198 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
8199 +index af106be8cc08..629bfa0cd3f0 100644
8200 +--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
8201 ++++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
8202 +@@ -2471,7 +2471,10 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
8203 + if (rc)
8204 + return rc;
8205 +
8206 +- /* Free Task CXT */
8207 ++ /* Free Task CXT ( Intentionally RoCE as task-id is shared between
8208 ++ * RoCE and iWARP )
8209 ++ */
8210 ++ proto = PROTOCOLID_ROCE;
8211 + rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
8212 + qed_cxt_get_proto_tid_count(p_hwfn, proto));
8213 + if (rc)
8214 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
8215 +index 6fb99518a61f..1b6554866138 100644
8216 +--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
8217 ++++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
8218 +@@ -360,6 +360,7 @@ static void qed_rdma_free(struct qed_hwfn *p_hwfn)
8219 + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
8220 +
8221 + qed_rdma_resc_free(p_hwfn);
8222 ++ qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto);
8223 + }
8224 +
8225 + static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
8226 +diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
8227 +index 6fc854b120b0..d50cc2635477 100644
8228 +--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
8229 ++++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
8230 +@@ -320,13 +320,11 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
8231 + barrier();
8232 + writel(txq->tx_db.raw, txq->doorbell_addr);
8233 +
8234 +- /* mmiowb is needed to synchronize doorbell writes from more than one
8235 +- * processor. It guarantees that the write arrives to the device before
8236 +- * the queue lock is released and another start_xmit is called (possibly
8237 +- * on another CPU). Without this barrier, the next doorbell can bypass
8238 +- * this doorbell. This is applicable to IA64/Altix systems.
8239 ++ /* Fence required to flush the write combined buffer, since another
8240 ++ * CPU may write to the same doorbell address and data may be lost
8241 ++ * due to relaxed order nature of write combined bar.
8242 + */
8243 +- mmiowb();
8244 ++ wmb();
8245 + }
8246 +
8247 + static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
8248 +@@ -1247,16 +1245,10 @@ static int qede_rx_process_cqe(struct qede_dev *edev,
8249 +
8250 + csum_flag = qede_check_csum(parse_flag);
8251 + if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
8252 +- if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) {
8253 ++ if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag))
8254 + rxq->rx_ip_frags++;
8255 +- } else {
8256 +- DP_NOTICE(edev,
8257 +- "CQE has error, flags = %x, dropping incoming packet\n",
8258 +- parse_flag);
8259 ++ else
8260 + rxq->rx_hw_errors++;
8261 +- qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
8262 +- return 0;
8263 +- }
8264 + }
8265 +
8266 + /* Basic validation passed; Need to prepare an SKB. This would also
8267 +diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
8268 +index 3ed9033e56db..44f797ab5d15 100644
8269 +--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
8270 ++++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
8271 +@@ -1204,9 +1204,9 @@ void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q)
8272 + while (tx_q->tpd.consume_idx != hw_consume_idx) {
8273 + tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx);
8274 + if (tpbuf->dma_addr) {
8275 +- dma_unmap_single(adpt->netdev->dev.parent,
8276 +- tpbuf->dma_addr, tpbuf->length,
8277 +- DMA_TO_DEVICE);
8278 ++ dma_unmap_page(adpt->netdev->dev.parent,
8279 ++ tpbuf->dma_addr, tpbuf->length,
8280 ++ DMA_TO_DEVICE);
8281 + tpbuf->dma_addr = 0;
8282 + }
8283 +
8284 +@@ -1363,9 +1363,11 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
8285 +
8286 + tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
8287 + tpbuf->length = mapped_len;
8288 +- tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent,
8289 +- skb->data, tpbuf->length,
8290 +- DMA_TO_DEVICE);
8291 ++ tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
8292 ++ virt_to_page(skb->data),
8293 ++ offset_in_page(skb->data),
8294 ++ tpbuf->length,
8295 ++ DMA_TO_DEVICE);
8296 + ret = dma_mapping_error(adpt->netdev->dev.parent,
8297 + tpbuf->dma_addr);
8298 + if (ret)
8299 +@@ -1381,9 +1383,12 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
8300 + if (mapped_len < len) {
8301 + tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
8302 + tpbuf->length = len - mapped_len;
8303 +- tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent,
8304 +- skb->data + mapped_len,
8305 +- tpbuf->length, DMA_TO_DEVICE);
8306 ++ tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
8307 ++ virt_to_page(skb->data +
8308 ++ mapped_len),
8309 ++ offset_in_page(skb->data +
8310 ++ mapped_len),
8311 ++ tpbuf->length, DMA_TO_DEVICE);
8312 + ret = dma_mapping_error(adpt->netdev->dev.parent,
8313 + tpbuf->dma_addr);
8314 + if (ret)
8315 +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
8316 +index db31963c5d9d..38080e95a82d 100644
8317 +--- a/drivers/net/ethernet/renesas/sh_eth.c
8318 ++++ b/drivers/net/ethernet/renesas/sh_eth.c
8319 +@@ -753,6 +753,7 @@ static struct sh_eth_cpu_data sh7757_data = {
8320 + .rpadir = 1,
8321 + .rpadir_value = 2 << 16,
8322 + .rtrate = 1,
8323 ++ .dual_port = 1,
8324 + };
8325 +
8326 + #define SH_GIGA_ETH_BASE 0xfee00000UL
8327 +@@ -831,6 +832,7 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
8328 + .no_trimd = 1,
8329 + .no_ade = 1,
8330 + .tsu = 1,
8331 ++ .dual_port = 1,
8332 + };
8333 +
8334 + /* SH7734 */
8335 +@@ -901,6 +903,7 @@ static struct sh_eth_cpu_data sh7763_data = {
8336 + .tsu = 1,
8337 + .irq_flags = IRQF_SHARED,
8338 + .magic = 1,
8339 ++ .dual_port = 1,
8340 + };
8341 +
8342 + static struct sh_eth_cpu_data sh7619_data = {
8343 +@@ -933,6 +936,7 @@ static struct sh_eth_cpu_data sh771x_data = {
8344 + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
8345 + EESIPR_PREIP | EESIPR_CERFIP,
8346 + .tsu = 1,
8347 ++ .dual_port = 1,
8348 + };
8349 +
8350 + static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
8351 +@@ -2911,7 +2915,7 @@ static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
8352 + /* SuperH's TSU register init function */
8353 + static void sh_eth_tsu_init(struct sh_eth_private *mdp)
8354 + {
8355 +- if (sh_eth_is_rz_fast_ether(mdp)) {
8356 ++ if (!mdp->cd->dual_port) {
8357 + sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
8358 + sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
8359 + TSU_FWSLC); /* Enable POST registers */
8360 +diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
8361 +index a6753ccba711..6ab3d46d4f28 100644
8362 +--- a/drivers/net/ethernet/renesas/sh_eth.h
8363 ++++ b/drivers/net/ethernet/renesas/sh_eth.h
8364 +@@ -509,6 +509,7 @@ struct sh_eth_cpu_data {
8365 + unsigned rmiimode:1; /* EtherC has RMIIMODE register */
8366 + unsigned rtrate:1; /* EtherC has RTRATE register */
8367 + unsigned magic:1; /* EtherC has ECMR.MPDE and ECSR.MPD */
8368 ++ unsigned dual_port:1; /* Dual EtherC/E-DMAC */
8369 + };
8370 +
8371 + struct sh_eth_private {
8372 +diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
8373 +index 012fb66eed8d..f0afb88d7bc2 100644
8374 +--- a/drivers/net/ethernet/smsc/smsc911x.c
8375 ++++ b/drivers/net/ethernet/smsc/smsc911x.c
8376 +@@ -2335,14 +2335,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
8377 + pdata = netdev_priv(dev);
8378 + BUG_ON(!pdata);
8379 + BUG_ON(!pdata->ioaddr);
8380 +- WARN_ON(dev->phydev);
8381 +
8382 + SMSC_TRACE(pdata, ifdown, "Stopping driver");
8383 +
8384 ++ unregister_netdev(dev);
8385 ++
8386 + mdiobus_unregister(pdata->mii_bus);
8387 + mdiobus_free(pdata->mii_bus);
8388 +
8389 +- unregister_netdev(dev);
8390 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
8391 + "smsc911x-memory");
8392 + if (!res)
8393 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
8394 +index d0cc73795056..9866d2e34cdd 100644
8395 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
8396 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
8397 +@@ -1829,6 +1829,11 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
8398 + if (unlikely(status & tx_dma_own))
8399 + break;
8400 +
8401 ++ /* Make sure descriptor fields are read after reading
8402 ++ * the own bit.
8403 ++ */
8404 ++ dma_rmb();
8405 ++
8406 + /* Just consider the last segment and ...*/
8407 + if (likely(!(status & tx_not_ls))) {
8408 + /* ... verify the status error condition */
8409 +@@ -2368,7 +2373,7 @@ static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
8410 + continue;
8411 +
8412 + packet = priv->plat->rx_queues_cfg[queue].pkt_route;
8413 +- priv->hw->mac->rx_queue_prio(priv->hw, packet, queue);
8414 ++ priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
8415 + }
8416 + }
8417 +
8418 +@@ -2918,8 +2923,15 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
8419 + tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
8420 +
8421 + /* If context desc is used to change MSS */
8422 +- if (mss_desc)
8423 ++ if (mss_desc) {
8424 ++ /* Make sure that first descriptor has been completely
8425 ++ * written, including its own bit. This is because MSS is
8426 ++ * actually before first descriptor, so we need to make
8427 ++ * sure that MSS's own bit is the last thing written.
8428 ++ */
8429 ++ dma_wmb();
8430 + priv->hw->desc->set_tx_owner(mss_desc);
8431 ++ }
8432 +
8433 + /* The own bit must be the latest setting done when prepare the
8434 + * descriptor and then barrier is needed to make sure that
8435 +diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
8436 +index 0b95105f7060..65347d2f139b 100644
8437 +--- a/drivers/net/ethernet/sun/sunvnet.c
8438 ++++ b/drivers/net/ethernet/sun/sunvnet.c
8439 +@@ -311,7 +311,7 @@ static struct vnet *vnet_new(const u64 *local_mac,
8440 + dev->ethtool_ops = &vnet_ethtool_ops;
8441 + dev->watchdog_timeo = VNET_TX_TIMEOUT;
8442 +
8443 +- dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE |
8444 ++ dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_ALL_TSO |
8445 + NETIF_F_HW_CSUM | NETIF_F_SG;
8446 + dev->features = dev->hw_features;
8447 +
8448 +diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
8449 +index 99be63eacaeb..4647ecbe6f36 100644
8450 +--- a/drivers/net/hyperv/netvsc.c
8451 ++++ b/drivers/net/hyperv/netvsc.c
8452 +@@ -1261,7 +1261,7 @@ void netvsc_channel_cb(void *context)
8453 + /* disable interupts from host */
8454 + hv_begin_read(rbi);
8455 +
8456 +- __napi_schedule(&nvchan->napi);
8457 ++ __napi_schedule_irqoff(&nvchan->napi);
8458 + }
8459 + }
8460 +
8461 +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
8462 +index 11b46c8d2d67..3a7241c8713c 100644
8463 +--- a/drivers/net/hyperv/netvsc_drv.c
8464 ++++ b/drivers/net/hyperv/netvsc_drv.c
8465 +@@ -66,12 +66,43 @@ static int debug = -1;
8466 + module_param(debug, int, S_IRUGO);
8467 + MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
8468 +
8469 +-static void netvsc_set_multicast_list(struct net_device *net)
8470 ++static void netvsc_change_rx_flags(struct net_device *net, int change)
8471 + {
8472 +- struct net_device_context *net_device_ctx = netdev_priv(net);
8473 +- struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
8474 ++ struct net_device_context *ndev_ctx = netdev_priv(net);
8475 ++ struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
8476 ++ int inc;
8477 +
8478 +- rndis_filter_update(nvdev);
8479 ++ if (!vf_netdev)
8480 ++ return;
8481 ++
8482 ++ if (change & IFF_PROMISC) {
8483 ++ inc = (net->flags & IFF_PROMISC) ? 1 : -1;
8484 ++ dev_set_promiscuity(vf_netdev, inc);
8485 ++ }
8486 ++
8487 ++ if (change & IFF_ALLMULTI) {
8488 ++ inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
8489 ++ dev_set_allmulti(vf_netdev, inc);
8490 ++ }
8491 ++}
8492 ++
8493 ++static void netvsc_set_rx_mode(struct net_device *net)
8494 ++{
8495 ++ struct net_device_context *ndev_ctx = netdev_priv(net);
8496 ++ struct net_device *vf_netdev;
8497 ++ struct netvsc_device *nvdev;
8498 ++
8499 ++ rcu_read_lock();
8500 ++ vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
8501 ++ if (vf_netdev) {
8502 ++ dev_uc_sync(vf_netdev, net);
8503 ++ dev_mc_sync(vf_netdev, net);
8504 ++ }
8505 ++
8506 ++ nvdev = rcu_dereference(ndev_ctx->nvdev);
8507 ++ if (nvdev)
8508 ++ rndis_filter_update(nvdev);
8509 ++ rcu_read_unlock();
8510 + }
8511 +
8512 + static int netvsc_open(struct net_device *net)
8513 +@@ -1582,7 +1613,8 @@ static const struct net_device_ops device_ops = {
8514 + .ndo_open = netvsc_open,
8515 + .ndo_stop = netvsc_close,
8516 + .ndo_start_xmit = netvsc_start_xmit,
8517 +- .ndo_set_rx_mode = netvsc_set_multicast_list,
8518 ++ .ndo_change_rx_flags = netvsc_change_rx_flags,
8519 ++ .ndo_set_rx_mode = netvsc_set_rx_mode,
8520 + .ndo_change_mtu = netvsc_change_mtu,
8521 + .ndo_validate_addr = eth_validate_addr,
8522 + .ndo_set_mac_address = netvsc_set_mac_addr,
8523 +@@ -1814,6 +1846,15 @@ static void __netvsc_vf_setup(struct net_device *ndev,
8524 + netdev_warn(vf_netdev,
8525 + "unable to change mtu to %u\n", ndev->mtu);
8526 +
8527 ++ /* set multicast etc flags on VF */
8528 ++ dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);
8529 ++
8530 ++ /* sync address list from ndev to VF */
8531 ++ netif_addr_lock_bh(ndev);
8532 ++ dev_uc_sync(vf_netdev, ndev);
8533 ++ dev_mc_sync(vf_netdev, ndev);
8534 ++ netif_addr_unlock_bh(ndev);
8535 ++
8536 + if (netif_running(ndev)) {
8537 + ret = dev_open(vf_netdev);
8538 + if (ret)
8539 +diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
8540 +index 6dde92c1c113..d1ae184008b4 100644
8541 +--- a/drivers/net/hyperv/rndis_filter.c
8542 ++++ b/drivers/net/hyperv/rndis_filter.c
8543 +@@ -850,15 +850,19 @@ static void rndis_set_multicast(struct work_struct *w)
8544 + {
8545 + struct rndis_device *rdev
8546 + = container_of(w, struct rndis_device, mcast_work);
8547 ++ u32 filter = NDIS_PACKET_TYPE_DIRECTED;
8548 ++ unsigned int flags = rdev->ndev->flags;
8549 +
8550 +- if (rdev->ndev->flags & IFF_PROMISC)
8551 +- rndis_filter_set_packet_filter(rdev,
8552 +- NDIS_PACKET_TYPE_PROMISCUOUS);
8553 +- else
8554 +- rndis_filter_set_packet_filter(rdev,
8555 +- NDIS_PACKET_TYPE_BROADCAST |
8556 +- NDIS_PACKET_TYPE_ALL_MULTICAST |
8557 +- NDIS_PACKET_TYPE_DIRECTED);
8558 ++ if (flags & IFF_PROMISC) {
8559 ++ filter = NDIS_PACKET_TYPE_PROMISCUOUS;
8560 ++ } else {
8561 ++ if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI))
8562 ++ filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
8563 ++ if (flags & IFF_BROADCAST)
8564 ++ filter |= NDIS_PACKET_TYPE_BROADCAST;
8565 ++ }
8566 ++
8567 ++ rndis_filter_set_packet_filter(rdev, filter);
8568 + }
8569 +
8570 + void rndis_filter_update(struct netvsc_device *nvdev)
8571 +diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
8572 +index 24a1eabbbc9d..22e466ea919a 100644
8573 +--- a/drivers/net/ieee802154/ca8210.c
8574 ++++ b/drivers/net/ieee802154/ca8210.c
8575 +@@ -2493,13 +2493,14 @@ static ssize_t ca8210_test_int_user_write(
8576 + struct ca8210_priv *priv = filp->private_data;
8577 + u8 command[CA8210_SPI_BUF_SIZE];
8578 +
8579 +- if (len > CA8210_SPI_BUF_SIZE) {
8580 ++ memset(command, SPI_IDLE, 6);
8581 ++ if (len > CA8210_SPI_BUF_SIZE || len < 2) {
8582 + dev_warn(
8583 + &priv->spi->dev,
8584 +- "userspace requested erroneously long write (%zu)\n",
8585 ++ "userspace requested erroneous write length (%zu)\n",
8586 + len
8587 + );
8588 +- return -EMSGSIZE;
8589 ++ return -EBADE;
8590 + }
8591 +
8592 + ret = copy_from_user(command, in_buf, len);
8593 +@@ -2511,6 +2512,13 @@ static ssize_t ca8210_test_int_user_write(
8594 + );
8595 + return -EIO;
8596 + }
8597 ++ if (len != command[1] + 2) {
8598 ++ dev_err(
8599 ++ &priv->spi->dev,
8600 ++ "write len does not match packet length field\n"
8601 ++ );
8602 ++ return -EBADE;
8603 ++ }
8604 +
8605 + ret = ca8210_test_check_upstream(command, priv->spi);
8606 + if (ret == 0) {
8607 +diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
8608 +index 0f35597553f4..963a02c988e9 100644
8609 +--- a/drivers/net/macvlan.c
8610 ++++ b/drivers/net/macvlan.c
8611 +@@ -1448,7 +1448,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
8612 + /* the macvlan port may be freed by macvlan_uninit when fail to register.
8613 + * so we destroy the macvlan port only when it's valid.
8614 + */
8615 +- if (create && macvlan_port_get_rtnl(dev))
8616 ++ if (create && macvlan_port_get_rtnl(lowerdev))
8617 + macvlan_port_destroy(port->dev);
8618 + return err;
8619 + }
8620 +diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
8621 +index cbd629822f04..26fbbd3ffe33 100644
8622 +--- a/drivers/net/phy/dp83640.c
8623 ++++ b/drivers/net/phy/dp83640.c
8624 +@@ -1207,6 +1207,23 @@ static void dp83640_remove(struct phy_device *phydev)
8625 + kfree(dp83640);
8626 + }
8627 +
8628 ++static int dp83640_soft_reset(struct phy_device *phydev)
8629 ++{
8630 ++ int ret;
8631 ++
8632 ++ ret = genphy_soft_reset(phydev);
8633 ++ if (ret < 0)
8634 ++ return ret;
8635 ++
8636 ++ /* From DP83640 datasheet: "Software driver code must wait 3 us
8637 ++ * following a software reset before allowing further serial MII
8638 ++ * operations with the DP83640."
8639 ++ */
8640 ++ udelay(10); /* Taking udelay inaccuracy into account */
8641 ++
8642 ++ return 0;
8643 ++}
8644 ++
8645 + static int dp83640_config_init(struct phy_device *phydev)
8646 + {
8647 + struct dp83640_private *dp83640 = phydev->priv;
8648 +@@ -1501,6 +1518,7 @@ static struct phy_driver dp83640_driver = {
8649 + .flags = PHY_HAS_INTERRUPT,
8650 + .probe = dp83640_probe,
8651 + .remove = dp83640_remove,
8652 ++ .soft_reset = dp83640_soft_reset,
8653 + .config_init = dp83640_config_init,
8654 + .config_aneg = genphy_config_aneg,
8655 + .read_status = genphy_read_status,
8656 +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
8657 +index 1fb464837b3e..9881edc568ba 100644
8658 +--- a/drivers/net/usb/lan78xx.c
8659 ++++ b/drivers/net/usb/lan78xx.c
8660 +@@ -2083,10 +2083,6 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
8661 +
8662 + dev->fc_autoneg = phydev->autoneg;
8663 +
8664 +- phy_start(phydev);
8665 +-
8666 +- netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
8667 +-
8668 + return 0;
8669 +
8670 + error:
8671 +@@ -2352,6 +2348,7 @@ static int lan78xx_reset(struct lan78xx_net *dev)
8672 + u32 buf;
8673 + int ret = 0;
8674 + unsigned long timeout;
8675 ++ u8 sig;
8676 +
8677 + ret = lan78xx_read_reg(dev, HW_CFG, &buf);
8678 + buf |= HW_CFG_LRST_;
8679 +@@ -2451,6 +2448,15 @@ static int lan78xx_reset(struct lan78xx_net *dev)
8680 + /* LAN7801 only has RGMII mode */
8681 + if (dev->chipid == ID_REV_CHIP_ID_7801_)
8682 + buf &= ~MAC_CR_GMII_EN_;
8683 ++
8684 ++ if (dev->chipid == ID_REV_CHIP_ID_7800_) {
8685 ++ ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
8686 ++ if (!ret && sig != EEPROM_INDICATOR) {
8687 ++ /* Implies there is no external eeprom. Set mac speed */
8688 ++ netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
8689 ++ buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
8690 ++ }
8691 ++ }
8692 + ret = lan78xx_write_reg(dev, MAC_CR, buf);
8693 +
8694 + ret = lan78xx_read_reg(dev, MAC_TX, &buf);
8695 +@@ -2513,9 +2519,9 @@ static int lan78xx_open(struct net_device *net)
8696 + if (ret < 0)
8697 + goto done;
8698 +
8699 +- ret = lan78xx_phy_init(dev);
8700 +- if (ret < 0)
8701 +- goto done;
8702 ++ phy_start(net->phydev);
8703 ++
8704 ++ netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
8705 +
8706 + /* for Link Check */
8707 + if (dev->urb_intr) {
8708 +@@ -2576,13 +2582,8 @@ static int lan78xx_stop(struct net_device *net)
8709 + if (timer_pending(&dev->stat_monitor))
8710 + del_timer_sync(&dev->stat_monitor);
8711 +
8712 +- phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
8713 +- phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
8714 +-
8715 +- phy_stop(net->phydev);
8716 +- phy_disconnect(net->phydev);
8717 +-
8718 +- net->phydev = NULL;
8719 ++ if (net->phydev)
8720 ++ phy_stop(net->phydev);
8721 +
8722 + clear_bit(EVENT_DEV_OPEN, &dev->flags);
8723 + netif_stop_queue(net);
8724 +@@ -3497,8 +3498,13 @@ static void lan78xx_disconnect(struct usb_interface *intf)
8725 + return;
8726 +
8727 + udev = interface_to_usbdev(intf);
8728 +-
8729 + net = dev->net;
8730 ++
8731 ++ phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
8732 ++ phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
8733 ++
8734 ++ phy_disconnect(net->phydev);
8735 ++
8736 + unregister_netdev(net);
8737 +
8738 + cancel_delayed_work_sync(&dev->wq);
8739 +@@ -3658,8 +3664,14 @@ static int lan78xx_probe(struct usb_interface *intf,
8740 + pm_runtime_set_autosuspend_delay(&udev->dev,
8741 + DEFAULT_AUTOSUSPEND_DELAY);
8742 +
8743 ++ ret = lan78xx_phy_init(dev);
8744 ++ if (ret < 0)
8745 ++ goto out4;
8746 ++
8747 + return 0;
8748 +
8749 ++out4:
8750 ++ unregister_netdev(netdev);
8751 + out3:
8752 + lan78xx_unbind(dev, intf);
8753 + out2:
8754 +@@ -4007,7 +4019,7 @@ static int lan78xx_reset_resume(struct usb_interface *intf)
8755 +
8756 + lan78xx_reset(dev);
8757 +
8758 +- lan78xx_phy_init(dev);
8759 ++ phy_start(dev->net->phydev);
8760 +
8761 + return lan78xx_resume(intf);
8762 + }
8763 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
8764 +index e522085ecbf7..8e06f308ce44 100644
8765 +--- a/drivers/net/usb/qmi_wwan.c
8766 ++++ b/drivers/net/usb/qmi_wwan.c
8767 +@@ -1184,6 +1184,7 @@ static const struct usb_device_id products[] = {
8768 + {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
8769 + {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
8770 + {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
8771 ++ {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
8772 + {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
8773 + {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
8774 + {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
8775 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
8776 +index d51d9abf7986..aa88b640cb6c 100644
8777 +--- a/drivers/net/usb/r8152.c
8778 ++++ b/drivers/net/usb/r8152.c
8779 +@@ -1793,7 +1793,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
8780 +
8781 + tx_data += len;
8782 + agg->skb_len += len;
8783 +- agg->skb_num++;
8784 ++ agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1;
8785 +
8786 + dev_kfree_skb_any(skb);
8787 +
8788 +diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
8789 +index d0a113743195..7a6a1fe79309 100644
8790 +--- a/drivers/net/usb/smsc75xx.c
8791 ++++ b/drivers/net/usb/smsc75xx.c
8792 +@@ -954,10 +954,11 @@ static int smsc75xx_set_features(struct net_device *netdev,
8793 + /* it's racing here! */
8794 +
8795 + ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
8796 +- if (ret < 0)
8797 ++ if (ret < 0) {
8798 + netdev_warn(dev->net, "Error writing RFE_CTL\n");
8799 +-
8800 +- return ret;
8801 ++ return ret;
8802 ++ }
8803 ++ return 0;
8804 + }
8805 +
8806 + static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
8807 +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
8808 +index bb15b3012aa5..948611317c97 100644
8809 +--- a/drivers/net/virtio_net.c
8810 ++++ b/drivers/net/virtio_net.c
8811 +@@ -513,7 +513,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
8812 + void *orig_data;
8813 + u32 act;
8814 +
8815 +- if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
8816 ++ if (unlikely(hdr->hdr.gso_type))
8817 + goto err_xdp;
8818 +
8819 + if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
8820 +@@ -2655,8 +2655,8 @@ static int virtnet_probe(struct virtio_device *vdev)
8821 +
8822 + /* Assume link up if device can't report link status,
8823 + otherwise get link status from config. */
8824 ++ netif_carrier_off(dev);
8825 + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
8826 +- netif_carrier_off(dev);
8827 + schedule_work(&vi->config_work);
8828 + } else {
8829 + vi->status = VIRTIO_NET_S_LINK_UP;
8830 +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
8831 +index c1772215702a..df11bb449988 100644
8832 +--- a/drivers/net/wireless/ath/ath10k/mac.c
8833 ++++ b/drivers/net/wireless/ath/ath10k/mac.c
8834 +@@ -7059,10 +7059,20 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
8835 + {
8836 + struct ath10k *ar = hw->priv;
8837 + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
8838 ++ struct ath10k_vif *arvif = (void *)vif->drv_priv;
8839 ++ struct ath10k_peer *peer;
8840 + u32 bw, smps;
8841 +
8842 + spin_lock_bh(&ar->data_lock);
8843 +
8844 ++ peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
8845 ++ if (!peer) {
8846 ++ spin_unlock_bh(&ar->data_lock);
8847 ++ ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n",
8848 ++ sta->addr, arvif->vdev_id);
8849 ++ return;
8850 ++ }
8851 ++
8852 + ath10k_dbg(ar, ATH10K_DBG_MAC,
8853 + "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
8854 + sta->addr, changed, sta->bandwidth, sta->rx_nss,
8855 +@@ -7810,6 +7820,7 @@ static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
8856 + .max_interfaces = 8,
8857 + .num_different_channels = 1,
8858 + .beacon_int_infra_match = true,
8859 ++ .beacon_int_min_gcd = 1,
8860 + #ifdef CONFIG_ATH10K_DFS_CERTIFIED
8861 + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
8862 + BIT(NL80211_CHAN_WIDTH_20) |
8863 +@@ -7933,6 +7944,7 @@ static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
8864 + .max_interfaces = 16,
8865 + .num_different_channels = 1,
8866 + .beacon_int_infra_match = true,
8867 ++ .beacon_int_min_gcd = 1,
8868 + #ifdef CONFIG_ATH10K_DFS_CERTIFIED
8869 + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
8870 + BIT(NL80211_CHAN_WIDTH_20) |
8871 +diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
8872 +index 5e77fe1f5b0d..a41bcbda1d9e 100644
8873 +--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
8874 ++++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
8875 +@@ -479,14 +479,16 @@ ath_cmn_is_fft_buf_full(struct ath_spec_scan_priv *spec_priv)
8876 + {
8877 + int i = 0;
8878 + int ret = 0;
8879 ++ struct rchan_buf *buf;
8880 + struct rchan *rc = spec_priv->rfs_chan_spec_scan;
8881 +
8882 +- for_each_online_cpu(i)
8883 +- ret += relay_buf_full(*per_cpu_ptr(rc->buf, i));
8884 +-
8885 +- i = num_online_cpus();
8886 ++ for_each_possible_cpu(i) {
8887 ++ if ((buf = *per_cpu_ptr(rc->buf, i))) {
8888 ++ ret += relay_buf_full(buf);
8889 ++ }
8890 ++ }
8891 +
8892 +- if (ret == i)
8893 ++ if (ret)
8894 + return 1;
8895 + else
8896 + return 0;
8897 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
8898 +index 4157c90ad973..083e5ce7eac7 100644
8899 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
8900 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
8901 +@@ -6916,7 +6916,7 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
8902 + return;
8903 +
8904 + /* ignore non-ISO3166 country codes */
8905 +- for (i = 0; i < sizeof(req->alpha2); i++)
8906 ++ for (i = 0; i < 2; i++)
8907 + if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') {
8908 + brcmf_err("not an ISO3166 code (0x%02x 0x%02x)\n",
8909 + req->alpha2[0], req->alpha2[1]);
8910 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
8911 +index 3721a3ed358b..f824bebceb06 100644
8912 +--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
8913 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
8914 +@@ -211,7 +211,7 @@ enum {
8915 + * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end
8916 + * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
8917 + * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
8918 +- * @T2_V2_START_IMMEDIATELY: start time event immediately
8919 ++ * @TE_V2_START_IMMEDIATELY: start time event immediately
8920 + * @TE_V2_DEP_OTHER: depends on another time event
8921 + * @TE_V2_DEP_TSF: depends on a specific time
8922 + * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of tha same MAC
8923 +@@ -230,7 +230,7 @@ enum iwl_time_event_policy {
8924 + TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
8925 + TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
8926 + TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
8927 +- T2_V2_START_IMMEDIATELY = BIT(11),
8928 ++ TE_V2_START_IMMEDIATELY = BIT(11),
8929 +
8930 + /* placement characteristics */
8931 + TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
8932 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
8933 +index f5dd7d83cd0a..2fa7ec466275 100644
8934 +--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
8935 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
8936 +@@ -8,6 +8,7 @@
8937 + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
8938 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
8939 + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
8940 ++ * Copyright(c) 2018 Intel Corporation
8941 + *
8942 + * This program is free software; you can redistribute it and/or modify
8943 + * it under the terms of version 2 of the GNU General Public License as
8944 +@@ -33,6 +34,7 @@
8945 + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
8946 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
8947 + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
8948 ++ * Copyright(c) 2018 Intel Corporation
8949 + * All rights reserved.
8950 + *
8951 + * Redistribution and use in source and binary forms, with or without
8952 +@@ -928,7 +930,6 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
8953 +
8954 + out:
8955 + iwl_fw_free_dump_desc(fwrt);
8956 +- fwrt->dump.trig = NULL;
8957 + clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
8958 + }
8959 + IWL_EXPORT_SYMBOL(iwl_fw_error_dump);
8960 +@@ -1084,6 +1085,14 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
8961 + fwrt->ops->dump_start(fwrt->ops_ctx))
8962 + return;
8963 +
8964 ++ if (fwrt->ops && fwrt->ops->fw_running &&
8965 ++ !fwrt->ops->fw_running(fwrt->ops_ctx)) {
8966 ++ IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
8967 ++ iwl_fw_free_dump_desc(fwrt);
8968 ++ clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
8969 ++ goto out;
8970 ++ }
8971 ++
8972 + if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
8973 + /* stop recording */
8974 + iwl_fw_dbg_stop_recording(fwrt);
8975 +@@ -1117,7 +1126,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
8976 + iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl);
8977 + }
8978 + }
8979 +-
8980 ++out:
8981 + if (fwrt->ops && fwrt->ops->dump_end)
8982 + fwrt->ops->dump_end(fwrt->ops_ctx);
8983 + }
8984 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
8985 +index 223fb77a3aa9..72259bff9922 100644
8986 +--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
8987 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
8988 +@@ -8,6 +8,7 @@
8989 + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
8990 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
8991 + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
8992 ++ * Copyright(c) 2018 Intel Corporation
8993 + *
8994 + * This program is free software; you can redistribute it and/or modify
8995 + * it under the terms of version 2 of the GNU General Public License as
8996 +@@ -33,6 +34,7 @@
8997 + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
8998 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
8999 + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
9000 ++ * Copyright(c) 2018 Intel Corporation
9001 + * All rights reserved.
9002 + *
9003 + * Redistribution and use in source and binary forms, with or without
9004 +@@ -91,6 +93,7 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
9005 + if (fwrt->dump.desc != &iwl_dump_desc_assert)
9006 + kfree(fwrt->dump.desc);
9007 + fwrt->dump.desc = NULL;
9008 ++ fwrt->dump.trig = NULL;
9009 + }
9010 +
9011 + void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
9012 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
9013 +index 50cfb6d795a5..fb1ad3c5c93c 100644
9014 +--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
9015 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
9016 +@@ -6,6 +6,7 @@
9017 + * GPL LICENSE SUMMARY
9018 + *
9019 + * Copyright(c) 2017 Intel Deutschland GmbH
9020 ++ * Copyright(c) 2018 Intel Corporation
9021 + *
9022 + * This program is free software; you can redistribute it and/or modify
9023 + * it under the terms of version 2 of the GNU General Public License as
9024 +@@ -26,6 +27,7 @@
9025 + * BSD LICENSE
9026 + *
9027 + * Copyright(c) 2017 Intel Deutschland GmbH
9028 ++ * Copyright(c) 2018 Intel Corporation
9029 + * All rights reserved.
9030 + *
9031 + * Redistribution and use in source and binary forms, with or without
9032 +@@ -68,6 +70,7 @@
9033 + struct iwl_fw_runtime_ops {
9034 + int (*dump_start)(void *ctx);
9035 + void (*dump_end)(void *ctx);
9036 ++ bool (*fw_running)(void *ctx);
9037 + };
9038 +
9039 + #define MAX_NUM_LMAC 2
9040 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
9041 +index e97904c2c4d4..714996187236 100644
9042 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
9043 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
9044 +@@ -8,6 +8,7 @@
9045 + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9046 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
9047 + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
9048 ++ * Copyright(c) 2018 Intel Corporation
9049 + *
9050 + * This program is free software; you can redistribute it and/or modify
9051 + * it under the terms of version 2 of the GNU General Public License as
9052 +@@ -35,6 +36,7 @@
9053 + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9054 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
9055 + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
9056 ++ * Copyright(c) 2018 Intel Corporation
9057 + * All rights reserved.
9058 + *
9059 + * Redistribution and use in source and binary forms, with or without
9060 +@@ -1209,9 +1211,6 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
9061 + {
9062 + int ret;
9063 +
9064 +- if (!iwl_mvm_firmware_running(mvm))
9065 +- return -EIO;
9066 +-
9067 + ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
9068 + if (ret)
9069 + return ret;
9070 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
9071 +index 2f22e14e00fe..8ba16fc24e3a 100644
9072 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
9073 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
9074 +@@ -438,7 +438,8 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
9075 + }
9076 +
9077 + /* Allocate the CAB queue for softAP and GO interfaces */
9078 +- if (vif->type == NL80211_IFTYPE_AP) {
9079 ++ if (vif->type == NL80211_IFTYPE_AP ||
9080 ++ vif->type == NL80211_IFTYPE_ADHOC) {
9081 + /*
9082 + * For TVQM this will be overwritten later with the FW assigned
9083 + * queue value (when queue is enabled).
9084 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
9085 +index a9ac872226fd..db1fab9aa1c6 100644
9086 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
9087 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
9088 +@@ -8,6 +8,7 @@
9089 + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9090 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
9091 + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
9092 ++ * Copyright(c) 2018 Intel Corporation
9093 + *
9094 + * This program is free software; you can redistribute it and/or modify
9095 + * it under the terms of version 2 of the GNU General Public License as
9096 +@@ -2127,15 +2128,40 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
9097 + if (ret)
9098 + goto out_remove;
9099 +
9100 +- ret = iwl_mvm_add_mcast_sta(mvm, vif);
9101 +- if (ret)
9102 +- goto out_unbind;
9103 +-
9104 +- /* Send the bcast station. At this stage the TBTT and DTIM time events
9105 +- * are added and applied to the scheduler */
9106 +- ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
9107 +- if (ret)
9108 +- goto out_rm_mcast;
9109 ++ /*
9110 ++ * This is not very nice, but the simplest:
9111 ++ * For older FWs adding the mcast sta before the bcast station may
9112 ++ * cause assert 0x2b00.
9113 ++ * This is fixed in later FW so make the order of addition depend on
9114 ++ * the TLV
9115 ++ */
9116 ++ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
9117 ++ ret = iwl_mvm_add_mcast_sta(mvm, vif);
9118 ++ if (ret)
9119 ++ goto out_unbind;
9120 ++ /*
9121 ++ * Send the bcast station. At this stage the TBTT and DTIM time
9122 ++ * events are added and applied to the scheduler
9123 ++ */
9124 ++ ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
9125 ++ if (ret) {
9126 ++ iwl_mvm_rm_mcast_sta(mvm, vif);
9127 ++ goto out_unbind;
9128 ++ }
9129 ++ } else {
9130 ++ /*
9131 ++ * Send the bcast station. At this stage the TBTT and DTIM time
9132 ++ * events are added and applied to the scheduler
9133 ++ */
9134 ++ ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
9135 ++ if (ret)
9136 ++ goto out_unbind;
9137 ++ ret = iwl_mvm_add_mcast_sta(mvm, vif);
9138 ++ if (ret) {
9139 ++ iwl_mvm_send_rm_bcast_sta(mvm, vif);
9140 ++ goto out_unbind;
9141 ++ }
9142 ++ }
9143 +
9144 + /* must be set before quota calculations */
9145 + mvmvif->ap_ibss_active = true;
9146 +@@ -2165,7 +2191,6 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
9147 + iwl_mvm_power_update_mac(mvm);
9148 + mvmvif->ap_ibss_active = false;
9149 + iwl_mvm_send_rm_bcast_sta(mvm, vif);
9150 +-out_rm_mcast:
9151 + iwl_mvm_rm_mcast_sta(mvm, vif);
9152 + out_unbind:
9153 + iwl_mvm_binding_remove_vif(mvm, vif);
9154 +@@ -2703,6 +2728,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
9155 +
9156 + /* enable beacon filtering */
9157 + WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
9158 ++
9159 ++ iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
9160 ++ false);
9161 ++
9162 + ret = 0;
9163 + } else if (old_state == IEEE80211_STA_AUTHORIZED &&
9164 + new_state == IEEE80211_STA_ASSOC) {
9165 +@@ -3468,6 +3497,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
9166 + ret = 0;
9167 + goto out;
9168 + case NL80211_IFTYPE_STATION:
9169 ++ mvmvif->csa_bcn_pending = false;
9170 + break;
9171 + case NL80211_IFTYPE_MONITOR:
9172 + /* always disable PS when a monitor interface is active */
9173 +@@ -3511,7 +3541,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
9174 + }
9175 +
9176 + if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
9177 +- u32 duration = 2 * vif->bss_conf.beacon_int;
9178 ++ u32 duration = 3 * vif->bss_conf.beacon_int;
9179 +
9180 + /* iwl_mvm_protect_session() reads directly from the
9181 + * device (the system time), so make sure it is
9182 +@@ -3524,6 +3554,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
9183 + /* Protect the session to make sure we hear the first
9184 + * beacon on the new channel.
9185 + */
9186 ++ mvmvif->csa_bcn_pending = true;
9187 + iwl_mvm_protect_session(mvm, vif, duration, duration,
9188 + vif->bss_conf.beacon_int / 2,
9189 + true);
9190 +@@ -3967,6 +3998,7 @@ static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
9191 + if (vif->type == NL80211_IFTYPE_STATION) {
9192 + struct iwl_mvm_sta *mvmsta;
9193 +
9194 ++ mvmvif->csa_bcn_pending = false;
9195 + mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
9196 + mvmvif->ap_sta_id);
9197 +
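
The mac80211.c hunk above makes the bcast/mcast station setup order depend on the IWL_UCODE_TLV_API_STA_TYPE capability (older firmware asserts 0x2b00 if the mcast station is added first) and drops the shared out_rm_mcast label in favor of per-branch unwinding. A compilable sketch of that control flow, with every helper reduced to a hypothetical stub:

        #include <stdbool.h>

        /* Hypothetical stand-ins for the station add/remove helpers. */
        static int  add_bcast(void) { return 0; }
        static int  add_mcast(void) { return 0; }
        static void rm_bcast(void)  { }
        static void rm_mcast(void)  { }

        /*
         * Add both stations in a firmware-dependent order; a failure on
         * the second add unwinds only the first, which the old single
         * error label could not express.
         */
        static int start_ap_stations(bool fw_has_sta_type_tlv)
        {
                int ret;

                if (fw_has_sta_type_tlv) {
                        ret = add_mcast();
                        if (ret)
                                return ret;
                        ret = add_bcast();
                        if (ret)
                                rm_mcast();
                } else {
                        /* older FW asserts (0x2b00) if mcast goes first */
                        ret = add_bcast();
                        if (ret)
                                return ret;
                        ret = add_mcast();
                        if (ret)
                                rm_bcast();
                }
                return ret;
        }
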
9198 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
9199 +index 2ec27ceb8af9..736c176f1fd6 100644
9200 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
9201 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
9202 +@@ -434,6 +434,9 @@ struct iwl_mvm_vif {
9203 + bool csa_failed;
9204 + u16 csa_target_freq;
9205 +
9206 ++ /* Indicates that we are waiting for a beacon on a new channel */
9207 ++ bool csa_bcn_pending;
9208 ++
9209 + /* TCP Checksum Offload */
9210 + netdev_features_t features;
9211 +
9212 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
9213 +index 9fb40955d5f4..54f411b83bea 100644
9214 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
9215 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
9216 +@@ -8,6 +8,7 @@
9217 + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9218 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
9219 + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
9220 ++ * Copyright(c) 2018 Intel Corporation
9221 + *
9222 + * This program is free software; you can redistribute it and/or modify
9223 + * it under the terms of version 2 of the GNU General Public License as
9224 +@@ -35,6 +36,7 @@
9225 + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9226 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
9227 + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
9228 ++ * Copyright(c) 2018 Intel Corporation
9229 + * All rights reserved.
9230 + *
9231 + * Redistribution and use in source and binary forms, with or without
9232 +@@ -553,9 +555,15 @@ static void iwl_mvm_fwrt_dump_end(void *ctx)
9233 + iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
9234 + }
9235 +
9236 ++static bool iwl_mvm_fwrt_fw_running(void *ctx)
9237 ++{
9238 ++ return iwl_mvm_firmware_running(ctx);
9239 ++}
9240 ++
9241 + static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
9242 + .dump_start = iwl_mvm_fwrt_dump_start,
9243 + .dump_end = iwl_mvm_fwrt_dump_end,
9244 ++ .fw_running = iwl_mvm_fwrt_fw_running,
9245 + };
9246 +
9247 + static struct iwl_op_mode *
9248 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
9249 +index d22cef7381ba..386fdee23eb0 100644
9250 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
9251 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
9252 +@@ -2690,7 +2690,8 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
9253 + struct ieee80211_sta *sta,
9254 + struct iwl_lq_sta *lq_sta,
9255 + enum nl80211_band band,
9256 +- struct rs_rate *rate)
9257 ++ struct rs_rate *rate,
9258 ++ bool init)
9259 + {
9260 + int i, nentries;
9261 + unsigned long active_rate;
9262 +@@ -2744,14 +2745,25 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
9263 + */
9264 + if (sta->vht_cap.vht_supported &&
9265 + best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
9266 +- switch (sta->bandwidth) {
9267 +- case IEEE80211_STA_RX_BW_160:
9268 +- case IEEE80211_STA_RX_BW_80:
9269 +- case IEEE80211_STA_RX_BW_40:
9270 ++ /*
9271 ++ * In AP mode, when a new station associates, rs is initialized
9272 ++ * immediately upon association completion, before the phy
9273 ++ * context is updated with the association parameters, so the
9274 ++ * sta bandwidth might be wider than the phy context allows.
9275 ++ * To avoid this issue, always initialize rs with 20mhz
9276 ++ * To avoid this issue, always initialize rs with 20 MHz
9277 ++ * is already up-to-date, re-init rs with the correct bw.
9278 ++ */
9279 ++ u32 bw = init ? RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta);
9280 ++
9281 ++ switch (bw) {
9282 ++ case RATE_MCS_CHAN_WIDTH_40:
9283 ++ case RATE_MCS_CHAN_WIDTH_80:
9284 ++ case RATE_MCS_CHAN_WIDTH_160:
9285 + initial_rates = rs_optimal_rates_vht;
9286 + nentries = ARRAY_SIZE(rs_optimal_rates_vht);
9287 + break;
9288 +- case IEEE80211_STA_RX_BW_20:
9289 ++ case RATE_MCS_CHAN_WIDTH_20:
9290 + initial_rates = rs_optimal_rates_vht_20mhz;
9291 + nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
9292 + break;
9293 +@@ -2762,7 +2774,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
9294 +
9295 + active_rate = lq_sta->active_siso_rate;
9296 + rate->type = LQ_VHT_SISO;
9297 +- rate->bw = rs_bw_from_sta_bw(sta);
9298 ++ rate->bw = bw;
9299 + } else if (sta->ht_cap.ht_supported &&
9300 + best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
9301 + initial_rates = rs_optimal_rates_ht;
9302 +@@ -2844,7 +2856,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
9303 + tbl = &(lq_sta->lq_info[active_tbl]);
9304 + rate = &tbl->rate;
9305 +
9306 +- rs_get_initial_rate(mvm, sta, lq_sta, band, rate);
9307 ++ rs_get_initial_rate(mvm, sta, lq_sta, band, rate, init);
9308 + rs_init_optimal_rate(mvm, sta, lq_sta);
9309 +
9310 + WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B,
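
The rs.c change threads an init flag down to rs_get_initial_rate() so the very first rate table uses a 20 MHz width even for a wider station, as the in-code comment explains; the matching mac80211.c hunk re-runs iwl_mvm_rs_rate_init() once the station is authorized and the PHY context is up to date. Reduced to its essence, with hypothetical constants standing in for RATE_MCS_CHAN_WIDTH_*:

        #include <stdbool.h>

        enum chan_width { BW_20, BW_40, BW_80, BW_160 };

        /*
         * On first init the PHY context may still be narrower than the
         * station's advertised bandwidth, so clamp to 20 MHz; the post-
         * authorization re-init passes init=false and uses the real width.
         */
        static enum chan_width initial_tx_width(bool init, enum chan_width sta_bw)
        {
                return init ? BW_20 : sta_bw;
        }
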
9311 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
9312 +index 819e6f66a5b5..e2196dc35dc6 100644
9313 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
9314 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
9315 +@@ -71,6 +71,7 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
9316 + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
9317 + struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
9318 + struct iwl_mvm_key_pn *ptk_pn;
9319 ++ int res;
9320 + u8 tid, keyidx;
9321 + u8 pn[IEEE80211_CCMP_PN_LEN];
9322 + u8 *extiv;
9323 +@@ -127,12 +128,13 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
9324 + pn[4] = extiv[1];
9325 + pn[5] = extiv[0];
9326 +
9327 +- if (memcmp(pn, ptk_pn->q[queue].pn[tid],
9328 +- IEEE80211_CCMP_PN_LEN) <= 0)
9329 ++ res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
9330 ++ if (res < 0)
9331 ++ return -1;
9332 ++ if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
9333 + return -1;
9334 +
9335 +- if (!(stats->flag & RX_FLAG_AMSDU_MORE))
9336 +- memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
9337 ++ memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
9338 + stats->flag |= RX_FLAG_PN_VALIDATED;
9339 +
9340 + return 0;
9341 +@@ -310,28 +312,21 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
9342 + }
9343 +
9344 + /*
9345 +- * returns true if a packet outside BA session is a duplicate and
9346 +- * should be dropped
9347 ++ * returns true if a packet is a duplicate and should be dropped.
9348 ++ * Updates AMSDU PN tracking info
9349 + */
9350 +-static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,
9351 +- struct ieee80211_rx_status *rx_status,
9352 +- struct ieee80211_hdr *hdr,
9353 +- struct iwl_rx_mpdu_desc *desc)
9354 ++static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
9355 ++ struct ieee80211_rx_status *rx_status,
9356 ++ struct ieee80211_hdr *hdr,
9357 ++ struct iwl_rx_mpdu_desc *desc)
9358 + {
9359 + struct iwl_mvm_sta *mvm_sta;
9360 + struct iwl_mvm_rxq_dup_data *dup_data;
9361 +- u8 baid, tid, sub_frame_idx;
9362 ++ u8 tid, sub_frame_idx;
9363 +
9364 + if (WARN_ON(IS_ERR_OR_NULL(sta)))
9365 + return false;
9366 +
9367 +- baid = (le32_to_cpu(desc->reorder_data) &
9368 +- IWL_RX_MPDU_REORDER_BAID_MASK) >>
9369 +- IWL_RX_MPDU_REORDER_BAID_SHIFT;
9370 +-
9371 +- if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
9372 +- return false;
9373 +-
9374 + mvm_sta = iwl_mvm_sta_from_mac80211(sta);
9375 + dup_data = &mvm_sta->dup_data[queue];
9376 +
9377 +@@ -361,6 +356,12 @@ static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,
9378 + dup_data->last_sub_frame[tid] >= sub_frame_idx))
9379 + return true;
9380 +
9381 ++ /* Allow same PN as the first subframe for following subframes */
9382 ++ if (dup_data->last_seq[tid] == hdr->seq_ctrl &&
9383 ++ sub_frame_idx > dup_data->last_sub_frame[tid] &&
9384 ++ desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU)
9385 ++ rx_status->flag |= RX_FLAG_ALLOW_SAME_PN;
9386 ++
9387 + dup_data->last_seq[tid] = hdr->seq_ctrl;
9388 + dup_data->last_sub_frame[tid] = sub_frame_idx;
9389 +
9390 +@@ -929,7 +930,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
9391 + if (ieee80211_is_data(hdr->frame_control))
9392 + iwl_mvm_rx_csum(sta, skb, desc);
9393 +
9394 +- if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) {
9395 ++ if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
9396 + kfree_skb(skb);
9397 + goto out;
9398 + }
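
Taken together, the two rxmq.c changes split the old combined memcmp test: a strictly smaller PN is always a replay, while an equal PN is tolerated only when iwl_mvm_is_dup() has flagged the frame as a further subframe of the same A-MSDU (RX_FLAG_ALLOW_SAME_PN), and the tracked PN is now advanced unconditionally. A self-contained sketch of the check; the length mirrors the kernel constant but the function itself is hypothetical:

        #include <stdbool.h>
        #include <string.h>

        #define CCMP_PN_LEN 6   /* IEEE80211_CCMP_PN_LEN */

        /*
         * Replay check in the spirit of the patched iwl_mvm_check_pn():
         * reject PNs below the last accepted value; accept an equal PN
         * only for a further A-MSDU subframe.
         */
        static int check_pn(const unsigned char pn[CCMP_PN_LEN],
                            unsigned char last_pn[CCMP_PN_LEN],
                            bool allow_same_pn)
        {
                int res = memcmp(pn, last_pn, CCMP_PN_LEN);

                if (res < 0)
                        return -1;
                if (res == 0 && !allow_same_pn)
                        return -1;

                memcpy(last_pn, pn, CCMP_PN_LEN);  /* advance unconditionally */
                return 0;
        }
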
9399 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
9400 +index 0d7929799942..d31d84eebc5d 100644
9401 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
9402 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
9403 +@@ -1679,7 +1679,8 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
9404 + u32 qmask, enum nl80211_iftype iftype,
9405 + enum iwl_sta_type type)
9406 + {
9407 +- if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
9408 ++ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
9409 ++ sta->sta_id == IWL_MVM_INVALID_STA) {
9410 + sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
9411 + if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
9412 + return -ENOSPC;
9413 +@@ -2023,7 +2024,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
9414 + struct iwl_trans_txq_scd_cfg cfg = {
9415 + .fifo = IWL_MVM_TX_FIFO_MCAST,
9416 + .sta_id = msta->sta_id,
9417 +- .tid = IWL_MAX_TID_COUNT,
9418 ++ .tid = 0,
9419 + .aggregate = false,
9420 + .frame_limit = IWL_FRAME_LIMIT,
9421 + };
9422 +@@ -2036,6 +2037,17 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
9423 + vif->type != NL80211_IFTYPE_ADHOC))
9424 + return -ENOTSUPP;
9425 +
9426 ++ /*
9427 ++ * In IBSS, ieee80211_check_queues() sets the cab_queue to be
9428 ++ * invalid, so make sure we use the queue we want.
9429 ++ * Note that this is done here as we want to avoid making DQA
9430 ++ * changes in mac80211 layer.
9431 ++ */
9432 ++ if (vif->type == NL80211_IFTYPE_ADHOC) {
9433 ++ vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
9434 ++ mvmvif->cab_queue = vif->cab_queue;
9435 ++ }
9436 ++
9437 + /*
9438 + * While in previous FWs we had to exclude cab queue from TFD queue
9439 + * mask, now it is needed as any other queue.
9440 +@@ -2063,24 +2075,13 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
9441 + if (iwl_mvm_has_new_tx_api(mvm)) {
9442 + int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
9443 + msta->sta_id,
9444 +- IWL_MAX_TID_COUNT,
9445 ++ 0,
9446 + timeout);
9447 + mvmvif->cab_queue = queue;
9448 + } else if (!fw_has_api(&mvm->fw->ucode_capa,
9449 +- IWL_UCODE_TLV_API_STA_TYPE)) {
9450 +- /*
9451 +- * In IBSS, ieee80211_check_queues() sets the cab_queue to be
9452 +- * invalid, so make sure we use the queue we want.
9453 +- * Note that this is done here as we want to avoid making DQA
9454 +- * changes in mac80211 layer.
9455 +- */
9456 +- if (vif->type == NL80211_IFTYPE_ADHOC) {
9457 +- vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
9458 +- mvmvif->cab_queue = vif->cab_queue;
9459 +- }
9460 ++ IWL_UCODE_TLV_API_STA_TYPE))
9461 + iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
9462 + &cfg, timeout);
9463 +- }
9464 +
9465 + return 0;
9466 + }
9467 +@@ -2099,7 +2100,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
9468 + iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
9469 +
9470 + iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
9471 +- IWL_MAX_TID_COUNT, 0);
9472 ++ 0, 0);
9473 +
9474 + ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
9475 + if (ret)
9476 +@@ -2435,28 +2436,12 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
9477 +
9478 + /*
9479 + * Note the possible cases:
9480 +- * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
9481 +- * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
9482 +- * one and mark it as reserved
9483 +- * 3. In DQA mode, but no traffic yet on this TID: same treatment as in
9484 +- * non-DQA mode, since the TXQ hasn't yet been allocated
9485 +- * Don't support case 3 for new TX path as it is not expected to happen
9486 +- * and aggregation will be offloaded soon anyway
9487 ++ * 1. An enabled TXQ - TXQ needs to become agg'ed
9488 ++ * 2. The TXQ hasn't yet been enabled, so find a free one and mark
9489 ++ * it as reserved
9490 + */
9491 + txq_id = mvmsta->tid_data[tid].txq_id;
9492 +- if (iwl_mvm_has_new_tx_api(mvm)) {
9493 +- if (txq_id == IWL_MVM_INVALID_QUEUE) {
9494 +- ret = -ENXIO;
9495 +- goto release_locks;
9496 +- }
9497 +- } else if (unlikely(mvm->queue_info[txq_id].status ==
9498 +- IWL_MVM_QUEUE_SHARED)) {
9499 +- ret = -ENXIO;
9500 +- IWL_DEBUG_TX_QUEUES(mvm,
9501 +- "Can't start tid %d agg on shared queue!\n",
9502 +- tid);
9503 +- goto release_locks;
9504 +- } else if (mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
9505 ++ if (txq_id == IWL_MVM_INVALID_QUEUE) {
9506 + txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
9507 + IWL_MVM_DQA_MIN_DATA_QUEUE,
9508 + IWL_MVM_DQA_MAX_DATA_QUEUE);
9509 +@@ -2465,16 +2450,16 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
9510 + IWL_ERR(mvm, "Failed to allocate agg queue\n");
9511 + goto release_locks;
9512 + }
9513 +- /*
9514 +- * TXQ shouldn't be in inactive mode for non-DQA, so getting
9515 +- * an inactive queue from iwl_mvm_find_free_queue() is
9516 +- * certainly a bug
9517 +- */
9518 +- WARN_ON(mvm->queue_info[txq_id].status ==
9519 +- IWL_MVM_QUEUE_INACTIVE);
9520 +
9521 + /* TXQ hasn't yet been enabled, so mark it only as reserved */
9522 + mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
9523 ++ } else if (unlikely(mvm->queue_info[txq_id].status ==
9524 ++ IWL_MVM_QUEUE_SHARED)) {
9525 ++ ret = -ENXIO;
9526 ++ IWL_DEBUG_TX_QUEUES(mvm,
9527 ++ "Can't start tid %d agg on shared queue!\n",
9528 ++ tid);
9529 ++ goto release_locks;
9530 + }
9531 +
9532 + spin_unlock(&mvm->queue_info_lock);
9533 +@@ -2645,8 +2630,10 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
9534 +
9535 + static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
9536 + struct iwl_mvm_sta *mvmsta,
9537 +- u16 txq_id)
9538 ++ struct iwl_mvm_tid_data *tid_data)
9539 + {
9540 ++ u16 txq_id = tid_data->txq_id;
9541 ++
9542 + if (iwl_mvm_has_new_tx_api(mvm))
9543 + return;
9544 +
9545 +@@ -2658,8 +2645,10 @@ static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
9546 + * allocated through iwl_mvm_enable_txq, so we can just mark it back as
9547 + * free.
9548 + */
9549 +- if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
9550 ++ if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
9551 + mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
9552 ++ tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
9553 ++ }
9554 +
9555 + spin_unlock_bh(&mvm->queue_info_lock);
9556 + }
9557 +@@ -2690,7 +2679,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
9558 +
9559 + mvmsta->agg_tids &= ~BIT(tid);
9560 +
9561 +- iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);
9562 ++ iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
9563 +
9564 + switch (tid_data->state) {
9565 + case IWL_AGG_ON:
9566 +@@ -2757,7 +2746,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
9567 + mvmsta->agg_tids &= ~BIT(tid);
9568 + spin_unlock_bh(&mvmsta->lock);
9569 +
9570 +- iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);
9571 ++ iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
9572 +
9573 + if (old_state >= IWL_AGG_ON) {
9574 + iwl_mvm_drain_sta(mvm, mvmsta, true);
9575 +@@ -3119,8 +3108,9 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
9576 + int ret, size;
9577 + u32 status;
9578 +
9579 ++ /* This is a valid situation for GTK removal */
9580 + if (sta_id == IWL_MVM_INVALID_STA)
9581 +- return -EINVAL;
9582 ++ return 0;
9583 +
9584 + key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
9585 + STA_KEY_FLG_KEYID_MSK);
9586 +@@ -3181,17 +3171,9 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
9587 + }
9588 + sta_id = mvm_sta->sta_id;
9589 +
9590 +- if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
9591 +- keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
9592 +- keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
9593 +- ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id,
9594 +- false);
9595 +- goto end;
9596 +- }
9597 +-
9598 + /*
9599 + * It is possible that the 'sta' parameter is NULL, and thus
9600 +- * there is a need to retrieve the sta from the local station
9601 ++ * there is a need to retrieve the sta from the local station
9602 + * table.
9603 + */
9604 + if (!sta) {
9605 +@@ -3206,6 +3188,17 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
9606 +
9607 + if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
9608 + return -EINVAL;
9609 ++ } else {
9610 ++ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
9611 ++
9612 ++ sta_id = mvmvif->mcast_sta.sta_id;
9613 ++ }
9614 ++
9615 ++ if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
9616 ++ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
9617 ++ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
9618 ++ ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
9619 ++ goto end;
9620 + }
9621 +
9622 + /* If the key_offset is not pre-assigned, we need to find a
9623 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
9624 +index e25cda9fbf6c..342ca1778efd 100644
9625 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
9626 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
9627 +@@ -8,6 +8,7 @@
9628 + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9629 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
9630 + * Copyright(c) 2017 Intel Deutschland GmbH
9631 ++ * Copyright(c) 2018 Intel Corporation
9632 + *
9633 + * This program is free software; you can redistribute it and/or modify
9634 + * it under the terms of version 2 of the GNU General Public License as
9635 +@@ -18,11 +19,6 @@
9636 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9637 + * General Public License for more details.
9638 + *
9639 +- * You should have received a copy of the GNU General Public License
9640 +- * along with this program; if not, write to the Free Software
9641 +- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
9642 +- * USA
9643 +- *
9644 + * The full GNU General Public License is included in this distribution
9645 + * in the file called COPYING.
9646 + *
9647 +@@ -35,6 +31,7 @@
9648 + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9649 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
9650 + * Copyright(c) 2017 Intel Deutschland GmbH
9651 ++ * Copyright(c) 2018 Intel Corporation
9652 + * All rights reserved.
9653 + *
9654 + * Redistribution and use in source and binary forms, with or without
9655 +@@ -203,9 +200,13 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
9656 + struct ieee80211_vif *vif,
9657 + const char *errmsg)
9658 + {
9659 ++ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
9660 ++
9661 + if (vif->type != NL80211_IFTYPE_STATION)
9662 + return false;
9663 +- if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
9664 ++
9665 ++ if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc &&
9666 ++ vif->bss_conf.dtim_period)
9667 + return false;
9668 + if (errmsg)
9669 + IWL_ERR(mvm, "%s\n", errmsg);
9670 +@@ -349,7 +350,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
9671 + * and know the dtim period.
9672 + */
9673 + iwl_mvm_te_check_disconnect(mvm, te_data->vif,
9674 +- "No association and the time event is over already...");
9675 ++ "No beacon heard and the time event is over already...");
9676 + break;
9677 + default:
9678 + break;
9679 +@@ -621,7 +622,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
9680 + time_cmd.repeat = 1;
9681 + time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
9682 + TE_V2_NOTIF_HOST_EVENT_END |
9683 +- T2_V2_START_IMMEDIATELY);
9684 ++ TE_V2_START_IMMEDIATELY);
9685 +
9686 + if (!wait_for_notif) {
9687 + iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
9688 +@@ -814,7 +815,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
9689 + time_cmd.repeat = 1;
9690 + time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
9691 + TE_V2_NOTIF_HOST_EVENT_END |
9692 +- T2_V2_START_IMMEDIATELY);
9693 ++ TE_V2_START_IMMEDIATELY);
9694 +
9695 + return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
9696 + }
9697 +@@ -924,6 +925,8 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
9698 + time_cmd.interval = cpu_to_le32(1);
9699 + time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
9700 + TE_V2_ABSENCE);
9701 ++ if (!apply_time)
9702 ++ time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY);
9703 +
9704 + return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
9705 + }
9706 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
9707 +index 887a504ce64a..6c014c273922 100644
9708 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
9709 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
9710 +@@ -419,11 +419,11 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
9711 + {
9712 + struct ieee80211_key_conf *keyconf = info->control.hw_key;
9713 + u8 *crypto_hdr = skb_frag->data + hdrlen;
9714 ++ enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;
9715 + u64 pn;
9716 +
9717 + switch (keyconf->cipher) {
9718 + case WLAN_CIPHER_SUITE_CCMP:
9719 +- case WLAN_CIPHER_SUITE_CCMP_256:
9720 + iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
9721 + iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
9722 + break;
9723 +@@ -447,13 +447,16 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
9724 + break;
9725 + case WLAN_CIPHER_SUITE_GCMP:
9726 + case WLAN_CIPHER_SUITE_GCMP_256:
9727 ++ type = TX_CMD_SEC_GCMP;
9728 ++ /* Fall through */
9729 ++ case WLAN_CIPHER_SUITE_CCMP_256:
9730 + /* TODO: Taking the key from the table might introduce a race
9731 + * when PTK rekeying is done, having an old packets with a PN
9732 + * based on the old key but the message encrypted with a new
9733 + * one.
9734 + * Need to handle this.
9735 + */
9736 +- tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE;
9737 ++ tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
9738 + tx_cmd->key[0] = keyconf->hw_key_idx;
9739 + iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
9740 + break;
9741 +@@ -645,7 +648,11 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
9742 + if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
9743 + info.control.vif->type == NL80211_IFTYPE_AP ||
9744 + info.control.vif->type == NL80211_IFTYPE_ADHOC) {
9745 +- sta_id = mvmvif->bcast_sta.sta_id;
9746 ++ if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE)
9747 ++ sta_id = mvmvif->bcast_sta.sta_id;
9748 ++ else
9749 ++ sta_id = mvmvif->mcast_sta.sta_id;
9750 ++
9751 + queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
9752 + hdr->frame_control);
9753 + if (queue < 0)
9754 +@@ -1872,14 +1879,12 @@ int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
9755 + struct iwl_mvm_int_sta *int_sta = sta;
9756 + struct iwl_mvm_sta *mvm_sta = sta;
9757 +
9758 +- if (iwl_mvm_has_new_tx_api(mvm)) {
9759 +- if (internal)
9760 +- return iwl_mvm_flush_sta_tids(mvm, int_sta->sta_id,
9761 +- BIT(IWL_MGMT_TID), flags);
9762 ++ BUILD_BUG_ON(offsetof(struct iwl_mvm_int_sta, sta_id) !=
9763 ++ offsetof(struct iwl_mvm_sta, sta_id));
9764 +
9765 ++ if (iwl_mvm_has_new_tx_api(mvm))
9766 + return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id,
9767 +- 0xFF, flags);
9768 +- }
9769 ++ 0xff | BIT(IWL_MGMT_TID), flags);
9770 +
9771 + if (internal)
9772 + return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk,
9773 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
9774 +index 43ab172d31cb..d2cada0ab426 100644
9775 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
9776 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
9777 +@@ -810,12 +810,19 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
9778 + .scd_queue = queue,
9779 + .action = SCD_CFG_DISABLE_QUEUE,
9780 + };
9781 +- bool remove_mac_queue = true;
9782 ++ bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
9783 + int ret;
9784 +
9785 ++ if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
9786 ++ return -EINVAL;
9787 ++
9788 + if (iwl_mvm_has_new_tx_api(mvm)) {
9789 + spin_lock_bh(&mvm->queue_info_lock);
9790 +- mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac80211_queue);
9791 ++
9792 ++ if (remove_mac_queue)
9793 ++ mvm->hw_queue_to_mac80211[queue] &=
9794 ++ ~BIT(mac80211_queue);
9795 ++
9796 + spin_unlock_bh(&mvm->queue_info_lock);
9797 +
9798 + iwl_trans_txq_free(mvm->trans, queue);
9799 +diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
9800 +index 8d3a4839b6ef..370161ca2a1c 100644
9801 +--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
9802 ++++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
9803 +@@ -636,11 +636,14 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr,
9804 + u32 *read_buf, u16 size)
9805 + {
9806 + u32 addr_on_bus, *data;
9807 +- u32 align[2] = {};
9808 + u16 ms_addr;
9809 + int status;
9810 +
9811 +- data = PTR_ALIGN(&align[0], 8);
9812 ++ data = kzalloc(RSI_MASTER_REG_BUF_SIZE, GFP_KERNEL);
9813 ++ if (!data)
9814 ++ return -ENOMEM;
9815 ++
9816 ++ data = PTR_ALIGN(data, 8);
9817 +
9818 + ms_addr = (addr >> 16);
9819 + status = rsi_sdio_master_access_msword(adapter, ms_addr);
9820 +@@ -648,7 +651,7 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr,
9821 + rsi_dbg(ERR_ZONE,
9822 + "%s: Unable to set ms word to common reg\n",
9823 + __func__);
9824 +- return status;
9825 ++ goto err;
9826 + }
9827 + addr &= 0xFFFF;
9828 +
9829 +@@ -666,7 +669,7 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr,
9830 + (u8 *)data, 4);
9831 + if (status < 0) {
9832 + rsi_dbg(ERR_ZONE, "%s: AHB register read failed\n", __func__);
9833 +- return status;
9834 ++ goto err;
9835 + }
9836 + if (size == 2) {
9837 + if ((addr & 0x3) == 0)
9838 +@@ -688,17 +691,23 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr,
9839 + *read_buf = *data;
9840 + }
9841 +
9842 +- return 0;
9843 ++err:
9844 ++ kfree(data);
9845 ++ return status;
9846 + }
9847 +
9848 + static int rsi_sdio_master_reg_write(struct rsi_hw *adapter,
9849 + unsigned long addr,
9850 + unsigned long data, u16 size)
9851 + {
9852 +- unsigned long data1[2], *data_aligned;
9853 ++ unsigned long *data_aligned;
9854 + int status;
9855 +
9856 +- data_aligned = PTR_ALIGN(&data1[0], 8);
9857 ++ data_aligned = kzalloc(RSI_MASTER_REG_BUF_SIZE, GFP_KERNEL);
9858 ++ if (!data_aligned)
9859 ++ return -ENOMEM;
9860 ++
9861 ++ data_aligned = PTR_ALIGN(data_aligned, 8);
9862 +
9863 + if (size == 2) {
9864 + *data_aligned = ((data << 16) | (data & 0xFFFF));
9865 +@@ -717,6 +726,7 @@ static int rsi_sdio_master_reg_write(struct rsi_hw *adapter,
9866 + rsi_dbg(ERR_ZONE,
9867 + "%s: Unable to set ms word to common reg\n",
9868 + __func__);
9869 ++ kfree(data_aligned);
9870 + return -EIO;
9871 + }
9872 + addr = addr & 0xFFFF;
9873 +@@ -726,12 +736,12 @@ static int rsi_sdio_master_reg_write(struct rsi_hw *adapter,
9874 + (adapter,
9875 + (addr | RSI_SD_REQUEST_MASTER),
9876 + (u8 *)data_aligned, size);
9877 +- if (status < 0) {
9878 ++ if (status < 0)
9879 + rsi_dbg(ERR_ZONE,
9880 + "%s: Unable to do AHB reg write\n", __func__);
9881 +- return status;
9882 +- }
9883 +- return 0;
9884 ++
9885 ++ kfree(data_aligned);
9886 ++ return status;
9887 + }
9888 +
9889 + /**
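
Both rsi_91x_sdio.c hunks replace small on-stack arrays with kzalloc'd buffers of RSI_MASTER_REG_BUF_SIZE bytes before aligning to 8 bytes, since stack memory is not a safe target for this kind of host-controller transfer, and they route all exits through a path that frees the buffer. A userspace sketch of the pattern; note the kernel version can free the aligned pointer directly because kmalloc memory is already at least 8-byte aligned, whereas this sketch keeps the raw pointer for free() as a precaution:

        #include <stdint.h>
        #include <stdlib.h>

        /* Userspace PTR_ALIGN: round p up to 'a' (a power of two). */
        static void *ptr_align(void *p, uintptr_t a)
        {
                return (void *)(((uintptr_t)p + a - 1) & ~(a - 1));
        }

        /* Heap-backed, 8-byte-aligned bounce buffer for a register read. */
        static int reg_read_aligned(uint32_t *out)
        {
                void *raw = calloc(1, 12);   /* RSI_MASTER_REG_BUF_SIZE */
                uint32_t *data;

                if (!raw)
                        return -1;           /* -ENOMEM analogue */

                data = ptr_align(raw, 8);
                /* ... bus transfer fills 'data' here ... */
                *out = *data;

                free(raw);                   /* free the original pointer */
                return 0;
        }
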
9890 +diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h
9891 +index 95e4bed57baf..903392039200 100644
9892 +--- a/drivers/net/wireless/rsi/rsi_sdio.h
9893 ++++ b/drivers/net/wireless/rsi/rsi_sdio.h
9894 +@@ -46,6 +46,8 @@ enum sdio_interrupt_type {
9895 + #define PKT_BUFF_AVAILABLE 1
9896 + #define FW_ASSERT_IND 2
9897 +
9898 ++#define RSI_MASTER_REG_BUF_SIZE 12
9899 ++
9900 + #define RSI_DEVICE_BUFFER_STATUS_REGISTER 0xf3
9901 + #define RSI_FN1_INT_REGISTER 0xf9
9902 + #define RSI_SD_REQUEST_MASTER 0x10000
9903 +diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
9904 +index 8cd42544c90e..740aae51e1c6 100644
9905 +--- a/drivers/nvme/host/fabrics.c
9906 ++++ b/drivers/nvme/host/fabrics.c
9907 +@@ -606,8 +606,10 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
9908 + opts->discovery_nqn =
9909 + !(strcmp(opts->subsysnqn,
9910 + NVME_DISC_SUBSYS_NAME));
9911 +- if (opts->discovery_nqn)
9912 ++ if (opts->discovery_nqn) {
9913 ++ opts->kato = 0;
9914 + opts->nr_io_queues = 0;
9915 ++ }
9916 + break;
9917 + case NVMF_OPT_TRADDR:
9918 + p = match_strdup(args);
9919 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
9920 +index eab17405e815..3d4724e38aa9 100644
9921 +--- a/drivers/nvme/host/pci.c
9922 ++++ b/drivers/nvme/host/pci.c
9923 +@@ -1013,12 +1013,6 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
9924 + if (!(csts & NVME_CSTS_CFS) && !nssro)
9925 + return false;
9926 +
9927 +- /* If PCI error recovery process is happening, we cannot reset or
9928 +- * the recovery mechanism will surely fail.
9929 +- */
9930 +- if (pci_channel_offline(to_pci_dev(dev->dev)))
9931 +- return false;
9932 +-
9933 + return true;
9934 + }
9935 +
9936 +@@ -1049,6 +1043,13 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
9937 + struct nvme_command cmd;
9938 + u32 csts = readl(dev->bar + NVME_REG_CSTS);
9939 +
9940 ++ /* If PCI error recovery process is happening, we cannot reset or
9941 ++ * the recovery mechanism will surely fail.
9942 ++ */
9943 ++ mb();
9944 ++ if (pci_channel_offline(to_pci_dev(dev->dev)))
9945 ++ return BLK_EH_RESET_TIMER;
9946 ++
9947 + /*
9948 + * Reset immediately if the controller is failed
9949 + */
9950 +@@ -1322,7 +1323,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
9951 + nvmeq->cq_vector = qid - 1;
9952 + result = adapter_alloc_cq(dev, qid, nvmeq);
9953 + if (result < 0)
9954 +- return result;
9955 ++ goto release_vector;
9956 +
9957 + result = adapter_alloc_sq(dev, qid, nvmeq);
9958 + if (result < 0)
9959 +@@ -1336,9 +1337,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
9960 + return result;
9961 +
9962 + release_sq:
9963 ++ dev->online_queues--;
9964 + adapter_delete_sq(dev, qid);
9965 + release_cq:
9966 + adapter_delete_cq(dev, qid);
9967 ++ release_vector:
9968 ++ nvmeq->cq_vector = -1;
9969 + return result;
9970 + }
9971 +
9972 +@@ -1766,7 +1770,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
9973 + int result, nr_io_queues;
9974 + unsigned long size;
9975 +
9976 +- nr_io_queues = num_present_cpus();
9977 ++ nr_io_queues = num_possible_cpus();
9978 + result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
9979 + if (result < 0)
9980 + return result;
9981 +@@ -2310,10 +2314,13 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
9982 + } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
9983 + /*
9984 + * Samsung SSD 960 EVO drops off the PCIe bus after system
9985 +- * suspend on a Ryzen board, ASUS PRIME B350M-A.
9986 ++ * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as
9987 ++ * within a few minutes after bootup on a Coffee Lake board -
9988 ++ * ASUS PRIME Z370-A
9989 + */
9990 + if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
9991 +- dmi_match(DMI_BOARD_NAME, "PRIME B350M-A"))
9992 ++ (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
9993 ++ dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
9994 + return NVME_QUIRK_NO_APST;
9995 + }
9996 +
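
The nvme/pci.c change moves the pci_channel_offline() test out of nvme_should_reset() and into the top of nvme_timeout(), returning BLK_EH_RESET_TIMER so the timeout is simply re-armed while PCI error recovery owns the device instead of triggering a controller reset that would defeat the recovery. A sketch of that entry check, with the channel probe stubbed out as a hypothetical helper:

        #include <stdbool.h>

        enum eh_ret { EH_HANDLED, EH_RESET_TIMER };

        /* Hypothetical stand-in for pci_channel_offline(). */
        static bool channel_offline(void) { return false; }

        /*
         * Shape of the patched nvme_timeout() entry: defer to PCI error
         * recovery while it is in progress, then handle normally.
         */
        static enum eh_ret nvme_timeout_sketch(void)
        {
                if (channel_offline())
                        return EH_RESET_TIMER;  /* let recovery finish first */

                /* ... normal handling: abort command, reset controller ... */
                return EH_HANDLED;
        }
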
9997 +diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
9998 +index 645ba7eee35d..240b0d628222 100644
9999 +--- a/drivers/nvme/target/core.c
10000 ++++ b/drivers/nvme/target/core.c
10001 +@@ -505,9 +505,12 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
10002 + goto fail;
10003 + }
10004 +
10005 +- /* either variant of SGLs is fine, as we don't support metadata */
10006 +- if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF &&
10007 +- (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) {
10008 ++ /*
10009 ++ * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
10010 ++ * contains an address of a single contiguous physical buffer that is
10011 ++ * byte aligned.
10012 ++ */
10013 ++ if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
10014 + status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
10015 + goto fail;
10016 + }
10017 +diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
10018 +index 41b740aed3a3..69bd98421eb1 100644
10019 +--- a/drivers/parisc/lba_pci.c
10020 ++++ b/drivers/parisc/lba_pci.c
10021 +@@ -1403,9 +1403,27 @@ lba_hw_init(struct lba_device *d)
10022 + WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG);
10023 + }
10024 +
10025 +- /* Set HF mode as the default (vs. -1 mode). */
10026 ++
10027 ++ /*
10028 ++ * Hard Fail vs. Soft Fail on PCI "Master Abort".
10029 ++ *
10030 ++ * "Master Abort" means the MMIO transaction timed out - usually due to
10031 ++ * the device not responding to an MMIO read. We would like HF to be
10032 ++ * enabled to find driver problems, though it means the system will
10033 ++ * crash with an HPMC.
10034 ++ *
10035 ++ * In SoftFail mode "~0L" is returned as a result of a timeout on the
10036 ++ * PCI bus. This is like how PCI busses on x86 and most other
10037 ++ * architectures behave. In order to increase compatibility with
10038 ++ * existing (x86) PCI hardware and existing Linux drivers we enable
10039 ++ * Soft Fail mode on PA-RISC now too.
10040 ++ */
10041 + stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);
10042 ++#if defined(ENABLE_HARDFAIL)
10043 + WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
10044 ++#else
10045 ++ WRITE_REG32(stat & ~HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
10046 ++#endif
10047 +
10048 + /*
10049 + ** Writing a zero to STAT_CTL.rf (bit 0) will clear reset signal
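
The lba_pci.c hunk flips the default from Hard Fail to Soft Fail and documents why; only a build with ENABLE_HARDFAIL defined keeps the old crash-on-master-abort behaviour. A sketch of the compile-time selection, where the bit value is hypothetical:

        #include <stdint.h>

        #define HF_ENABLE 0x01u   /* hypothetical bit in LBA_STAT_CTL */

        /*
         * Hard Fail turns an MMIO timeout ("master abort") into a fatal
         * HPMC, useful for catching driver bugs; Soft Fail returns ~0
         * like x86 and keeps the machine running, so it is the default.
         */
        static uint32_t apply_fail_mode(uint32_t stat)
        {
        #ifdef ENABLE_HARDFAIL
                return stat | HF_ENABLE;
        #else
                return stat & ~HF_ENABLE;
        #endif
        }
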
10050 +diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
10051 +index bb0927de79dd..ea69b4dbab66 100644
10052 +--- a/drivers/pci/pci-driver.c
10053 ++++ b/drivers/pci/pci-driver.c
10054 +@@ -1164,11 +1164,14 @@ static int pci_pm_runtime_suspend(struct device *dev)
10055 + int error;
10056 +
10057 + /*
10058 +- * If pci_dev->driver is not set (unbound), the device should
10059 +- * always remain in D0 regardless of the runtime PM status
10060 ++ * If pci_dev->driver is not set (unbound), we leave the device in D0,
10061 ++ * but it may go to D3cold when the bridge above it runtime suspends.
10062 ++ * Save its config space in case that happens.
10063 + */
10064 +- if (!pci_dev->driver)
10065 ++ if (!pci_dev->driver) {
10066 ++ pci_save_state(pci_dev);
10067 + return 0;
10068 ++ }
10069 +
10070 + if (!pm || !pm->runtime_suspend)
10071 + return -ENOSYS;
10072 +@@ -1216,16 +1219,18 @@ static int pci_pm_runtime_resume(struct device *dev)
10073 + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
10074 +
10075 + /*
10076 +- * If pci_dev->driver is not set (unbound), the device should
10077 +- * always remain in D0 regardless of the runtime PM status
10078 ++ * Restoring config space is necessary even if the device is not bound
10079 ++ * to a driver because although we left it in D0, it may have gone to
10080 ++ * D3cold when the bridge above it runtime suspended.
10081 + */
10082 ++ pci_restore_standard_config(pci_dev);
10083 ++
10084 + if (!pci_dev->driver)
10085 + return 0;
10086 +
10087 + if (!pm || !pm->runtime_resume)
10088 + return -ENOSYS;
10089 +
10090 +- pci_restore_standard_config(pci_dev);
10091 + pci_fixup_device(pci_fixup_resume_early, pci_dev);
10092 + pci_enable_wake(pci_dev, PCI_D0, false);
10093 + pci_fixup_device(pci_fixup_resume, pci_dev);
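
The two pci-driver.c hunks pair up: runtime suspend now saves config space even for an unbound device (the bridge above it may carry it into D3cold), and runtime resume restores it unconditionally before any driver callback. A sketch of that symmetric flow, with config space reduced to a single word and all names hypothetical:

        #include <stdbool.h>

        struct pcidev {
                bool bound;           /* pci_dev->driver != NULL */
                unsigned int cfg;     /* live config space (stand-in) */
                unsigned int saved;   /* saved copy */
        };

        static int rt_suspend(struct pcidev *d)
        {
                if (!d->bound) {
                        d->saved = d->cfg;   /* pci_save_state() analogue */
                        return 0;
                }
                /* ... driver ->runtime_suspend ... */
                return 0;
        }

        static int rt_resume(struct pcidev *d)
        {
                d->cfg = d->saved;   /* pci_restore_standard_config() analogue */
                if (!d->bound)
                        return 0;
                /* ... fixups, wake disable, driver ->runtime_resume ... */
                return 0;
        }
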
10094 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
10095 +index 116127a0accb..929d68f744af 100644
10096 +--- a/drivers/pci/quirks.c
10097 ++++ b/drivers/pci/quirks.c
10098 +@@ -3896,6 +3896,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
10099 + /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
10100 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
10101 + quirk_dma_func1_alias);
10102 ++/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */
10103 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
10104 ++ quirk_dma_func1_alias);
10105 + /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */
10106 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
10107 + quirk_dma_func1_alias);
10108 +diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
10109 +index c3b615c94b4b..8c8caec3a72c 100644
10110 +--- a/drivers/pcmcia/cs.c
10111 ++++ b/drivers/pcmcia/cs.c
10112 +@@ -452,17 +452,20 @@ static int socket_insert(struct pcmcia_socket *skt)
10113 +
10114 + static int socket_suspend(struct pcmcia_socket *skt)
10115 + {
10116 +- if (skt->state & SOCKET_SUSPEND)
10117 ++ if ((skt->state & SOCKET_SUSPEND) && !(skt->state & SOCKET_IN_RESUME))
10118 + return -EBUSY;
10119 +
10120 + mutex_lock(&skt->ops_mutex);
10121 +- skt->suspended_state = skt->state;
10122 ++ /* store state on first suspend, but not after spurious wakeups */
10123 ++ if (!(skt->state & SOCKET_IN_RESUME))
10124 ++ skt->suspended_state = skt->state;
10125 +
10126 + skt->socket = dead_socket;
10127 + skt->ops->set_socket(skt, &skt->socket);
10128 + if (skt->ops->suspend)
10129 + skt->ops->suspend(skt);
10130 + skt->state |= SOCKET_SUSPEND;
10131 ++ skt->state &= ~SOCKET_IN_RESUME;
10132 + mutex_unlock(&skt->ops_mutex);
10133 + return 0;
10134 + }
10135 +@@ -475,6 +478,7 @@ static int socket_early_resume(struct pcmcia_socket *skt)
10136 + skt->ops->set_socket(skt, &skt->socket);
10137 + if (skt->state & SOCKET_PRESENT)
10138 + skt->resume_status = socket_setup(skt, resume_delay);
10139 ++ skt->state |= SOCKET_IN_RESUME;
10140 + mutex_unlock(&skt->ops_mutex);
10141 + return 0;
10142 + }
10143 +@@ -484,7 +488,7 @@ static int socket_late_resume(struct pcmcia_socket *skt)
10144 + int ret = 0;
10145 +
10146 + mutex_lock(&skt->ops_mutex);
10147 +- skt->state &= ~SOCKET_SUSPEND;
10148 ++ skt->state &= ~(SOCKET_SUSPEND | SOCKET_IN_RESUME);
10149 + mutex_unlock(&skt->ops_mutex);
10150 +
10151 + if (!(skt->state & SOCKET_PRESENT)) {
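
The cs.c change introduces an intermediate resume state so that a suspend arriving between early and late resume (a spurious wakeup) neither fails with -EBUSY nor overwrites the state snapshot taken at the first suspend. A sketch of the suspend side, using the flag values from the cs_internal.h hunk below:

        /* Socket state bits, mirroring the cs_internal.h hunk. */
        #define S_SUSPEND       0x0080u
        #define S_IN_RESUME     0x0040u

        /*
         * Only the first suspend snapshots the pre-suspend state; a
         * suspend that interrupts an in-progress resume is allowed
         * through but must not clobber the snapshot.
         */
        static int socket_suspend_sketch(unsigned int *state, unsigned int *saved)
        {
                if ((*state & S_SUSPEND) && !(*state & S_IN_RESUME))
                        return -1;              /* -EBUSY analogue */

                if (!(*state & S_IN_RESUME))
                        *saved = *state;        /* first suspend only */

                *state |= S_SUSPEND;
                *state &= ~S_IN_RESUME;
                return 0;
        }
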
10152 +diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
10153 +index e86cd6b31773..384629ce48f5 100644
10154 +--- a/drivers/pcmcia/cs_internal.h
10155 ++++ b/drivers/pcmcia/cs_internal.h
10156 +@@ -70,6 +70,7 @@ struct pccard_resource_ops {
10157 + /* Flags in socket state */
10158 + #define SOCKET_PRESENT 0x0008
10159 + #define SOCKET_INUSE 0x0010
10160 ++#define SOCKET_IN_RESUME 0x0040
10161 + #define SOCKET_SUSPEND 0x0080
10162 + #define SOCKET_WIN_REQ(i) (0x0100<<(i))
10163 + #define SOCKET_CARDBUS 0x8000
10164 +diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
10165 +index e17f0351ccc2..2526971f9929 100644
10166 +--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
10167 ++++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
10168 +@@ -751,8 +751,6 @@ static int qcom_qmp_phy_poweroff(struct phy *phy)
10169 + struct qmp_phy *qphy = phy_get_drvdata(phy);
10170 + struct qcom_qmp *qmp = qphy->qmp;
10171 +
10172 +- clk_disable_unprepare(qphy->pipe_clk);
10173 +-
10174 + regulator_bulk_disable(qmp->cfg->num_vregs, qmp->vregs);
10175 +
10176 + return 0;
10177 +@@ -936,6 +934,8 @@ static int qcom_qmp_phy_exit(struct phy *phy)
10178 + const struct qmp_phy_cfg *cfg = qmp->cfg;
10179 + int i = cfg->num_clks;
10180 +
10181 ++ clk_disable_unprepare(qphy->pipe_clk);
10182 ++
10183 + /* PHY reset */
10184 + qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
10185 +
10186 +diff --git a/drivers/phy/rockchip/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c
10187 +index f1b24f18e9b2..b0d10934413f 100644
10188 +--- a/drivers/phy/rockchip/phy-rockchip-emmc.c
10189 ++++ b/drivers/phy/rockchip/phy-rockchip-emmc.c
10190 +@@ -76,6 +76,10 @@
10191 + #define PHYCTRL_OTAPDLYSEL_MASK 0xf
10192 + #define PHYCTRL_OTAPDLYSEL_SHIFT 0x7
10193 +
10194 ++#define PHYCTRL_IS_CALDONE(x) \
10195 ++ ((((x) >> PHYCTRL_CALDONE_SHIFT) & \
10196 ++ PHYCTRL_CALDONE_MASK) == PHYCTRL_CALDONE_DONE)
10197 ++
10198 + struct rockchip_emmc_phy {
10199 + unsigned int reg_offset;
10200 + struct regmap *reg_base;
10201 +@@ -90,6 +94,7 @@ static int rockchip_emmc_phy_power(struct phy *phy, bool on_off)
10202 + unsigned int freqsel = PHYCTRL_FREQSEL_200M;
10203 + unsigned long rate;
10204 + unsigned long timeout;
10205 ++ int ret;
10206 +
10207 + /*
10208 + * Keep phyctrl_pdb and phyctrl_endll low to allow
10209 +@@ -160,17 +165,19 @@ static int rockchip_emmc_phy_power(struct phy *phy, bool on_off)
10210 + PHYCTRL_PDB_SHIFT));
10211 +
10212 + /*
10213 +- * According to the user manual, it asks driver to
10214 +- * wait 5us for calpad busy trimming
10215 ++ * According to the user manual, it asks the driver to wait 5us for
10216 ++ * calpad busy trimming. However, it is documented that this value is
10217 ++ * PVT (a.k.a. process, voltage and temperature) relevant, so some
10218 ++ * failure cases have been found, which indicates we should be more
10219 ++ * tolerant of calpad busy trimming.
10220 + */
10221 +- udelay(5);
10222 +- regmap_read(rk_phy->reg_base,
10223 +- rk_phy->reg_offset + GRF_EMMCPHY_STATUS,
10224 +- &caldone);
10225 +- caldone = (caldone >> PHYCTRL_CALDONE_SHIFT) & PHYCTRL_CALDONE_MASK;
10226 +- if (caldone != PHYCTRL_CALDONE_DONE) {
10227 +- pr_err("rockchip_emmc_phy_power: caldone timeout.\n");
10228 +- return -ETIMEDOUT;
10229 ++ ret = regmap_read_poll_timeout(rk_phy->reg_base,
10230 ++ rk_phy->reg_offset + GRF_EMMCPHY_STATUS,
10231 ++ caldone, PHYCTRL_IS_CALDONE(caldone),
10232 ++ 0, 50);
10233 ++ if (ret) {
10234 ++ pr_err("%s: caldone failed, ret=%d\n", __func__, ret);
10235 ++ return ret;
10236 + }
10237 +
10238 + /* Set the frequency of the DLL operation */
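
The phy-rockchip-emmc.c hunk swaps a single fixed 5us delay for regmap_read_poll_timeout() with a 50us budget, since the documented calibration time varies with process, voltage and temperature. A sketch of the bounded-poll shape, with the register read stubbed as a hypothetical helper and loop iterations standing in for the sleep/timeout bookkeeping regmap does:

        #include <stdbool.h>

        /* Hypothetical probe: true once PHYCTRL_IS_CALDONE() would hold. */
        static bool caldone(void) { return true; }

        /* Poll with an upper bound instead of a one-shot fixed delay. */
        static int wait_caldone(void)
        {
                int tries;

                for (tries = 0; tries < 50; tries++)
                        if (caldone())
                                return 0;

                return -1;   /* -ETIMEDOUT analogue */
        }
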
10239 +diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
10240 +index 1ff6c3573493..b601039d6c69 100644
10241 +--- a/drivers/pinctrl/devicetree.c
10242 ++++ b/drivers/pinctrl/devicetree.c
10243 +@@ -122,8 +122,10 @@ static int dt_to_map_one_config(struct pinctrl *p,
10244 + /* OK let's just assume this will appear later then */
10245 + return -EPROBE_DEFER;
10246 + }
10247 +- if (!pctldev)
10248 +- pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
10249 ++ /* If we're creating a hog we can use the passed pctldev */
10250 ++ if (pctldev && (np_pctldev == p->dev->of_node))
10251 ++ break;
10252 ++ pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
10253 + if (pctldev)
10254 + break;
10255 + /* Do not defer probing of hogs (circular loop) */
10256 +diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
10257 +index 447763aad815..db9cca4a83ff 100644
10258 +--- a/drivers/pinctrl/pinctrl-mcp23s08.c
10259 ++++ b/drivers/pinctrl/pinctrl-mcp23s08.c
10260 +@@ -779,6 +779,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
10261 + {
10262 + int status, ret;
10263 + bool mirror = false;
10264 ++ struct regmap_config *one_regmap_config = NULL;
10265 +
10266 + mutex_init(&mcp->lock);
10267 +
10268 +@@ -799,22 +800,36 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
10269 + switch (type) {
10270 + #ifdef CONFIG_SPI_MASTER
10271 + case MCP_TYPE_S08:
10272 +- mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
10273 +- &mcp23x08_regmap);
10274 +- mcp->reg_shift = 0;
10275 +- mcp->chip.ngpio = 8;
10276 +- mcp->chip.label = "mcp23s08";
10277 +- break;
10278 +-
10279 + case MCP_TYPE_S17:
10280 ++ switch (type) {
10281 ++ case MCP_TYPE_S08:
10282 ++ one_regmap_config =
10283 ++ devm_kmemdup(dev, &mcp23x08_regmap,
10284 ++ sizeof(struct regmap_config), GFP_KERNEL);
10285 ++ mcp->reg_shift = 0;
10286 ++ mcp->chip.ngpio = 8;
10287 ++ mcp->chip.label = "mcp23s08";
10288 ++ break;
10289 ++ case MCP_TYPE_S17:
10290 ++ one_regmap_config =
10291 ++ devm_kmemdup(dev, &mcp23x17_regmap,
10292 ++ sizeof(struct regmap_config), GFP_KERNEL);
10293 ++ mcp->reg_shift = 1;
10294 ++ mcp->chip.ngpio = 16;
10295 ++ mcp->chip.label = "mcp23s17";
10296 ++ break;
10297 ++ }
10298 ++ if (!one_regmap_config)
10299 ++ return -ENOMEM;
10300 ++
10301 ++ one_regmap_config->name = devm_kasprintf(dev, GFP_KERNEL, "%d", (addr & ~0x40) >> 1);
10302 + mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
10303 +- &mcp23x17_regmap);
10304 +- mcp->reg_shift = 1;
10305 +- mcp->chip.ngpio = 16;
10306 +- mcp->chip.label = "mcp23s17";
10307 ++ one_regmap_config);
10308 + break;
10309 +
10310 + case MCP_TYPE_S18:
10311 ++ if (!one_regmap_config)
10312 ++ return -ENOMEM;
10313 + mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
10314 + &mcp23x17_regmap);
10315 + mcp->reg_shift = 1;
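
The mcp23s08 rework above stops registering every SPI variant against one shared, statically named regmap_config; it devm_kmemdup()s a per-instance copy so each chip can carry a unique ->name derived from its address. A rough sketch of that pattern, using a hypothetical template config:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>

static const struct regmap_config template_cfg = {	/* hypothetical */
	.reg_bits = 8,
	.val_bits = 8,
};

static struct regmap *init_named_regmap(struct device *dev,
					const struct regmap_bus *bus,
					void *ctx, unsigned int instance)
{
	struct regmap_config *cfg;

	/* Private copy of the shared template so ->name can differ. */
	cfg = devm_kmemdup(dev, &template_cfg, sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return ERR_PTR(-ENOMEM);

	cfg->name = devm_kasprintf(dev, GFP_KERNEL, "%u", instance);
	if (!cfg->name)
		return ERR_PTR(-ENOMEM);

	return devm_regmap_init(dev, bus, ctx, cfg);
}
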
10316 +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
10317 +index ff491da64dab..19cd357bb464 100644
10318 +--- a/drivers/pinctrl/qcom/pinctrl-msm.c
10319 ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
10320 +@@ -818,7 +818,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
10321 + return -EINVAL;
10322 +
10323 + chip = &pctrl->chip;
10324 +- chip->base = 0;
10325 ++ chip->base = -1;
10326 + chip->ngpio = ngpio;
10327 + chip->label = dev_name(pctrl->dev);
10328 + chip->parent = pctrl->dev;
10329 +diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
10330 +index 200e1f4f6db9..711333fb2c6e 100644
10331 +--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
10332 ++++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
10333 +@@ -1,7 +1,7 @@
10334 + /*
10335 + * R8A7796 processor support - PFC hardware block.
10336 + *
10337 +- * Copyright (C) 2016 Renesas Electronics Corp.
10338 ++ * Copyright (C) 2016-2017 Renesas Electronics Corp.
10339 + *
10340 + * This file is based on the drivers/pinctrl/sh-pfc/pfc-r8a7795.c
10341 + *
10342 +@@ -477,7 +477,7 @@ FM(IP16_31_28) IP16_31_28 FM(IP17_31_28) IP17_31_28
10343 + #define MOD_SEL1_26 FM(SEL_TIMER_TMU_0) FM(SEL_TIMER_TMU_1)
10344 + #define MOD_SEL1_25_24 FM(SEL_SSP1_1_0) FM(SEL_SSP1_1_1) FM(SEL_SSP1_1_2) FM(SEL_SSP1_1_3)
10345 + #define MOD_SEL1_23_22_21 FM(SEL_SSP1_0_0) FM(SEL_SSP1_0_1) FM(SEL_SSP1_0_2) FM(SEL_SSP1_0_3) FM(SEL_SSP1_0_4) F_(0, 0) F_(0, 0) F_(0, 0)
10346 +-#define MOD_SEL1_20 FM(SEL_SSI_0) FM(SEL_SSI_1)
10347 ++#define MOD_SEL1_20 FM(SEL_SSI1_0) FM(SEL_SSI1_1)
10348 + #define MOD_SEL1_19 FM(SEL_SPEED_PULSE_0) FM(SEL_SPEED_PULSE_1)
10349 + #define MOD_SEL1_18_17 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1) FM(SEL_SIMCARD_2) FM(SEL_SIMCARD_3)
10350 + #define MOD_SEL1_16 FM(SEL_SDHI2_0) FM(SEL_SDHI2_1)
10351 +@@ -1224,7 +1224,7 @@ static const u16 pinmux_data[] = {
10352 + PINMUX_IPSR_GPSR(IP13_11_8, HSCK0),
10353 + PINMUX_IPSR_MSEL(IP13_11_8, MSIOF1_SCK_D, SEL_MSIOF1_3),
10354 + PINMUX_IPSR_MSEL(IP13_11_8, AUDIO_CLKB_A, SEL_ADG_B_0),
10355 +- PINMUX_IPSR_MSEL(IP13_11_8, SSI_SDATA1_B, SEL_SSI_1),
10356 ++ PINMUX_IPSR_MSEL(IP13_11_8, SSI_SDATA1_B, SEL_SSI1_1),
10357 + PINMUX_IPSR_MSEL(IP13_11_8, TS_SCK0_D, SEL_TSIF0_3),
10358 + PINMUX_IPSR_MSEL(IP13_11_8, STP_ISCLK_0_D, SEL_SSP1_0_3),
10359 + PINMUX_IPSR_MSEL(IP13_11_8, RIF0_CLK_C, SEL_DRIF0_2),
10360 +@@ -1232,14 +1232,14 @@ static const u16 pinmux_data[] = {
10361 +
10362 + PINMUX_IPSR_GPSR(IP13_15_12, HRX0),
10363 + PINMUX_IPSR_MSEL(IP13_15_12, MSIOF1_RXD_D, SEL_MSIOF1_3),
10364 +- PINMUX_IPSR_MSEL(IP13_15_12, SSI_SDATA2_B, SEL_SSI_1),
10365 ++ PINMUX_IPSR_MSEL(IP13_15_12, SSI_SDATA2_B, SEL_SSI2_1),
10366 + PINMUX_IPSR_MSEL(IP13_15_12, TS_SDEN0_D, SEL_TSIF0_3),
10367 + PINMUX_IPSR_MSEL(IP13_15_12, STP_ISEN_0_D, SEL_SSP1_0_3),
10368 + PINMUX_IPSR_MSEL(IP13_15_12, RIF0_D0_C, SEL_DRIF0_2),
10369 +
10370 + PINMUX_IPSR_GPSR(IP13_19_16, HTX0),
10371 + PINMUX_IPSR_MSEL(IP13_19_16, MSIOF1_TXD_D, SEL_MSIOF1_3),
10372 +- PINMUX_IPSR_MSEL(IP13_19_16, SSI_SDATA9_B, SEL_SSI_1),
10373 ++ PINMUX_IPSR_MSEL(IP13_19_16, SSI_SDATA9_B, SEL_SSI9_1),
10374 + PINMUX_IPSR_MSEL(IP13_19_16, TS_SDAT0_D, SEL_TSIF0_3),
10375 + PINMUX_IPSR_MSEL(IP13_19_16, STP_ISD_0_D, SEL_SSP1_0_3),
10376 + PINMUX_IPSR_MSEL(IP13_19_16, RIF0_D1_C, SEL_DRIF0_2),
10377 +@@ -1247,7 +1247,7 @@ static const u16 pinmux_data[] = {
10378 + PINMUX_IPSR_GPSR(IP13_23_20, HCTS0_N),
10379 + PINMUX_IPSR_MSEL(IP13_23_20, RX2_B, SEL_SCIF2_1),
10380 + PINMUX_IPSR_MSEL(IP13_23_20, MSIOF1_SYNC_D, SEL_MSIOF1_3),
10381 +- PINMUX_IPSR_MSEL(IP13_23_20, SSI_SCK9_A, SEL_SSI_0),
10382 ++ PINMUX_IPSR_MSEL(IP13_23_20, SSI_SCK9_A, SEL_SSI9_0),
10383 + PINMUX_IPSR_MSEL(IP13_23_20, TS_SPSYNC0_D, SEL_TSIF0_3),
10384 + PINMUX_IPSR_MSEL(IP13_23_20, STP_ISSYNC_0_D, SEL_SSP1_0_3),
10385 + PINMUX_IPSR_MSEL(IP13_23_20, RIF0_SYNC_C, SEL_DRIF0_2),
10386 +@@ -1256,7 +1256,7 @@ static const u16 pinmux_data[] = {
10387 + PINMUX_IPSR_GPSR(IP13_27_24, HRTS0_N),
10388 + PINMUX_IPSR_MSEL(IP13_27_24, TX2_B, SEL_SCIF2_1),
10389 + PINMUX_IPSR_MSEL(IP13_27_24, MSIOF1_SS1_D, SEL_MSIOF1_3),
10390 +- PINMUX_IPSR_MSEL(IP13_27_24, SSI_WS9_A, SEL_SSI_0),
10391 ++ PINMUX_IPSR_MSEL(IP13_27_24, SSI_WS9_A, SEL_SSI9_0),
10392 + PINMUX_IPSR_MSEL(IP13_27_24, STP_IVCXO27_0_D, SEL_SSP1_0_3),
10393 + PINMUX_IPSR_MSEL(IP13_27_24, BPFCLK_A, SEL_FM_0),
10394 + PINMUX_IPSR_GPSR(IP13_27_24, AUDIO_CLKOUT2_A),
10395 +@@ -1271,7 +1271,7 @@ static const u16 pinmux_data[] = {
10396 + PINMUX_IPSR_MSEL(IP14_3_0, RX5_A, SEL_SCIF5_0),
10397 + PINMUX_IPSR_MSEL(IP14_3_0, NFWP_N_A, SEL_NDF_0),
10398 + PINMUX_IPSR_MSEL(IP14_3_0, AUDIO_CLKA_C, SEL_ADG_A_2),
10399 +- PINMUX_IPSR_MSEL(IP14_3_0, SSI_SCK2_A, SEL_SSI_0),
10400 ++ PINMUX_IPSR_MSEL(IP14_3_0, SSI_SCK2_A, SEL_SSI2_0),
10401 + PINMUX_IPSR_MSEL(IP14_3_0, STP_IVCXO27_0_C, SEL_SSP1_0_2),
10402 + PINMUX_IPSR_GPSR(IP14_3_0, AUDIO_CLKOUT3_A),
10403 + PINMUX_IPSR_MSEL(IP14_3_0, TCLK1_B, SEL_TIMER_TMU_1),
10404 +@@ -1280,7 +1280,7 @@ static const u16 pinmux_data[] = {
10405 + PINMUX_IPSR_MSEL(IP14_7_4, TX5_A, SEL_SCIF5_0),
10406 + PINMUX_IPSR_MSEL(IP14_7_4, MSIOF1_SS2_D, SEL_MSIOF1_3),
10407 + PINMUX_IPSR_MSEL(IP14_7_4, AUDIO_CLKC_A, SEL_ADG_C_0),
10408 +- PINMUX_IPSR_MSEL(IP14_7_4, SSI_WS2_A, SEL_SSI_0),
10409 ++ PINMUX_IPSR_MSEL(IP14_7_4, SSI_WS2_A, SEL_SSI2_0),
10410 + PINMUX_IPSR_MSEL(IP14_7_4, STP_OPWM_0_D, SEL_SSP1_0_3),
10411 + PINMUX_IPSR_GPSR(IP14_7_4, AUDIO_CLKOUT_D),
10412 + PINMUX_IPSR_MSEL(IP14_7_4, SPEEDIN_B, SEL_SPEED_PULSE_1),
10413 +@@ -1308,10 +1308,10 @@ static const u16 pinmux_data[] = {
10414 + PINMUX_IPSR_MSEL(IP14_31_28, MSIOF1_SS2_F, SEL_MSIOF1_5),
10415 +
10416 + /* IPSR15 */
10417 +- PINMUX_IPSR_MSEL(IP15_3_0, SSI_SDATA1_A, SEL_SSI_0),
10418 ++ PINMUX_IPSR_MSEL(IP15_3_0, SSI_SDATA1_A, SEL_SSI1_0),
10419 +
10420 +- PINMUX_IPSR_MSEL(IP15_7_4, SSI_SDATA2_A, SEL_SSI_0),
10421 +- PINMUX_IPSR_MSEL(IP15_7_4, SSI_SCK1_B, SEL_SSI_1),
10422 ++ PINMUX_IPSR_MSEL(IP15_7_4, SSI_SDATA2_A, SEL_SSI2_0),
10423 ++ PINMUX_IPSR_MSEL(IP15_7_4, SSI_SCK1_B, SEL_SSI1_1),
10424 +
10425 + PINMUX_IPSR_GPSR(IP15_11_8, SSI_SCK349),
10426 + PINMUX_IPSR_MSEL(IP15_11_8, MSIOF1_SS1_A, SEL_MSIOF1_0),
10427 +@@ -1397,11 +1397,11 @@ static const u16 pinmux_data[] = {
10428 + PINMUX_IPSR_MSEL(IP16_27_24, RIF1_D1_A, SEL_DRIF1_0),
10429 + PINMUX_IPSR_MSEL(IP16_27_24, RIF3_D1_A, SEL_DRIF3_0),
10430 +
10431 +- PINMUX_IPSR_MSEL(IP16_31_28, SSI_SDATA9_A, SEL_SSI_0),
10432 ++ PINMUX_IPSR_MSEL(IP16_31_28, SSI_SDATA9_A, SEL_SSI9_0),
10433 + PINMUX_IPSR_MSEL(IP16_31_28, HSCK2_B, SEL_HSCIF2_1),
10434 + PINMUX_IPSR_MSEL(IP16_31_28, MSIOF1_SS1_C, SEL_MSIOF1_2),
10435 + PINMUX_IPSR_MSEL(IP16_31_28, HSCK1_A, SEL_HSCIF1_0),
10436 +- PINMUX_IPSR_MSEL(IP16_31_28, SSI_WS1_B, SEL_SSI_1),
10437 ++ PINMUX_IPSR_MSEL(IP16_31_28, SSI_WS1_B, SEL_SSI1_1),
10438 + PINMUX_IPSR_GPSR(IP16_31_28, SCK1),
10439 + PINMUX_IPSR_MSEL(IP16_31_28, STP_IVCXO27_1_A, SEL_SSP1_1_0),
10440 + PINMUX_IPSR_MSEL(IP16_31_28, SCK5_A, SEL_SCIF5_0),
10441 +@@ -1433,7 +1433,7 @@ static const u16 pinmux_data[] = {
10442 +
10443 + PINMUX_IPSR_GPSR(IP17_19_16, USB1_PWEN),
10444 + PINMUX_IPSR_MSEL(IP17_19_16, SIM0_CLK_C, SEL_SIMCARD_2),
10445 +- PINMUX_IPSR_MSEL(IP17_19_16, SSI_SCK1_A, SEL_SSI_0),
10446 ++ PINMUX_IPSR_MSEL(IP17_19_16, SSI_SCK1_A, SEL_SSI1_0),
10447 + PINMUX_IPSR_MSEL(IP17_19_16, TS_SCK0_E, SEL_TSIF0_4),
10448 + PINMUX_IPSR_MSEL(IP17_19_16, STP_ISCLK_0_E, SEL_SSP1_0_4),
10449 + PINMUX_IPSR_MSEL(IP17_19_16, FMCLK_B, SEL_FM_1),
10450 +@@ -1443,7 +1443,7 @@ static const u16 pinmux_data[] = {
10451 +
10452 + PINMUX_IPSR_GPSR(IP17_23_20, USB1_OVC),
10453 + PINMUX_IPSR_MSEL(IP17_23_20, MSIOF1_SS2_C, SEL_MSIOF1_2),
10454 +- PINMUX_IPSR_MSEL(IP17_23_20, SSI_WS1_A, SEL_SSI_0),
10455 ++ PINMUX_IPSR_MSEL(IP17_23_20, SSI_WS1_A, SEL_SSI1_0),
10456 + PINMUX_IPSR_MSEL(IP17_23_20, TS_SDAT0_E, SEL_TSIF0_4),
10457 + PINMUX_IPSR_MSEL(IP17_23_20, STP_ISD_0_E, SEL_SSP1_0_4),
10458 + PINMUX_IPSR_MSEL(IP17_23_20, FMIN_B, SEL_FM_1),
10459 +@@ -1453,7 +1453,7 @@ static const u16 pinmux_data[] = {
10460 +
10461 + PINMUX_IPSR_GPSR(IP17_27_24, USB30_PWEN),
10462 + PINMUX_IPSR_GPSR(IP17_27_24, AUDIO_CLKOUT_B),
10463 +- PINMUX_IPSR_MSEL(IP17_27_24, SSI_SCK2_B, SEL_SSI_1),
10464 ++ PINMUX_IPSR_MSEL(IP17_27_24, SSI_SCK2_B, SEL_SSI2_1),
10465 + PINMUX_IPSR_MSEL(IP17_27_24, TS_SDEN1_D, SEL_TSIF1_3),
10466 + PINMUX_IPSR_MSEL(IP17_27_24, STP_ISEN_1_D, SEL_SSP1_1_3),
10467 + PINMUX_IPSR_MSEL(IP17_27_24, STP_OPWM_0_E, SEL_SSP1_0_4),
10468 +@@ -1465,7 +1465,7 @@ static const u16 pinmux_data[] = {
10469 +
10470 + PINMUX_IPSR_GPSR(IP17_31_28, USB30_OVC),
10471 + PINMUX_IPSR_GPSR(IP17_31_28, AUDIO_CLKOUT1_B),
10472 +- PINMUX_IPSR_MSEL(IP17_31_28, SSI_WS2_B, SEL_SSI_1),
10473 ++ PINMUX_IPSR_MSEL(IP17_31_28, SSI_WS2_B, SEL_SSI2_1),
10474 + PINMUX_IPSR_MSEL(IP17_31_28, TS_SPSYNC1_D, SEL_TSIF1_3),
10475 + PINMUX_IPSR_MSEL(IP17_31_28, STP_ISSYNC_1_D, SEL_SSP1_1_3),
10476 + PINMUX_IPSR_MSEL(IP17_31_28, STP_IVCXO27_0_E, SEL_SSP1_0_4),
10477 +@@ -1476,7 +1476,7 @@ static const u16 pinmux_data[] = {
10478 + /* IPSR18 */
10479 + PINMUX_IPSR_GPSR(IP18_3_0, GP6_30),
10480 + PINMUX_IPSR_GPSR(IP18_3_0, AUDIO_CLKOUT2_B),
10481 +- PINMUX_IPSR_MSEL(IP18_3_0, SSI_SCK9_B, SEL_SSI_1),
10482 ++ PINMUX_IPSR_MSEL(IP18_3_0, SSI_SCK9_B, SEL_SSI9_1),
10483 + PINMUX_IPSR_MSEL(IP18_3_0, TS_SDEN0_E, SEL_TSIF0_4),
10484 + PINMUX_IPSR_MSEL(IP18_3_0, STP_ISEN_0_E, SEL_SSP1_0_4),
10485 + PINMUX_IPSR_MSEL(IP18_3_0, RIF2_D0_B, SEL_DRIF2_1),
10486 +@@ -1486,7 +1486,7 @@ static const u16 pinmux_data[] = {
10487 +
10488 + PINMUX_IPSR_GPSR(IP18_7_4, GP6_31),
10489 + PINMUX_IPSR_GPSR(IP18_7_4, AUDIO_CLKOUT3_B),
10490 +- PINMUX_IPSR_MSEL(IP18_7_4, SSI_WS9_B, SEL_SSI_1),
10491 ++ PINMUX_IPSR_MSEL(IP18_7_4, SSI_WS9_B, SEL_SSI9_1),
10492 + PINMUX_IPSR_MSEL(IP18_7_4, TS_SPSYNC0_E, SEL_TSIF0_4),
10493 + PINMUX_IPSR_MSEL(IP18_7_4, STP_ISSYNC_0_E, SEL_SSP1_0_4),
10494 + PINMUX_IPSR_MSEL(IP18_7_4, RIF2_D1_B, SEL_DRIF2_1),
10495 +diff --git a/drivers/power/supply/ltc2941-battery-gauge.c b/drivers/power/supply/ltc2941-battery-gauge.c
10496 +index 08e4fd9ee607..9621d6dd88c6 100644
10497 +--- a/drivers/power/supply/ltc2941-battery-gauge.c
10498 ++++ b/drivers/power/supply/ltc2941-battery-gauge.c
10499 +@@ -316,15 +316,15 @@ static int ltc294x_get_temperature(const struct ltc294x_info *info, int *val)
10500 +
10501 + if (info->id == LTC2942_ID) {
10502 + reg = LTC2942_REG_TEMPERATURE_MSB;
10503 +- value = 60000; /* Full-scale is 600 Kelvin */
10504 ++ value = 6000; /* Full-scale is 600 Kelvin */
10505 + } else {
10506 + reg = LTC2943_REG_TEMPERATURE_MSB;
10507 +- value = 51000; /* Full-scale is 510 Kelvin */
10508 ++ value = 5100; /* Full-scale is 510 Kelvin */
10509 + }
10510 + ret = ltc294x_read_regs(info->client, reg, &datar[0], 2);
10511 + value *= (datar[0] << 8) | datar[1];
10512 +- /* Convert to centidegrees */
10513 +- *val = value / 0xFFFF - 27215;
10514 ++ /* Convert to tenths of degree Celsius */
10515 ++ *val = value / 0xFFFF - 2722;
10516 + return ret;
10517 + }
10518 +
10519 +diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
10520 +index 5b556a13f517..9c7eaaeda343 100644
10521 +--- a/drivers/power/supply/max17042_battery.c
10522 ++++ b/drivers/power/supply/max17042_battery.c
10523 +@@ -1021,6 +1021,7 @@ static int max17042_probe(struct i2c_client *client,
10524 +
10525 + i2c_set_clientdata(client, chip);
10526 + psy_cfg.drv_data = chip;
10527 ++ psy_cfg.of_node = dev->of_node;
10528 +
10529 + /* When current is not measured,
10530 + * CURRENT_NOW and CURRENT_AVG properties should be invisible. */
10531 +diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
10532 +index 0fce06acfaec..a2eb50719c7b 100644
10533 +--- a/drivers/regulator/gpio-regulator.c
10534 ++++ b/drivers/regulator/gpio-regulator.c
10535 +@@ -271,8 +271,7 @@ static int gpio_regulator_probe(struct platform_device *pdev)
10536 + drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL);
10537 + if (drvdata->desc.name == NULL) {
10538 + dev_err(&pdev->dev, "Failed to allocate supply name\n");
10539 +- ret = -ENOMEM;
10540 +- goto err;
10541 ++ return -ENOMEM;
10542 + }
10543 +
10544 + if (config->nr_gpios != 0) {
10545 +@@ -292,7 +291,7 @@ static int gpio_regulator_probe(struct platform_device *pdev)
10546 + dev_err(&pdev->dev,
10547 + "Could not obtain regulator setting GPIOs: %d\n",
10548 + ret);
10549 +- goto err_memstate;
10550 ++ goto err_memgpio;
10551 + }
10552 + }
10553 +
10554 +@@ -303,7 +302,7 @@ static int gpio_regulator_probe(struct platform_device *pdev)
10555 + if (drvdata->states == NULL) {
10556 + dev_err(&pdev->dev, "Failed to allocate state data\n");
10557 + ret = -ENOMEM;
10558 +- goto err_memgpio;
10559 ++ goto err_stategpio;
10560 + }
10561 + drvdata->nr_states = config->nr_states;
10562 +
10563 +@@ -324,7 +323,7 @@ static int gpio_regulator_probe(struct platform_device *pdev)
10564 + default:
10565 + dev_err(&pdev->dev, "No regulator type set\n");
10566 + ret = -EINVAL;
10567 +- goto err_memgpio;
10568 ++ goto err_memstate;
10569 + }
10570 +
10571 + /* build initial state from gpio init data. */
10572 +@@ -361,22 +360,21 @@ static int gpio_regulator_probe(struct platform_device *pdev)
10573 + if (IS_ERR(drvdata->dev)) {
10574 + ret = PTR_ERR(drvdata->dev);
10575 + dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
10576 +- goto err_stategpio;
10577 ++ goto err_memstate;
10578 + }
10579 +
10580 + platform_set_drvdata(pdev, drvdata);
10581 +
10582 + return 0;
10583 +
10584 +-err_stategpio:
10585 +- gpio_free_array(drvdata->gpios, drvdata->nr_gpios);
10586 + err_memstate:
10587 + kfree(drvdata->states);
10588 ++err_stategpio:
10589 ++ gpio_free_array(drvdata->gpios, drvdata->nr_gpios);
10590 + err_memgpio:
10591 + kfree(drvdata->gpios);
10592 + err_name:
10593 + kfree(drvdata->desc.name);
10594 +-err:
10595 + return ret;
10596 + }
10597 +
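
The gpio-regulator hunk above is an unwind-ordering fix: each error label must release exactly what was acquired before the failing step, in reverse order, and the labels must be stacked to match. A schematic of the convention, with hypothetical acquire/release steps:

/* Hypothetical acquisition steps. */
int acquire_a(void);
int acquire_b(void);
int acquire_c(void);
void release_a(void);
void release_b(void);

static int example_probe(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;		/* nothing to undo yet */
	ret = acquire_b();
	if (ret)
		goto err_a;
	ret = acquire_c();
	if (ret)
		goto err_b;
	return 0;

err_b:					/* undo in reverse order */
	release_b();
err_a:
	release_a();
	return ret;
}
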
10598 +diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
10599 +index 14637a01ba2d..c9875355905d 100644
10600 +--- a/drivers/regulator/of_regulator.c
10601 ++++ b/drivers/regulator/of_regulator.c
10602 +@@ -305,6 +305,7 @@ int of_regulator_match(struct device *dev, struct device_node *node,
10603 + dev_err(dev,
10604 + "failed to parse DT for regulator %s\n",
10605 + child->name);
10606 ++ of_node_put(child);
10607 + return -EINVAL;
10608 + }
10609 + match->of_node = of_node_get(child);
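
The of_regulator hunk above adds the of_node_put() that for_each_child_of_node() requires when the loop is left early: the iterator holds a reference on the child it last yielded, and normal termination drops it, but a break or return does not. A sketch of the rule (the property name is invented):

#include <linux/errno.h>
#include <linux/of.h>

static int reject_flagged_children(struct device_node *parent)
{
	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (!of_property_read_bool(child, "bad-flag"))
			continue;
		/*
		 * The iterator holds a reference on 'child'; leaving
		 * the loop early means dropping it by hand.
		 */
		of_node_put(child);
		return -EINVAL;
	}
	return 0;
}
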
10610 +diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
10611 +index 633268e9d550..05bcbce2013a 100644
10612 +--- a/drivers/remoteproc/imx_rproc.c
10613 ++++ b/drivers/remoteproc/imx_rproc.c
10614 +@@ -339,8 +339,10 @@ static int imx_rproc_probe(struct platform_device *pdev)
10615 + }
10616 +
10617 + dcfg = of_device_get_match_data(dev);
10618 +- if (!dcfg)
10619 +- return -EINVAL;
10620 ++ if (!dcfg) {
10621 ++ ret = -EINVAL;
10622 ++ goto err_put_rproc;
10623 ++ }
10624 +
10625 + priv = rproc->priv;
10626 + priv->rproc = rproc;
10627 +diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
10628 +index 29f35e29d480..e67c1d8a193d 100644
10629 +--- a/drivers/s390/block/dasd.c
10630 ++++ b/drivers/s390/block/dasd.c
10631 +@@ -2596,8 +2596,6 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
10632 + case DASD_CQR_QUEUED:
10633 + /* request was not started - just set to cleared */
10634 + cqr->status = DASD_CQR_CLEARED;
10635 +- if (cqr->callback_data == DASD_SLEEPON_START_TAG)
10636 +- cqr->callback_data = DASD_SLEEPON_END_TAG;
10637 + break;
10638 + case DASD_CQR_IN_IO:
10639 + /* request in IO - terminate IO and release again */
10640 +@@ -3917,9 +3915,12 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device)
10641 + wait_event(dasd_flush_wq,
10642 + (cqr->status != DASD_CQR_CLEAR_PENDING));
10643 +
10644 +- /* mark sleepon requests as ended */
10645 +- if (cqr->callback_data == DASD_SLEEPON_START_TAG)
10646 +- cqr->callback_data = DASD_SLEEPON_END_TAG;
10647 ++ /*
10648 ++ * requeue requests to blocklayer will only work
10649 ++ * for block device requests
10650 ++ */
10651 ++ if (_dasd_requeue_request(cqr))
10652 ++ continue;
10653 +
10654 + /* remove requests from device and block queue */
10655 + list_del_init(&cqr->devlist);
10656 +@@ -3932,13 +3933,6 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device)
10657 + cqr = refers;
10658 + }
10659 +
10660 +- /*
10661 +- * requeue requests to blocklayer will only work
10662 +- * for block device requests
10663 +- */
10664 +- if (_dasd_requeue_request(cqr))
10665 +- continue;
10666 +-
10667 + if (cqr->block)
10668 + list_del_init(&cqr->blocklist);
10669 + cqr->block->base->discipline->free_cp(
10670 +@@ -3955,8 +3949,7 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device)
10671 + list_splice_tail(&requeue_queue, &device->ccw_queue);
10672 + spin_unlock_irq(get_ccwdev_lock(device->cdev));
10673 + }
10674 +- /* wake up generic waitqueue for eventually ended sleepon requests */
10675 +- wake_up(&generic_waitq);
10676 ++ dasd_schedule_device_bh(device);
10677 + return rc;
10678 + }
10679 +
10680 +diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
10681 +index f98ea674c3d8..28837ad75712 100644
10682 +--- a/drivers/s390/cio/device_fsm.c
10683 ++++ b/drivers/s390/cio/device_fsm.c
10684 +@@ -796,6 +796,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
10685 +
10686 + ccw_device_set_timeout(cdev, 0);
10687 + cdev->private->iretry = 255;
10688 ++ cdev->private->async_kill_io_rc = -ETIMEDOUT;
10689 + ret = ccw_device_cancel_halt_clear(cdev);
10690 + if (ret == -EBUSY) {
10691 + ccw_device_set_timeout(cdev, 3*HZ);
10692 +@@ -872,7 +873,7 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
10693 + /* OK, i/o is dead now. Call interrupt handler. */
10694 + if (cdev->handler)
10695 + cdev->handler(cdev, cdev->private->intparm,
10696 +- ERR_PTR(-EIO));
10697 ++ ERR_PTR(cdev->private->async_kill_io_rc));
10698 + }
10699 +
10700 + static void
10701 +@@ -889,14 +890,16 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
10702 + ccw_device_online_verify(cdev, 0);
10703 + if (cdev->handler)
10704 + cdev->handler(cdev, cdev->private->intparm,
10705 +- ERR_PTR(-EIO));
10706 ++ ERR_PTR(cdev->private->async_kill_io_rc));
10707 + }
10708 +
10709 + void ccw_device_kill_io(struct ccw_device *cdev)
10710 + {
10711 + int ret;
10712 +
10713 ++ ccw_device_set_timeout(cdev, 0);
10714 + cdev->private->iretry = 255;
10715 ++ cdev->private->async_kill_io_rc = -EIO;
10716 + ret = ccw_device_cancel_halt_clear(cdev);
10717 + if (ret == -EBUSY) {
10718 + ccw_device_set_timeout(cdev, 3*HZ);
10719 +diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
10720 +index cf8c4ac6323a..b22922ec32d1 100644
10721 +--- a/drivers/s390/cio/device_ops.c
10722 ++++ b/drivers/s390/cio/device_ops.c
10723 +@@ -160,7 +160,7 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
10724 + }
10725 +
10726 + /**
10727 +- * ccw_device_start_key() - start a s390 channel program with key
10728 ++ * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
10729 + * @cdev: target ccw device
10730 + * @cpa: logical start address of channel program
10731 + * @intparm: user specific interruption parameter; will be presented back to
10732 +@@ -171,10 +171,15 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
10733 + * @key: storage key to be used for the I/O
10734 + * @flags: additional flags; defines the action to be performed for I/O
10735 + * processing.
10736 ++ * @expires: timeout value in jiffies
10737 + *
10738 + * Start a S/390 channel program. When the interrupt arrives, the
10739 + * IRQ handler is called, either immediately, delayed (dev-end missing,
10740 + * or sense required) or never (no IRQ handler registered).
10741 ++ * This function notifies the device driver if the channel program has not
10742 ++ * completed during the time specified by @expires. If a timeout occurs, the
10743 ++ * channel program is terminated via xsch, hsch or csch, and the device's
10744 ++ * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
10745 + * Returns:
10746 + * %0, if the operation was successful;
10747 + * -%EBUSY, if the device is busy, or status pending;
10748 +@@ -183,9 +188,9 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
10749 + * Context:
10750 + * Interrupts disabled, ccw device lock held
10751 + */
10752 +-int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
10753 +- unsigned long intparm, __u8 lpm, __u8 key,
10754 +- unsigned long flags)
10755 ++int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
10756 ++ unsigned long intparm, __u8 lpm, __u8 key,
10757 ++ unsigned long flags, int expires)
10758 + {
10759 + struct subchannel *sch;
10760 + int ret;
10761 +@@ -225,6 +230,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
10762 + switch (ret) {
10763 + case 0:
10764 + cdev->private->intparm = intparm;
10765 ++ if (expires)
10766 ++ ccw_device_set_timeout(cdev, expires);
10767 + break;
10768 + case -EACCES:
10769 + case -ENODEV:
10770 +@@ -235,7 +242,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
10771 + }
10772 +
10773 + /**
10774 +- * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
10775 ++ * ccw_device_start_key() - start a s390 channel program with key
10776 + * @cdev: target ccw device
10777 + * @cpa: logical start address of channel program
10778 + * @intparm: user specific interruption parameter; will be presented back to
10779 +@@ -246,15 +253,10 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
10780 + * @key: storage key to be used for the I/O
10781 + * @flags: additional flags; defines the action to be performed for I/O
10782 + * processing.
10783 +- * @expires: timeout value in jiffies
10784 + *
10785 + * Start a S/390 channel program. When the interrupt arrives, the
10786 + * IRQ handler is called, either immediately, delayed (dev-end missing,
10787 + * or sense required) or never (no IRQ handler registered).
10788 +- * This function notifies the device driver if the channel program has not
10789 +- * completed during the time specified by @expires. If a timeout occurs, the
10790 +- * channel program is terminated via xsch, hsch or csch, and the device's
10791 +- * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
10792 + * Returns:
10793 + * %0, if the operation was successful;
10794 + * -%EBUSY, if the device is busy, or status pending;
10795 +@@ -263,19 +265,12 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
10796 + * Context:
10797 + * Interrupts disabled, ccw device lock held
10798 + */
10799 +-int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
10800 +- unsigned long intparm, __u8 lpm, __u8 key,
10801 +- unsigned long flags, int expires)
10802 ++int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
10803 ++ unsigned long intparm, __u8 lpm, __u8 key,
10804 ++ unsigned long flags)
10805 + {
10806 +- int ret;
10807 +-
10808 +- if (!cdev)
10809 +- return -ENODEV;
10810 +- ccw_device_set_timeout(cdev, expires);
10811 +- ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
10812 +- if (ret != 0)
10813 +- ccw_device_set_timeout(cdev, 0);
10814 +- return ret;
10815 ++ return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, key,
10816 ++ flags, 0);
10817 + }
10818 +
10819 + /**
10820 +@@ -490,18 +485,20 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
10821 + EXPORT_SYMBOL(ccw_device_get_id);
10822 +
10823 + /**
10824 +- * ccw_device_tm_start_key() - perform start function
10825 ++ * ccw_device_tm_start_timeout_key() - perform start function
10826 + * @cdev: ccw device on which to perform the start function
10827 + * @tcw: transport-command word to be started
10828 + * @intparm: user defined parameter to be passed to the interrupt handler
10829 + * @lpm: mask of paths to use
10830 + * @key: storage key to use for storage access
10831 ++ * @expires: time span in jiffies after which to abort request
10832 + *
10833 + * Start the tcw on the given ccw device. Return zero on success, non-zero
10834 + * otherwise.
10835 + */
10836 +-int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
10837 +- unsigned long intparm, u8 lpm, u8 key)
10838 ++int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
10839 ++ unsigned long intparm, u8 lpm, u8 key,
10840 ++ int expires)
10841 + {
10842 + struct subchannel *sch;
10843 + int rc;
10844 +@@ -528,37 +525,32 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
10845 + return -EACCES;
10846 + }
10847 + rc = cio_tm_start_key(sch, tcw, lpm, key);
10848 +- if (rc == 0)
10849 ++ if (rc == 0) {
10850 + cdev->private->intparm = intparm;
10851 ++ if (expires)
10852 ++ ccw_device_set_timeout(cdev, expires);
10853 ++ }
10854 + return rc;
10855 + }
10856 +-EXPORT_SYMBOL(ccw_device_tm_start_key);
10857 ++EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
10858 +
10859 + /**
10860 +- * ccw_device_tm_start_timeout_key() - perform start function
10861 ++ * ccw_device_tm_start_key() - perform start function
10862 + * @cdev: ccw device on which to perform the start function
10863 + * @tcw: transport-command word to be started
10864 + * @intparm: user defined parameter to be passed to the interrupt handler
10865 + * @lpm: mask of paths to use
10866 + * @key: storage key to use for storage access
10867 +- * @expires: time span in jiffies after which to abort request
10868 + *
10869 + * Start the tcw on the given ccw device. Return zero on success, non-zero
10870 + * otherwise.
10871 + */
10872 +-int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
10873 +- unsigned long intparm, u8 lpm, u8 key,
10874 +- int expires)
10875 ++int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
10876 ++ unsigned long intparm, u8 lpm, u8 key)
10877 + {
10878 +- int ret;
10879 +-
10880 +- ccw_device_set_timeout(cdev, expires);
10881 +- ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key);
10882 +- if (ret != 0)
10883 +- ccw_device_set_timeout(cdev, 0);
10884 +- return ret;
10885 ++ return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, key, 0);
10886 + }
10887 +-EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
10888 ++EXPORT_SYMBOL(ccw_device_tm_start_key);
10889 +
10890 + /**
10891 + * ccw_device_tm_start() - perform start function
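
The device_ops.c rework above makes the timeout-taking variants the canonical implementations and turns the plain variants into thin wrappers passing expires == 0, so the timer is armed only after the start has actually succeeded rather than armed up front and cancelled on failure. Reduced to its shape, with all names hypothetical:

struct ctx;				/* opaque, hypothetical */
int do_start(struct ctx *c);
void set_timeout(struct ctx *c, int expires);

int op_start_timeout(struct ctx *c, int expires)
{
	int ret = do_start(c);

	/* Arm the timer only once the start has succeeded. */
	if (ret == 0 && expires)
		set_timeout(c, expires);
	return ret;
}

int op_start(struct ctx *c)
{
	return op_start_timeout(c, 0);	/* 0 == no timeout */
}
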
10892 +diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
10893 +index af571d8d6925..90e4e3a7841b 100644
10894 +--- a/drivers/s390/cio/io_sch.h
10895 ++++ b/drivers/s390/cio/io_sch.h
10896 +@@ -157,6 +157,7 @@ struct ccw_device_private {
10897 + unsigned long intparm; /* user interruption parameter */
10898 + struct qdio_irq *qdio_data;
10899 + struct irb irb; /* device status */
10900 ++ int async_kill_io_rc;
10901 + struct senseid senseid; /* SenseID info */
10902 + struct pgid pgid[8]; /* path group IDs per chpid*/
10903 + struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
10904 +diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
10905 +index e96b85579f21..3c800642134e 100644
10906 +--- a/drivers/s390/cio/vfio_ccw_fsm.c
10907 ++++ b/drivers/s390/cio/vfio_ccw_fsm.c
10908 +@@ -129,6 +129,11 @@ static void fsm_io_request(struct vfio_ccw_private *private,
10909 + if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
10910 + orb = (union orb *)io_region->orb_area;
10911 +
10912 ++ /* Don't try to build a cp if transport mode is specified. */
10913 ++ if (orb->tm.b) {
10914 ++ io_region->ret_code = -EOPNOTSUPP;
10915 ++ goto err_out;
10916 ++ }
10917 + io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
10918 + orb);
10919 + if (io_region->ret_code)
10920 +diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
10921 +index 9be34d37c356..3f3cb72e0c0c 100644
10922 +--- a/drivers/scsi/sr.c
10923 ++++ b/drivers/scsi/sr.c
10924 +@@ -525,6 +525,8 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode)
10925 + struct scsi_cd *cd;
10926 + int ret = -ENXIO;
10927 +
10928 ++ check_disk_change(bdev);
10929 ++
10930 + mutex_lock(&sr_mutex);
10931 + cd = scsi_cd_get(bdev->bd_disk);
10932 + if (cd) {
10933 +@@ -585,18 +587,28 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
10934 + static unsigned int sr_block_check_events(struct gendisk *disk,
10935 + unsigned int clearing)
10936 + {
10937 +- struct scsi_cd *cd = scsi_cd(disk);
10938 ++ unsigned int ret = 0;
10939 ++ struct scsi_cd *cd;
10940 +
10941 +- if (atomic_read(&cd->device->disk_events_disable_depth))
10942 ++ cd = scsi_cd_get(disk);
10943 ++ if (!cd)
10944 + return 0;
10945 +
10946 +- return cdrom_check_events(&cd->cdi, clearing);
10947 ++ if (!atomic_read(&cd->device->disk_events_disable_depth))
10948 ++ ret = cdrom_check_events(&cd->cdi, clearing);
10949 ++
10950 ++ scsi_cd_put(cd);
10951 ++ return ret;
10952 + }
10953 +
10954 + static int sr_block_revalidate_disk(struct gendisk *disk)
10955 + {
10956 +- struct scsi_cd *cd = scsi_cd(disk);
10957 + struct scsi_sense_hdr sshdr;
10958 ++ struct scsi_cd *cd;
10959 ++
10960 ++ cd = scsi_cd_get(disk);
10961 ++ if (!cd)
10962 ++ return -ENXIO;
10963 +
10964 + /* if the unit is not ready, nothing more to do */
10965 + if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
10966 +@@ -605,6 +617,7 @@ static int sr_block_revalidate_disk(struct gendisk *disk)
10967 + sr_cd_check(&cd->cdi);
10968 + get_sectorsize(cd);
10969 + out:
10970 ++ scsi_cd_put(cd);
10971 + return 0;
10972 + }
10973 +
10974 +diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
10975 +index 2a21f2d48592..35fab1e18adc 100644
10976 +--- a/drivers/scsi/sr_ioctl.c
10977 ++++ b/drivers/scsi/sr_ioctl.c
10978 +@@ -188,9 +188,13 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
10979 + struct scsi_device *SDev;
10980 + struct scsi_sense_hdr sshdr;
10981 + int result, err = 0, retries = 0;
10982 ++ unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE], *senseptr = NULL;
10983 +
10984 + SDev = cd->device;
10985 +
10986 ++ if (cgc->sense)
10987 ++ senseptr = sense_buffer;
10988 ++
10989 + retry:
10990 + if (!scsi_block_when_processing_errors(SDev)) {
10991 + err = -ENODEV;
10992 +@@ -198,10 +202,12 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
10993 + }
10994 +
10995 + result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
10996 +- cgc->buffer, cgc->buflen,
10997 +- (unsigned char *)cgc->sense, &sshdr,
10998 ++ cgc->buffer, cgc->buflen, senseptr, &sshdr,
10999 + cgc->timeout, IOCTL_RETRIES, 0, 0, NULL);
11000 +
11001 ++ if (cgc->sense)
11002 ++ memcpy(cgc->sense, sense_buffer, sizeof(*cgc->sense));
11003 ++
11004 + /* Minimal error checking. Ignore cases we know about, and report the rest. */
11005 + if (driver_byte(result) != 0) {
11006 + switch (sshdr.sense_key) {
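
The sr_ioctl hunk above stops handing the caller's sense pointer straight down and instead fills a full-size on-stack buffer, copying back afterwards — scsi_execute() assumes a SCSI_SENSE_BUFFERSIZE buffer, which can be larger than what the ioctl caller supplied. A sketch of the pattern under that assumption, with invented types:

#include <linux/string.h>
#include <scsi/scsi_cmnd.h>		/* SCSI_SENSE_BUFFERSIZE */

struct small_sense { unsigned char data[18]; };	/* hypothetical, < 96 */

struct caller_req {
	struct small_sense *sense;	/* caller's buffer, may be NULL */
};

int low_level_execute(struct caller_req *req, unsigned char *sense);

static int issue_cmd(struct caller_req *req)
{
	unsigned char sense[SCSI_SENSE_BUFFERSIZE], *senseptr = NULL;
	int ret;

	if (req->sense)			/* caller wants sense data back */
		senseptr = sense;

	ret = low_level_execute(req, senseptr);

	/* Copy back only as much as the caller's buffer can hold. */
	if (req->sense)
		memcpy(req->sense, sense, sizeof(*req->sense));
	return ret;
}
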
11007 +diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
11008 +index 47e7aa963dbb..1613ccf0c059 100644
11009 +--- a/drivers/soc/imx/gpc.c
11010 ++++ b/drivers/soc/imx/gpc.c
11011 +@@ -456,13 +456,21 @@ static int imx_gpc_probe(struct platform_device *pdev)
11012 +
11013 + static int imx_gpc_remove(struct platform_device *pdev)
11014 + {
11015 ++ struct device_node *pgc_node;
11016 + int ret;
11017 +
11018 ++ pgc_node = of_get_child_by_name(pdev->dev.of_node, "pgc");
11019 ++
11020 ++ /* bail out if DT too old and doesn't provide the necessary info */
11021 ++ if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") &&
11022 ++ !pgc_node)
11023 ++ return 0;
11024 ++
11025 + /*
11026 + * If the old DT binding is used the toplevel driver needs to
11027 + * de-register the power domains
11028 + */
11029 +- if (!of_get_child_by_name(pdev->dev.of_node, "pgc")) {
11030 ++ if (!pgc_node) {
11031 + of_genpd_del_provider(pdev->dev.of_node);
11032 +
11033 + ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base);
11034 +diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
11035 +index d008e5b82db4..df3ccb30bc2d 100644
11036 +--- a/drivers/soc/qcom/wcnss_ctrl.c
11037 ++++ b/drivers/soc/qcom/wcnss_ctrl.c
11038 +@@ -249,7 +249,7 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
11039 + /* Increment for next fragment */
11040 + req->seq++;
11041 +
11042 +- data += req->hdr.len;
11043 ++ data += NV_FRAGMENT_SIZE;
11044 + left -= NV_FRAGMENT_SIZE;
11045 + } while (left > 0);
11046 +
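
The wcnss_ctrl fix above advances the source cursor by the fixed fragment payload size rather than by req->hdr.len, keeping it in step with the 'left' counter — presumably because hdr.len covers more than the payload. A plain-C sketch of the corrected loop, with hypothetical sizes:

#define FRAG_SIZE 3072		/* hypothetical payload bytes per message */

void send_fragment(const char *frag, long n);	/* hypothetical */

static void send_in_fragments(const char *data, long left)
{
	do {
		long n = left < FRAG_SIZE ? left : FRAG_SIZE;

		send_fragment(data, n);
		/*
		 * Advance by the payload consumed, not by a message
		 * length that may also count its header.
		 */
		data += FRAG_SIZE;
		left -= FRAG_SIZE;
	} while (left > 0);
}
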
11047 +diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
11048 +index ff01f865a173..6573152ce893 100644
11049 +--- a/drivers/spi/spi-bcm-qspi.c
11050 ++++ b/drivers/spi/spi-bcm-qspi.c
11051 +@@ -1255,7 +1255,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
11052 + qspi->base[MSPI] = devm_ioremap_resource(dev, res);
11053 + if (IS_ERR(qspi->base[MSPI])) {
11054 + ret = PTR_ERR(qspi->base[MSPI]);
11055 +- goto qspi_probe_err;
11056 ++ goto qspi_resource_err;
11057 + }
11058 + } else {
11059 + goto qspi_resource_err;
11060 +@@ -1266,7 +1266,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
11061 + qspi->base[BSPI] = devm_ioremap_resource(dev, res);
11062 + if (IS_ERR(qspi->base[BSPI])) {
11063 + ret = PTR_ERR(qspi->base[BSPI]);
11064 +- goto qspi_probe_err;
11065 ++ goto qspi_resource_err;
11066 + }
11067 + qspi->bspi_mode = true;
11068 + } else {
11069 +diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
11070 +index 6dda3623a276..e1faee1f8602 100644
11071 +--- a/drivers/usb/host/pci-quirks.c
11072 ++++ b/drivers/usb/host/pci-quirks.c
11073 +@@ -65,6 +65,23 @@
11074 + #define AX_INDXC 0x30
11075 + #define AX_DATAC 0x34
11076 +
11077 ++#define PT_ADDR_INDX 0xE8
11078 ++#define PT_READ_INDX 0xE4
11079 ++#define PT_SIG_1_ADDR 0xA520
11080 ++#define PT_SIG_2_ADDR 0xA521
11081 ++#define PT_SIG_3_ADDR 0xA522
11082 ++#define PT_SIG_4_ADDR 0xA523
11083 ++#define PT_SIG_1_DATA 0x78
11084 ++#define PT_SIG_2_DATA 0x56
11085 ++#define PT_SIG_3_DATA 0x34
11086 ++#define PT_SIG_4_DATA 0x12
11087 ++#define PT4_P1_REG 0xB521
11088 ++#define PT4_P2_REG 0xB522
11089 ++#define PT2_P1_REG 0xD520
11090 ++#define PT2_P2_REG 0xD521
11091 ++#define PT1_P1_REG 0xD522
11092 ++#define PT1_P2_REG 0xD523
11093 ++
11094 + #define NB_PCIE_INDX_ADDR 0xe0
11095 + #define NB_PCIE_INDX_DATA 0xe4
11096 + #define PCIE_P_CNTL 0x10040
11097 +@@ -511,6 +528,98 @@ void usb_amd_dev_put(void)
11098 + }
11099 + EXPORT_SYMBOL_GPL(usb_amd_dev_put);
11100 +
11101 ++/*
11102 ++ * Check if port is disabled in BIOS on AMD Promontory host.
11103 ++ * BIOS Disabled ports may wake on connect/disconnect and need
11104 ++ * driver workaround to keep them disabled.
11105 ++ * Returns true if port is marked disabled.
11106 ++ */
11107 ++bool usb_amd_pt_check_port(struct device *device, int port)
11108 ++{
11109 ++ unsigned char value, port_shift;
11110 ++ struct pci_dev *pdev;
11111 ++ u16 reg;
11112 ++
11113 ++ pdev = to_pci_dev(device);
11114 ++ pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_1_ADDR);
11115 ++
11116 ++ pci_read_config_byte(pdev, PT_READ_INDX, &value);
11117 ++ if (value != PT_SIG_1_DATA)
11118 ++ return false;
11119 ++
11120 ++ pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_2_ADDR);
11121 ++
11122 ++ pci_read_config_byte(pdev, PT_READ_INDX, &value);
11123 ++ if (value != PT_SIG_2_DATA)
11124 ++ return false;
11125 ++
11126 ++ pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_3_ADDR);
11127 ++
11128 ++ pci_read_config_byte(pdev, PT_READ_INDX, &value);
11129 ++ if (value != PT_SIG_3_DATA)
11130 ++ return false;
11131 ++
11132 ++ pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_4_ADDR);
11133 ++
11134 ++ pci_read_config_byte(pdev, PT_READ_INDX, &value);
11135 ++ if (value != PT_SIG_4_DATA)
11136 ++ return false;
11137 ++
11138 ++ /* Check disabled port setting, if bit is set port is enabled */
11139 ++ switch (pdev->device) {
11140 ++ case 0x43b9:
11141 ++ case 0x43ba:
11142 ++ /*
11143 ++ * device is AMD_PROMONTORYA_4(0x43b9) or PROMONTORYA_3(0x43ba)
11144 ++ * PT4_P1_REG bits[7..1] represents USB2.0 ports 6 to 0
11145 ++ * PT4_P2_REG bits[6..0] represents ports 13 to 7
11146 ++ */
11147 ++ if (port > 6) {
11148 ++ reg = PT4_P2_REG;
11149 ++ port_shift = port - 7;
11150 ++ } else {
11151 ++ reg = PT4_P1_REG;
11152 ++ port_shift = port + 1;
11153 ++ }
11154 ++ break;
11155 ++ case 0x43bb:
11156 ++ /*
11157 ++ * device is AMD_PROMONTORYA_2(0x43bb)
11158 ++ * PT2_P1_REG bits[7..5] represents USB2.0 ports 2 to 0
11159 ++ * PT2_P2_REG bits[5..0] represents ports 9 to 3
11160 ++ */
11161 ++ if (port > 2) {
11162 ++ reg = PT2_P2_REG;
11163 ++ port_shift = port - 3;
11164 ++ } else {
11165 ++ reg = PT2_P1_REG;
11166 ++ port_shift = port + 5;
11167 ++ }
11168 ++ break;
11169 ++ case 0x43bc:
11170 ++ /*
11171 ++ * device is AMD_PROMONTORYA_1(0x43bc)
11172 ++ * PT1_P1_REG[7..4] represents USB2.0 ports 3 to 0
11173 ++ * PT1_P2_REG[5..0] represents ports 9 to 4
11174 ++ */
11175 ++ if (port > 3) {
11176 ++ reg = PT1_P2_REG;
11177 ++ port_shift = port - 4;
11178 ++ } else {
11179 ++ reg = PT1_P1_REG;
11180 ++ port_shift = port + 4;
11181 ++ }
11182 ++ break;
11183 ++ default:
11184 ++ return false;
11185 ++ }
11186 ++ pci_write_config_word(pdev, PT_ADDR_INDX, reg);
11187 ++ pci_read_config_byte(pdev, PT_READ_INDX, &value);
11188 ++
11189 ++ return !(value & BIT(port_shift));
11190 ++}
11191 ++EXPORT_SYMBOL_GPL(usb_amd_pt_check_port);
11192 ++
11193 + /*
11194 + * Make sure the controller is completely inactive, unable to
11195 + * generate interrupts or do DMA.
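
usb_amd_pt_check_port() above reads chipset-internal registers through an address/data indirection in PCI config space: write the internal address at one config offset, then read the latched value back at another. Condensed to a helper, using the same offsets as the quirk:

#include <linux/pci.h>

#define ADDR_INDX 0xE8	/* config offset taking the internal address */
#define READ_INDX 0xE4	/* config offset returning the latched data */

static u8 promontory_indexed_read(struct pci_dev *pdev, u16 internal_addr)
{
	u8 value;

	pci_write_config_word(pdev, ADDR_INDX, internal_addr);
	pci_read_config_byte(pdev, READ_INDX, &value);
	return value;
}

The signature check at the top of the quirk is then four such reads compared against known constants before the per-port register is consulted.
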
11196 +diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
11197 +index b68dcb5dd0fd..4ca0d9b7e463 100644
11198 +--- a/drivers/usb/host/pci-quirks.h
11199 ++++ b/drivers/usb/host/pci-quirks.h
11200 +@@ -17,6 +17,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
11201 + void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
11202 + void sb800_prefetch(struct device *dev, int on);
11203 + bool usb_xhci_needs_pci_reset(struct pci_dev *pdev);
11204 ++bool usb_amd_pt_check_port(struct device *device, int port);
11205 + #else
11206 + struct pci_dev;
11207 + static inline void usb_amd_quirk_pll_disable(void) {}
11208 +@@ -25,6 +26,10 @@ static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {}
11209 + static inline void usb_amd_dev_put(void) {}
11210 + static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
11211 + static inline void sb800_prefetch(struct device *dev, int on) {}
11212 ++static inline bool usb_amd_pt_check_port(struct device *device, int port)
11213 ++{
11214 ++ return false;
11215 ++}
11216 + #endif /* CONFIG_USB_PCI */
11217 +
11218 + #endif /* __LINUX_USB_PCI_QUIRKS_H */
11219 +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
11220 +index 9762333d8d7f..00b8d4cdcac3 100644
11221 +--- a/drivers/usb/host/xhci-hub.c
11222 ++++ b/drivers/usb/host/xhci-hub.c
11223 +@@ -1531,6 +1531,13 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
11224 + t2 |= PORT_WKOC_E | PORT_WKCONN_E;
11225 + t2 &= ~PORT_WKDISC_E;
11226 + }
11227 ++
11228 ++ if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) &&
11229 ++ (hcd->speed < HCD_USB3)) {
11230 ++ if (usb_amd_pt_check_port(hcd->self.controller,
11231 ++ port_index))
11232 ++ t2 &= ~PORT_WAKE_BITS;
11233 ++ }
11234 + } else
11235 + t2 &= ~PORT_WAKE_BITS;
11236 +
11237 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
11238 +index d79ab0d85924..838d37e79fa2 100644
11239 +--- a/drivers/usb/host/xhci-pci.c
11240 ++++ b/drivers/usb/host/xhci-pci.c
11241 +@@ -54,6 +54,10 @@
11242 + #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
11243 + #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
11244 +
11245 ++#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
11246 ++#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
11247 ++#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
11248 ++#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc
11249 + #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
11250 +
11251 + static const char hcd_name[] = "xhci_hcd";
11252 +@@ -143,6 +147,13 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
11253 + if (pdev->vendor == PCI_VENDOR_ID_AMD)
11254 + xhci->quirks |= XHCI_TRUST_TX_LENGTH;
11255 +
11256 ++ if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
11257 ++ ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
11258 ++ (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) ||
11259 ++ (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) ||
11260 ++ (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
11261 ++ xhci->quirks |= XHCI_U2_DISABLE_WAKE;
11262 ++
11263 + if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
11264 + xhci->quirks |= XHCI_LPM_SUPPORT;
11265 + xhci->quirks |= XHCI_INTEL_HOST;
11266 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
11267 +index f5fb1f4a092c..2a72060dda1b 100644
11268 +--- a/drivers/usb/host/xhci.h
11269 ++++ b/drivers/usb/host/xhci.h
11270 +@@ -1829,7 +1829,7 @@ struct xhci_hcd {
11271 + /* For controller with a broken Port Disable implementation */
11272 + #define XHCI_BROKEN_PORT_PED (1 << 25)
11273 + #define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
11274 +-/* Reserved. It was XHCI_U2_DISABLE_WAKE */
11275 ++#define XHCI_U2_DISABLE_WAKE (1 << 27)
11276 + #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
11277 + #define XHCI_SUSPEND_DELAY (1 << 30)
11278 +
11279 +diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c
11280 +index af6fc97f4ba4..a436d44f1b7f 100644
11281 +--- a/drivers/video/fbdev/sbuslib.c
11282 ++++ b/drivers/video/fbdev/sbuslib.c
11283 +@@ -122,7 +122,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
11284 + unsigned char __user *ured;
11285 + unsigned char __user *ugreen;
11286 + unsigned char __user *ublue;
11287 +- int index, count, i;
11288 ++ unsigned int index, count, i;
11289 +
11290 + if (get_user(index, &c->index) ||
11291 + __get_user(count, &c->count) ||
11292 +@@ -161,7 +161,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
11293 + unsigned char __user *ugreen;
11294 + unsigned char __user *ublue;
11295 + struct fb_cmap *cmap = &info->cmap;
11296 +- int index, count, i;
11297 ++ unsigned int index, count, i;
11298 + u8 red, green, blue;
11299 +
11300 + if (get_user(index, &c->index) ||
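
The sbuslib hunk above turns index/count into unsigned int so a would-be negative value copied in from userspace wraps to a huge number and fails the range check, instead of slipping past comparisons written for signed arithmetic. A sketch of an overflow-safe check in that style:

#include <linux/errno.h>

/*
 * 'index' and 'count' come from userspace. As unsigned values a
 * would-be negative index becomes huge and fails the first test;
 * writing 'count > len - index' avoids overflowing an addition.
 */
static int check_range(unsigned int index, unsigned int count,
		       unsigned int len)
{
	if (index > len || count > len - index)
		return -EINVAL;
	return 0;
}
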
11301 +diff --git a/drivers/watchdog/asm9260_wdt.c b/drivers/watchdog/asm9260_wdt.c
11302 +index 7dd0da644a7f..2cf56b459d84 100644
11303 +--- a/drivers/watchdog/asm9260_wdt.c
11304 ++++ b/drivers/watchdog/asm9260_wdt.c
11305 +@@ -292,14 +292,14 @@ static int asm9260_wdt_probe(struct platform_device *pdev)
11306 + if (IS_ERR(priv->iobase))
11307 + return PTR_ERR(priv->iobase);
11308 +
11309 +- ret = asm9260_wdt_get_dt_clks(priv);
11310 +- if (ret)
11311 +- return ret;
11312 +-
11313 + priv->rst = devm_reset_control_get_exclusive(&pdev->dev, "wdt_rst");
11314 + if (IS_ERR(priv->rst))
11315 + return PTR_ERR(priv->rst);
11316 +
11317 ++ ret = asm9260_wdt_get_dt_clks(priv);
11318 ++ if (ret)
11319 ++ return ret;
11320 ++
11321 + wdd = &priv->wdd;
11322 + wdd->info = &asm9260_wdt_ident;
11323 + wdd->ops = &asm9260_wdt_ops;
11324 +diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
11325 +index 79cc766cd30f..fd91007b4e41 100644
11326 +--- a/drivers/watchdog/aspeed_wdt.c
11327 ++++ b/drivers/watchdog/aspeed_wdt.c
11328 +@@ -46,6 +46,7 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
11329 + #define WDT_RELOAD_VALUE 0x04
11330 + #define WDT_RESTART 0x08
11331 + #define WDT_CTRL 0x0C
11332 ++#define WDT_CTRL_BOOT_SECONDARY BIT(7)
11333 + #define WDT_CTRL_RESET_MODE_SOC (0x00 << 5)
11334 + #define WDT_CTRL_RESET_MODE_FULL_CHIP (0x01 << 5)
11335 + #define WDT_CTRL_RESET_MODE_ARM_CPU (0x10 << 5)
11336 +@@ -158,6 +159,7 @@ static int aspeed_wdt_restart(struct watchdog_device *wdd,
11337 + {
11338 + struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
11339 +
11340 ++ wdt->ctrl &= ~WDT_CTRL_BOOT_SECONDARY;
11341 + aspeed_wdt_enable(wdt, 128 * WDT_RATE_1MHZ / 1000);
11342 +
11343 + mdelay(1000);
11344 +@@ -232,16 +234,21 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
11345 + wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC | WDT_CTRL_RESET_SYSTEM;
11346 + } else {
11347 + if (!strcmp(reset_type, "cpu"))
11348 +- wdt->ctrl |= WDT_CTRL_RESET_MODE_ARM_CPU;
11349 ++ wdt->ctrl |= WDT_CTRL_RESET_MODE_ARM_CPU |
11350 ++ WDT_CTRL_RESET_SYSTEM;
11351 + else if (!strcmp(reset_type, "soc"))
11352 +- wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC;
11353 ++ wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC |
11354 ++ WDT_CTRL_RESET_SYSTEM;
11355 + else if (!strcmp(reset_type, "system"))
11356 +- wdt->ctrl |= WDT_CTRL_RESET_SYSTEM;
11357 ++ wdt->ctrl |= WDT_CTRL_RESET_MODE_FULL_CHIP |
11358 ++ WDT_CTRL_RESET_SYSTEM;
11359 + else if (strcmp(reset_type, "none"))
11360 + return -EINVAL;
11361 + }
11362 + if (of_property_read_bool(np, "aspeed,external-signal"))
11363 + wdt->ctrl |= WDT_CTRL_WDT_EXT;
11364 ++ if (of_property_read_bool(np, "aspeed,alt-boot"))
11365 ++ wdt->ctrl |= WDT_CTRL_BOOT_SECONDARY;
11366 +
11367 + writel(wdt->ctrl, wdt->base + WDT_CTRL);
11368 +
11369 +diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
11370 +index 2f46487af86d..6d9a5d8c3c8d 100644
11371 +--- a/drivers/watchdog/davinci_wdt.c
11372 ++++ b/drivers/watchdog/davinci_wdt.c
11373 +@@ -198,15 +198,22 @@ static int davinci_wdt_probe(struct platform_device *pdev)
11374 +
11375 + wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
11376 + davinci_wdt->base = devm_ioremap_resource(dev, wdt_mem);
11377 +- if (IS_ERR(davinci_wdt->base))
11378 +- return PTR_ERR(davinci_wdt->base);
11379 ++ if (IS_ERR(davinci_wdt->base)) {
11380 ++ ret = PTR_ERR(davinci_wdt->base);
11381 ++ goto err_clk_disable;
11382 ++ }
11383 +
11384 + ret = watchdog_register_device(wdd);
11385 +- if (ret < 0) {
11386 +- clk_disable_unprepare(davinci_wdt->clk);
11387 ++ if (ret) {
11388 + dev_err(dev, "cannot register watchdog device\n");
11389 ++ goto err_clk_disable;
11390 + }
11391 +
11392 ++ return 0;
11393 ++
11394 ++err_clk_disable:
11395 ++ clk_disable_unprepare(davinci_wdt->clk);
11396 ++
11397 + return ret;
11398 + }
11399 +
11400 +diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
11401 +index c2f4ff516230..918357bccf5e 100644
11402 +--- a/drivers/watchdog/dw_wdt.c
11403 ++++ b/drivers/watchdog/dw_wdt.c
11404 +@@ -34,6 +34,7 @@
11405 +
11406 + #define WDOG_CONTROL_REG_OFFSET 0x00
11407 + #define WDOG_CONTROL_REG_WDT_EN_MASK 0x01
11408 ++#define WDOG_CONTROL_REG_RESP_MODE_MASK 0x02
11409 + #define WDOG_TIMEOUT_RANGE_REG_OFFSET 0x04
11410 + #define WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT 4
11411 + #define WDOG_CURRENT_COUNT_REG_OFFSET 0x08
11412 +@@ -121,14 +122,23 @@ static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s)
11413 + return 0;
11414 + }
11415 +
11416 ++static void dw_wdt_arm_system_reset(struct dw_wdt *dw_wdt)
11417 ++{
11418 ++ u32 val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
11419 ++
11420 ++ /* Disable interrupt mode; always perform system reset. */
11421 ++ val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK;
11422 ++ /* Enable watchdog. */
11423 ++ val |= WDOG_CONTROL_REG_WDT_EN_MASK;
11424 ++ writel(val, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
11425 ++}
11426 ++
11427 + static int dw_wdt_start(struct watchdog_device *wdd)
11428 + {
11429 + struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
11430 +
11431 + dw_wdt_set_timeout(wdd, wdd->timeout);
11432 +-
11433 +- writel(WDOG_CONTROL_REG_WDT_EN_MASK,
11434 +- dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
11435 ++ dw_wdt_arm_system_reset(dw_wdt);
11436 +
11437 + return 0;
11438 + }
11439 +@@ -152,16 +162,13 @@ static int dw_wdt_restart(struct watchdog_device *wdd,
11440 + unsigned long action, void *data)
11441 + {
11442 + struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
11443 +- u32 val;
11444 +
11445 + writel(0, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
11446 +- val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
11447 +- if (val & WDOG_CONTROL_REG_WDT_EN_MASK)
11448 ++ if (dw_wdt_is_enabled(dw_wdt))
11449 + writel(WDOG_COUNTER_RESTART_KICK_VALUE,
11450 + dw_wdt->regs + WDOG_COUNTER_RESTART_REG_OFFSET);
11451 + else
11452 +- writel(WDOG_CONTROL_REG_WDT_EN_MASK,
11453 +- dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
11454 ++ dw_wdt_arm_system_reset(dw_wdt);
11455 +
11456 + /* wait for reset to assert... */
11457 + mdelay(500);
11458 +diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
11459 +index e682bf046e50..88cd2a52d8d3 100644
11460 +--- a/drivers/watchdog/f71808e_wdt.c
11461 ++++ b/drivers/watchdog/f71808e_wdt.c
11462 +@@ -566,7 +566,8 @@ static ssize_t watchdog_write(struct file *file, const char __user *buf,
11463 + char c;
11464 + if (get_user(c, buf + i))
11465 + return -EFAULT;
11466 +- expect_close = (c == 'V');
11467 ++ if (c == 'V')
11468 ++ expect_close = true;
11469 + }
11470 +
11471 + /* Properly order writes across fork()ed processes */
11472 +diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
11473 +index 316c2eb122d2..e8bd9887c566 100644
11474 +--- a/drivers/watchdog/sbsa_gwdt.c
11475 ++++ b/drivers/watchdog/sbsa_gwdt.c
11476 +@@ -50,6 +50,7 @@
11477 + */
11478 +
11479 + #include <linux/io.h>
11480 ++#include <linux/io-64-nonatomic-lo-hi.h>
11481 + #include <linux/interrupt.h>
11482 + #include <linux/module.h>
11483 + #include <linux/moduleparam.h>
11484 +@@ -159,7 +160,7 @@ static unsigned int sbsa_gwdt_get_timeleft(struct watchdog_device *wdd)
11485 + !(readl(gwdt->control_base + SBSA_GWDT_WCS) & SBSA_GWDT_WCS_WS0))
11486 + timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR);
11487 +
11488 +- timeleft += readq(gwdt->control_base + SBSA_GWDT_WCV) -
11489 ++ timeleft += lo_hi_readq(gwdt->control_base + SBSA_GWDT_WCV) -
11490 + arch_counter_get_cntvct();
11491 +
11492 + do_div(timeleft, gwdt->clk);
11493 +diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
11494 +index 1ab4bd11f5f3..762378f1811c 100644
11495 +--- a/drivers/xen/events/events_base.c
11496 ++++ b/drivers/xen/events/events_base.c
11497 +@@ -755,8 +755,8 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
11498 + mutex_unlock(&irq_mapping_update_lock);
11499 + return irq;
11500 + error_irq:
11501 +- for (; i >= 0; i--)
11502 +- __unbind_from_irq(irq + i);
11503 ++ while (nvec--)
11504 ++ __unbind_from_irq(irq + nvec);
11505 + mutex_unlock(&irq_mapping_update_lock);
11506 + return ret;
11507 + }
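
The events_base fix above drives the error-path unwind from the allocation count (nvec) instead of a loop variable that may not reflect what was actually set up. The generic shape, with hypothetical setup/teardown steps:

int setup_one(int i);		/* hypothetical */
void teardown_one(int i);

static int setup_all(int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = setup_one(i);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	while (i--)		/* tears down slots [0, i) in reverse */
		teardown_one(i);
	return ret;
}
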
11508 +diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
11509 +index b209cd44bb8d..169293c25a91 100644
11510 +--- a/drivers/xen/pvcalls-back.c
11511 ++++ b/drivers/xen/pvcalls-back.c
11512 +@@ -424,7 +424,7 @@ static int pvcalls_back_connect(struct xenbus_device *dev,
11513 + sock);
11514 + if (!map) {
11515 + ret = -EFAULT;
11516 +- sock_release(map->sock);
11517 ++ sock_release(sock);
11518 + }
11519 +
11520 + out:
11521 +diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
11522 +index 82fc54f8eb77..f98b8c135db9 100644
11523 +--- a/drivers/xen/swiotlb-xen.c
11524 ++++ b/drivers/xen/swiotlb-xen.c
11525 +@@ -365,7 +365,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
11526 + * physical address */
11527 + phys = xen_bus_to_phys(dev_addr);
11528 +
11529 +- if (((dev_addr + size - 1 > dma_mask)) ||
11530 ++ if (((dev_addr + size - 1 <= dma_mask)) ||
11531 + range_straddles_page_boundary(phys, size))
11532 + xen_destroy_contiguous_region(phys, order);
11533 +
11534 +diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
11535 +index 23e391d3ec01..22863f5f2474 100644
11536 +--- a/drivers/xen/xen-acpi-processor.c
11537 ++++ b/drivers/xen/xen-acpi-processor.c
11538 +@@ -362,9 +362,9 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
11539 + }
11540 + /* There are more ACPI Processor objects than in x2APIC or MADT.
11541 + * This can happen with incorrect ACPI SSDT declerations. */
11542 +- if (acpi_id > nr_acpi_bits) {
11543 +- pr_debug("We only have %u, trying to set %u\n",
11544 +- nr_acpi_bits, acpi_id);
11545 ++ if (acpi_id >= nr_acpi_bits) {
11546 ++ pr_debug("max acpi id %u, trying to set %u\n",
11547 ++ nr_acpi_bits - 1, acpi_id);
11548 + return AE_OK;
11549 + }
11550 + /* OK, There is a ACPI Processor object */
11551 +diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
11552 +index 74888cacd0b0..ec9eb4fba59c 100644
11553 +--- a/drivers/xen/xenbus/xenbus_probe.c
11554 ++++ b/drivers/xen/xenbus/xenbus_probe.c
11555 +@@ -466,8 +466,11 @@ int xenbus_probe_node(struct xen_bus_type *bus,
11556 +
11557 + /* Register with generic device framework. */
11558 + err = device_register(&xendev->dev);
11559 +- if (err)
11560 ++ if (err) {
11561 ++ put_device(&xendev->dev);
11562 ++ xendev = NULL;
11563 + goto fail;
11564 ++ }
11565 +
11566 + return 0;
11567 + fail:
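
The xenbus hunk above follows the driver-core rule that a failed device_register() still leaves a reference held on the struct device; it must be dropped with put_device(), which ends in the ->release() callback, rather than freed directly. Sketch:

#include <linux/device.h>

static int register_child(struct device *dev)
{
	int err = device_register(dev);

	if (err) {
		/*
		 * device_register() holds a reference even on failure;
		 * put_device() drops it and lets ->release() free the
		 * object. Never kfree() the device directly here.
		 */
		put_device(dev);
	}
	return err;
}

Clearing the local pointer afterwards, as the patch does, keeps the shared fail: path from touching the now-released device.
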
11568 +diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
11569 +index cc1b1ac57d61..47728477297e 100644
11570 +--- a/drivers/zorro/zorro.c
11571 ++++ b/drivers/zorro/zorro.c
11572 +@@ -16,6 +16,7 @@
11573 + #include <linux/bitops.h>
11574 + #include <linux/string.h>
11575 + #include <linux/platform_device.h>
11576 ++#include <linux/dma-mapping.h>
11577 + #include <linux/slab.h>
11578 +
11579 + #include <asm/byteorder.h>
11580 +@@ -185,6 +186,17 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
11581 + z->dev.parent = &bus->dev;
11582 + z->dev.bus = &zorro_bus_type;
11583 + z->dev.id = i;
11584 ++ switch (z->rom.er_Type & ERT_TYPEMASK) {
11585 ++ case ERT_ZORROIII:
11586 ++ z->dev.coherent_dma_mask = DMA_BIT_MASK(32);
11587 ++ break;
11588 ++
11589 ++ case ERT_ZORROII:
11590 ++ default:
11591 ++ z->dev.coherent_dma_mask = DMA_BIT_MASK(24);
11592 ++ break;
11593 ++ }
11594 ++ z->dev.dma_mask = &z->dev.coherent_dma_mask;
11595 + }
11596 +
11597 + /* ... then register them */
11598 +diff --git a/fs/affs/namei.c b/fs/affs/namei.c
11599 +index d8aa0ae3d037..1ed0fa4c4d48 100644
11600 +--- a/fs/affs/namei.c
11601 ++++ b/fs/affs/namei.c
11602 +@@ -206,9 +206,10 @@ affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
11603 +
11604 + affs_lock_dir(dir);
11605 + bh = affs_find_entry(dir, dentry);
11606 +- affs_unlock_dir(dir);
11607 +- if (IS_ERR(bh))
11608 ++ if (IS_ERR(bh)) {
11609 ++ affs_unlock_dir(dir);
11610 + return ERR_CAST(bh);
11611 ++ }
11612 + if (bh) {
11613 + u32 ino = bh->b_blocknr;
11614 +
11615 +@@ -222,10 +223,13 @@ affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
11616 + }
11617 + affs_brelse(bh);
11618 + inode = affs_iget(sb, ino);
11619 +- if (IS_ERR(inode))
11620 ++ if (IS_ERR(inode)) {
11621 ++ affs_unlock_dir(dir);
11622 + return ERR_CAST(inode);
11623 ++ }
11624 + }
11625 + d_add(dentry, inode);
11626 ++ affs_unlock_dir(dir);
11627 + return NULL;
11628 + }
11629 +
11630 +diff --git a/fs/aio.c b/fs/aio.c
11631 +index c3ace7833a03..4e23958c2509 100644
11632 +--- a/fs/aio.c
11633 ++++ b/fs/aio.c
11634 +@@ -1087,8 +1087,8 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
11635 +
11636 + ctx = rcu_dereference(table->table[id]);
11637 + if (ctx && ctx->user_id == ctx_id) {
11638 +- percpu_ref_get(&ctx->users);
11639 +- ret = ctx;
11640 ++ if (percpu_ref_tryget_live(&ctx->users))
11641 ++ ret = ctx;
11642 + }
11643 + out:
11644 + rcu_read_unlock();
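
The aio fix above swaps an unconditional percpu_ref_get() for percpu_ref_tryget_live(), which refuses to hand out a reference once the ref has been killed, so the lookup path can no longer resurrect a context already being torn down. Sketch, with an invented struct:

#include <linux/percpu-refcount.h>

struct obj {				/* hypothetical */
	struct percpu_ref users;
};

static struct obj *lookup_get(struct obj *o)
{
	/*
	 * tryget_live() fails once percpu_ref_kill() has run, so a
	 * dying object is never handed back out by the lookup path.
	 */
	if (o && percpu_ref_tryget_live(&o->users))
		return o;
	return NULL;
}
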
11645 +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
11646 +index c44703e21396..588760c49fe2 100644
11647 +--- a/fs/btrfs/ctree.h
11648 ++++ b/fs/btrfs/ctree.h
11649 +@@ -2969,7 +2969,7 @@ static inline void free_fs_info(struct btrfs_fs_info *fs_info)
11650 + kfree(fs_info->super_copy);
11651 + kfree(fs_info->super_for_commit);
11652 + security_free_mnt_opts(&fs_info->security_opts);
11653 +- kfree(fs_info);
11654 ++ kvfree(fs_info);
11655 + }
11656 +
11657 + /* tree mod log functions from ctree.c */
11658 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
11659 +index 4a630aeabb10..27d59cf36341 100644
11660 +--- a/fs/btrfs/disk-io.c
11661 ++++ b/fs/btrfs/disk-io.c
11662 +@@ -1276,7 +1276,7 @@ static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
11663 + if (!writers)
11664 + return ERR_PTR(-ENOMEM);
11665 +
11666 +- ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL);
11667 ++ ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
11668 + if (ret < 0) {
11669 + kfree(writers);
11670 + return ERR_PTR(ret);
11671 +@@ -3896,7 +3896,8 @@ void close_ctree(struct btrfs_fs_info *fs_info)
11672 + btrfs_err(fs_info, "commit super ret %d", ret);
11673 + }
11674 +
11675 +- if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
11676 ++ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) ||
11677 ++ test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state))
11678 + btrfs_error_commit_super(fs_info);
11679 +
11680 + kthread_stop(fs_info->transaction_kthread);
11681 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
11682 +index 1bc62294fe6b..53487102081d 100644
11683 +--- a/fs/btrfs/extent-tree.c
11684 ++++ b/fs/btrfs/extent-tree.c
11685 +@@ -4675,6 +4675,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
11686 + if (wait_for_alloc) {
11687 + mutex_unlock(&fs_info->chunk_mutex);
11688 + wait_for_alloc = 0;
11689 ++ cond_resched();
11690 + goto again;
11691 + }
11692 +
11693 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
11694 +index b0fa3a032143..8ecbac3b862e 100644
11695 +--- a/fs/btrfs/inode.c
11696 ++++ b/fs/btrfs/inode.c
11697 +@@ -6664,8 +6664,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
11698 + goto out_unlock_inode;
11699 + } else {
11700 + btrfs_update_inode(trans, root, inode);
11701 +- unlock_new_inode(inode);
11702 +- d_instantiate(dentry, inode);
11703 ++ d_instantiate_new(dentry, inode);
11704 + }
11705 +
11706 + out_unlock:
11707 +@@ -6742,8 +6741,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
11708 + goto out_unlock_inode;
11709 +
11710 + BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
11711 +- unlock_new_inode(inode);
11712 +- d_instantiate(dentry, inode);
11713 ++ d_instantiate_new(dentry, inode);
11714 +
11715 + out_unlock:
11716 + btrfs_end_transaction(trans);
11717 +@@ -6890,12 +6888,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
11718 + if (err)
11719 + goto out_fail_inode;
11720 +
11721 +- d_instantiate(dentry, inode);
11722 +- /*
11723 +- * mkdir is special. We're unlocking after we call d_instantiate
11724 +- * to avoid a race with nfsd calling d_instantiate.
11725 +- */
11726 +- unlock_new_inode(inode);
11727 ++ d_instantiate_new(dentry, inode);
11728 + drop_on_err = 0;
11729 +
11730 + out_fail:
11731 +@@ -10573,8 +10566,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
11732 + goto out_unlock_inode;
11733 + }
11734 +
11735 +- unlock_new_inode(inode);
11736 +- d_instantiate(dentry, inode);
11737 ++ d_instantiate_new(dentry, inode);
11738 +
11739 + out_unlock:
11740 + btrfs_end_transaction(trans);
11741 +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
11742 +index 2c35717a3470..baf5a4cd7ffc 100644
11743 +--- a/fs/btrfs/send.c
11744 ++++ b/fs/btrfs/send.c
11745 +@@ -5008,6 +5008,9 @@ static int send_hole(struct send_ctx *sctx, u64 end)
11746 + u64 len;
11747 + int ret = 0;
11748 +
11749 ++ if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
11750 ++ return send_update_extent(sctx, offset, end - offset);
11751 ++
11752 + p = fs_path_alloc();
11753 + if (!p)
11754 + return -ENOMEM;
11755 +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
11756 +index e8f5e24325f3..8e3ce81d3f44 100644
11757 +--- a/fs/btrfs/super.c
11758 ++++ b/fs/btrfs/super.c
11759 +@@ -1581,7 +1581,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
11760 + * it for searching for existing supers, so this lets us do that and
11761 + * then open_ctree will properly initialize everything later.
11762 + */
11763 +- fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL);
11764 ++ fs_info = kvzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL);
11765 + if (!fs_info) {
11766 + error = -ENOMEM;
11767 + goto error_sec_opts;
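
Because struct btrfs_fs_info is large, the mount path above now allocates it with kvzalloc(), which may fall back to vmalloc when contiguous pages are scarce; the ctree.h hunk earlier makes the teardown use the matching kvfree(). A hedged userspace analogue of the pairing discipline; both backends are plain calloc()/free() here, only the shape matters:

    #include <stdbool.h>
    #include <stdlib.h>

    struct kv_buf {
        void *ptr;
        bool  vmalloced;        /* which backend produced ptr */
    };

    static struct kv_buf mock_kvzalloc(size_t size)
    {
        struct kv_buf b = { NULL, false };

        b.ptr = size <= 4096 ? calloc(1, size) : NULL;  /* "kmalloc" */
        if (!b.ptr) {
            b.ptr = calloc(1, size);            /* "vmalloc" fallback */
            b.vmalloced = true;
        }
        return b;
    }

    static void mock_kvfree(struct kv_buf *b)
    {
        /* The free side must cope with either backend; freeing a
         * vmalloc'ed buffer with plain kfree() is the bug being fixed. */
        free(b->ptr);
        b->ptr = NULL;
    }

    int main(void)
    {
        struct kv_buf b = mock_kvzalloc(1 << 20);

        mock_kvfree(&b);
        return 0;
    }
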
11768 +diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
11769 +index 0f4ce970d195..578fd045e859 100644
11770 +--- a/fs/btrfs/tests/qgroup-tests.c
11771 ++++ b/fs/btrfs/tests/qgroup-tests.c
11772 +@@ -63,7 +63,7 @@ static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr,
11773 + btrfs_set_extent_generation(leaf, item, 1);
11774 + btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_TREE_BLOCK);
11775 + block_info = (struct btrfs_tree_block_info *)(item + 1);
11776 +- btrfs_set_tree_block_level(leaf, block_info, 1);
11777 ++ btrfs_set_tree_block_level(leaf, block_info, 0);
11778 + iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
11779 + if (parent > 0) {
11780 + btrfs_set_extent_inline_ref_type(leaf, iref,
11781 +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
11782 +index f615d59b0489..27638b96079d 100644
11783 +--- a/fs/btrfs/transaction.c
11784 ++++ b/fs/btrfs/transaction.c
11785 +@@ -319,7 +319,7 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans,
11786 + if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
11787 + root->last_trans < trans->transid) || force) {
11788 + WARN_ON(root == fs_info->extent_root);
11789 +- WARN_ON(root->commit_root != root->node);
11790 ++ WARN_ON(!force && root->commit_root != root->node);
11791 +
11792 + /*
11793 + * see below for IN_TRANS_SETUP usage rules
11794 +@@ -1365,6 +1365,14 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
11795 + if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
11796 + return 0;
11797 +
11798 ++ /*
11799 ++ * Ensure dirty @src will be committed. Otherwise, after the
11800 ++ * coming commit_fs_roots() and switch_commit_roots(), any dirty
11801 ++ * but not recorded root will never be updated again, leaving an
11802 ++ * outdated root item.
11803 ++ */
11804 ++ record_root_in_trans(trans, src, 1);
11805 ++
11806 + /*
11807 + * We are going to commit transaction, see btrfs_commit_transaction()
11808 + * comment for reason locking tree_log_mutex
11809 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
11810 +index 2794f3550db6..fc4c14a72366 100644
11811 +--- a/fs/btrfs/tree-log.c
11812 ++++ b/fs/btrfs/tree-log.c
11813 +@@ -2272,8 +2272,10 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
11814 + nritems = btrfs_header_nritems(path->nodes[0]);
11815 + if (path->slots[0] >= nritems) {
11816 + ret = btrfs_next_leaf(root, path);
11817 +- if (ret)
11818 ++ if (ret == 1)
11819 + break;
11820 ++ else if (ret < 0)
11821 ++ goto out;
11822 + }
11823 + btrfs_item_key_to_cpu(path->nodes[0], &found_key,
11824 + path->slots[0]);
11825 +@@ -2377,13 +2379,41 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
11826 + if (ret)
11827 + break;
11828 +
11829 +- /* for regular files, make sure corresponding
11830 +- * orphan item exist. extents past the new EOF
11831 +- * will be truncated later by orphan cleanup.
11832 ++ /*
11833 ++ * Before replaying extents, truncate the inode to its
11834 ++ * size. We need to do it now and not after log replay
11835 ++ * because before an fsync we can have prealloc extents
11836 ++ * added beyond the inode's i_size. If we did it after,
11837 ++ * through orphan cleanup for example, we would drop
11838 ++ * those prealloc extents just after replaying them.
11839 + */
11840 + if (S_ISREG(mode)) {
11841 +- ret = insert_orphan_item(wc->trans, root,
11842 +- key.objectid);
11843 ++ struct inode *inode;
11844 ++ u64 from;
11845 ++
11846 ++ inode = read_one_inode(root, key.objectid);
11847 ++ if (!inode) {
11848 ++ ret = -EIO;
11849 ++ break;
11850 ++ }
11851 ++ from = ALIGN(i_size_read(inode),
11852 ++ root->fs_info->sectorsize);
11853 ++ ret = btrfs_drop_extents(wc->trans, root, inode,
11854 ++ from, (u64)-1, 1);
11855 ++ /*
11856 ++ * If the nlink count is zero here, the iput
11857 ++ * will free the inode. We bump it to make
11858 ++ * sure it doesn't get freed until the link
11859 ++ * count fixup is done.
11860 ++ */
11861 ++ if (!ret) {
11862 ++ if (inode->i_nlink == 0)
11863 ++ inc_nlink(inode);
11864 ++ /* Update link count and nbytes. */
11865 ++ ret = btrfs_update_inode(wc->trans,
11866 ++ root, inode);
11867 ++ }
11868 ++ iput(inode);
11869 + if (ret)
11870 + break;
11871 + }
11872 +@@ -3432,8 +3462,11 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
11873 + * from this directory and from this transaction
11874 + */
11875 + ret = btrfs_next_leaf(root, path);
11876 +- if (ret == 1) {
11877 +- last_offset = (u64)-1;
11878 ++ if (ret) {
11879 ++ if (ret == 1)
11880 ++ last_offset = (u64)-1;
11881 ++ else
11882 ++ err = ret;
11883 + goto done;
11884 + }
11885 + btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
11886 +@@ -3885,6 +3918,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
11887 + ASSERT(ret == 0);
11888 + src = src_path->nodes[0];
11889 + i = 0;
11890 ++ need_find_last_extent = true;
11891 + }
11892 +
11893 + btrfs_item_key_to_cpu(src, &key, i);
11894 +@@ -4234,6 +4268,31 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
11895 + num++;
11896 + }
11897 +
11898 ++ /*
11899 ++ * Add all prealloc extents beyond the inode's i_size to make sure we
11900 ++ * don't lose them after doing a fast fsync and replaying the log.
11901 ++ */
11902 ++ if (inode->flags & BTRFS_INODE_PREALLOC) {
11903 ++ struct rb_node *node;
11904 ++
11905 ++ for (node = rb_last(&tree->map); node; node = rb_prev(node)) {
11906 ++ em = rb_entry(node, struct extent_map, rb_node);
11907 ++ if (em->start < i_size_read(&inode->vfs_inode))
11908 ++ break;
11909 ++ if (!list_empty(&em->list))
11910 ++ continue;
11911 ++ /* Same as above loop. */
11912 ++ if (++num > 32768) {
11913 ++ list_del_init(&tree->modified_extents);
11914 ++ ret = -EFBIG;
11915 ++ goto process;
11916 ++ }
11917 ++ refcount_inc(&em->refs);
11918 ++ set_bit(EXTENT_FLAG_LOGGING, &em->flags);
11919 ++ list_add_tail(&em->list, &extents);
11920 ++ }
11921 ++ }
11922 ++
11923 + list_sort(NULL, &extents, extent_cmp);
11924 + btrfs_get_logged_extents(inode, logged_list, logged_start, logged_end);
11925 + /*
11926 +@@ -5888,7 +5947,7 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
11927 + * this will force the logging code to walk the dentry chain
11928 + * up for the file
11929 + */
11930 +- if (S_ISREG(inode->vfs_inode.i_mode))
11931 ++ if (!S_ISDIR(inode->vfs_inode.i_mode))
11932 + inode->last_unlink_trans = trans->transid;
11933 +
11934 + /*
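
Two of the tree-log hunks above share one shape: btrfs_next_leaf() returns <0 on error, 0 when it advanced to another leaf, and 1 when there are no more leaves, and the old callers collapsed "nonzero" into "done", silently eating I/O errors. A small runnable sketch of handling such a tri-state return correctly; next_item() is a stand-in iterator, not the btrfs API:

    #include <stdio.h>

    /* <0: error, 0: produced an item, 1: iteration exhausted */
    static int next_item(int *cursor, int nitems)
    {
        if (nitems < 0)
            return -5;          /* -EIO stand-in */
        if (*cursor >= nitems)
            return 1;
        (*cursor)++;
        return 0;
    }

    static int walk(int nitems)
    {
        int cursor = 0, ret;

        for (;;) {
            ret = next_item(&cursor, nitems);
            if (ret == 1)
                break;          /* clean end of iteration */
            if (ret < 0)
                return ret;     /* propagate, don't treat as "done" */
            /* ... process the item ... */
        }
        return 0;
    }

    int main(void)
    {
        return walk(3) == 0 ? 0 : 1;
    }
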
11935 +diff --git a/fs/ceph/super.c b/fs/ceph/super.c
11936 +index e4082afedcb1..48ffe720bf09 100644
11937 +--- a/fs/ceph/super.c
11938 ++++ b/fs/ceph/super.c
11939 +@@ -224,6 +224,7 @@ static int parse_fsopt_token(char *c, void *private)
11940 + return -ENOMEM;
11941 + break;
11942 + case Opt_mds_namespace:
11943 ++ kfree(fsopt->mds_namespace);
11944 + fsopt->mds_namespace = kstrndup(argstr[0].from,
11945 + argstr[0].to-argstr[0].from,
11946 + GFP_KERNEL);
11947 +@@ -231,6 +232,7 @@ static int parse_fsopt_token(char *c, void *private)
11948 + return -ENOMEM;
11949 + break;
11950 + case Opt_fscache_uniq:
11951 ++ kfree(fsopt->fscache_uniq);
11952 + fsopt->fscache_uniq = kstrndup(argstr[0].from,
11953 + argstr[0].to-argstr[0].from,
11954 + GFP_KERNEL);
11955 +@@ -710,14 +712,17 @@ static int __init init_caches(void)
11956 + goto bad_dentry;
11957 +
11958 + ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD);
11959 +-
11960 + if (!ceph_file_cachep)
11961 + goto bad_file;
11962 +
11963 +- if ((error = ceph_fscache_register()))
11964 +- goto bad_file;
11965 ++ error = ceph_fscache_register();
11966 ++ if (error)
11967 ++ goto bad_fscache;
11968 +
11969 + return 0;
11970 ++
11971 ++bad_fscache:
11972 ++ kmem_cache_destroy(ceph_file_cachep);
11973 + bad_file:
11974 + kmem_cache_destroy(ceph_dentry_cachep);
11975 + bad_dentry:
11976 +@@ -835,7 +840,6 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
11977 + int err;
11978 + unsigned long started = jiffies; /* note the start time */
11979 + struct dentry *root;
11980 +- int first = 0; /* first vfsmount for this super_block */
11981 +
11982 + dout("mount start %p\n", fsc);
11983 + mutex_lock(&fsc->client->mount_mutex);
11984 +@@ -860,17 +864,17 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
11985 + path = fsc->mount_options->server_path + 1;
11986 + dout("mount opening path %s\n", path);
11987 + }
11988 ++
11989 ++ err = ceph_fs_debugfs_init(fsc);
11990 ++ if (err < 0)
11991 ++ goto out;
11992 ++
11993 + root = open_root_dentry(fsc, path, started);
11994 + if (IS_ERR(root)) {
11995 + err = PTR_ERR(root);
11996 + goto out;
11997 + }
11998 + fsc->sb->s_root = dget(root);
11999 +- first = 1;
12000 +-
12001 +- err = ceph_fs_debugfs_init(fsc);
12002 +- if (err < 0)
12003 +- goto fail;
12004 + } else {
12005 + root = dget(fsc->sb->s_root);
12006 + }
12007 +@@ -880,11 +884,6 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
12008 + mutex_unlock(&fsc->client->mount_mutex);
12009 + return root;
12010 +
12011 +-fail:
12012 +- if (first) {
12013 +- dput(fsc->sb->s_root);
12014 +- fsc->sb->s_root = NULL;
12015 +- }
12016 + out:
12017 + mutex_unlock(&fsc->client->mount_mutex);
12018 + return ERR_PTR(err);
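
parse_fsopt_token() can see Opt_mds_namespace or Opt_fscache_uniq more than once on a mount command line; freeing the previous string before the new kstrndup() plugs the leak, and kfree(NULL) being a no-op keeps the first pass safe. A tiny userspace sketch of the same replace-a-string-option pattern:

    #define _POSIX_C_SOURCE 200809L
    #include <stdlib.h>
    #include <string.h>

    /* Replace *slot with a copy of val; safe on repeated options. */
    static int set_string_opt(char **slot, const char *val, size_t len)
    {
        free(*slot);            /* free(NULL) is a no-op, like kfree() */
        *slot = strndup(val, len);
        return *slot ? 0 : -12; /* -ENOMEM stand-in */
    }

    int main(void)
    {
        char *ns = NULL;

        set_string_opt(&ns, "first", 5);
        set_string_opt(&ns, "second", 6);   /* no leak of "first" */
        free(ns);
        return 0;
    }
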
12019 +diff --git a/fs/dcache.c b/fs/dcache.c
12020 +index c28b9c91b5cb..5f31a93150d1 100644
12021 +--- a/fs/dcache.c
12022 ++++ b/fs/dcache.c
12023 +@@ -1867,6 +1867,28 @@ void d_instantiate(struct dentry *entry, struct inode * inode)
12024 + }
12025 + EXPORT_SYMBOL(d_instantiate);
12026 +
12027 ++/*
12028 ++ * This should be equivalent to d_instantiate() + unlock_new_inode(),
12029 ++ * with the lockdep-related part of unlock_new_inode() done before
12030 ++ * anything else. Use that instead of open-coding d_instantiate()/
12031 ++ * unlock_new_inode() combinations.
12032 ++ */
12033 ++void d_instantiate_new(struct dentry *entry, struct inode *inode)
12034 ++{
12035 ++ BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
12036 ++ BUG_ON(!inode);
12037 ++ lockdep_annotate_inode_mutex_key(inode);
12038 ++ security_d_instantiate(entry, inode);
12039 ++ spin_lock(&inode->i_lock);
12040 ++ __d_instantiate(entry, inode);
12041 ++ WARN_ON(!(inode->i_state & I_NEW));
12042 ++ inode->i_state &= ~I_NEW;
12043 ++ smp_mb();
12044 ++ wake_up_bit(&inode->i_state, __I_NEW);
12045 ++ spin_unlock(&inode->i_lock);
12046 ++}
12047 ++EXPORT_SYMBOL(d_instantiate_new);
12048 ++
12049 + /**
12050 + * d_instantiate_no_diralias - instantiate a non-aliased dentry
12051 + * @entry: dentry to complete
12052 +@@ -2460,7 +2482,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
12053 +
12054 + retry:
12055 + rcu_read_lock();
12056 +- seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1;
12057 ++ seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
12058 + r_seq = read_seqbegin(&rename_lock);
12059 + dentry = __d_lookup_rcu(parent, name, &d_seq);
12060 + if (unlikely(dentry)) {
12061 +@@ -2481,8 +2503,14 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
12062 + rcu_read_unlock();
12063 + goto retry;
12064 + }
12065 ++
12066 ++ if (unlikely(seq & 1)) {
12067 ++ rcu_read_unlock();
12068 ++ goto retry;
12069 ++ }
12070 ++
12071 + hlist_bl_lock(b);
12072 +- if (unlikely(parent->d_inode->i_dir_seq != seq)) {
12073 ++ if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
12074 + hlist_bl_unlock(b);
12075 + rcu_read_unlock();
12076 + goto retry;
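
The new d_instantiate_new() above fuses d_instantiate() with unlock_new_inode(): the dentry is published and I_NEW is cleared under the same i_lock hold, with the wake-up ordered after the state change, so filesystems can no longer get the two-call sequence wrong; all the namei hunks later in this patch convert to it. A hedged pthread sketch of that publish-then-wake shape, using a mock inode rather than the VFS types:

    #include <pthread.h>
    #include <stdbool.h>

    #define MOCK_I_NEW 0x1u

    struct mock_inode {
        pthread_mutex_t lock;
        pthread_cond_t  new_cleared;
        unsigned int    state;
        bool            published;
    };

    /* Publish and clear NEW in one critical section. */
    static void mock_instantiate_new(struct mock_inode *in)
    {
        pthread_mutex_lock(&in->lock);
        in->published = true;               /* d_instantiate() analogue */
        in->state &= ~MOCK_I_NEW;           /* unlock_new_inode() analogue */
        pthread_cond_broadcast(&in->new_cleared);   /* wake_up_bit() */
        pthread_mutex_unlock(&in->lock);
    }

    /* A concurrent lookup waits for NEW to clear, like wait_on_inode(). */
    static void mock_wait_on_new(struct mock_inode *in)
    {
        pthread_mutex_lock(&in->lock);
        while (in->state & MOCK_I_NEW)
            pthread_cond_wait(&in->new_cleared, &in->lock);
        pthread_mutex_unlock(&in->lock);
    }
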
12077 +diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
12078 +index efc2db42d175..bda65a730790 100644
12079 +--- a/fs/ecryptfs/inode.c
12080 ++++ b/fs/ecryptfs/inode.c
12081 +@@ -284,8 +284,7 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
12082 + iget_failed(ecryptfs_inode);
12083 + goto out;
12084 + }
12085 +- unlock_new_inode(ecryptfs_inode);
12086 +- d_instantiate(ecryptfs_dentry, ecryptfs_inode);
12087 ++ d_instantiate_new(ecryptfs_dentry, ecryptfs_inode);
12088 + out:
12089 + return rc;
12090 + }
12091 +diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
12092 +index e078075dc66f..aa6ec191cac0 100644
12093 +--- a/fs/ext2/namei.c
12094 ++++ b/fs/ext2/namei.c
12095 +@@ -41,8 +41,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
12096 + {
12097 + int err = ext2_add_link(dentry, inode);
12098 + if (!err) {
12099 +- unlock_new_inode(inode);
12100 +- d_instantiate(dentry, inode);
12101 ++ d_instantiate_new(dentry, inode);
12102 + return 0;
12103 + }
12104 + inode_dec_link_count(inode);
12105 +@@ -269,8 +268,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
12106 + if (err)
12107 + goto out_fail;
12108 +
12109 +- unlock_new_inode(inode);
12110 +- d_instantiate(dentry, inode);
12111 ++ d_instantiate_new(dentry, inode);
12112 + out:
12113 + return err;
12114 +
12115 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
12116 +index fccf295fcb03..6747861f9b70 100644
12117 +--- a/fs/ext4/namei.c
12118 ++++ b/fs/ext4/namei.c
12119 +@@ -2420,8 +2420,7 @@ static int ext4_add_nondir(handle_t *handle,
12120 + int err = ext4_add_entry(handle, dentry, inode);
12121 + if (!err) {
12122 + ext4_mark_inode_dirty(handle, inode);
12123 +- unlock_new_inode(inode);
12124 +- d_instantiate(dentry, inode);
12125 ++ d_instantiate_new(dentry, inode);
12126 + return 0;
12127 + }
12128 + drop_nlink(inode);
12129 +@@ -2660,8 +2659,7 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
12130 + err = ext4_mark_inode_dirty(handle, dir);
12131 + if (err)
12132 + goto out_clear_inode;
12133 +- unlock_new_inode(inode);
12134 +- d_instantiate(dentry, inode);
12135 ++ d_instantiate_new(dentry, inode);
12136 + if (IS_DIRSYNC(dir))
12137 + ext4_handle_sync(handle);
12138 +
12139 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
12140 +index 9102ae7709d3..ec74d06fa24a 100644
12141 +--- a/fs/ext4/super.c
12142 ++++ b/fs/ext4/super.c
12143 +@@ -3663,6 +3663,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
12144 + ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
12145 + "using the ext4 subsystem");
12146 + else {
12147 ++ /*
12148 ++ * If we're probing, be silent if this looks like
12149 ++ * it's actually an ext[34] filesystem.
12150 ++ */
12151 ++ if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
12152 ++ goto failed_mount;
12153 + ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
12154 + "to feature incompatibilities");
12155 + goto failed_mount;
12156 +@@ -3674,6 +3680,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
12157 + ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
12158 + "using the ext4 subsystem");
12159 + else {
12160 ++ /*
12161 ++ * If we're probing, be silent if this looks like
12162 ++ * it's actually an ext4 filesystem.
12163 ++ */
12164 ++ if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
12165 ++ goto failed_mount;
12166 + ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
12167 + "to feature incompatibilities");
12168 + goto failed_mount;
12169 +diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
12170 +index 04fe1df052b2..c282e21f5b5e 100644
12171 +--- a/fs/f2fs/checkpoint.c
12172 ++++ b/fs/f2fs/checkpoint.c
12173 +@@ -1108,6 +1108,8 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
12174 +
12175 + if (cpc->reason & CP_TRIMMED)
12176 + __set_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
12177 ++ else
12178 ++ __clear_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
12179 +
12180 + if (cpc->reason & CP_UMOUNT)
12181 + __set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
12182 +diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
12183 +index ff2352a0ed15..aff6c2ed1c02 100644
12184 +--- a/fs/f2fs/extent_cache.c
12185 ++++ b/fs/f2fs/extent_cache.c
12186 +@@ -706,6 +706,9 @@ void f2fs_drop_extent_tree(struct inode *inode)
12187 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
12188 + struct extent_tree *et = F2FS_I(inode)->extent_tree;
12189 +
12190 ++ if (!f2fs_may_extent_tree(inode))
12191 ++ return;
12192 ++
12193 + set_inode_flag(inode, FI_NO_EXTENT);
12194 +
12195 + write_lock(&et->lock);
12196 +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
12197 +index b8372095ba0a..29c5f799890c 100644
12198 +--- a/fs/f2fs/file.c
12199 ++++ b/fs/f2fs/file.c
12200 +@@ -1321,8 +1321,12 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
12201 + }
12202 +
12203 + out:
12204 +- if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
12205 +- f2fs_i_size_write(inode, new_size);
12206 ++ if (new_size > i_size_read(inode)) {
12207 ++ if (mode & FALLOC_FL_KEEP_SIZE)
12208 ++ file_set_keep_isize(inode);
12209 ++ else
12210 ++ f2fs_i_size_write(inode, new_size);
12211 ++ }
12212 + out_sem:
12213 + up_write(&F2FS_I(inode)->i_mmap_sem);
12214 +
12215 +diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
12216 +index a4dab98c4b7b..b80e7db3b55b 100644
12217 +--- a/fs/f2fs/namei.c
12218 ++++ b/fs/f2fs/namei.c
12219 +@@ -201,8 +201,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
12220 +
12221 + alloc_nid_done(sbi, ino);
12222 +
12223 +- d_instantiate(dentry, inode);
12224 +- unlock_new_inode(inode);
12225 ++ d_instantiate_new(dentry, inode);
12226 +
12227 + if (IS_DIRSYNC(dir))
12228 + f2fs_sync_fs(sbi->sb, 1);
12229 +@@ -529,8 +528,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
12230 + err = page_symlink(inode, disk_link.name, disk_link.len);
12231 +
12232 + err_out:
12233 +- d_instantiate(dentry, inode);
12234 +- unlock_new_inode(inode);
12235 ++ d_instantiate_new(dentry, inode);
12236 +
12237 + /*
12238 + * Let's flush symlink data in order to avoid broken symlink as much as
12239 +@@ -588,8 +586,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
12240 +
12241 + alloc_nid_done(sbi, inode->i_ino);
12242 +
12243 +- d_instantiate(dentry, inode);
12244 +- unlock_new_inode(inode);
12245 ++ d_instantiate_new(dentry, inode);
12246 +
12247 + if (IS_DIRSYNC(dir))
12248 + f2fs_sync_fs(sbi->sb, 1);
12249 +@@ -637,8 +634,7 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
12250 +
12251 + alloc_nid_done(sbi, inode->i_ino);
12252 +
12253 +- d_instantiate(dentry, inode);
12254 +- unlock_new_inode(inode);
12255 ++ d_instantiate_new(dentry, inode);
12256 +
12257 + if (IS_DIRSYNC(dir))
12258 + f2fs_sync_fs(sbi->sb, 1);
12259 +diff --git a/fs/fscache/page.c b/fs/fscache/page.c
12260 +index 0ad3fd3ad0b4..ae9470f3643c 100644
12261 +--- a/fs/fscache/page.c
12262 ++++ b/fs/fscache/page.c
12263 +@@ -776,6 +776,7 @@ static void fscache_write_op(struct fscache_operation *_op)
12264 +
12265 + _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
12266 +
12267 ++again:
12268 + spin_lock(&object->lock);
12269 + cookie = object->cookie;
12270 +
12271 +@@ -816,10 +817,6 @@ static void fscache_write_op(struct fscache_operation *_op)
12272 + goto superseded;
12273 + page = results[0];
12274 + _debug("gang %d [%lx]", n, page->index);
12275 +- if (page->index >= op->store_limit) {
12276 +- fscache_stat(&fscache_n_store_pages_over_limit);
12277 +- goto superseded;
12278 +- }
12279 +
12280 + radix_tree_tag_set(&cookie->stores, page->index,
12281 + FSCACHE_COOKIE_STORING_TAG);
12282 +@@ -829,6 +826,9 @@ static void fscache_write_op(struct fscache_operation *_op)
12283 + spin_unlock(&cookie->stores_lock);
12284 + spin_unlock(&object->lock);
12285 +
12286 ++ if (page->index >= op->store_limit)
12287 ++ goto discard_page;
12288 ++
12289 + fscache_stat(&fscache_n_store_pages);
12290 + fscache_stat(&fscache_n_cop_write_page);
12291 + ret = object->cache->ops->write_page(op, page);
12292 +@@ -844,6 +844,11 @@ static void fscache_write_op(struct fscache_operation *_op)
12293 + _leave("");
12294 + return;
12295 +
12296 ++discard_page:
12297 ++ fscache_stat(&fscache_n_store_pages_over_limit);
12298 ++ fscache_end_page_write(object, page);
12299 ++ goto again;
12300 ++
12301 + superseded:
12302 + /* this writer is going away and there aren't any more things to
12303 + * write */
12304 +diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
12305 +index 2a29cf3371f6..10f0fac031f4 100644
12306 +--- a/fs/gfs2/file.c
12307 ++++ b/fs/gfs2/file.c
12308 +@@ -803,7 +803,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
12309 + struct gfs2_inode *ip = GFS2_I(inode);
12310 + struct gfs2_alloc_parms ap = { .aflags = 0, };
12311 + unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
12312 +- loff_t bytes, max_bytes, max_blks = UINT_MAX;
12313 ++ loff_t bytes, max_bytes, max_blks;
12314 + int error;
12315 + const loff_t pos = offset;
12316 + const loff_t count = len;
12317 +@@ -855,7 +855,8 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
12318 + return error;
12319 + /* ap.allowed tells us how many blocks quota will allow
12320 + * us to write. Check if this reduces max_blks */
12321 +- if (ap.allowed && ap.allowed < max_blks)
12322 ++ max_blks = UINT_MAX;
12323 ++ if (ap.allowed)
12324 + max_blks = ap.allowed;
12325 +
12326 + error = gfs2_inplace_reserve(ip, &ap);
12327 +diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
12328 +index 5e47c935a515..836f29480be6 100644
12329 +--- a/fs/gfs2/quota.h
12330 ++++ b/fs/gfs2/quota.h
12331 +@@ -45,6 +45,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
12332 + {
12333 + struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
12334 + int ret;
12335 ++
12336 ++ ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
12337 + if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
12338 + return 0;
12339 + ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
12340 +diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
12341 +index 0a754f38462e..e5a6deb38e1e 100644
12342 +--- a/fs/jffs2/dir.c
12343 ++++ b/fs/jffs2/dir.c
12344 +@@ -209,8 +209,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry,
12345 + __func__, inode->i_ino, inode->i_mode, inode->i_nlink,
12346 + f->inocache->pino_nlink, inode->i_mapping->nrpages);
12347 +
12348 +- unlock_new_inode(inode);
12349 +- d_instantiate(dentry, inode);
12350 ++ d_instantiate_new(dentry, inode);
12351 + return 0;
12352 +
12353 + fail:
12354 +@@ -430,8 +429,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
12355 + mutex_unlock(&dir_f->sem);
12356 + jffs2_complete_reservation(c);
12357 +
12358 +- unlock_new_inode(inode);
12359 +- d_instantiate(dentry, inode);
12360 ++ d_instantiate_new(dentry, inode);
12361 + return 0;
12362 +
12363 + fail:
12364 +@@ -575,8 +573,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, umode_t mode
12365 + mutex_unlock(&dir_f->sem);
12366 + jffs2_complete_reservation(c);
12367 +
12368 +- unlock_new_inode(inode);
12369 +- d_instantiate(dentry, inode);
12370 ++ d_instantiate_new(dentry, inode);
12371 + return 0;
12372 +
12373 + fail:
12374 +@@ -747,8 +744,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode
12375 + mutex_unlock(&dir_f->sem);
12376 + jffs2_complete_reservation(c);
12377 +
12378 +- unlock_new_inode(inode);
12379 +- d_instantiate(dentry, inode);
12380 ++ d_instantiate_new(dentry, inode);
12381 + return 0;
12382 +
12383 + fail:
12384 +diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
12385 +index b41596d71858..56c3fcbfe80e 100644
12386 +--- a/fs/jfs/namei.c
12387 ++++ b/fs/jfs/namei.c
12388 +@@ -178,8 +178,7 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode,
12389 + unlock_new_inode(ip);
12390 + iput(ip);
12391 + } else {
12392 +- unlock_new_inode(ip);
12393 +- d_instantiate(dentry, ip);
12394 ++ d_instantiate_new(dentry, ip);
12395 + }
12396 +
12397 + out2:
12398 +@@ -313,8 +312,7 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
12399 + unlock_new_inode(ip);
12400 + iput(ip);
12401 + } else {
12402 +- unlock_new_inode(ip);
12403 +- d_instantiate(dentry, ip);
12404 ++ d_instantiate_new(dentry, ip);
12405 + }
12406 +
12407 + out2:
12408 +@@ -1059,8 +1057,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
12409 + unlock_new_inode(ip);
12410 + iput(ip);
12411 + } else {
12412 +- unlock_new_inode(ip);
12413 +- d_instantiate(dentry, ip);
12414 ++ d_instantiate_new(dentry, ip);
12415 + }
12416 +
12417 + out2:
12418 +@@ -1447,8 +1444,7 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry,
12419 + unlock_new_inode(ip);
12420 + iput(ip);
12421 + } else {
12422 +- unlock_new_inode(ip);
12423 +- d_instantiate(dentry, ip);
12424 ++ d_instantiate_new(dentry, ip);
12425 + }
12426 +
12427 + out1:
12428 +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
12429 +index e9bea90dc017..fb85d04fdc4c 100644
12430 +--- a/fs/nfs/nfs4client.c
12431 ++++ b/fs/nfs/nfs4client.c
12432 +@@ -858,8 +858,10 @@ static int nfs4_set_client(struct nfs_server *server,
12433 + if (IS_ERR(clp))
12434 + return PTR_ERR(clp);
12435 +
12436 +- if (server->nfs_client == clp)
12437 ++ if (server->nfs_client == clp) {
12438 ++ nfs_put_client(clp);
12439 + return -ELOOP;
12440 ++ }
12441 +
12442 + /*
12443 + * Query for the lease time on clientid setup or renewal
12444 +@@ -1217,11 +1219,11 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname,
12445 + clp->cl_proto, clnt->cl_timeout,
12446 + clp->cl_minorversion, net);
12447 + clear_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status);
12448 +- nfs_put_client(clp);
12449 + if (error != 0) {
12450 + nfs_server_insert_lists(server);
12451 + return error;
12452 + }
12453 ++ nfs_put_client(clp);
12454 +
12455 + if (server->nfs_client->cl_hostname == NULL)
12456 + server->nfs_client->cl_hostname = kstrdup(hostname, GFP_KERNEL);
12457 +diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
12458 +index 515d13c196da..1ba4719de70d 100644
12459 +--- a/fs/nilfs2/namei.c
12460 ++++ b/fs/nilfs2/namei.c
12461 +@@ -46,8 +46,7 @@ static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode)
12462 + int err = nilfs_add_link(dentry, inode);
12463 +
12464 + if (!err) {
12465 +- d_instantiate(dentry, inode);
12466 +- unlock_new_inode(inode);
12467 ++ d_instantiate_new(dentry, inode);
12468 + return 0;
12469 + }
12470 + inode_dec_link_count(inode);
12471 +@@ -243,8 +242,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
12472 + goto out_fail;
12473 +
12474 + nilfs_mark_inode_dirty(inode);
12475 +- d_instantiate(dentry, inode);
12476 +- unlock_new_inode(inode);
12477 ++ d_instantiate_new(dentry, inode);
12478 + out:
12479 + if (!err)
12480 + err = nilfs_transaction_commit(dir->i_sb);
12481 +diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
12482 +index a2b19fbdcf46..6099a8034b17 100644
12483 +--- a/fs/ocfs2/dlm/dlmdomain.c
12484 ++++ b/fs/ocfs2/dlm/dlmdomain.c
12485 +@@ -676,20 +676,6 @@ static void dlm_leave_domain(struct dlm_ctxt *dlm)
12486 + spin_unlock(&dlm->spinlock);
12487 + }
12488 +
12489 +-int dlm_shutting_down(struct dlm_ctxt *dlm)
12490 +-{
12491 +- int ret = 0;
12492 +-
12493 +- spin_lock(&dlm_domain_lock);
12494 +-
12495 +- if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
12496 +- ret = 1;
12497 +-
12498 +- spin_unlock(&dlm_domain_lock);
12499 +-
12500 +- return ret;
12501 +-}
12502 +-
12503 + void dlm_unregister_domain(struct dlm_ctxt *dlm)
12504 + {
12505 + int leave = 0;
12506 +diff --git a/fs/ocfs2/dlm/dlmdomain.h b/fs/ocfs2/dlm/dlmdomain.h
12507 +index fd6122a38dbd..8a9281411c18 100644
12508 +--- a/fs/ocfs2/dlm/dlmdomain.h
12509 ++++ b/fs/ocfs2/dlm/dlmdomain.h
12510 +@@ -28,7 +28,30 @@
12511 + extern spinlock_t dlm_domain_lock;
12512 + extern struct list_head dlm_domains;
12513 +
12514 +-int dlm_shutting_down(struct dlm_ctxt *dlm);
12515 ++static inline int dlm_joined(struct dlm_ctxt *dlm)
12516 ++{
12517 ++ int ret = 0;
12518 ++
12519 ++ spin_lock(&dlm_domain_lock);
12520 ++ if (dlm->dlm_state == DLM_CTXT_JOINED)
12521 ++ ret = 1;
12522 ++ spin_unlock(&dlm_domain_lock);
12523 ++
12524 ++ return ret;
12525 ++}
12526 ++
12527 ++static inline int dlm_shutting_down(struct dlm_ctxt *dlm)
12528 ++{
12529 ++ int ret = 0;
12530 ++
12531 ++ spin_lock(&dlm_domain_lock);
12532 ++ if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
12533 ++ ret = 1;
12534 ++ spin_unlock(&dlm_domain_lock);
12535 ++
12536 ++ return ret;
12537 ++}
12538 ++
12539 + void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
12540 + int node_num);
12541 +
12542 +diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
12543 +index ec8f75813beb..505ab4281f36 100644
12544 +--- a/fs/ocfs2/dlm/dlmrecovery.c
12545 ++++ b/fs/ocfs2/dlm/dlmrecovery.c
12546 +@@ -1378,6 +1378,15 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
12547 + if (!dlm_grab(dlm))
12548 + return -EINVAL;
12549 +
12550 ++ if (!dlm_joined(dlm)) {
12551 ++ mlog(ML_ERROR, "Domain %s not joined! "
12552 ++ "lockres %.*s, master %u\n",
12553 ++ dlm->name, mres->lockname_len,
12554 ++ mres->lockname, mres->master);
12555 ++ dlm_put(dlm);
12556 ++ return -EINVAL;
12557 ++ }
12558 ++
12559 + BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
12560 +
12561 + real_master = mres->master;
12562 +diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
12563 +index 7e9e5d0ea3bc..f8f3c73d2664 100644
12564 +--- a/fs/orangefs/namei.c
12565 ++++ b/fs/orangefs/namei.c
12566 +@@ -71,8 +71,7 @@ static int orangefs_create(struct inode *dir,
12567 + get_khandle_from_ino(inode),
12568 + dentry);
12569 +
12570 +- d_instantiate(dentry, inode);
12571 +- unlock_new_inode(inode);
12572 ++ d_instantiate_new(dentry, inode);
12573 + orangefs_set_timeout(dentry);
12574 + ORANGEFS_I(inode)->getattr_time = jiffies - 1;
12575 + ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS;
12576 +@@ -320,8 +319,7 @@ static int orangefs_symlink(struct inode *dir,
12577 + "Assigned symlink inode new number of %pU\n",
12578 + get_khandle_from_ino(inode));
12579 +
12580 +- d_instantiate(dentry, inode);
12581 +- unlock_new_inode(inode);
12582 ++ d_instantiate_new(dentry, inode);
12583 + orangefs_set_timeout(dentry);
12584 + ORANGEFS_I(inode)->getattr_time = jiffies - 1;
12585 + ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS;
12586 +@@ -385,8 +383,7 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
12587 + "Assigned dir inode new number of %pU\n",
12588 + get_khandle_from_ino(inode));
12589 +
12590 +- d_instantiate(dentry, inode);
12591 +- unlock_new_inode(inode);
12592 ++ d_instantiate_new(dentry, inode);
12593 + orangefs_set_timeout(dentry);
12594 + ORANGEFS_I(inode)->getattr_time = jiffies - 1;
12595 + ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS;
12596 +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
12597 +index c5cbbdff3c3d..82ac5f682b73 100644
12598 +--- a/fs/proc/proc_sysctl.c
12599 ++++ b/fs/proc/proc_sysctl.c
12600 +@@ -707,7 +707,10 @@ static bool proc_sys_link_fill_cache(struct file *file,
12601 + struct ctl_table *table)
12602 + {
12603 + bool ret = true;
12604 ++
12605 + head = sysctl_head_grab(head);
12606 ++ if (IS_ERR(head))
12607 ++ return false;
12608 +
12609 + if (S_ISLNK(table->mode)) {
12610 + /* It is not an error if we can not follow the link ignore it */
12611 +diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
12612 +index bd39a998843d..5089dac02660 100644
12613 +--- a/fs/reiserfs/namei.c
12614 ++++ b/fs/reiserfs/namei.c
12615 +@@ -687,8 +687,7 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mod
12616 + reiserfs_update_inode_transaction(inode);
12617 + reiserfs_update_inode_transaction(dir);
12618 +
12619 +- unlock_new_inode(inode);
12620 +- d_instantiate(dentry, inode);
12621 ++ d_instantiate_new(dentry, inode);
12622 + retval = journal_end(&th);
12623 +
12624 + out_failed:
12625 +@@ -771,8 +770,7 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode
12626 + goto out_failed;
12627 + }
12628 +
12629 +- unlock_new_inode(inode);
12630 +- d_instantiate(dentry, inode);
12631 ++ d_instantiate_new(dentry, inode);
12632 + retval = journal_end(&th);
12633 +
12634 + out_failed:
12635 +@@ -871,8 +869,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
12636 + /* the above add_entry did not update dir's stat data */
12637 + reiserfs_update_sd(&th, dir);
12638 +
12639 +- unlock_new_inode(inode);
12640 +- d_instantiate(dentry, inode);
12641 ++ d_instantiate_new(dentry, inode);
12642 + retval = journal_end(&th);
12643 + out_failed:
12644 + reiserfs_write_unlock(dir->i_sb);
12645 +@@ -1187,8 +1184,7 @@ static int reiserfs_symlink(struct inode *parent_dir,
12646 + goto out_failed;
12647 + }
12648 +
12649 +- unlock_new_inode(inode);
12650 +- d_instantiate(dentry, inode);
12651 ++ d_instantiate_new(dentry, inode);
12652 + retval = journal_end(&th);
12653 + out_failed:
12654 + reiserfs_write_unlock(parent_dir->i_sb);
12655 +diff --git a/fs/super.c b/fs/super.c
12656 +index 79d7fc5e0ddd..219f7ca7c5d2 100644
12657 +--- a/fs/super.c
12658 ++++ b/fs/super.c
12659 +@@ -120,13 +120,23 @@ static unsigned long super_cache_count(struct shrinker *shrink,
12660 + sb = container_of(shrink, struct super_block, s_shrink);
12661 +
12662 + /*
12663 +- * Don't call trylock_super as it is a potential
12664 +- * scalability bottleneck. The counts could get updated
12665 +- * between super_cache_count and super_cache_scan anyway.
12666 +- * Call to super_cache_count with shrinker_rwsem held
12667 +- * ensures the safety of call to list_lru_shrink_count() and
12668 +- * s_op->nr_cached_objects().
12669 ++ * We don't call trylock_super() here as it is a scalability bottleneck,
12670 ++ * so we're exposed to partial setup state. The shrinker rwsem does not
12671 ++ * protect filesystem operations backing list_lru_shrink_count() or
12672 ++ * s_op->nr_cached_objects(). Counts can change between
12673 ++ * super_cache_count and super_cache_scan, so we really don't need locks
12674 ++ * here.
12675 ++ *
12676 ++ * However, if we are currently mounting the superblock, the underlying
12677 ++ * filesystem might be in a state of partial construction and hence it
12678 ++ * is dangerous to access it. trylock_super() uses a SB_BORN check to
12679 ++ * avoid this situation, so do the same here. The memory barrier is
12680 ++ * matched with the one in mount_fs() as we don't hold locks here.
12681 + */
12682 ++ if (!(sb->s_flags & SB_BORN))
12683 ++ return 0;
12684 ++ smp_rmb();
12685 ++
12686 + if (sb->s_op && sb->s_op->nr_cached_objects)
12687 + total_objects = sb->s_op->nr_cached_objects(sb, sc);
12688 +
12689 +@@ -1232,6 +1242,14 @@ mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
12690 + sb = root->d_sb;
12691 + BUG_ON(!sb);
12692 + WARN_ON(!sb->s_bdi);
12693 ++
12694 ++ /*
12695 ++ * Write barrier is for super_cache_count(). We place it before setting
12696 ++ * SB_BORN as the data dependency between the two functions is the
12697 ++ * superblock structure contents that we just set up, not the SB_BORN
12698 ++ * flag.
12699 ++ */
12700 ++ smp_wmb();
12701 + sb->s_flags |= SB_BORN;
12702 +
12703 + error = security_sb_kern_mount(sb, flags, secdata);
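
The smp_wmb() added in mount_fs() pairs with the smp_rmb() after the SB_BORN test in super_cache_count(): the writer finishes constructing the superblock before publishing the flag, and the lock-free reader must not act on the flag without also seeing that construction. C11 release/acquire expresses the same pairing; this is an analogue, not the kernel code, which uses standalone barriers because s_flags carries other bits too:

    #include <stdatomic.h>

    #define SB_BORN 0x1u

    struct mock_super {
        long nr_cached;         /* contents built before publication */
        atomic_uint flags;      /* SB_BORN lives here */
    };

    static void mock_mount_publish(struct mock_super *sb)
    {
        sb->nr_cached = 42;     /* construct first ... */
        /* ... then publish; release orders the store above before the flag */
        atomic_fetch_or_explicit(&sb->flags, SB_BORN, memory_order_release);
    }

    static long mock_cache_count(struct mock_super *sb)
    {
        /* acquire pairs with the release: observing SB_BORN implies
         * observing the constructed contents (the smp_rmb() analogue) */
        if (!(atomic_load_explicit(&sb->flags, memory_order_acquire) & SB_BORN))
            return 0;
        return sb->nr_cached;
    }

    int main(void)
    {
        struct mock_super sb = { 0 };

        mock_mount_publish(&sb);
        return mock_cache_count(&sb) == 42 ? 0 : 1;
    }
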
12704 +diff --git a/fs/udf/namei.c b/fs/udf/namei.c
12705 +index 885198dfd9f8..041bf34f781f 100644
12706 +--- a/fs/udf/namei.c
12707 ++++ b/fs/udf/namei.c
12708 +@@ -621,8 +621,7 @@ static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
12709 + if (fibh.sbh != fibh.ebh)
12710 + brelse(fibh.ebh);
12711 + brelse(fibh.sbh);
12712 +- unlock_new_inode(inode);
12713 +- d_instantiate(dentry, inode);
12714 ++ d_instantiate_new(dentry, inode);
12715 +
12716 + return 0;
12717 + }
12718 +@@ -732,8 +731,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
12719 + inc_nlink(dir);
12720 + dir->i_ctime = dir->i_mtime = current_time(dir);
12721 + mark_inode_dirty(dir);
12722 +- unlock_new_inode(inode);
12723 +- d_instantiate(dentry, inode);
12724 ++ d_instantiate_new(dentry, inode);
12725 + if (fibh.sbh != fibh.ebh)
12726 + brelse(fibh.ebh);
12727 + brelse(fibh.sbh);
12728 +diff --git a/fs/udf/super.c b/fs/udf/super.c
12729 +index 08bf097507f6..9b0d6562d0a1 100644
12730 +--- a/fs/udf/super.c
12731 ++++ b/fs/udf/super.c
12732 +@@ -2091,8 +2091,9 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
12733 + bool lvid_open = false;
12734 +
12735 + uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
12736 +- uopt.uid = INVALID_UID;
12737 +- uopt.gid = INVALID_GID;
12738 ++ /* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */
12739 ++ uopt.uid = make_kuid(current_user_ns(), overflowuid);
12740 ++ uopt.gid = make_kgid(current_user_ns(), overflowgid);
12741 + uopt.umask = 0;
12742 + uopt.fmode = UDF_INVALID_MODE;
12743 + uopt.dmode = UDF_INVALID_MODE;
12744 +diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
12745 +index 32545cd00ceb..d5f43ba76c59 100644
12746 +--- a/fs/ufs/namei.c
12747 ++++ b/fs/ufs/namei.c
12748 +@@ -39,8 +39,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
12749 + {
12750 + int err = ufs_add_link(dentry, inode);
12751 + if (!err) {
12752 +- unlock_new_inode(inode);
12753 +- d_instantiate(dentry, inode);
12754 ++ d_instantiate_new(dentry, inode);
12755 + return 0;
12756 + }
12757 + inode_dec_link_count(inode);
12758 +@@ -193,8 +192,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
12759 + if (err)
12760 + goto out_fail;
12761 +
12762 +- unlock_new_inode(inode);
12763 +- d_instantiate(dentry, inode);
12764 ++ d_instantiate_new(dentry, inode);
12765 + return 0;
12766 +
12767 + out_fail:
12768 +diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
12769 +index b2cde5426182..7b68e6c9a474 100644
12770 +--- a/fs/xfs/xfs_discard.c
12771 ++++ b/fs/xfs/xfs_discard.c
12772 +@@ -50,19 +50,19 @@ xfs_trim_extents(
12773 +
12774 + pag = xfs_perag_get(mp, agno);
12775 +
12776 +- error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
12777 +- if (error || !agbp)
12778 +- goto out_put_perag;
12779 +-
12780 +- cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
12781 +-
12782 + /*
12783 + * Force out the log. This means any transactions that might have freed
12784 +- * space before we took the AGF buffer lock are now on disk, and the
12785 ++ * space before we take the AGF buffer lock are now on disk, and the
12786 + * volatile disk cache is flushed.
12787 + */
12788 + xfs_log_force(mp, XFS_LOG_SYNC);
12789 +
12790 ++ error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
12791 ++ if (error || !agbp)
12792 ++ goto out_put_perag;
12793 ++
12794 ++ cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
12795 ++
12796 + /*
12797 + * Look up the longest btree in the AGF and start with it.
12798 + */
12799 +diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
12800 +index af2cc94a61bf..ae1a33aa8955 100644
12801 +--- a/include/asm-generic/bug.h
12802 ++++ b/include/asm-generic/bug.h
12803 +@@ -50,6 +50,7 @@ struct bug_entry {
12804 + #ifndef HAVE_ARCH_BUG
12805 + #define BUG() do { \
12806 + printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
12807 ++ barrier_before_unreachable(); \
12808 + panic("BUG!"); \
12809 + } while (0)
12810 + #endif
12811 +diff --git a/include/linux/bio.h b/include/linux/bio.h
12812 +index 45f00dd6323c..5aa40f4712ff 100644
12813 +--- a/include/linux/bio.h
12814 ++++ b/include/linux/bio.h
12815 +@@ -501,6 +501,7 @@ void zero_fill_bio(struct bio *bio);
12816 + extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
12817 + extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
12818 + extern unsigned int bvec_nr_vecs(unsigned short idx);
12819 ++extern const char *bio_devname(struct bio *bio, char *buffer);
12820 +
12821 + #define bio_set_dev(bio, bdev) \
12822 + do { \
12823 +@@ -519,9 +520,6 @@ do { \
12824 + #define bio_dev(bio) \
12825 + disk_devt((bio)->bi_disk)
12826 +
12827 +-#define bio_devname(bio, buf) \
12828 +- __bdevname(bio_dev(bio), (buf))
12829 +-
12830 + #ifdef CONFIG_BLK_CGROUP
12831 + int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
12832 + int bio_associate_current(struct bio *bio);
12833 +diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
12834 +index b78b31af36f8..f43113b8890b 100644
12835 +--- a/include/linux/compiler-gcc.h
12836 ++++ b/include/linux/compiler-gcc.h
12837 +@@ -211,6 +211,15 @@
12838 + #endif
12839 + #endif
12840 +
12841 ++/*
12842 ++ * calling noreturn functions, __builtin_unreachable() and __builtin_trap()
12843 ++ * confuse the stack allocation in gcc, leading to overly large stack
12844 ++ * frames, see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365
12845 ++ *
12846 ++ * Adding an empty inline assembly before it works around the problem
12847 ++ */
12848 ++#define barrier_before_unreachable() asm volatile("")
12849 ++
12850 + /*
12851 + * Mark a position in code as unreachable. This can be used to
12852 + * suppress control flow warnings after asm blocks that transfer
12853 +@@ -221,7 +230,11 @@
12854 + * unreleased. Really, we need to have autoconf for the kernel.
12855 + */
12856 + #define unreachable() \
12857 +- do { annotate_unreachable(); __builtin_unreachable(); } while (0)
12858 ++ do { \
12859 ++ annotate_unreachable(); \
12860 ++ barrier_before_unreachable(); \
12861 ++ __builtin_unreachable(); \
12862 ++ } while (0)
12863 +
12864 + /* Mark a function definition as prohibited from being cloned. */
12865 + #define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
12866 +diff --git a/include/linux/compiler.h b/include/linux/compiler.h
12867 +index e8c9cd18bb05..853929f98962 100644
12868 +--- a/include/linux/compiler.h
12869 ++++ b/include/linux/compiler.h
12870 +@@ -86,6 +86,11 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
12871 + # define barrier_data(ptr) barrier()
12872 + #endif
12873 +
12874 ++/* workaround for GCC PR82365 if needed */
12875 ++#ifndef barrier_before_unreachable
12876 ++# define barrier_before_unreachable() do { } while (0)
12877 ++#endif
12878 ++
12879 + /* Unreachable code */
12880 + #ifdef CONFIG_STACK_VALIDATION
12881 + #define annotate_reachable() ({ \
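
The two compiler-header hunks above define barrier_before_unreachable() as an empty asm for gcc, working around PR82365 where merged unreachable paths inflate stack frames, plus a no-op fallback for other compilers. A compilable, GCC-flavoured sketch of the macro pair; the stack-frame effect only shows up in generated code, so this merely demonstrates the shape:

    #ifdef __GNUC__
    # define barrier_before_unreachable() asm volatile("")
    #else
    # define barrier_before_unreachable() do { } while (0)
    #endif

    #define my_unreachable()                        \
        do {                                        \
            barrier_before_unreachable();           \
            __builtin_unreachable();                \
        } while (0)

    int sign(int x)
    {
        if (x < 0)
            return -1;
        if (x >= 0)
            return 1;
        my_unreachable();   /* tells the optimizer this point is dead */
    }

    int main(void)
    {
        return sign(5) == 1 ? 0 : 1;
    }
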
12882 +diff --git a/include/linux/dcache.h b/include/linux/dcache.h
12883 +index f05a659cdf34..006f4ccda5f5 100644
12884 +--- a/include/linux/dcache.h
12885 ++++ b/include/linux/dcache.h
12886 +@@ -226,6 +226,7 @@ extern seqlock_t rename_lock;
12887 + * These are the low-level FS interfaces to the dcache..
12888 + */
12889 + extern void d_instantiate(struct dentry *, struct inode *);
12890 ++extern void d_instantiate_new(struct dentry *, struct inode *);
12891 + extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
12892 + extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
12893 + extern void __d_drop(struct dentry *dentry);
12894 +diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
12895 +index ab927383c99d..87b8c20d5b27 100644
12896 +--- a/include/linux/if_vlan.h
12897 ++++ b/include/linux/if_vlan.h
12898 +@@ -300,32 +300,47 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features,
12899 + }
12900 +
12901 + /**
12902 +- * __vlan_insert_tag - regular VLAN tag inserting
12903 ++ * __vlan_insert_inner_tag - inner VLAN tag inserting
12904 + * @skb: skbuff to tag
12905 + * @vlan_proto: VLAN encapsulation protocol
12906 + * @vlan_tci: VLAN TCI to insert
12907 ++ * @mac_len: MAC header length including outer vlan headers
12908 + *
12909 +- * Inserts the VLAN tag into @skb as part of the payload
12910 ++ * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
12911 + * Returns error if skb_cow_head fails.
12912 + *
12913 + * Does not change skb->protocol so this function can be used during receive.
12914 + */
12915 +-static inline int __vlan_insert_tag(struct sk_buff *skb,
12916 +- __be16 vlan_proto, u16 vlan_tci)
12917 ++static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
12918 ++ __be16 vlan_proto, u16 vlan_tci,
12919 ++ unsigned int mac_len)
12920 + {
12921 + struct vlan_ethhdr *veth;
12922 +
12923 + if (skb_cow_head(skb, VLAN_HLEN) < 0)
12924 + return -ENOMEM;
12925 +
12926 +- veth = skb_push(skb, VLAN_HLEN);
12927 ++ skb_push(skb, VLAN_HLEN);
12928 +
12929 +- /* Move the mac addresses to the beginning of the new header. */
12930 +- memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
12931 ++ /* Move the mac header sans proto to the beginning of the new header. */
12932 ++ if (likely(mac_len > ETH_TLEN))
12933 ++ memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
12934 + skb->mac_header -= VLAN_HLEN;
12935 +
12936 ++ veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);
12937 ++
12938 + /* first, the ethernet type */
12939 +- veth->h_vlan_proto = vlan_proto;
12940 ++ if (likely(mac_len >= ETH_TLEN)) {
12941 ++ /* h_vlan_encapsulated_proto should already be populated, and
12942 ++ * skb->data has space for h_vlan_proto
12943 ++ */
12944 ++ veth->h_vlan_proto = vlan_proto;
12945 ++ } else {
12946 ++ /* h_vlan_encapsulated_proto should not be populated, and
12947 ++ * skb->data has no space for h_vlan_proto
12948 ++ */
12949 ++ veth->h_vlan_encapsulated_proto = skb->protocol;
12950 ++ }
12951 +
12952 + /* now, the TCI */
12953 + veth->h_vlan_TCI = htons(vlan_tci);
12954 +@@ -334,12 +349,30 @@ static inline int __vlan_insert_tag(struct sk_buff *skb,
12955 + }
12956 +
12957 + /**
12958 +- * vlan_insert_tag - regular VLAN tag inserting
12959 ++ * __vlan_insert_tag - regular VLAN tag inserting
12960 + * @skb: skbuff to tag
12961 + * @vlan_proto: VLAN encapsulation protocol
12962 + * @vlan_tci: VLAN TCI to insert
12963 + *
12964 + * Inserts the VLAN tag into @skb as part of the payload
12965 ++ * Returns error if skb_cow_head fails.
12966 ++ *
12967 ++ * Does not change skb->protocol so this function can be used during receive.
12968 ++ */
12969 ++static inline int __vlan_insert_tag(struct sk_buff *skb,
12970 ++ __be16 vlan_proto, u16 vlan_tci)
12971 ++{
12972 ++ return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
12973 ++}
12974 ++
12975 ++/**
12976 ++ * vlan_insert_inner_tag - inner VLAN tag inserting
12977 ++ * @skb: skbuff to tag
12978 ++ * @vlan_proto: VLAN encapsulation protocol
12979 ++ * @vlan_tci: VLAN TCI to insert
12980 ++ * @mac_len: MAC header length including outer vlan headers
12981 ++ *
12982 ++ * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
12983 + * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
12984 + *
12985 + * Following the skb_unshare() example, in case of error, the calling function
12986 +@@ -347,12 +380,14 @@ static inline int __vlan_insert_tag(struct sk_buff *skb,
12987 + *
12988 + * Does not change skb->protocol so this function can be used during receive.
12989 + */
12990 +-static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
12991 +- __be16 vlan_proto, u16 vlan_tci)
12992 ++static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
12993 ++ __be16 vlan_proto,
12994 ++ u16 vlan_tci,
12995 ++ unsigned int mac_len)
12996 + {
12997 + int err;
12998 +
12999 +- err = __vlan_insert_tag(skb, vlan_proto, vlan_tci);
13000 ++ err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len);
13001 + if (err) {
13002 + dev_kfree_skb_any(skb);
13003 + return NULL;
13004 +@@ -360,6 +395,26 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
13005 + return skb;
13006 + }
13007 +
13008 ++/**
13009 ++ * vlan_insert_tag - regular VLAN tag inserting
13010 ++ * @skb: skbuff to tag
13011 ++ * @vlan_proto: VLAN encapsulation protocol
13012 ++ * @vlan_tci: VLAN TCI to insert
13013 ++ *
13014 ++ * Inserts the VLAN tag into @skb as part of the payload
13015 ++ * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
13016 ++ *
13017 ++ * Following the skb_unshare() example, in case of error, the calling function
13018 ++ * doesn't have to worry about freeing the original skb.
13019 ++ *
13020 ++ * Does not change skb->protocol so this function can be used during receive.
13021 ++ */
13022 ++static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
13023 ++ __be16 vlan_proto, u16 vlan_tci)
13024 ++{
13025 ++ return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
13026 ++}
13027 ++
13028 + /**
13029 + * vlan_insert_tag_set_proto - regular VLAN tag inserting
13030 + * @skb: skbuff to tag
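
__vlan_insert_inner_tag() above generalizes the old helper: the 4-byte VLAN header is opened at mac_len rather than always ETH_HLEN, the memmove shifts only the MAC header bytes in front of the type field, and the original type field stays put to become h_vlan_encapsulated_proto. A runnable byte-buffer sketch of the offset arithmetic, assuming a full Ethernet header (mac_len >= ETH_HLEN) and a mock frame with VLAN_HLEN bytes of headroom already reserved, standing in for skb_push():

    #include <arpa/inet.h>  /* htons */
    #include <stdint.h>
    #include <string.h>

    #define ETH_TLEN  2     /* octets in the ethernet type field */
    #define ETH_HLEN  14
    #define VLAN_HLEN 4

    /* data points at the new frame start (VLAN_HLEN bytes before the
     * old one); mac_len is the original MAC header length, including
     * any outer VLAN headers. */
    static void insert_inner_tag(uint8_t *data, uint16_t vlan_proto,
                                 uint16_t vlan_tci, size_t mac_len)
    {
        uint16_t be;

        /* move the MAC header sans type field into the freed headroom */
        memmove(data, data + VLAN_HLEN, mac_len - ETH_TLEN);

        /* the gap now sits just before the encapsulated type field:
         * 2 bytes of VLAN ethertype, then 2 bytes of TCI */
        be = htons(vlan_proto);
        memcpy(data + mac_len - ETH_TLEN, &be, ETH_TLEN);
        be = htons(vlan_tci);
        memcpy(data + mac_len, &be, ETH_TLEN);
    }

    int main(void)
    {
        /* 14-byte header + 4 bytes payload, with 4 bytes headroom */
        uint8_t buf[VLAN_HLEN + ETH_HLEN + 4] = { 0 };

        memcpy(buf + VLAN_HLEN,
               "\xaa\xaa\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb\xbb\xbb\x08\x00",
               ETH_HLEN);
        insert_inner_tag(buf, 0x8100, 5, ETH_HLEN);
        return 0;
    }

With mac_len == ETH_HLEN this reduces to the classic behaviour, which is exactly how the __vlan_insert_tag() wrapper above is implemented.
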
13031 +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
13032 +index 5a8019befafd..39f0489eb137 100644
13033 +--- a/include/linux/kvm_host.h
13034 ++++ b/include/linux/kvm_host.h
13035 +@@ -1104,7 +1104,6 @@ static inline void kvm_irq_routing_update(struct kvm *kvm)
13036 + {
13037 + }
13038 + #endif
13039 +-void kvm_arch_irq_routing_update(struct kvm *kvm);
13040 +
13041 + static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
13042 + {
13043 +@@ -1113,6 +1112,8 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
13044 +
13045 + #endif /* CONFIG_HAVE_KVM_EVENTFD */
13046 +
13047 ++void kvm_arch_irq_routing_update(struct kvm *kvm);
13048 ++
13049 + static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
13050 + {
13051 + /*
13052 +diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
13053 +index 35d125569e68..e8b12b79a0de 100644
13054 +--- a/include/linux/ptr_ring.h
13055 ++++ b/include/linux/ptr_ring.h
13056 +@@ -450,7 +450,7 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
13057 + */
13058 + static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
13059 + {
13060 +- if (size * sizeof(void *) > KMALLOC_MAX_SIZE)
13061 ++ if (size > KMALLOC_MAX_SIZE / sizeof(void *))
13062 + return NULL;
13063 + return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO);
13064 + }
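
The one-line ptr_ring fix above is the standard overflow-safe bounds check: size * sizeof(void *) can wrap (notably where size_t is 32-bit) and sail under KMALLOC_MAX_SIZE, so the constant limit is divided instead. A small sketch of the two forms; the limit constant is a stand-in:

    #include <stdbool.h>
    #include <stddef.h>

    #define MOCK_KMALLOC_MAX ((size_t)1 << 22)  /* KMALLOC_MAX_SIZE stand-in */

    /* Overflow-prone: the multiplication may wrap and pass the test. */
    static bool fits_buggy(unsigned int size)
    {
        return size * sizeof(void *) <= MOCK_KMALLOC_MAX;
    }

    /* Overflow-free: divide the constant limit instead of multiplying. */
    static bool fits_fixed(unsigned int size)
    {
        return size <= MOCK_KMALLOC_MAX / sizeof(void *);
    }

    int main(void)
    {
        /* with 32-bit size_t, fits_buggy(0x20000001u) wraps and "passes" */
        (void)fits_buggy(0x20000001u);
        return fits_fixed(1024) ? 0 : 1;
    }
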
13065 +diff --git a/include/net/ip.h b/include/net/ip.h
13066 +index af8addbaa3c1..81da1123fc8e 100644
13067 +--- a/include/net/ip.h
13068 ++++ b/include/net/ip.h
13069 +@@ -326,6 +326,13 @@ int ip_decrease_ttl(struct iphdr *iph)
13070 + return --iph->ttl;
13071 + }
13072 +
13073 ++static inline int ip_mtu_locked(const struct dst_entry *dst)
13074 ++{
13075 ++ const struct rtable *rt = (const struct rtable *)dst;
13076 ++
13077 ++ return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
13078 ++}
13079 ++
13080 + static inline
13081 + int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
13082 + {
13083 +@@ -333,7 +340,7 @@ int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
13084 +
13085 + return pmtudisc == IP_PMTUDISC_DO ||
13086 + (pmtudisc == IP_PMTUDISC_WANT &&
13087 +- !(dst_metric_locked(dst, RTAX_MTU)));
13088 ++ !ip_mtu_locked(dst));
13089 + }
13090 +
13091 + static inline bool ip_sk_accept_pmtu(const struct sock *sk)
13092 +@@ -359,7 +366,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
13093 + struct net *net = dev_net(dst->dev);
13094 +
13095 + if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
13096 +- dst_metric_locked(dst, RTAX_MTU) ||
13097 ++ ip_mtu_locked(dst) ||
13098 + !forwarding)
13099 + return dst_mtu(dst);
13100 +
13101 +diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
13102 +index 1a7f7e424320..5c5d344c0629 100644
13103 +--- a/include/net/ip_fib.h
13104 ++++ b/include/net/ip_fib.h
13105 +@@ -59,6 +59,7 @@ struct fib_nh_exception {
13106 + int fnhe_genid;
13107 + __be32 fnhe_daddr;
13108 + u32 fnhe_pmtu;
13109 ++ bool fnhe_mtu_locked;
13110 + __be32 fnhe_gw;
13111 + unsigned long fnhe_expires;
13112 + struct rtable __rcu *fnhe_rth_input;
13113 +diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
13114 +index ea985aa7a6c5..df528a623548 100644
13115 +--- a/include/net/llc_conn.h
13116 ++++ b/include/net/llc_conn.h
13117 +@@ -104,7 +104,7 @@ void llc_sk_reset(struct sock *sk);
13118 +
13119 + /* Access to a connection */
13120 + int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
13121 +-void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
13122 ++int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
13123 + void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
13124 + void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
13125 + void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
13126 +diff --git a/include/net/mac80211.h b/include/net/mac80211.h
13127 +index 4f1d2dec43ce..87b62bae20af 100644
13128 +--- a/include/net/mac80211.h
13129 ++++ b/include/net/mac80211.h
13130 +@@ -4141,7 +4141,7 @@ void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *sta, u8 tid);
13131 + * The TX headroom reserved by mac80211 for its own tx_status functions.
13132 + * This is enough for the radiotap header.
13133 + */
13134 +-#define IEEE80211_TX_STATUS_HEADROOM 14
13135 ++#define IEEE80211_TX_STATUS_HEADROOM ALIGN(14, 4)
13136 +
13137 + /**
13138 + * ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames
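
Rounding the status headroom with ALIGN(14, 4) keeps subsequent header pushes 4-byte aligned; the macro is the usual power-of-two round-up. A one-screen sketch of the arithmetic (local ALIGN definition, mirroring the kernel's):

    #include <stdio.h>

    /* round x up to the next multiple of a power-of-two a */
    #define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        printf("%d\n", ALIGN(14, 4));   /* prints 16 */
        return 0;
    }
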
13139 +diff --git a/include/net/regulatory.h b/include/net/regulatory.h
13140 +index ebc5a2ed8631..f83cacce3308 100644
13141 +--- a/include/net/regulatory.h
13142 ++++ b/include/net/regulatory.h
13143 +@@ -78,7 +78,7 @@ struct regulatory_request {
13144 + int wiphy_idx;
13145 + enum nl80211_reg_initiator initiator;
13146 + enum nl80211_user_reg_hint_type user_reg_hint_type;
13147 +- char alpha2[2];
13148 ++ char alpha2[3];
13149 + enum nl80211_dfs_regions dfs_region;
13150 + bool intersect;
13151 + bool processed;
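Widening alpha2 from 2 to 3 bytes in the regulatory hunk above leaves room for a NUL terminator, so code that formats the two-letter country code as a C string no longer reads past the buffer. A standalone illustration of why the third byte matters:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char alpha2[3];

	memcpy(alpha2, "US", 2);
	alpha2[2] = '\0';		/* only possible with the third byte */
	printf("regdomain: %s\n", alpha2);
	return 0;
}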
13152 +diff --git a/include/net/route.h b/include/net/route.h
13153 +index d538e6db1afe..6077a0fb3044 100644
13154 +--- a/include/net/route.h
13155 ++++ b/include/net/route.h
13156 +@@ -63,7 +63,8 @@ struct rtable {
13157 + __be32 rt_gateway;
13158 +
13159 + /* Miscellaneous cached information */
13160 +- u32 rt_pmtu;
13161 ++ u32 rt_mtu_locked:1,
13162 ++ rt_pmtu:31;
13163 +
13164 + u32 rt_table_id;
13165 +
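Note how the struct rtable change above carves the new rt_mtu_locked flag out of the existing 32-bit PMTU word (rt_pmtu shrinks to 31 bits), so the structure does not grow. A standalone check of that packing — bitfield layout is implementation-defined in general, but on the common ABIs the kernel targets this stays one 32-bit word:

#include <stdio.h>
#include <stdint.h>

struct pmtu_word {
	uint32_t rt_mtu_locked:1,
		 rt_pmtu:31;
};

int main(void)
{
	printf("sizeof = %zu\n", sizeof(struct pmtu_word)); /* prints 4 */
	return 0;
}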
13166 +diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
13167 +index 23159dd5be18..a1fd63871d17 100644
13168 +--- a/include/rdma/ib_umem.h
13169 ++++ b/include/rdma/ib_umem.h
13170 +@@ -48,7 +48,6 @@ struct ib_umem {
13171 + int writable;
13172 + int hugetlb;
13173 + struct work_struct work;
13174 +- struct pid *pid;
13175 + struct mm_struct *mm;
13176 + unsigned long diff;
13177 + struct ib_umem_odp *odp_data;
13178 +diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h
13179 +index c2d1b15da136..a91f25151a5b 100644
13180 +--- a/include/soc/arc/mcip.h
13181 ++++ b/include/soc/arc/mcip.h
13182 +@@ -15,6 +15,7 @@
13183 +
13184 + #define ARC_REG_MCIP_BCR 0x0d0
13185 + #define ARC_REG_MCIP_IDU_BCR 0x0D5
13186 ++#define ARC_REG_GFRC_BUILD 0x0D6
13187 + #define ARC_REG_MCIP_CMD 0x600
13188 + #define ARC_REG_MCIP_WDATA 0x601
13189 + #define ARC_REG_MCIP_READBACK 0x602
13190 +@@ -36,10 +37,14 @@ struct mcip_cmd {
13191 + #define CMD_SEMA_RELEASE 0x12
13192 +
13193 + #define CMD_DEBUG_SET_MASK 0x34
13194 ++#define CMD_DEBUG_READ_MASK 0x35
13195 + #define CMD_DEBUG_SET_SELECT 0x36
13196 ++#define CMD_DEBUG_READ_SELECT 0x37
13197 +
13198 + #define CMD_GFRC_READ_LO 0x42
13199 + #define CMD_GFRC_READ_HI 0x43
13200 ++#define CMD_GFRC_SET_CORE 0x47
13201 ++#define CMD_GFRC_READ_CORE 0x48
13202 +
13203 + #define CMD_IDU_ENABLE 0x71
13204 + #define CMD_IDU_DISABLE 0x72
13205 +diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
13206 +index 91a31ffed828..9a781f0611df 100644
13207 +--- a/include/uapi/drm/virtgpu_drm.h
13208 ++++ b/include/uapi/drm/virtgpu_drm.h
13209 +@@ -63,6 +63,7 @@ struct drm_virtgpu_execbuffer {
13210 + };
13211 +
13212 + #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
13213 ++#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
13214 +
13215 + struct drm_virtgpu_getparam {
13216 + __u64 param;
13217 +diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
13218 +index 3ee3bf7c8526..244e3213ecb0 100644
13219 +--- a/include/uapi/linux/if_ether.h
13220 ++++ b/include/uapi/linux/if_ether.h
13221 +@@ -30,6 +30,7 @@
13222 + */
13223 +
13224 + #define ETH_ALEN 6 /* Octets in one ethernet addr */
13225 ++#define ETH_TLEN 2 /* Octets in ethernet type field */
13226 + #define ETH_HLEN 14 /* Total octets in header. */
13227 + #define ETH_ZLEN 60 /* Min. octets in frame sans FCS */
13228 + #define ETH_DATA_LEN 1500 /* Max. octets in payload */
13229 +diff --git a/ipc/shm.c b/ipc/shm.c
13230 +index a9cce632ed48..44cca2529a95 100644
13231 +--- a/ipc/shm.c
13232 ++++ b/ipc/shm.c
13233 +@@ -1309,14 +1309,17 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
13234 +
13235 + if (addr) {
13236 + if (addr & (shmlba - 1)) {
13237 +- /*
13238 +- * Round down to the nearest multiple of shmlba.
13239 +- * For sane do_mmap_pgoff() parameters, avoid
13240 +- * round downs that trigger nil-page and MAP_FIXED.
13241 +- */
13242 +- if ((shmflg & SHM_RND) && addr >= shmlba)
13243 +- addr &= ~(shmlba - 1);
13244 +- else
13245 ++ if (shmflg & SHM_RND) {
13246 ++ addr &= ~(shmlba - 1); /* round down */
13247 ++
13248 ++ /*
13249 ++ * Ensure that the round-down is non-nil
13250 ++ * when remapping. This can happen for
13251 ++ * cases when addr < shmlba.
13252 ++ */
13253 ++ if (!addr && (shmflg & SHM_REMAP))
13254 ++ goto out;
13255 ++ } else
13256 + #ifndef __ARCH_FORCE_SHMLBA
13257 + if (addr & ~PAGE_MASK)
13258 + #endif
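The rewritten ipc/shm.c branch above always rounds the attach address down to a shmlba multiple under SHM_RND, then rejects the result only if it rounded to address zero while SHM_REMAP is set (previously the round-down was skipped outright whenever addr < shmlba). The arithmetic, checked standalone — any addr below shmlba rounds to 0, which is exactly the case the new SHM_REMAP guard refuses:

#include <stdio.h>

int main(void)
{
	unsigned long shmlba = 0x4000;	/* e.g. 16 KiB on some architectures */
	unsigned long addr = 0x2345;	/* below shmlba */

	addr &= ~(shmlba - 1);		/* round down, as in the hunk */
	printf("rounded addr = 0x%lx\n", addr); /* prints 0x0 */
	return 0;
}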
13259 +diff --git a/kernel/audit.c b/kernel/audit.c
13260 +index 5b34d3114af4..d301276bca58 100644
13261 +--- a/kernel/audit.c
13262 ++++ b/kernel/audit.c
13263 +@@ -1058,6 +1058,8 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
13264 + return;
13265 +
13266 + ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE);
13267 ++ if (!ab)
13268 ++ return;
13269 + audit_log_task_info(ab, current);
13270 + audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
13271 + audit_feature_names[which], !!old_feature, !!new_feature,
13272 +diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
13273 +index c8146d53ca67..07aefa8dbee8 100644
13274 +--- a/kernel/debug/kdb/kdb_main.c
13275 ++++ b/kernel/debug/kdb/kdb_main.c
13276 +@@ -1566,6 +1566,7 @@ static int kdb_md(int argc, const char **argv)
13277 + int symbolic = 0;
13278 + int valid = 0;
13279 + int phys = 0;
13280 ++ int raw = 0;
13281 +
13282 + kdbgetintenv("MDCOUNT", &mdcount);
13283 + kdbgetintenv("RADIX", &radix);
13284 +@@ -1575,9 +1576,10 @@ static int kdb_md(int argc, const char **argv)
13285 + repeat = mdcount * 16 / bytesperword;
13286 +
13287 + if (strcmp(argv[0], "mdr") == 0) {
13288 +- if (argc != 2)
13289 ++ if (argc == 2 || (argc == 0 && last_addr != 0))
13290 ++ valid = raw = 1;
13291 ++ else
13292 + return KDB_ARGCOUNT;
13293 +- valid = 1;
13294 + } else if (isdigit(argv[0][2])) {
13295 + bytesperword = (int)(argv[0][2] - '0');
13296 + if (bytesperword == 0) {
13297 +@@ -1613,7 +1615,10 @@ static int kdb_md(int argc, const char **argv)
13298 + radix = last_radix;
13299 + bytesperword = last_bytesperword;
13300 + repeat = last_repeat;
13301 +- mdcount = ((repeat * bytesperword) + 15) / 16;
13302 ++ if (raw)
13303 ++ mdcount = repeat;
13304 ++ else
13305 ++ mdcount = ((repeat * bytesperword) + 15) / 16;
13306 + }
13307 +
13308 + if (argc) {
13309 +@@ -1630,7 +1635,10 @@ static int kdb_md(int argc, const char **argv)
13310 + diag = kdbgetularg(argv[nextarg], &val);
13311 + if (!diag) {
13312 + mdcount = (int) val;
13313 +- repeat = mdcount * 16 / bytesperword;
13314 ++ if (raw)
13315 ++ repeat = mdcount;
13316 ++ else
13317 ++ repeat = mdcount * 16 / bytesperword;
13318 + }
13319 + }
13320 + if (argc >= nextarg+1) {
13321 +@@ -1640,8 +1648,15 @@ static int kdb_md(int argc, const char **argv)
13322 + }
13323 + }
13324 +
13325 +- if (strcmp(argv[0], "mdr") == 0)
13326 +- return kdb_mdr(addr, mdcount);
13327 ++ if (strcmp(argv[0], "mdr") == 0) {
13328 ++ int ret;
13329 ++ last_addr = addr;
13330 ++ ret = kdb_mdr(addr, mdcount);
13331 ++ last_addr += mdcount;
13332 ++ last_repeat = mdcount;
13333 ++ last_bytesperword = bytesperword; /* to make REPEAT happy */
13334 ++ return ret;
13335 ++ }
13336 +
13337 + switch (radix) {
13338 + case 10:
13339 +diff --git a/kernel/events/core.c b/kernel/events/core.c
13340 +index cb8274d7824f..7c394ddf1ce6 100644
13341 +--- a/kernel/events/core.c
13342 ++++ b/kernel/events/core.c
13343 +@@ -642,9 +642,15 @@ static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
13344 +
13345 + static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
13346 + {
13347 +- struct perf_cgroup *cgrp_out = cpuctx->cgrp;
13348 +- if (cgrp_out)
13349 +- __update_cgrp_time(cgrp_out);
13350 ++ struct perf_cgroup *cgrp = cpuctx->cgrp;
13351 ++ struct cgroup_subsys_state *css;
13352 ++
13353 ++ if (cgrp) {
13354 ++ for (css = &cgrp->css; css; css = css->parent) {
13355 ++ cgrp = container_of(css, struct perf_cgroup, css);
13356 ++ __update_cgrp_time(cgrp);
13357 ++ }
13358 ++ }
13359 + }
13360 +
13361 + static inline void update_cgrp_time_from_event(struct perf_event *event)
13362 +@@ -672,6 +678,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
13363 + {
13364 + struct perf_cgroup *cgrp;
13365 + struct perf_cgroup_info *info;
13366 ++ struct cgroup_subsys_state *css;
13367 +
13368 + /*
13369 + * ctx->lock held by caller
13370 +@@ -682,8 +689,12 @@ perf_cgroup_set_timestamp(struct task_struct *task,
13371 + return;
13372 +
13373 + cgrp = perf_cgroup_from_task(task, ctx);
13374 +- info = this_cpu_ptr(cgrp->info);
13375 +- info->timestamp = ctx->timestamp;
13376 ++
13377 ++ for (css = &cgrp->css; css; css = css->parent) {
13378 ++ cgrp = container_of(css, struct perf_cgroup, css);
13379 ++ info = this_cpu_ptr(cgrp->info);
13380 ++ info->timestamp = ctx->timestamp;
13381 ++ }
13382 + }
13383 +
13384 + static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);
13385 +@@ -889,27 +900,39 @@ list_update_cgroup_event(struct perf_event *event,
13386 + if (!is_cgroup_event(event))
13387 + return;
13388 +
13389 +- if (add && ctx->nr_cgroups++)
13390 +- return;
13391 +- else if (!add && --ctx->nr_cgroups)
13392 +- return;
13393 + /*
13394 + * Because cgroup events are always per-cpu events,
13395 + * this will always be called from the right CPU.
13396 + */
13397 + cpuctx = __get_cpu_context(ctx);
13398 +- cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
13399 +- /* cpuctx->cgrp is NULL unless a cgroup event is active in this CPU .*/
13400 +- if (add) {
13401 ++
13402 ++ /*
13403 ++ * Since setting cpuctx->cgrp is conditional on the current @cgrp
13404 ++ * matching the event's cgroup, we must do this for every new event,
13405 ++ * because if the first would mismatch, the second would not try again
13406 ++ * and we would leave cpuctx->cgrp unset.
13407 ++ */
13408 ++ if (add && !cpuctx->cgrp) {
13409 + struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
13410 +
13411 +- list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
13412 + if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
13413 + cpuctx->cgrp = cgrp;
13414 +- } else {
13415 +- list_del(cpuctx_entry);
13416 +- cpuctx->cgrp = NULL;
13417 + }
13418 ++
13419 ++ if (add && ctx->nr_cgroups++)
13420 ++ return;
13421 ++ else if (!add && --ctx->nr_cgroups)
13422 ++ return;
13423 ++
13424 ++ /* no cgroup running */
13425 ++ if (!add)
13426 ++ cpuctx->cgrp = NULL;
13427 ++
13428 ++ cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
13429 ++ if (add)
13430 ++ list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
13431 ++ else
13432 ++ list_del(cpuctx_entry);
13433 + }
13434 +
13435 + #else /* !CONFIG_CGROUP_PERF */
13436 +@@ -2393,6 +2416,18 @@ static int __perf_install_in_context(void *info)
13437 + raw_spin_lock(&task_ctx->lock);
13438 + }
13439 +
13440 ++#ifdef CONFIG_CGROUP_PERF
13441 ++ if (is_cgroup_event(event)) {
13442 ++ /*
13443 ++ * If the current cgroup doesn't match the event's
13444 ++ * cgroup, we should not try to schedule it.
13445 ++ */
13446 ++ struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
13447 ++ reprogram = cgroup_is_descendant(cgrp->css.cgroup,
13448 ++ event->cgrp->css.cgroup);
13449 ++ }
13450 ++#endif
13451 ++
13452 + if (reprogram) {
13453 + ctx_sched_out(ctx, cpuctx, EVENT_TIME);
13454 + add_event_to_ctx(event, ctx);
13455 +@@ -5802,7 +5837,8 @@ static void perf_output_read_group(struct perf_output_handle *handle,
13456 + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
13457 + values[n++] = running;
13458 +
13459 +- if (leader != event)
13460 ++ if ((leader != event) &&
13461 ++ (leader->state == PERF_EVENT_STATE_ACTIVE))
13462 + leader->pmu->read(leader);
13463 +
13464 + values[n++] = perf_event_count(leader);
13465 +diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
13466 +index fed95fa941e6..8b3102d22823 100644
13467 +--- a/kernel/rcu/tree_plugin.h
13468 ++++ b/kernel/rcu/tree_plugin.h
13469 +@@ -559,8 +559,14 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
13470 + }
13471 + t = list_entry(rnp->gp_tasks->prev,
13472 + struct task_struct, rcu_node_entry);
13473 +- list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
13474 ++ list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
13475 ++ /*
13476 ++ * We could be printing a lot while holding a spinlock.
13477 ++ * Avoid triggering hard lockup.
13478 ++ */
13479 ++ touch_nmi_watchdog();
13480 + sched_show_task(t);
13481 ++ }
13482 + raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
13483 + }
13484 +
13485 +@@ -1677,6 +1683,12 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
13486 + char *ticks_title;
13487 + unsigned long ticks_value;
13488 +
13489 ++ /*
13490 ++ * We could be printing a lot while holding a spinlock. Avoid
13491 ++ * triggering hard lockup.
13492 ++ */
13493 ++ touch_nmi_watchdog();
13494 ++
13495 + if (rsp->gpnum == rdp->gpnum) {
13496 + ticks_title = "ticks this GP";
13497 + ticks_value = rdp->ticks_this_gp;
13498 +diff --git a/kernel/relay.c b/kernel/relay.c
13499 +index 55da824f4adc..1537158c67b3 100644
13500 +--- a/kernel/relay.c
13501 ++++ b/kernel/relay.c
13502 +@@ -163,7 +163,7 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan)
13503 + {
13504 + struct rchan_buf *buf;
13505 +
13506 +- if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
13507 ++ if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *))
13508 + return NULL;
13509 +
13510 + buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
13511 +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
13512 +index 470a0c9e93de..113eaeb6c0f8 100644
13513 +--- a/kernel/sched/rt.c
13514 ++++ b/kernel/sched/rt.c
13515 +@@ -843,6 +843,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
13516 + continue;
13517 +
13518 + raw_spin_lock(&rq->lock);
13519 ++ update_rq_clock(rq);
13520 ++
13521 + if (rt_rq->rt_time) {
13522 + u64 runtime;
13523 +
13524 +diff --git a/kernel/sys.c b/kernel/sys.c
13525 +index b5c1bc9e3769..de4ed027dfd7 100644
13526 +--- a/kernel/sys.c
13527 ++++ b/kernel/sys.c
13528 +@@ -1395,6 +1395,7 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
13529 + if (resource >= RLIM_NLIMITS)
13530 + return -EINVAL;
13531 +
13532 ++ resource = array_index_nospec(resource, RLIM_NLIMITS);
13533 + task_lock(current->group_leader);
13534 + x = current->signal->rlim[resource];
13535 + task_unlock(current->group_leader);
13536 +@@ -1414,6 +1415,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
13537 + if (resource >= RLIM_NLIMITS)
13538 + return -EINVAL;
13539 +
13540 ++ resource = array_index_nospec(resource, RLIM_NLIMITS);
13541 + task_lock(current->group_leader);
13542 + r = current->signal->rlim[resource];
13543 + task_unlock(current->group_leader);
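Both old_getrlimit paths above clamp the already-bounds-checked resource index with array_index_nospec(), so a mispredicted branch cannot speculatively index past rlim[] (Spectre v1). A sketch of the branchless mask idea behind it — this mirrors the kernel's generic fallback, which architectures may override, and like that fallback it relies on an arithmetic right shift of a negative value; standalone demonstration, not the kernel header:

#include <stdio.h>

static unsigned long index_mask(unsigned long index, unsigned long size)
{
	/* top bit of (index | (size - 1 - index)) is set iff index >= size,
	 * so this yields all-ones for in-bounds indices and zero otherwise */
	return ~(long)(index | (size - 1UL - index)) >> (sizeof(long) * 8 - 1);
}

int main(void)
{
	unsigned long size = 16; /* stand-in for RLIM_NLIMITS */

	printf("%lu\n", 5UL & index_mask(5, size));	/* 5: in bounds    */
	printf("%lu\n", 20UL & index_mask(20, size));	/* 0: clamped out  */
	return 0;
}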
13544 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
13545 +index d0c6b50792c8..d8a7f8939c81 100644
13546 +--- a/kernel/workqueue.c
13547 ++++ b/kernel/workqueue.c
13548 +@@ -5350,7 +5350,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
13549 +
13550 + ret = device_register(&wq_dev->dev);
13551 + if (ret) {
13552 +- kfree(wq_dev);
13553 ++ put_device(&wq_dev->dev);
13554 + wq->wq_dev = NULL;
13555 + return ret;
13556 + }
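The workqueue fix above follows the driver-core rule that once device_register() has been called, the object is refcounted and must be released by dropping the reference with put_device() (which runs the release callback), never by kfree()ing it directly. A userspace model of that ownership rule — an illustrative sketch, not the driver-core API:

#include <stdio.h>
#include <stdlib.h>

struct dev {
	int refcount;
	void (*release)(struct dev *);
};

static void put_dev(struct dev *d)
{
	if (--d->refcount == 0)
		d->release(d);	/* frees exactly once, wherever the last ref drops */
}

static void dev_release(struct dev *d)
{
	printf("releasing\n");
	free(d);
}

int main(void)
{
	struct dev *d = malloc(sizeof(*d));

	if (!d)
		return 1;
	d->refcount = 1;
	d->release = dev_release;
	put_dev(d);	/* correct even on error paths; never free(d) directly */
	return 0;
}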
13557 +diff --git a/lib/radix-tree.c b/lib/radix-tree.c
13558 +index 70d677820740..d172f0341b80 100644
13559 +--- a/lib/radix-tree.c
13560 ++++ b/lib/radix-tree.c
13561 +@@ -2037,10 +2037,12 @@ void *radix_tree_delete_item(struct radix_tree_root *root,
13562 + unsigned long index, void *item)
13563 + {
13564 + struct radix_tree_node *node = NULL;
13565 +- void __rcu **slot;
13566 ++ void __rcu **slot = NULL;
13567 + void *entry;
13568 +
13569 + entry = __radix_tree_lookup(root, index, &node, &slot);
13570 ++ if (!slot)
13571 ++ return NULL;
13572 + if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
13573 + get_slot_offset(node, slot))))
13574 + return NULL;
13575 +diff --git a/lib/test_kmod.c b/lib/test_kmod.c
13576 +index fba78d25e825..96c304fd656a 100644
13577 +--- a/lib/test_kmod.c
13578 ++++ b/lib/test_kmod.c
13579 +@@ -1149,7 +1149,7 @@ static struct kmod_test_device *register_test_dev_kmod(void)
13580 + mutex_lock(&reg_dev_mutex);
13581 +
13582 + /* int should suffice for number of devices, test for wrap */
13583 +- if (unlikely(num_test_devs + 1) < 0) {
13584 ++ if (num_test_devs + 1 == INT_MAX) {
13585 + pr_err("reached limit of number of test devices\n");
13586 + goto out;
13587 + }
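The test_kmod fix above replaces a check that depended on signed overflow — `(num_test_devs + 1) < 0` is undefined behavior for int and may be optimized away — with a comparison against INT_MAX before any wrap can occur. A standalone sketch of the same guard, written here without the addition at the boundary:

#include <limits.h>
#include <stdio.h>

static int bump(int *count)
{
	/* same intent as the hunk's `num_test_devs + 1 == INT_MAX` test,
	 * rewritten so nothing can overflow even at INT_MAX itself */
	if (*count >= INT_MAX - 1)
		return -1;	/* refuse: at the limit */
	(*count)++;
	return 0;
}

int main(void)
{
	int n = INT_MAX - 2;

	printf("%d\n", bump(&n));	/* 0: incremented to INT_MAX - 1 */
	printf("%d\n", bump(&n));	/* -1: limit reached, n unchanged */
	return 0;
}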
13588 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
13589 +index a403d29da6fd..e774898c91d5 100644
13590 +--- a/mm/huge_memory.c
13591 ++++ b/mm/huge_memory.c
13592 +@@ -555,7 +555,8 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
13593 +
13594 + VM_BUG_ON_PAGE(!PageCompound(page), page);
13595 +
13596 +- if (mem_cgroup_try_charge(page, vma->vm_mm, gfp, &memcg, true)) {
13597 ++ if (mem_cgroup_try_charge(page, vma->vm_mm, gfp | __GFP_NORETRY, &memcg,
13598 ++ true)) {
13599 + put_page(page);
13600 + count_vm_event(THP_FAULT_FALLBACK);
13601 + return VM_FAULT_FALLBACK;
13602 +@@ -1304,7 +1305,7 @@ int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
13603 + }
13604 +
13605 + if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm,
13606 +- huge_gfp, &memcg, true))) {
13607 ++ huge_gfp | __GFP_NORETRY, &memcg, true))) {
13608 + put_page(new_page);
13609 + split_huge_pmd(vma, vmf->pmd, vmf->address);
13610 + if (page)
13611 +diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
13612 +index 6f319fb81718..d90f29a166d8 100644
13613 +--- a/mm/kasan/kasan.c
13614 ++++ b/mm/kasan/kasan.c
13615 +@@ -737,6 +737,40 @@ void __asan_unpoison_stack_memory(const void *addr, size_t size)
13616 + EXPORT_SYMBOL(__asan_unpoison_stack_memory);
13617 +
13618 + #ifdef CONFIG_MEMORY_HOTPLUG
13619 ++static bool shadow_mapped(unsigned long addr)
13620 ++{
13621 ++ pgd_t *pgd = pgd_offset_k(addr);
13622 ++ p4d_t *p4d;
13623 ++ pud_t *pud;
13624 ++ pmd_t *pmd;
13625 ++ pte_t *pte;
13626 ++
13627 ++ if (pgd_none(*pgd))
13628 ++ return false;
13629 ++ p4d = p4d_offset(pgd, addr);
13630 ++ if (p4d_none(*p4d))
13631 ++ return false;
13632 ++ pud = pud_offset(p4d, addr);
13633 ++ if (pud_none(*pud))
13634 ++ return false;
13635 ++
13636 ++ /*
13637 ++ * We can't use pud_large() or pud_huge(), the first one is
13638 ++ * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse
13639 ++ * pud_bad(), if pud is bad then it's bad because it's huge.
13640 ++ */
13641 ++ if (pud_bad(*pud))
13642 ++ return true;
13643 ++ pmd = pmd_offset(pud, addr);
13644 ++ if (pmd_none(*pmd))
13645 ++ return false;
13646 ++
13647 ++ if (pmd_bad(*pmd))
13648 ++ return true;
13649 ++ pte = pte_offset_kernel(pmd, addr);
13650 ++ return !pte_none(*pte);
13651 ++}
13652 ++
13653 + static int __meminit kasan_mem_notifier(struct notifier_block *nb,
13654 + unsigned long action, void *data)
13655 + {
13656 +@@ -758,6 +792,14 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
13657 + case MEM_GOING_ONLINE: {
13658 + void *ret;
13659 +
13660 ++ /*
13661 ++ * If the shadow is mapped already then it must have been mapped
13662 ++ * during boot. This can happen when we are onlining previously
13663 ++ * offlined memory.
13664 ++ */
13665 ++ if (shadow_mapped(shadow_start))
13666 ++ return NOTIFY_OK;
13667 ++
13668 + ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
13669 + shadow_end, GFP_KERNEL,
13670 + PAGE_KERNEL, VM_NO_GUARD,
13671 +@@ -769,8 +811,26 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
13672 + kmemleak_ignore(ret);
13673 + return NOTIFY_OK;
13674 + }
13675 +- case MEM_OFFLINE:
13676 +- vfree((void *)shadow_start);
13677 ++ case MEM_CANCEL_ONLINE:
13678 ++ case MEM_OFFLINE: {
13679 ++ struct vm_struct *vm;
13680 ++
13681 ++ /*
13682 ++ * shadow_start was either mapped during boot by kasan_init()
13683 ++ * or during memory online by __vmalloc_node_range().
13684 ++ * In the latter case we can use vfree() to free shadow.
13685 ++ * Non-NULL result of the find_vm_area() will tell us if
13686 ++ * that was the second case.
13687 ++ *
13688 ++ * Currently it's not possible to free shadow mapped
13689 ++ * during boot by kasan_init(). It's because the code
13690 ++ * to do that hasn't been written yet. So we'll just
13691 ++ * leak the memory.
13692 ++ */
13693 ++ vm = find_vm_area((void *)shadow_start);
13694 ++ if (vm)
13695 ++ vfree((void *)shadow_start);
13696 ++ }
13697 + }
13698 +
13699 + return NOTIFY_OK;
13700 +@@ -783,5 +843,5 @@ static int __init kasan_memhotplug_init(void)
13701 + return 0;
13702 + }
13703 +
13704 +-module_init(kasan_memhotplug_init);
13705 ++core_initcall(kasan_memhotplug_init);
13706 + #endif
13707 +diff --git a/mm/khugepaged.c b/mm/khugepaged.c
13708 +index 29221602d802..0a5bb3e8a8a3 100644
13709 +--- a/mm/khugepaged.c
13710 ++++ b/mm/khugepaged.c
13711 +@@ -965,7 +965,9 @@ static void collapse_huge_page(struct mm_struct *mm,
13712 + goto out_nolock;
13713 + }
13714 +
13715 +- if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
13716 ++ /* Do not oom kill for khugepaged charges */
13717 ++ if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
13718 ++ &memcg, true))) {
13719 + result = SCAN_CGROUP_CHARGE_FAIL;
13720 + goto out_nolock;
13721 + }
13722 +@@ -1324,7 +1326,9 @@ static void collapse_shmem(struct mm_struct *mm,
13723 + goto out;
13724 + }
13725 +
13726 +- if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
13727 ++ /* Do not oom kill for khugepaged charges */
13728 ++ if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
13729 ++ &memcg, true))) {
13730 + result = SCAN_CGROUP_CHARGE_FAIL;
13731 + goto out;
13732 + }
13733 +diff --git a/mm/kmemleak.c b/mm/kmemleak.c
13734 +index bd1374f402cd..d9e0be2a8189 100644
13735 +--- a/mm/kmemleak.c
13736 ++++ b/mm/kmemleak.c
13737 +@@ -1658,8 +1658,7 @@ static void start_scan_thread(void)
13738 + }
13739 +
13740 + /*
13741 +- * Stop the automatic memory scanning thread. This function must be called
13742 +- * with the scan_mutex held.
13743 ++ * Stop the automatic memory scanning thread.
13744 + */
13745 + static void stop_scan_thread(void)
13746 + {
13747 +@@ -1922,12 +1921,15 @@ static void kmemleak_do_cleanup(struct work_struct *work)
13748 + {
13749 + stop_scan_thread();
13750 +
13751 ++ mutex_lock(&scan_mutex);
13752 + /*
13753 +- * Once the scan thread has stopped, it is safe to no longer track
13754 +- * object freeing. Ordering of the scan thread stopping and the memory
13755 +- * accesses below is guaranteed by the kthread_stop() function.
13756 ++ * Once it is made sure that kmemleak_scan has stopped, it is safe to no
13757 ++ * longer track object freeing. Ordering of the scan thread stopping and
13758 ++ * the memory accesses below is guaranteed by the kthread_stop()
13759 ++ * function.
13760 + */
13761 + kmemleak_free_enabled = 0;
13762 ++ mutex_unlock(&scan_mutex);
13763 +
13764 + if (!kmemleak_found_leaks)
13765 + __kmemleak_do_cleanup();
13766 +diff --git a/mm/ksm.c b/mm/ksm.c
13767 +index 5b6be9eeb095..fdc8746ebcb4 100644
13768 +--- a/mm/ksm.c
13769 ++++ b/mm/ksm.c
13770 +@@ -2085,8 +2085,22 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
13771 + tree_rmap_item =
13772 + unstable_tree_search_insert(rmap_item, page, &tree_page);
13773 + if (tree_rmap_item) {
13774 ++ bool split;
13775 ++
13776 + kpage = try_to_merge_two_pages(rmap_item, page,
13777 + tree_rmap_item, tree_page);
13778 ++ /*
13779 ++ * If both pages we tried to merge belong to the same compound
13780 ++ * page, then we actually ended up increasing the reference
13781 ++ * count of the same compound page twice, and split_huge_page
13782 ++ * failed.
13783 ++ * Here we set a flag if that happened, and we use it later to
13784 ++ * try split_huge_page again. Since we call put_page right
13785 ++ * afterwards, the reference count will be correct and
13786 ++ * split_huge_page should succeed.
13787 ++ */
13788 ++ split = PageTransCompound(page)
13789 ++ && compound_head(page) == compound_head(tree_page);
13790 + put_page(tree_page);
13791 + if (kpage) {
13792 + /*
13793 +@@ -2113,6 +2127,20 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
13794 + break_cow(tree_rmap_item);
13795 + break_cow(rmap_item);
13796 + }
13797 ++ } else if (split) {
13798 ++ /*
13799 ++ * We are here if we tried to merge two pages and
13800 ++ * failed because they both belonged to the same
13801 ++ * compound page. We will split the page now, but no
13802 ++ * merging will take place.
13803 ++ * We do not want to add the cost of a full lock; if
13804 ++ * the page is locked, it is better to skip it and
13805 ++ * perhaps try again later.
13806 ++ */
13807 ++ if (!trylock_page(page))
13808 ++ return;
13809 ++ split_huge_page(page);
13810 ++ unlock_page(page);
13811 + }
13812 + }
13813 + }
13814 +diff --git a/mm/mempolicy.c b/mm/mempolicy.c
13815 +index 2d3077ce50cd..ecbda7f5d494 100644
13816 +--- a/mm/mempolicy.c
13817 ++++ b/mm/mempolicy.c
13818 +@@ -2128,6 +2128,9 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
13819 + case MPOL_INTERLEAVE:
13820 + return !!nodes_equal(a->v.nodes, b->v.nodes);
13821 + case MPOL_PREFERRED:
13822 ++ /* a's ->flags is the same as b's */
13823 ++ if (a->flags & MPOL_F_LOCAL)
13824 ++ return true;
13825 + return a->v.preferred_node == b->v.preferred_node;
13826 + default:
13827 + BUG();
13828 +diff --git a/mm/page_idle.c b/mm/page_idle.c
13829 +index 0a49374e6931..e412a63b2b74 100644
13830 +--- a/mm/page_idle.c
13831 ++++ b/mm/page_idle.c
13832 +@@ -65,11 +65,15 @@ static bool page_idle_clear_pte_refs_one(struct page *page,
13833 + while (page_vma_mapped_walk(&pvmw)) {
13834 + addr = pvmw.address;
13835 + if (pvmw.pte) {
13836 +- referenced = ptep_clear_young_notify(vma, addr,
13837 +- pvmw.pte);
13838 ++ /*
13839 ++ * For PTE-mapped THP, one sub page is referenced,
13840 ++ * the whole THP is referenced.
13841 ++ */
13842 ++ if (ptep_clear_young_notify(vma, addr, pvmw.pte))
13843 ++ referenced = true;
13844 + } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
13845 +- referenced = pmdp_clear_young_notify(vma, addr,
13846 +- pvmw.pmd);
13847 ++ if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
13848 ++ referenced = true;
13849 + } else {
13850 + /* unexpected pmd-mapped page? */
13851 + WARN_ON_ONCE(1);
13852 +diff --git a/mm/page_owner.c b/mm/page_owner.c
13853 +index 4f44b95b9d1e..a71fe4c623ef 100644
13854 +--- a/mm/page_owner.c
13855 ++++ b/mm/page_owner.c
13856 +@@ -123,13 +123,13 @@ void __reset_page_owner(struct page *page, unsigned int order)
13857 + static inline bool check_recursive_alloc(struct stack_trace *trace,
13858 + unsigned long ip)
13859 + {
13860 +- int i, count;
13861 ++ int i;
13862 +
13863 + if (!trace->nr_entries)
13864 + return false;
13865 +
13866 +- for (i = 0, count = 0; i < trace->nr_entries; i++) {
13867 +- if (trace->entries[i] == ip && ++count == 2)
13868 ++ for (i = 0; i < trace->nr_entries; i++) {
13869 ++ if (trace->entries[i] == ip)
13870 + return true;
13871 + }
13872 +
13873 +diff --git a/mm/slab.c b/mm/slab.c
13874 +index 1bfc3d847a0a..198c1e2c5358 100644
13875 +--- a/mm/slab.c
13876 ++++ b/mm/slab.c
13877 +@@ -1283,6 +1283,7 @@ void __init kmem_cache_init(void)
13878 + nr_node_ids * sizeof(struct kmem_cache_node *),
13879 + SLAB_HWCACHE_ALIGN);
13880 + list_add(&kmem_cache->list, &slab_caches);
13881 ++ memcg_link_cache(kmem_cache);
13882 + slab_state = PARTIAL;
13883 +
13884 + /*
13885 +diff --git a/mm/swapfile.c b/mm/swapfile.c
13886 +index e47a21e64764..03d2ce288d83 100644
13887 +--- a/mm/swapfile.c
13888 ++++ b/mm/swapfile.c
13889 +@@ -2954,6 +2954,10 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
13890 + maxpages = swp_offset(pte_to_swp_entry(
13891 + swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
13892 + last_page = swap_header->info.last_page;
13893 ++ if (!last_page) {
13894 ++ pr_warn("Empty swap-file\n");
13895 ++ return 0;
13896 ++ }
13897 + if (last_page > maxpages) {
13898 + pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
13899 + maxpages << (PAGE_SHIFT - 10),
13900 +diff --git a/mm/vmscan.c b/mm/vmscan.c
13901 +index b3f5e337b64a..1a581468a9cf 100644
13902 +--- a/mm/vmscan.c
13903 ++++ b/mm/vmscan.c
13904 +@@ -3961,7 +3961,13 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
13905 + */
13906 + int page_evictable(struct page *page)
13907 + {
13908 +- return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
13909 ++ int ret;
13910 ++
13911 ++ /* Prevent address_space of inode and swap cache from being freed */
13912 ++ rcu_read_lock();
13913 ++ ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
13914 ++ rcu_read_unlock();
13915 ++ return ret;
13916 + }
13917 +
13918 + #ifdef CONFIG_SHMEM
13919 +diff --git a/mm/vmstat.c b/mm/vmstat.c
13920 +index 4bb13e72ac97..e085b13c572e 100644
13921 +--- a/mm/vmstat.c
13922 ++++ b/mm/vmstat.c
13923 +@@ -1770,9 +1770,11 @@ static void vmstat_update(struct work_struct *w)
13924 + * to occur in the future. Keep on running the
13925 + * update worker thread.
13926 + */
13927 ++ preempt_disable();
13928 + queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
13929 + this_cpu_ptr(&vmstat_work),
13930 + round_jiffies_relative(sysctl_stat_interval));
13931 ++ preempt_enable();
13932 + }
13933 + }
13934 +
13935 +diff --git a/mm/z3fold.c b/mm/z3fold.c
13936 +index ddfb20cfd9af..f33403d718ac 100644
13937 +--- a/mm/z3fold.c
13938 ++++ b/mm/z3fold.c
13939 +@@ -469,6 +469,8 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
13940 + spin_lock_init(&pool->lock);
13941 + spin_lock_init(&pool->stale_lock);
13942 + pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
13943 ++ if (!pool->unbuddied)
13944 ++ goto out_pool;
13945 + for_each_possible_cpu(cpu) {
13946 + struct list_head *unbuddied =
13947 + per_cpu_ptr(pool->unbuddied, cpu);
13948 +@@ -481,7 +483,7 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
13949 + pool->name = name;
13950 + pool->compact_wq = create_singlethread_workqueue(pool->name);
13951 + if (!pool->compact_wq)
13952 +- goto out;
13953 ++ goto out_unbuddied;
13954 + pool->release_wq = create_singlethread_workqueue(pool->name);
13955 + if (!pool->release_wq)
13956 + goto out_wq;
13957 +@@ -491,8 +493,11 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
13958 +
13959 + out_wq:
13960 + destroy_workqueue(pool->compact_wq);
13961 +-out:
13962 ++out_unbuddied:
13963 ++ free_percpu(pool->unbuddied);
13964 ++out_pool:
13965 + kfree(pool);
13966 ++out:
13967 + return NULL;
13968 + }
13969 +
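The relabeled z3fold error paths above give each allocation its own unwind point, so a failure frees exactly what was allocated before it — the classic C error ladder. A standalone sketch of the pattern with plain malloc, using labels matching the hunk's out_unbuddied/out_pool/out structure:

#include <stdio.h>
#include <stdlib.h>

struct pool { void *unbuddied; void *wq; };

static struct pool *create_pool(void)
{
	struct pool *p = malloc(sizeof(*p));

	if (!p)
		goto out;
	p->unbuddied = malloc(64);
	if (!p->unbuddied)
		goto out_pool;
	p->wq = malloc(64);
	if (!p->wq)
		goto out_unbuddied;
	return p;

out_unbuddied:
	free(p->unbuddied);	/* unwind in reverse allocation order */
out_pool:
	free(p);
out:
	return NULL;
}

int main(void)
{
	struct pool *p = create_pool();

	printf(p ? "ok\n" : "failed\n");
	if (p) { free(p->wq); free(p->unbuddied); free(p); }
	return 0;
}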
13970 +diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
13971 +index 64aa9f755e1d..45c9bf5ff3a0 100644
13972 +--- a/net/8021q/vlan_core.c
13973 ++++ b/net/8021q/vlan_core.c
13974 +@@ -48,8 +48,8 @@ bool vlan_do_receive(struct sk_buff **skbp)
13975 + * original position later
13976 + */
13977 + skb_push(skb, offset);
13978 +- skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto,
13979 +- skb->vlan_tci);
13980 ++ skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto,
13981 ++ skb->vlan_tci, skb->mac_len);
13982 + if (!skb)
13983 + return false;
13984 + skb_pull(skb, offset + VLAN_HLEN);
13985 +diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
13986 +index 83ba5483455a..71d8809fbe94 100644
13987 +--- a/net/batman-adv/bat_iv_ogm.c
13988 ++++ b/net/batman-adv/bat_iv_ogm.c
13989 +@@ -2719,7 +2719,7 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
13990 + struct batadv_neigh_ifinfo *router_ifinfo = NULL;
13991 + struct batadv_neigh_node *router;
13992 + struct batadv_gw_node *curr_gw;
13993 +- int ret = -EINVAL;
13994 ++ int ret = 0;
13995 + void *hdr;
13996 +
13997 + router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
13998 +diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
13999 +index 4e2724c5b33d..a8f4c3902cf5 100644
14000 +--- a/net/batman-adv/bat_v.c
14001 ++++ b/net/batman-adv/bat_v.c
14002 +@@ -930,7 +930,7 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
14003 + struct batadv_neigh_ifinfo *router_ifinfo = NULL;
14004 + struct batadv_neigh_node *router;
14005 + struct batadv_gw_node *curr_gw;
14006 +- int ret = -EINVAL;
14007 ++ int ret = 0;
14008 + void *hdr;
14009 +
14010 + router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
14011 +diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
14012 +index cdd8e8e4df0b..422ee16b7854 100644
14013 +--- a/net/batman-adv/bridge_loop_avoidance.c
14014 ++++ b/net/batman-adv/bridge_loop_avoidance.c
14015 +@@ -2161,22 +2161,25 @@ batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
14016 + {
14017 + struct batadv_bla_claim *claim;
14018 + int idx = 0;
14019 ++ int ret = 0;
14020 +
14021 + rcu_read_lock();
14022 + hlist_for_each_entry_rcu(claim, head, hash_entry) {
14023 + if (idx++ < *idx_skip)
14024 + continue;
14025 +- if (batadv_bla_claim_dump_entry(msg, portid, seq,
14026 +- primary_if, claim)) {
14027 ++
14028 ++ ret = batadv_bla_claim_dump_entry(msg, portid, seq,
14029 ++ primary_if, claim);
14030 ++ if (ret) {
14031 + *idx_skip = idx - 1;
14032 + goto unlock;
14033 + }
14034 + }
14035 +
14036 +- *idx_skip = idx;
14037 ++ *idx_skip = 0;
14038 + unlock:
14039 + rcu_read_unlock();
14040 +- return 0;
14041 ++ return ret;
14042 + }
14043 +
14044 + /**
14045 +@@ -2391,22 +2394,25 @@ batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
14046 + {
14047 + struct batadv_bla_backbone_gw *backbone_gw;
14048 + int idx = 0;
14049 ++ int ret = 0;
14050 +
14051 + rcu_read_lock();
14052 + hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
14053 + if (idx++ < *idx_skip)
14054 + continue;
14055 +- if (batadv_bla_backbone_dump_entry(msg, portid, seq,
14056 +- primary_if, backbone_gw)) {
14057 ++
14058 ++ ret = batadv_bla_backbone_dump_entry(msg, portid, seq,
14059 ++ primary_if, backbone_gw);
14060 ++ if (ret) {
14061 + *idx_skip = idx - 1;
14062 + goto unlock;
14063 + }
14064 + }
14065 +
14066 +- *idx_skip = idx;
14067 ++ *idx_skip = 0;
14068 + unlock:
14069 + rcu_read_unlock();
14070 +- return 0;
14071 ++ return ret;
14072 + }
14073 +
14074 + /**
14075 +diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
14076 +index b6cfa78e9381..4f0111bc6621 100644
14077 +--- a/net/batman-adv/distributed-arp-table.c
14078 ++++ b/net/batman-adv/distributed-arp-table.c
14079 +@@ -391,7 +391,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
14080 + batadv_arp_hw_src(skb, hdr_size), &ip_src,
14081 + batadv_arp_hw_dst(skb, hdr_size), &ip_dst);
14082 +
14083 +- if (hdr_size == 0)
14084 ++ if (hdr_size < sizeof(struct batadv_unicast_packet))
14085 + return;
14086 +
14087 + unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
14088 +diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
14089 +index a98cf1104a30..b6abd19ab23e 100644
14090 +--- a/net/batman-adv/fragmentation.c
14091 ++++ b/net/batman-adv/fragmentation.c
14092 +@@ -287,7 +287,8 @@ batadv_frag_merge_packets(struct hlist_head *chain)
14093 + /* Move the existing MAC header to just before the payload. (Override
14094 + * the fragment header.)
14095 + */
14096 +- skb_pull_rcsum(skb_out, hdr_size);
14097 ++ skb_pull(skb_out, hdr_size);
14098 ++ skb_out->ip_summed = CHECKSUM_NONE;
14099 + memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
14100 + skb_set_mac_header(skb_out, -ETH_HLEN);
14101 + skb_reset_network_header(skb_out);
14102 +diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
14103 +index de9955d5224d..06276ae9f752 100644
14104 +--- a/net/batman-adv/gateway_client.c
14105 ++++ b/net/batman-adv/gateway_client.c
14106 +@@ -705,7 +705,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
14107 + {
14108 + struct batadv_neigh_node *neigh_curr = NULL;
14109 + struct batadv_neigh_node *neigh_old = NULL;
14110 +- struct batadv_orig_node *orig_dst_node;
14111 ++ struct batadv_orig_node *orig_dst_node = NULL;
14112 + struct batadv_gw_node *gw_node = NULL;
14113 + struct batadv_gw_node *curr_gw = NULL;
14114 + struct batadv_neigh_ifinfo *curr_ifinfo, *old_ifinfo;
14115 +@@ -716,6 +716,9 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
14116 +
14117 + vid = batadv_get_vid(skb, 0);
14118 +
14119 ++ if (is_multicast_ether_addr(ethhdr->h_dest))
14120 ++ goto out;
14121 ++
14122 + orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
14123 + ethhdr->h_dest, vid);
14124 + if (!orig_dst_node)
14125 +diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
14126 +index d327670641ac..fa02fb73367c 100644
14127 +--- a/net/batman-adv/multicast.c
14128 ++++ b/net/batman-adv/multicast.c
14129 +@@ -540,8 +540,8 @@ static bool batadv_mcast_mla_tvlv_update(struct batadv_priv *bat_priv)
14130 + bat_priv->mcast.enabled = true;
14131 + }
14132 +
14133 +- return !(mcast_data.flags &
14134 +- (BATADV_MCAST_WANT_ALL_IPV4 | BATADV_MCAST_WANT_ALL_IPV6));
14135 ++ return !(mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV4 &&
14136 ++ mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV6);
14137 + }
14138 +
14139 + /**
14140 +@@ -809,8 +809,8 @@ static struct batadv_orig_node *
14141 + batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
14142 + struct ethhdr *ethhdr)
14143 + {
14144 +- return batadv_transtable_search(bat_priv, ethhdr->h_source,
14145 +- ethhdr->h_dest, BATADV_NO_FLAGS);
14146 ++ return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest,
14147 ++ BATADV_NO_FLAGS);
14148 + }
14149 +
14150 + /**
14151 +diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
14152 +index f10e3ff26f9d..cd82cff716c7 100644
14153 +--- a/net/batman-adv/routing.c
14154 ++++ b/net/batman-adv/routing.c
14155 +@@ -743,6 +743,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
14156 + /**
14157 + * batadv_reroute_unicast_packet - update the unicast header for re-routing
14158 + * @bat_priv: the bat priv with all the soft interface information
14159 ++ * @skb: unicast packet to process
14160 + * @unicast_packet: the unicast header to be updated
14161 + * @dst_addr: the payload destination
14162 + * @vid: VLAN identifier
14163 +@@ -754,7 +755,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
14164 + * Return: true if the packet header has been updated, false otherwise
14165 + */
14166 + static bool
14167 +-batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
14168 ++batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
14169 + struct batadv_unicast_packet *unicast_packet,
14170 + u8 *dst_addr, unsigned short vid)
14171 + {
14172 +@@ -783,8 +784,10 @@ batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
14173 + }
14174 +
14175 + /* update the packet header */
14176 ++ skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
14177 + ether_addr_copy(unicast_packet->dest, orig_addr);
14178 + unicast_packet->ttvn = orig_ttvn;
14179 ++ skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
14180 +
14181 + ret = true;
14182 + out:
14183 +@@ -825,7 +828,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
14184 + * the packet to
14185 + */
14186 + if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) {
14187 +- if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
14188 ++ if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
14189 + ethhdr->h_dest, vid))
14190 + batadv_dbg_ratelimited(BATADV_DBG_TT,
14191 + bat_priv,
14192 +@@ -871,7 +874,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
14193 + * destination can possibly be updated and forwarded towards the new
14194 + * target host
14195 + */
14196 +- if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
14197 ++ if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
14198 + ethhdr->h_dest, vid)) {
14199 + batadv_dbg_ratelimited(BATADV_DBG_TT, bat_priv,
14200 + "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
14201 +@@ -894,12 +897,14 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
14202 + if (!primary_if)
14203 + return false;
14204 +
14205 ++ /* update the packet header */
14206 ++ skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
14207 + ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr);
14208 ++ unicast_packet->ttvn = curr_ttvn;
14209 ++ skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
14210 +
14211 + batadv_hardif_put(primary_if);
14212 +
14213 +- unicast_packet->ttvn = curr_ttvn;
14214 +-
14215 + return true;
14216 + }
14217 +
14218 +diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
14219 +index 10f7edfb176e..aa2c49fa31ce 100644
14220 +--- a/net/batman-adv/soft-interface.c
14221 ++++ b/net/batman-adv/soft-interface.c
14222 +@@ -451,13 +451,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
14223 +
14224 + /* skb->dev & skb->pkt_type are set here */
14225 + skb->protocol = eth_type_trans(skb, soft_iface);
14226 +-
14227 +- /* should not be necessary anymore as we use skb_pull_rcsum()
14228 +- * TODO: please verify this and remove this TODO
14229 +- * -- Dec 21st 2009, Simon Wunderlich
14230 +- */
14231 +-
14232 +- /* skb->ip_summed = CHECKSUM_UNNECESSARY; */
14233 ++ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
14234 +
14235 + batadv_inc_counter(bat_priv, BATADV_CNT_RX);
14236 + batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
14237 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
14238 +index 2800c4c4978c..5b8cd359c4c0 100644
14239 +--- a/net/bridge/netfilter/ebtables.c
14240 ++++ b/net/bridge/netfilter/ebtables.c
14241 +@@ -1641,7 +1641,8 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
14242 + int off = ebt_compat_match_offset(match, m->match_size);
14243 + compat_uint_t msize = m->match_size - off;
14244 +
14245 +- BUG_ON(off >= m->match_size);
14246 ++ if (WARN_ON(off >= m->match_size))
14247 ++ return -EINVAL;
14248 +
14249 + if (copy_to_user(cm->u.name, match->name,
14250 + strlen(match->name) + 1) || put_user(msize, &cm->match_size))
14251 +@@ -1671,7 +1672,8 @@ static int compat_target_to_user(struct ebt_entry_target *t,
14252 + int off = xt_compat_target_offset(target);
14253 + compat_uint_t tsize = t->target_size - off;
14254 +
14255 +- BUG_ON(off >= t->target_size);
14256 ++ if (WARN_ON(off >= t->target_size))
14257 ++ return -EINVAL;
14258 +
14259 + if (copy_to_user(cm->u.name, target->name,
14260 + strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
14261 +@@ -1907,7 +1909,8 @@ static int ebt_buf_add(struct ebt_entries_buf_state *state,
14262 + if (state->buf_kern_start == NULL)
14263 + goto count_only;
14264 +
14265 +- BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
14266 ++ if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len))
14267 ++ return -EINVAL;
14268 +
14269 + memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
14270 +
14271 +@@ -1920,7 +1923,8 @@ static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
14272 + {
14273 + char *b = state->buf_kern_start;
14274 +
14275 +- BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
14276 ++ if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len))
14277 ++ return -EINVAL;
14278 +
14279 + if (b != NULL && sz > 0)
14280 + memset(b + state->buf_kern_offset, 0, sz);
14281 +@@ -1997,8 +2001,10 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
14282 + pad = XT_ALIGN(size_kern) - size_kern;
14283 +
14284 + if (pad > 0 && dst) {
14285 +- BUG_ON(state->buf_kern_len <= pad);
14286 +- BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
14287 ++ if (WARN_ON(state->buf_kern_len <= pad))
14288 ++ return -EINVAL;
14289 ++ if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad))
14290 ++ return -EINVAL;
14291 + memset(dst + size_kern, 0, pad);
14292 + }
14293 + return off + match_size;
14294 +@@ -2048,7 +2054,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
14295 + if (ret < 0)
14296 + return ret;
14297 +
14298 +- BUG_ON(ret < match32->match_size);
14299 ++ if (WARN_ON(ret < match32->match_size))
14300 ++ return -EINVAL;
14301 + growth += ret - match32->match_size;
14302 + growth += ebt_compat_entry_padsize();
14303 +
14304 +@@ -2117,8 +2124,12 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
14305 + * offsets are relative to beginning of struct ebt_entry (i.e., 0).
14306 + */
14307 + for (i = 0; i < 4 ; ++i) {
14308 +- if (offsets[i] >= *total)
14309 ++ if (offsets[i] > *total)
14310 ++ return -EINVAL;
14311 ++
14312 ++ if (i < 3 && offsets[i] == *total)
14313 + return -EINVAL;
14314 ++
14315 + if (i == 0)
14316 + continue;
14317 + if (offsets[i-1] > offsets[i])
14318 +@@ -2157,7 +2168,8 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
14319 +
14320 + startoff = state->buf_user_offset - startoff;
14321 +
14322 +- BUG_ON(*total < startoff);
14323 ++ if (WARN_ON(*total < startoff))
14324 ++ return -EINVAL;
14325 + *total -= startoff;
14326 + return 0;
14327 + }
14328 +@@ -2286,7 +2298,8 @@ static int compat_do_replace(struct net *net, void __user *user,
14329 + state.buf_kern_len = size64;
14330 +
14331 + ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
14332 +- BUG_ON(ret < 0); /* parses same data again */
14333 ++ if (WARN_ON(ret < 0))
14334 ++ goto out_unlock;
14335 +
14336 + vfree(entries_tmp);
14337 + tmp.entries_size = size64;
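Every BUG_ON in the ebtables compat path above becomes `if (WARN_ON(...)) return -EINVAL;`, so malformed userspace input logs a warning and fails the syscall instead of crashing the kernel. The one-line form works because WARN_ON() evaluates to the condition's truth value after warning. A minimal userspace stand-in for the macro — it uses a GNU statement expression, as the kernel's own macro does:

#include <stdio.h>

#define WARN_ON(cond) \
	({ int __c = !!(cond); \
	   if (__c) fprintf(stderr, "WARNING at %s:%d\n", __FILE__, __LINE__); \
	   __c; })

static int check(int off, int size)
{
	if (WARN_ON(off >= size))	/* warn and fail instead of crashing */
		return -22;		/* -EINVAL */
	return 0;
}

int main(void)
{
	printf("%d\n", check(8, 4));	/* prints -22, warning on stderr */
	return 0;
}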
14338 +diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
14339 +index 5c036d2f401e..cdb5b693a135 100644
14340 +--- a/net/ceph/ceph_common.c
14341 ++++ b/net/ceph/ceph_common.c
14342 +@@ -418,11 +418,15 @@ ceph_parse_options(char *options, const char *dev_name,
14343 + opt->flags |= CEPH_OPT_FSID;
14344 + break;
14345 + case Opt_name:
14346 ++ kfree(opt->name);
14347 + opt->name = kstrndup(argstr[0].from,
14348 + argstr[0].to-argstr[0].from,
14349 + GFP_KERNEL);
14350 + break;
14351 + case Opt_secret:
14352 ++ ceph_crypto_key_destroy(opt->key);
14353 ++ kfree(opt->key);
14354 ++
14355 + opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL);
14356 + if (!opt->key) {
14357 + err = -ENOMEM;
14358 +@@ -433,6 +437,9 @@ ceph_parse_options(char *options, const char *dev_name,
14359 + goto out;
14360 + break;
14361 + case Opt_key:
14362 ++ ceph_crypto_key_destroy(opt->key);
14363 ++ kfree(opt->key);
14364 ++
14365 + opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL);
14366 + if (!opt->key) {
14367 + err = -ENOMEM;
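The ceph_parse_options hunk above plugs a leak: when the same mount option appeared twice, the earlier kstrndup()/kzalloc() result was overwritten and lost. Freeing the previous value before storing the new one fixes it. A standalone sketch of the pattern for a repeated "name=" option:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *args[] = { "name=alice", "name=bob" };
	char *name = NULL;

	for (size_t i = 0; i < 2; i++) {
		free(name);			/* no-op on first pass, plugs the leak after */
		name = strdup(args[i] + 5);	/* keep only the last value given */
	}
	printf("name=%s\n", name);		/* prints: name=bob */
	free(name);
	return 0;
}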
14368 +diff --git a/net/core/ethtool.c b/net/core/ethtool.c
14369 +index d374a904f1b1..490eab16b04b 100644
14370 +--- a/net/core/ethtool.c
14371 ++++ b/net/core/ethtool.c
14372 +@@ -2505,11 +2505,14 @@ static int set_phy_tunable(struct net_device *dev, void __user *useraddr)
14373 + static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr)
14374 + {
14375 + struct ethtool_fecparam fecparam = { ETHTOOL_GFECPARAM };
14376 ++ int rc;
14377 +
14378 + if (!dev->ethtool_ops->get_fecparam)
14379 + return -EOPNOTSUPP;
14380 +
14381 +- dev->ethtool_ops->get_fecparam(dev, &fecparam);
14382 ++ rc = dev->ethtool_ops->get_fecparam(dev, &fecparam);
14383 ++ if (rc)
14384 ++ return rc;
14385 +
14386 + if (copy_to_user(useraddr, &fecparam, sizeof(fecparam)))
14387 + return -EFAULT;
14388 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
14389 +index ef734ad1d852..c132eca9e383 100644
14390 +--- a/net/core/skbuff.c
14391 ++++ b/net/core/skbuff.c
14392 +@@ -4939,13 +4939,18 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mtu);
14393 +
14394 + static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
14395 + {
14396 ++ int mac_len;
14397 ++
14398 + if (skb_cow(skb, skb_headroom(skb)) < 0) {
14399 + kfree_skb(skb);
14400 + return NULL;
14401 + }
14402 +
14403 +- memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN,
14404 +- 2 * ETH_ALEN);
14405 ++ mac_len = skb->data - skb_mac_header(skb);
14406 ++ if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
14407 ++ memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
14408 ++ mac_len - VLAN_HLEN - ETH_TLEN);
14409 ++ }
14410 + skb->mac_header += VLAN_HLEN;
14411 + return skb;
14412 + }
14413 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
14414 +index 23e6d5532b5c..2459e9cc22a6 100644
14415 +--- a/net/ipv4/ip_gre.c
14416 ++++ b/net/ipv4/ip_gre.c
14417 +@@ -951,9 +951,6 @@ static void __gre_tunnel_init(struct net_device *dev)
14418 +
14419 + t_hlen = tunnel->hlen + sizeof(struct iphdr);
14420 +
14421 +- dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
14422 +- dev->mtu = ETH_DATA_LEN - t_hlen - 4;
14423 +-
14424 + dev->features |= GRE_FEATURES;
14425 + dev->hw_features |= GRE_FEATURES;
14426 +
14427 +@@ -1253,8 +1250,6 @@ static int erspan_tunnel_init(struct net_device *dev)
14428 + sizeof(struct erspanhdr);
14429 + t_hlen = tunnel->hlen + sizeof(struct iphdr);
14430 +
14431 +- dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
14432 +- dev->mtu = ETH_DATA_LEN - t_hlen - 4;
14433 + dev->features |= GRE_FEATURES;
14434 + dev->hw_features |= GRE_FEATURES;
14435 + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
14436 +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
14437 +index a2fcc20774a6..4784f3f36b7e 100644
14438 +--- a/net/ipv4/ip_tunnel.c
14439 ++++ b/net/ipv4/ip_tunnel.c
14440 +@@ -1103,8 +1103,14 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
14441 + eth_hw_addr_random(dev);
14442 +
14443 + mtu = ip_tunnel_bind_dev(dev);
14444 +- if (!tb[IFLA_MTU])
14445 ++ if (tb[IFLA_MTU]) {
14446 ++ unsigned int max = 0xfff8 - dev->hard_header_len - nt->hlen;
14447 ++
14448 ++ dev->mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
14449 ++ (unsigned int)(max - sizeof(struct iphdr)));
14450 ++ } else {
14451 + dev->mtu = mtu;
14452 ++ }
14453 +
14454 + ip_tunnel_add(itn, nt);
14455 + out:
14456 +diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
14457 +index c9cd891f69c2..5c5699c08575 100644
14458 +--- a/net/ipv4/ip_vti.c
14459 ++++ b/net/ipv4/ip_vti.c
14460 +@@ -396,8 +396,6 @@ static int vti_tunnel_init(struct net_device *dev)
14461 + memcpy(dev->dev_addr, &iph->saddr, 4);
14462 + memcpy(dev->broadcast, &iph->daddr, 4);
14463 +
14464 +- dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
14465 +- dev->mtu = ETH_DATA_LEN;
14466 + dev->flags = IFF_NOARP;
14467 + dev->addr_len = 4;
14468 + dev->features |= NETIF_F_LLTX;
14469 +diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
14470 +index c07e9db95ccc..cc7c9d67ac19 100644
14471 +--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
14472 ++++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
14473 +@@ -228,7 +228,6 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
14474 + c->hash_mode = i->hash_mode;
14475 + c->hash_initval = i->hash_initval;
14476 + refcount_set(&c->refcount, 1);
14477 +- refcount_set(&c->entries, 1);
14478 +
14479 + spin_lock_bh(&cn->lock);
14480 + if (__clusterip_config_find(net, ip)) {
14481 +@@ -259,8 +258,10 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
14482 +
14483 + c->notifier.notifier_call = clusterip_netdev_event;
14484 + err = register_netdevice_notifier(&c->notifier);
14485 +- if (!err)
14486 ++ if (!err) {
14487 ++ refcount_set(&c->entries, 1);
14488 + return c;
14489 ++ }
14490 +
14491 + #ifdef CONFIG_PROC_FS
14492 + proc_remove(c->pde);
14493 +@@ -269,7 +270,7 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
14494 + spin_lock_bh(&cn->lock);
14495 + list_del_rcu(&c->list);
14496 + spin_unlock_bh(&cn->lock);
14497 +- kfree(c);
14498 ++ clusterip_config_put(c);
14499 +
14500 + return ERR_PTR(err);
14501 + }
14502 +@@ -492,12 +493,15 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
14503 + return PTR_ERR(config);
14504 + }
14505 + }
14506 +- cipinfo->config = config;
14507 +
14508 + ret = nf_ct_netns_get(par->net, par->family);
14509 +- if (ret < 0)
14510 ++ if (ret < 0) {
14511 + pr_info("cannot load conntrack support for proto=%u\n",
14512 + par->family);
14513 ++ clusterip_config_entry_put(par->net, config);
14514 ++ clusterip_config_put(config);
14515 ++ return ret;
14516 ++ }
14517 +
14518 + if (!par->net->xt.clusterip_deprecated_warning) {
14519 + pr_info("ipt_CLUSTERIP is deprecated and it will removed soon, "
14520 +@@ -505,6 +509,7 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
14521 + par->net->xt.clusterip_deprecated_warning = true;
14522 + }
14523 +
14524 ++ cipinfo->config = config;
14525 + return ret;
14526 + }
14527 +
14528 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
14529 +index 28bc3a98adc7..7afa8d2463d8 100644
14530 +--- a/net/ipv4/route.c
14531 ++++ b/net/ipv4/route.c
14532 +@@ -639,6 +639,7 @@ static inline u32 fnhe_hashfun(__be32 daddr)
14533 + static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
14534 + {
14535 + rt->rt_pmtu = fnhe->fnhe_pmtu;
14536 ++ rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
14537 + rt->dst.expires = fnhe->fnhe_expires;
14538 +
14539 + if (fnhe->fnhe_gw) {
14540 +@@ -649,7 +650,7 @@ static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnh
14541 + }
14542 +
14543 + static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
14544 +- u32 pmtu, unsigned long expires)
14545 ++ u32 pmtu, bool lock, unsigned long expires)
14546 + {
14547 + struct fnhe_hash_bucket *hash;
14548 + struct fib_nh_exception *fnhe;
14549 +@@ -686,8 +687,10 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
14550 + fnhe->fnhe_genid = genid;
14551 + if (gw)
14552 + fnhe->fnhe_gw = gw;
14553 +- if (pmtu)
14554 ++ if (pmtu) {
14555 + fnhe->fnhe_pmtu = pmtu;
14556 ++ fnhe->fnhe_mtu_locked = lock;
14557 ++ }
14558 + fnhe->fnhe_expires = max(1UL, expires);
14559 + /* Update all cached dsts too */
14560 + rt = rcu_dereference(fnhe->fnhe_rth_input);
14561 +@@ -711,6 +714,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
14562 + fnhe->fnhe_daddr = daddr;
14563 + fnhe->fnhe_gw = gw;
14564 + fnhe->fnhe_pmtu = pmtu;
14565 ++ fnhe->fnhe_mtu_locked = lock;
14566 + fnhe->fnhe_expires = max(1UL, expires);
14567 +
14568 + /* Exception created; mark the cached routes for the nexthop
14569 +@@ -792,7 +796,8 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
14570 + struct fib_nh *nh = &FIB_RES_NH(res);
14571 +
14572 + update_or_create_fnhe(nh, fl4->daddr, new_gw,
14573 +- 0, jiffies + ip_rt_gc_timeout);
14574 ++ 0, false,
14575 ++ jiffies + ip_rt_gc_timeout);
14576 + }
14577 + if (kill_route)
14578 + rt->dst.obsolete = DST_OBSOLETE_KILL;
14579 +@@ -1005,15 +1010,18 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
14580 + {
14581 + struct dst_entry *dst = &rt->dst;
14582 + struct fib_result res;
14583 ++ bool lock = false;
14584 +
14585 +- if (dst_metric_locked(dst, RTAX_MTU))
14586 ++ if (ip_mtu_locked(dst))
14587 + return;
14588 +
14589 + if (ipv4_mtu(dst) < mtu)
14590 + return;
14591 +
14592 +- if (mtu < ip_rt_min_pmtu)
14593 ++ if (mtu < ip_rt_min_pmtu) {
14594 ++ lock = true;
14595 + mtu = ip_rt_min_pmtu;
14596 ++ }
14597 +
14598 + if (rt->rt_pmtu == mtu &&
14599 + time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
14600 +@@ -1023,7 +1031,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
14601 + if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
14602 + struct fib_nh *nh = &FIB_RES_NH(res);
14603 +
14604 +- update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
14605 ++ update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock,
14606 + jiffies + ip_rt_mtu_expires);
14607 + }
14608 + rcu_read_unlock();
14609 +@@ -1276,7 +1284,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
14610 +
14611 + mtu = READ_ONCE(dst->dev->mtu);
14612 +
14613 +- if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
14614 ++ if (unlikely(ip_mtu_locked(dst))) {
14615 + if (rt->rt_uses_gateway && mtu > 576)
14616 + mtu = 576;
14617 + }
14618 +@@ -1548,6 +1556,7 @@ struct rtable *rt_dst_alloc(struct net_device *dev,
14619 + rt->rt_is_input = 0;
14620 + rt->rt_iif = 0;
14621 + rt->rt_pmtu = 0;
14622 ++ rt->rt_mtu_locked = 0;
14623 + rt->rt_gateway = 0;
14624 + rt->rt_uses_gateway = 0;
14625 + rt->rt_table_id = 0;
14626 +@@ -2526,6 +2535,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
14627 + rt->rt_is_input = ort->rt_is_input;
14628 + rt->rt_iif = ort->rt_iif;
14629 + rt->rt_pmtu = ort->rt_pmtu;
14630 ++ rt->rt_mtu_locked = ort->rt_mtu_locked;
14631 +
14632 + rt->rt_genid = rt_genid_ipv4(net);
14633 + rt->rt_flags = ort->rt_flags;
14634 +@@ -2628,6 +2638,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
14635 + memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
14636 + if (rt->rt_pmtu && expires)
14637 + metrics[RTAX_MTU - 1] = rt->rt_pmtu;
14638 ++ if (rt->rt_mtu_locked && expires)
14639 ++ metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
14640 + if (rtnetlink_put_metrics(skb, metrics) < 0)
14641 + goto nla_put_failure;
14642 +
14643 +diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
14644 +index 7c843578f233..faddf4f9a707 100644
14645 +--- a/net/ipv4/tcp_illinois.c
14646 ++++ b/net/ipv4/tcp_illinois.c
14647 +@@ -6,7 +6,7 @@
14648 + * The algorithm is described in:
14649 + * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
14650 + * for High-Speed Networks"
14651 +- * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf
14652 ++ * http://tamerbasar.csl.illinois.edu/LiuBasarSrikantPerfEvalArtJun2008.pdf
14653 + *
14654 + * Implemented from description in paper and ns-2 simulation.
14655 + * Copyright (C) 2007 Stephen Hemminger <shemminger@××××××××××××××××.org>
14656 +diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
14657 +index 05017e2c849c..4b586e7d5637 100644
14658 +--- a/net/ipv4/xfrm4_policy.c
14659 ++++ b/net/ipv4/xfrm4_policy.c
14660 +@@ -100,6 +100,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
14661 + xdst->u.rt.rt_gateway = rt->rt_gateway;
14662 + xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
14663 + xdst->u.rt.rt_pmtu = rt->rt_pmtu;
14664 ++ xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;
14665 + xdst->u.rt.rt_table_id = rt->rt_table_id;
14666 + INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);
14667 +
14668 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
14669 +index d61a82fd4b60..565a0388587a 100644
14670 +--- a/net/ipv6/ip6_tunnel.c
14671 ++++ b/net/ipv6/ip6_tunnel.c
14672 +@@ -1990,14 +1990,14 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
14673 + {
14674 + struct net *net = dev_net(dev);
14675 + struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
14676 +- struct ip6_tnl *nt, *t;
14677 + struct ip_tunnel_encap ipencap;
14678 ++ struct ip6_tnl *nt, *t;
14679 ++ int err;
14680 +
14681 + nt = netdev_priv(dev);
14682 +
14683 + if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
14684 +- int err = ip6_tnl_encap_setup(nt, &ipencap);
14685 +-
14686 ++ err = ip6_tnl_encap_setup(nt, &ipencap);
14687 + if (err < 0)
14688 + return err;
14689 + }
14690 +@@ -2013,7 +2013,11 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
14691 + return -EEXIST;
14692 + }
14693 +
14694 +- return ip6_tnl_create2(dev);
14695 ++ err = ip6_tnl_create2(dev);
14696 ++ if (!err && tb[IFLA_MTU])
14697 ++ ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
14698 ++
14699 ++ return err;
14700 + }
14701 +
14702 + static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
14703 +diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
14704 +index 2493a40bc4b1..0e0ab90a4334 100644
14705 +--- a/net/ipv6/ip6_vti.c
14706 ++++ b/net/ipv6/ip6_vti.c
14707 +@@ -852,7 +852,7 @@ static void vti6_dev_setup(struct net_device *dev)
14708 + dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
14709 + dev->mtu = ETH_DATA_LEN;
14710 + dev->min_mtu = IPV6_MIN_MTU;
14711 +- dev->max_mtu = IP_MAX_MTU;
14712 ++ dev->max_mtu = IP_MAX_MTU - sizeof(struct ipv6hdr);
14713 + dev->flags |= IFF_NOARP;
14714 + dev->addr_len = sizeof(struct in6_addr);
14715 + netif_keep_dst(dev);
14716 +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
14717 +index b35d8905794c..ad1e7e6ce009 100644
14718 +--- a/net/ipv6/sit.c
14719 ++++ b/net/ipv6/sit.c
14720 +@@ -1569,6 +1569,13 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
14721 + if (err < 0)
14722 + return err;
14723 +
14724 ++ if (tb[IFLA_MTU]) {
14725 ++ u32 mtu = nla_get_u32(tb[IFLA_MTU]);
14726 ++
14727 ++ if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len)
14728 ++ dev->mtu = mtu;
14729 ++ }
14730 ++
14731 + #ifdef CONFIG_IPV6_SIT_6RD
14732 + if (ipip6_netlink_6rd_parms(data, &ip6rd))
14733 + err = ipip6_tunnel_update_6rd(nt, &ip6rd);
14734 +diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
14735 +index f8d4ab8ca1a5..4b60f68cb492 100644
14736 +--- a/net/llc/llc_c_ac.c
14737 ++++ b/net/llc/llc_c_ac.c
14738 +@@ -389,7 +389,7 @@ static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb)
14739 + llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
14740 + rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
14741 + if (likely(!rc)) {
14742 +- llc_conn_send_pdu(sk, skb);
14743 ++ rc = llc_conn_send_pdu(sk, skb);
14744 + llc_conn_ac_inc_vs_by_1(sk, skb);
14745 + }
14746 + return rc;
14747 +@@ -916,7 +916,7 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk,
14748 + llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR);
14749 + rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
14750 + if (likely(!rc)) {
14751 +- llc_conn_send_pdu(sk, skb);
14752 ++ rc = llc_conn_send_pdu(sk, skb);
14753 + llc_conn_ac_inc_vs_by_1(sk, skb);
14754 + }
14755 + return rc;
14756 +@@ -935,14 +935,17 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk,
14757 + int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb)
14758 + {
14759 + struct llc_sock *llc = llc_sk(sk);
14760 ++ int ret;
14761 +
14762 + if (llc->ack_must_be_send) {
14763 +- llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb);
14764 ++ ret = llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb);
14765 + llc->ack_must_be_send = 0 ;
14766 + llc->ack_pf = 0;
14767 +- } else
14768 +- llc_conn_ac_send_i_cmd_p_set_0(sk, skb);
14769 +- return 0;
14770 ++ } else {
14771 ++ ret = llc_conn_ac_send_i_cmd_p_set_0(sk, skb);
14772 ++ }
14773 ++
14774 ++ return ret;
14775 + }
14776 +
14777 + /**
14778 +diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
14779 +index 9a42448eb182..b084fd19ad32 100644
14780 +--- a/net/llc/llc_conn.c
14781 ++++ b/net/llc/llc_conn.c
14782 +@@ -30,7 +30,7 @@
14783 + #endif
14784 +
14785 + static int llc_find_offset(int state, int ev_type);
14786 +-static void llc_conn_send_pdus(struct sock *sk);
14787 ++static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *skb);
14788 + static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
14789 + static int llc_exec_conn_trans_actions(struct sock *sk,
14790 + struct llc_conn_state_trans *trans,
14791 +@@ -193,11 +193,11 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
14792 + return rc;
14793 + }
14794 +
14795 +-void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
14796 ++int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
14797 + {
14798 + /* queue PDU to send to MAC layer */
14799 + skb_queue_tail(&sk->sk_write_queue, skb);
14800 +- llc_conn_send_pdus(sk);
14801 ++ return llc_conn_send_pdus(sk, skb);
14802 + }
14803 +
14804 + /**
14805 +@@ -255,7 +255,7 @@ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit)
14806 + if (howmany_resend > 0)
14807 + llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
14808 + /* any PDUs to re-send are queued up; start sending to MAC */
14809 +- llc_conn_send_pdus(sk);
14810 ++ llc_conn_send_pdus(sk, NULL);
14811 + out:;
14812 + }
14813 +
14814 +@@ -296,7 +296,7 @@ void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit)
14815 + if (howmany_resend > 0)
14816 + llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
14817 + /* any PDUs to re-send are queued up; start sending to MAC */
14818 +- llc_conn_send_pdus(sk);
14819 ++ llc_conn_send_pdus(sk, NULL);
14820 + out:;
14821 + }
14822 +
14823 +@@ -340,12 +340,16 @@ int llc_conn_remove_acked_pdus(struct sock *sk, u8 nr, u16 *how_many_unacked)
14824 + /**
14825 + * llc_conn_send_pdus - Sends queued PDUs
14826 + * @sk: active connection
14827 ++ * @hold_skb: the skb held by the caller, or NULL if the caller does not care
14828 + *
14829 +- * Sends queued pdus to MAC layer for transmission.
14830 ++ * Sends queued pdus to MAC layer for transmission. When @hold_skb is
14831 ++ * NULL, always return 0. Otherwise, return 0 if @hold_skb is sent
14832 ++ * successfully, or 1 for failure.
14833 + */
14834 +-static void llc_conn_send_pdus(struct sock *sk)
14835 ++static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb)
14836 + {
14837 + struct sk_buff *skb;
14838 ++ int ret = 0;
14839 +
14840 + while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
14841 + struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
14842 +@@ -357,10 +361,20 @@ static void llc_conn_send_pdus(struct sock *sk)
14843 + skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb);
14844 + if (!skb2)
14845 + break;
14846 +- skb = skb2;
14847 ++ dev_queue_xmit(skb2);
14848 ++ } else {
14849 ++ bool is_target = skb == hold_skb;
14850 ++ int rc;
14851 ++
14852 ++ if (is_target)
14853 ++ skb_get(skb);
14854 ++ rc = dev_queue_xmit(skb);
14855 ++ if (is_target)
14856 ++ ret = rc;
14857 + }
14858 +- dev_queue_xmit(skb);
14859 + }
14860 ++
14861 ++ return ret;
14862 + }
14863 +
14864 + /**
14865 +diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
14866 +index 2849a1fc41c5..3a7cfe01ee6d 100644
14867 +--- a/net/mac80211/agg-rx.c
14868 ++++ b/net/mac80211/agg-rx.c
14869 +@@ -8,6 +8,7 @@
14870 + * Copyright 2007, Michael Wu <flamingice@××××××××.net>
14871 + * Copyright 2007-2010, Intel Corporation
14872 + * Copyright(c) 2015-2017 Intel Deutschland GmbH
14873 ++ * Copyright (C) 2018 Intel Corporation
14874 + *
14875 + * This program is free software; you can redistribute it and/or modify
14876 + * it under the terms of the GNU General Public License version 2 as
14877 +@@ -322,9 +323,6 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
14878 + * driver so reject the timeout update.
14879 + */
14880 + status = WLAN_STATUS_REQUEST_DECLINED;
14881 +- ieee80211_send_addba_resp(sta->sdata, sta->sta.addr,
14882 +- tid, dialog_token, status,
14883 +- 1, buf_size, timeout);
14884 + goto end;
14885 + }
14886 +
14887 +diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
14888 +index 9675814f64db..894937bcd479 100644
14889 +--- a/net/mac80211/ieee80211_i.h
14890 ++++ b/net/mac80211/ieee80211_i.h
14891 +@@ -1466,7 +1466,7 @@ struct ieee802_11_elems {
14892 + const struct ieee80211_timeout_interval_ie *timeout_int;
14893 + const u8 *opmode_notif;
14894 + const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
14895 +- const struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie;
14896 ++ struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie;
14897 + const struct ieee80211_bss_max_idle_period_ie *max_idle_period_ie;
14898 +
14899 + /* length of them, respectively */
14900 +diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
14901 +index a550c707cd8a..96e57d7c2872 100644
14902 +--- a/net/mac80211/mesh.c
14903 ++++ b/net/mac80211/mesh.c
14904 +@@ -1253,13 +1253,12 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
14905 + }
14906 +
14907 + static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata,
14908 +- struct ieee80211_mgmt *mgmt, size_t len)
14909 ++ struct ieee80211_mgmt *mgmt, size_t len,
14910 ++ struct ieee802_11_elems *elems)
14911 + {
14912 + struct ieee80211_mgmt *mgmt_fwd;
14913 + struct sk_buff *skb;
14914 + struct ieee80211_local *local = sdata->local;
14915 +- u8 *pos = mgmt->u.action.u.chan_switch.variable;
14916 +- size_t offset_ttl;
14917 +
14918 + skb = dev_alloc_skb(local->tx_headroom + len);
14919 + if (!skb)
14920 +@@ -1267,13 +1266,9 @@ static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata,
14921 + skb_reserve(skb, local->tx_headroom);
14922 + mgmt_fwd = skb_put(skb, len);
14923 +
14924 +- /* offset_ttl is based on whether the secondary channel
14925 +- * offset is available or not. Subtract 1 from the mesh TTL
14926 +- * and disable the initiator flag before forwarding.
14927 +- */
14928 +- offset_ttl = (len < 42) ? 7 : 10;
14929 +- *(pos + offset_ttl) -= 1;
14930 +- *(pos + offset_ttl + 1) &= ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR;
14931 ++ elems->mesh_chansw_params_ie->mesh_ttl--;
14932 ++ elems->mesh_chansw_params_ie->mesh_flags &=
14933 ++ ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR;
14934 +
14935 + memcpy(mgmt_fwd, mgmt, len);
14936 + eth_broadcast_addr(mgmt_fwd->da);
14937 +@@ -1321,7 +1316,7 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata,
14938 +
14939 + /* forward or re-broadcast the CSA frame */
14940 + if (fwd_csa) {
14941 +- if (mesh_fwd_csa_frame(sdata, mgmt, len) < 0)
14942 ++ if (mesh_fwd_csa_frame(sdata, mgmt, len, &elems) < 0)
14943 + mcsa_dbg(sdata, "Failed to forward the CSA frame");
14944 + }
14945 + }
14946 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
14947 +index 4daafb07602f..dddd498e1338 100644
14948 +--- a/net/mac80211/rx.c
14949 ++++ b/net/mac80211/rx.c
14950 +@@ -3928,7 +3928,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
14951 + if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
14952 + IEEE80211_FCTL_TODS)) !=
14953 + fast_rx->expected_ds_bits)
14954 +- goto drop;
14955 ++ return false;
14956 +
14957 + /* assign the key to drop unencrypted frames (later)
14958 + * and strip the IV/MIC if necessary
14959 +diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
14960 +index ee0181778a42..029334835747 100644
14961 +--- a/net/mac80211/spectmgmt.c
14962 ++++ b/net/mac80211/spectmgmt.c
14963 +@@ -8,6 +8,7 @@
14964 + * Copyright 2007, Michael Wu <flamingice@××××××××.net>
14965 + * Copyright 2007-2008, Intel Corporation
14966 + * Copyright 2008, Johannes Berg <johannes@××××××××××××.net>
14967 ++ * Copyright (C) 2018 Intel Corporation
14968 + *
14969 + * This program is free software; you can redistribute it and/or modify
14970 + * it under the terms of the GNU General Public License version 2 as
14971 +@@ -27,7 +28,7 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
14972 + u32 sta_flags, u8 *bssid,
14973 + struct ieee80211_csa_ie *csa_ie)
14974 + {
14975 +- enum nl80211_band new_band;
14976 ++ enum nl80211_band new_band = current_band;
14977 + int new_freq;
14978 + u8 new_chan_no;
14979 + struct ieee80211_channel *new_chan;
14980 +@@ -55,15 +56,13 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
14981 + elems->ext_chansw_ie->new_operating_class,
14982 + &new_band)) {
14983 + sdata_info(sdata,
14984 +- "cannot understand ECSA IE operating class %d, disconnecting\n",
14985 ++ "cannot understand ECSA IE operating class, %d, ignoring\n",
14986 + elems->ext_chansw_ie->new_operating_class);
14987 +- return -EINVAL;
14988 + }
14989 + new_chan_no = elems->ext_chansw_ie->new_ch_num;
14990 + csa_ie->count = elems->ext_chansw_ie->count;
14991 + csa_ie->mode = elems->ext_chansw_ie->mode;
14992 + } else if (elems->ch_switch_ie) {
14993 +- new_band = current_band;
14994 + new_chan_no = elems->ch_switch_ie->new_ch_num;
14995 + csa_ie->count = elems->ch_switch_ie->count;
14996 + csa_ie->mode = elems->ch_switch_ie->mode;
14997 +diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
14998 +index 69615016d5bf..f1b496222bda 100644
14999 +--- a/net/mac80211/sta_info.c
15000 ++++ b/net/mac80211/sta_info.c
15001 +@@ -314,7 +314,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
15002 +
15003 + if (ieee80211_hw_check(hw, USES_RSS)) {
15004 + sta->pcpu_rx_stats =
15005 +- alloc_percpu(struct ieee80211_sta_rx_stats);
15006 ++ alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp);
15007 + if (!sta->pcpu_rx_stats)
15008 + goto free;
15009 + }
15010 +@@ -439,6 +439,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
15011 + if (sta->sta.txq[0])
15012 + kfree(to_txq_info(sta->sta.txq[0]));
15013 + free:
15014 ++ free_percpu(sta->pcpu_rx_stats);
15015 + #ifdef CONFIG_MAC80211_MESH
15016 + kfree(sta->mesh);
15017 + #endif
15018 +diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
15019 +index 3e17d32b629d..58d5d05aec24 100644
15020 +--- a/net/netfilter/ipvs/ip_vs_ftp.c
15021 ++++ b/net/netfilter/ipvs/ip_vs_ftp.c
15022 +@@ -260,7 +260,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
15023 + buf_len = strlen(buf);
15024 +
15025 + ct = nf_ct_get(skb, &ctinfo);
15026 +- if (ct && (ct->status & IPS_NAT_MASK)) {
15027 ++ if (ct) {
15028 + bool mangled;
15029 +
15030 + /* If mangling fails this function will return 0
15031 +diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
15032 +index 22dc1b9d6362..c070dfc0190a 100644
15033 +--- a/net/netlabel/netlabel_unlabeled.c
15034 ++++ b/net/netlabel/netlabel_unlabeled.c
15035 +@@ -1472,6 +1472,16 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb,
15036 + iface = rcu_dereference(netlbl_unlhsh_def);
15037 + if (iface == NULL || !iface->valid)
15038 + goto unlabel_getattr_nolabel;
15039 ++
15040 ++#if IS_ENABLED(CONFIG_IPV6)
15041 ++ /* When resolving a fallback label, check the sk_buff version as
15042 ++ * it is possible (e.g. SCTP) to have family = PF_INET6 while
15043 ++ * receiving ip_hdr(skb)->version = 4.
15044 ++ */
15045 ++ if (family == PF_INET6 && ip_hdr(skb)->version == 4)
15046 ++ family = PF_INET;
15047 ++#endif /* IPv6 */
15048 ++
15049 + switch (family) {
15050 + case PF_INET: {
15051 + struct iphdr *hdr4;
15052 +diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
15053 +index 367d8c027101..2ceefa183cee 100644
15054 +--- a/net/nfc/llcp_commands.c
15055 ++++ b/net/nfc/llcp_commands.c
15056 +@@ -149,6 +149,10 @@ struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri,
15057 +
15058 + pr_debug("uri: %s, len: %zu\n", uri, uri_len);
15059 +
15060 ++ /* sdreq->tlv_len is u8, takes uri_len, + 3 for header, + 1 for NULL */
15061 ++ if (WARN_ON_ONCE(uri_len > U8_MAX - 4))
15062 ++ return NULL;
15063 ++
15064 + sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL);
15065 + if (sdreq == NULL)
15066 + return NULL;
15067 +diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
15068 +index b251fb936a27..08ed6abe4aae 100644
15069 +--- a/net/nfc/netlink.c
15070 ++++ b/net/nfc/netlink.c
15071 +@@ -61,7 +61,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
15072 + };
15073 +
15074 + static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = {
15075 +- [NFC_SDP_ATTR_URI] = { .type = NLA_STRING },
15076 ++ [NFC_SDP_ATTR_URI] = { .type = NLA_STRING,
15077 ++ .len = U8_MAX - 4 },
15078 + [NFC_SDP_ATTR_SAP] = { .type = NLA_U8 },
15079 + };
15080 +
15081 +diff --git a/net/qrtr/smd.c b/net/qrtr/smd.c
15082 +index 50615d5efac1..9cf089b9754e 100644
15083 +--- a/net/qrtr/smd.c
15084 ++++ b/net/qrtr/smd.c
15085 +@@ -114,5 +114,6 @@ static struct rpmsg_driver qcom_smd_qrtr_driver = {
15086 +
15087 + module_rpmsg_driver(qcom_smd_qrtr_driver);
15088 +
15089 ++MODULE_ALIAS("rpmsg:IPCRTR");
15090 + MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver");
15091 + MODULE_LICENSE("GPL v2");
15092 +diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
15093 +index c061d6eb465d..22571189f21e 100644
15094 +--- a/net/rds/tcp_listen.c
15095 ++++ b/net/rds/tcp_listen.c
15096 +@@ -1,5 +1,5 @@
15097 + /*
15098 +- * Copyright (c) 2006 Oracle. All rights reserved.
15099 ++ * Copyright (c) 2006, 2018 Oracle. All rights reserved.
15100 + *
15101 + * This software is available to you under a choice of one of two
15102 + * licenses. You may choose to be licensed under the terms of the GNU
15103 +@@ -142,12 +142,20 @@ int rds_tcp_accept_one(struct socket *sock)
15104 + if (ret)
15105 + goto out;
15106 +
15107 +- new_sock->type = sock->type;
15108 +- new_sock->ops = sock->ops;
15109 + ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true);
15110 + if (ret < 0)
15111 + goto out;
15112 +
15113 ++ /* sock_create_lite() does not get a hold on the owner module so we
15114 ++ * need to do it here. Note that sock_release() uses sock->ops to
15115 ++ * determine if it needs to decrement the reference count. So set
15116 ++ * sock->ops after calling accept() in case that fails. And there's
15117 ++ * no need to do try_module_get() as the listener should have a hold
15118 ++ * already.
15119 ++ */
15120 ++ new_sock->ops = sock->ops;
15121 ++ __module_get(new_sock->ops->owner);
15122 ++
15123 + ret = rds_tcp_keepalive(new_sock);
15124 + if (ret < 0)
15125 + goto out;
15126 +diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
15127 +index e56e23ed2229..5edb636dbc4d 100644
15128 +--- a/net/rxrpc/input.c
15129 ++++ b/net/rxrpc/input.c
15130 +@@ -1175,16 +1175,19 @@ void rxrpc_data_ready(struct sock *udp_sk)
15131 + goto discard_unlock;
15132 +
15133 + if (sp->hdr.callNumber == chan->last_call) {
15134 +- /* For the previous service call, if completed successfully, we
15135 +- * discard all further packets.
15136 ++ if (chan->call ||
15137 ++ sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
15138 ++ goto discard_unlock;
15139 ++
15140 ++ /* For the previous service call, if completed
15141 ++ * successfully, we discard all further packets.
15142 + */
15143 + if (rxrpc_conn_is_service(conn) &&
15144 +- (chan->last_type == RXRPC_PACKET_TYPE_ACK ||
15145 +- sp->hdr.type == RXRPC_PACKET_TYPE_ABORT))
15146 ++ chan->last_type == RXRPC_PACKET_TYPE_ACK)
15147 + goto discard_unlock;
15148 +
15149 +- /* But otherwise we need to retransmit the final packet from
15150 +- * data cached in the connection record.
15151 ++ /* But otherwise we need to retransmit the final packet
15152 ++ * from data cached in the connection record.
15153 + */
15154 + rxrpc_post_packet_to_conn(conn, skb);
15155 + goto out_unlock;
15156 +diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
15157 +index bdece21f313d..abcf48026d99 100644
15158 +--- a/net/rxrpc/recvmsg.c
15159 ++++ b/net/rxrpc/recvmsg.c
15160 +@@ -513,9 +513,10 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
15161 + ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
15162 + sizeof(unsigned int), &id32);
15163 + } else {
15164 ++ unsigned long idl = call->user_call_ID;
15165 ++
15166 + ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
15167 +- sizeof(unsigned long),
15168 +- &call->user_call_ID);
15169 ++ sizeof(unsigned long), &idl);
15170 + }
15171 + if (ret < 0)
15172 + goto error_unlock_call;
15173 +diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
15174 +index d2f51d6a253c..016e293681b8 100644
15175 +--- a/net/rxrpc/sendmsg.c
15176 ++++ b/net/rxrpc/sendmsg.c
15177 +@@ -92,7 +92,9 @@ static inline void rxrpc_instant_resend(struct rxrpc_call *call, int ix)
15178 + spin_lock_bh(&call->lock);
15179 +
15180 + if (call->state < RXRPC_CALL_COMPLETE) {
15181 +- call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS;
15182 ++ call->rxtx_annotations[ix] =
15183 ++ (call->rxtx_annotations[ix] & RXRPC_TX_ANNO_LAST) |
15184 ++ RXRPC_TX_ANNO_RETRANS;
15185 + if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
15186 + rxrpc_queue_call(call);
15187 + }
15188 +diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
15189 +index 2b087623fb1d..364a878e51cb 100644
15190 +--- a/net/sched/act_bpf.c
15191 ++++ b/net/sched/act_bpf.c
15192 +@@ -356,7 +356,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
15193 + return res;
15194 + out:
15195 + if (res == ACT_P_CREATED)
15196 +- tcf_idr_cleanup(*act, est);
15197 ++ tcf_idr_release(*act, bind);
15198 +
15199 + return ret;
15200 + }
15201 +diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
15202 +index d9e399a7e3d5..18b2fd2ba7d7 100644
15203 +--- a/net/sched/act_ipt.c
15204 ++++ b/net/sched/act_ipt.c
15205 +@@ -80,9 +80,12 @@ static void ipt_destroy_target(struct xt_entry_target *t)
15206 + static void tcf_ipt_release(struct tc_action *a, int bind)
15207 + {
15208 + struct tcf_ipt *ipt = to_ipt(a);
15209 +- ipt_destroy_target(ipt->tcfi_t);
15210 ++
15211 ++ if (ipt->tcfi_t) {
15212 ++ ipt_destroy_target(ipt->tcfi_t);
15213 ++ kfree(ipt->tcfi_t);
15214 ++ }
15215 + kfree(ipt->tcfi_tname);
15216 +- kfree(ipt->tcfi_t);
15217 + }
15218 +
15219 + static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
15220 +@@ -187,7 +190,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
15221 + kfree(tname);
15222 + err1:
15223 + if (ret == ACT_P_CREATED)
15224 +- tcf_idr_cleanup(*a, est);
15225 ++ tcf_idr_release(*a, bind);
15226 + return err;
15227 + }
15228 +
15229 +diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
15230 +index 491fe5deb09e..51ab463d9e16 100644
15231 +--- a/net/sched/act_pedit.c
15232 ++++ b/net/sched/act_pedit.c
15233 +@@ -176,7 +176,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
15234 + p = to_pedit(*a);
15235 + keys = kmalloc(ksize, GFP_KERNEL);
15236 + if (keys == NULL) {
15237 +- tcf_idr_cleanup(*a, est);
15238 ++ tcf_idr_release(*a, bind);
15239 + kfree(keys_ex);
15240 + return -ENOMEM;
15241 + }
15242 +diff --git a/net/sched/act_police.c b/net/sched/act_police.c
15243 +index 3bb2ebf9e9ae..c16127109f21 100644
15244 +--- a/net/sched/act_police.c
15245 ++++ b/net/sched/act_police.c
15246 +@@ -194,7 +194,7 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
15247 + qdisc_put_rtab(P_tab);
15248 + qdisc_put_rtab(R_tab);
15249 + if (ret == ACT_P_CREATED)
15250 +- tcf_idr_cleanup(*a, est);
15251 ++ tcf_idr_release(*a, bind);
15252 + return err;
15253 + }
15254 +
15255 +diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
15256 +index 8b5abcd2f32f..53752b9327d0 100644
15257 +--- a/net/sched/act_sample.c
15258 ++++ b/net/sched/act_sample.c
15259 +@@ -103,7 +103,8 @@ static void tcf_sample_cleanup_rcu(struct rcu_head *rcu)
15260 +
15261 + psample_group = rcu_dereference_protected(s->psample_group, 1);
15262 + RCU_INIT_POINTER(s->psample_group, NULL);
15263 +- psample_group_put(psample_group);
15264 ++ if (psample_group)
15265 ++ psample_group_put(psample_group);
15266 + }
15267 +
15268 + static void tcf_sample_cleanup(struct tc_action *a, int bind)
15269 +diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
15270 +index e7b57e5071a3..b5f80e675783 100644
15271 +--- a/net/sched/act_simple.c
15272 ++++ b/net/sched/act_simple.c
15273 +@@ -121,7 +121,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
15274 + d = to_defact(*a);
15275 + ret = alloc_defdata(d, defdata);
15276 + if (ret < 0) {
15277 +- tcf_idr_cleanup(*a, est);
15278 ++ tcf_idr_release(*a, bind);
15279 + return ret;
15280 + }
15281 + d->tcf_action = parm->action;
15282 +diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
15283 +index 821823b2518a..d227599f7e73 100644
15284 +--- a/net/sched/act_skbmod.c
15285 ++++ b/net/sched/act_skbmod.c
15286 +@@ -155,7 +155,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
15287 + ASSERT_RTNL();
15288 + p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
15289 + if (unlikely(!p)) {
15290 +- if (ovr)
15291 ++ if (ret == ACT_P_CREATED)
15292 + tcf_idr_release(*a, bind);
15293 + return -ENOMEM;
15294 + }
15295 +diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
15296 +index 7166e7ecbe86..f04a037dc967 100644
15297 +--- a/net/smc/smc_core.c
15298 ++++ b/net/smc/smc_core.c
15299 +@@ -174,6 +174,7 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr,
15300 +
15301 + lnk = &lgr->lnk[SMC_SINGLE_LINK];
15302 + /* initialize link */
15303 ++ lnk->link_id = SMC_SINGLE_LINK;
15304 + lnk->smcibdev = smcibdev;
15305 + lnk->ibport = ibport;
15306 + lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;
15307 +diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
15308 +index 9033b8a36fe1..4410d0071515 100644
15309 +--- a/net/smc/smc_ib.c
15310 ++++ b/net/smc/smc_ib.c
15311 +@@ -23,6 +23,8 @@
15312 + #include "smc_wr.h"
15313 + #include "smc.h"
15314 +
15315 ++#define SMC_MAX_CQE 32766 /* max. # of completion queue elements */
15316 ++
15317 + #define SMC_QP_MIN_RNR_TIMER 5
15318 + #define SMC_QP_TIMEOUT 15 /* 4096 * 2 ** timeout usec */
15319 + #define SMC_QP_RETRY_CNT 7 /* 7: infinite */
15320 +@@ -435,9 +437,15 @@ int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
15321 + long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
15322 + {
15323 + struct ib_cq_init_attr cqattr = {
15324 +- .cqe = SMC_WR_MAX_CQE, .comp_vector = 0 };
15325 ++ .cqe = SMC_MAX_CQE, .comp_vector = 0 };
15326 ++ int cqe_size_order, smc_order;
15327 + long rc;
15328 +
15329 ++ /* the calculated number of cq entries fits the mlx5 cq allocation */
15330 ++ cqe_size_order = cache_line_size() == 128 ? 7 : 6;
15331 ++ smc_order = MAX_ORDER - cqe_size_order - 1;
15332 ++ if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
15333 ++ cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
15334 + smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
15335 + smc_wr_tx_cq_handler, NULL,
15336 + smcibdev, &cqattr);
15337 +diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
15338 +index 92fe4cc8c82c..b4aa4fcedb96 100644
15339 +--- a/net/smc/smc_llc.c
15340 ++++ b/net/smc/smc_llc.c
15341 +@@ -92,7 +92,7 @@ int smc_llc_send_confirm_link(struct smc_link *link, u8 mac[],
15342 + memcpy(confllc->sender_mac, mac, ETH_ALEN);
15343 + memcpy(confllc->sender_gid, gid, SMC_GID_SIZE);
15344 + hton24(confllc->sender_qp_num, link->roce_qp->qp_num);
15345 +- /* confllc->link_num = SMC_SINGLE_LINK; already done by memset above */
15346 ++ confllc->link_num = link->link_id;
15347 + memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE);
15348 + confllc->max_links = SMC_LINKS_PER_LGR_MAX;
15349 + /* send llc message */
15350 +diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
15351 +index 2acf12b06063..c307402e67d6 100644
15352 +--- a/net/smc/smc_wr.h
15353 ++++ b/net/smc/smc_wr.h
15354 +@@ -19,7 +19,6 @@
15355 + #include "smc.h"
15356 + #include "smc_core.h"
15357 +
15358 +-#define SMC_WR_MAX_CQE 32768 /* max. # of completion queue elements */
15359 + #define SMC_WR_BUF_CNT 16 /* # of ctrl buffers per link */
15360 +
15361 + #define SMC_WR_TX_WAIT_FREE_SLOT_TIME (10 * HZ)
15362 +diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
15363 +index dfef930d1e50..ffb1a3a69bdd 100644
15364 +--- a/net/tls/tls_main.c
15365 ++++ b/net/tls/tls_main.c
15366 +@@ -299,7 +299,8 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
15367 + goto out;
15368 + }
15369 + lock_sock(sk);
15370 +- memcpy(crypto_info_aes_gcm_128->iv, ctx->iv,
15371 ++ memcpy(crypto_info_aes_gcm_128->iv,
15372 ++ ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
15373 + TLS_CIPHER_AES_GCM_128_IV_SIZE);
15374 + release_sock(sk);
15375 + if (copy_to_user(optval,
15376 +diff --git a/net/wireless/sme.c b/net/wireless/sme.c
15377 +index 3dd05a08c60a..d014aea07160 100644
15378 +--- a/net/wireless/sme.c
15379 ++++ b/net/wireless/sme.c
15380 +@@ -989,6 +989,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
15381 + wdev->current_bss = NULL;
15382 + wdev->ssid_len = 0;
15383 + wdev->conn_owner_nlportid = 0;
15384 ++ kzfree(wdev->connect_keys);
15385 ++ wdev->connect_keys = NULL;
15386 +
15387 + nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap);
15388 +
15389 +diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
15390 +index 5b2409746ae0..9f492dc417d5 100644
15391 +--- a/net/xfrm/xfrm_input.c
15392 ++++ b/net/xfrm/xfrm_input.c
15393 +@@ -26,6 +26,12 @@ struct xfrm_trans_tasklet {
15394 + };
15395 +
15396 + struct xfrm_trans_cb {
15397 ++ union {
15398 ++ struct inet_skb_parm h4;
15399 ++#if IS_ENABLED(CONFIG_IPV6)
15400 ++ struct inet6_skb_parm h6;
15401 ++#endif
15402 ++ } header;
15403 + int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
15404 + };
15405 +
15406 +diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
15407 +index 73ad8c8ef344..35610cc881a9 100644
15408 +--- a/net/xfrm/xfrm_output.c
15409 ++++ b/net/xfrm/xfrm_output.c
15410 +@@ -285,8 +285,9 @@ void xfrm_local_error(struct sk_buff *skb, int mtu)
15411 + return;
15412 +
15413 + afinfo = xfrm_state_get_afinfo(proto);
15414 +- if (afinfo)
15415 ++ if (afinfo) {
15416 + afinfo->local_error(skb, mtu);
15417 +- rcu_read_unlock();
15418 ++ rcu_read_unlock();
15419 ++ }
15420 + }
15421 + EXPORT_SYMBOL_GPL(xfrm_local_error);
15422 +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
15423 +index 7d17c207fc8a..9c57d6a5816c 100644
15424 +--- a/net/xfrm/xfrm_policy.c
15425 ++++ b/net/xfrm/xfrm_policy.c
15426 +@@ -1459,10 +1459,13 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
15427 + static int xfrm_get_tos(const struct flowi *fl, int family)
15428 + {
15429 + const struct xfrm_policy_afinfo *afinfo;
15430 +- int tos = 0;
15431 ++ int tos;
15432 +
15433 + afinfo = xfrm_policy_get_afinfo(family);
15434 +- tos = afinfo ? afinfo->get_tos(fl) : 0;
15435 ++ if (!afinfo)
15436 ++ return 0;
15437 ++
15438 ++ tos = afinfo->get_tos(fl);
15439 +
15440 + rcu_read_unlock();
15441 +
15442 +diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
15443 +index 02501817227b..bdb9b5121ba8 100644
15444 +--- a/net/xfrm/xfrm_replay.c
15445 ++++ b/net/xfrm/xfrm_replay.c
15446 +@@ -658,7 +658,7 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
15447 + } else {
15448 + XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
15449 + XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi;
15450 +- xo->seq.low = oseq = oseq + 1;
15451 ++ xo->seq.low = oseq + 1;
15452 + xo->seq.hi = oseq_hi;
15453 + oseq += skb_shinfo(skb)->gso_segs;
15454 + }
15455 +diff --git a/scripts/adjust_autoksyms.sh b/scripts/adjust_autoksyms.sh
15456 +index 513da1a4a2da..d67830e6e360 100755
15457 +--- a/scripts/adjust_autoksyms.sh
15458 ++++ b/scripts/adjust_autoksyms.sh
15459 +@@ -84,6 +84,13 @@ while read sympath; do
15460 + depfile="include/config/ksym/${sympath}.h"
15461 + mkdir -p "$(dirname "$depfile")"
15462 + touch "$depfile"
15463 ++ # Filesystems with coarse time precision may create timestamps
15464 ++ # equal to the one from a file that was very recently built and that
15465 ++ # needs to be rebuilt. Let's guard against that by making sure our
15466 ++ # dep files are always newer than the first file we created here.
15467 ++ while [ ! "$depfile" -nt "$new_ksyms_file" ]; do
15468 ++ touch "$depfile"
15469 ++ done
15470 + echo $((count += 1))
15471 + done | tail -1 )
15472 + changed=${changed:-0}
15473 +diff --git a/scripts/package/builddeb b/scripts/package/builddeb
15474 +index 0bc87473f68f..e15159d0a884 100755
15475 +--- a/scripts/package/builddeb
15476 ++++ b/scripts/package/builddeb
15477 +@@ -313,7 +313,7 @@ fi
15478 +
15479 + # Build kernel header package
15480 + (cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl) > "$objtree/debian/hdrsrcfiles"
15481 +-(cd $srctree; find arch/*/include include scripts -type f) >> "$objtree/debian/hdrsrcfiles"
15482 ++(cd $srctree; find arch/*/include include scripts -type f -o -type l) >> "$objtree/debian/hdrsrcfiles"
15483 + (cd $srctree; find arch/$SRCARCH -name module.lds -o -name Kbuild.platforms -o -name Platform) >> "$objtree/debian/hdrsrcfiles"
15484 + (cd $srctree; find $(find arch/$SRCARCH -name include -o -name scripts -type d) -type f) >> "$objtree/debian/hdrsrcfiles"
15485 + if grep -q '^CONFIG_STACK_VALIDATION=y' $KCONFIG_CONFIG ; then
15486 +diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
15487 +index 06554c448dce..9676c8887da9 100644
15488 +--- a/security/integrity/digsig.c
15489 ++++ b/security/integrity/digsig.c
15490 +@@ -18,6 +18,7 @@
15491 + #include <linux/cred.h>
15492 + #include <linux/key-type.h>
15493 + #include <linux/digsig.h>
15494 ++#include <linux/vmalloc.h>
15495 + #include <crypto/public_key.h>
15496 + #include <keys/system_keyring.h>
15497 +
15498 +diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
15499 +index 35ef69312811..6a8f67714c83 100644
15500 +--- a/security/integrity/ima/Kconfig
15501 ++++ b/security/integrity/ima/Kconfig
15502 +@@ -10,6 +10,7 @@ config IMA
15503 + select CRYPTO_HASH_INFO
15504 + select TCG_TPM if HAS_IOMEM && !UML
15505 + select TCG_TIS if TCG_TPM && X86
15506 ++ select TCG_CRB if TCG_TPM && ACPI
15507 + select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES
15508 + help
15509 + The Trusted Computing Group(TCG) runtime Integrity
15510 +diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
15511 +index 802d5d20f36f..90453aa1c813 100644
15512 +--- a/security/integrity/ima/ima_crypto.c
15513 ++++ b/security/integrity/ima/ima_crypto.c
15514 +@@ -78,6 +78,8 @@ int __init ima_init_crypto(void)
15515 + hash_algo_name[ima_hash_algo], rc);
15516 + return rc;
15517 + }
15518 ++ pr_info("Allocated hash algorithm: %s\n",
15519 ++ hash_algo_name[ima_hash_algo]);
15520 + return 0;
15521 + }
15522 +
15523 +diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
15524 +index ab70a395f490..7e334fd31c15 100644
15525 +--- a/security/integrity/ima/ima_main.c
15526 ++++ b/security/integrity/ima/ima_main.c
15527 +@@ -16,6 +16,9 @@
15528 + * implements the IMA hooks: ima_bprm_check, ima_file_mmap,
15529 + * and ima_file_check.
15530 + */
15531 ++
15532 ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15533 ++
15534 + #include <linux/module.h>
15535 + #include <linux/file.h>
15536 + #include <linux/binfmts.h>
15537 +@@ -427,6 +430,16 @@ static int __init init_ima(void)
15538 + ima_init_template_list();
15539 + hash_setup(CONFIG_IMA_DEFAULT_HASH);
15540 + error = ima_init();
15541 ++
15542 ++ if (error && strcmp(hash_algo_name[ima_hash_algo],
15543 ++ CONFIG_IMA_DEFAULT_HASH) != 0) {
15544 ++ pr_info("Allocating %s failed, going to use default hash algorithm %s\n",
15545 ++ hash_algo_name[ima_hash_algo], CONFIG_IMA_DEFAULT_HASH);
15546 ++ hash_setup_done = 0;
15547 ++ hash_setup(CONFIG_IMA_DEFAULT_HASH);
15548 ++ error = ima_init();
15549 ++ }
15550 ++
15551 + if (!error) {
15552 + ima_initialized = 1;
15553 + ima_update_policy_flag();
15554 +diff --git a/sound/core/timer.c b/sound/core/timer.c
15555 +index 15e82a656d96..4fdc9e11e832 100644
15556 +--- a/sound/core/timer.c
15557 ++++ b/sound/core/timer.c
15558 +@@ -592,7 +592,7 @@ static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
15559 + else
15560 + timeri->flags |= SNDRV_TIMER_IFLG_PAUSED;
15561 + snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
15562 +- SNDRV_TIMER_EVENT_CONTINUE);
15563 ++ SNDRV_TIMER_EVENT_PAUSE);
15564 + unlock:
15565 + spin_unlock_irqrestore(&timer->lock, flags);
15566 + return result;
15567 +@@ -614,7 +614,7 @@ static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
15568 + list_del_init(&timeri->ack_list);
15569 + list_del_init(&timeri->active_list);
15570 + snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
15571 +- SNDRV_TIMER_EVENT_CONTINUE);
15572 ++ SNDRV_TIMER_EVENT_PAUSE);
15573 + spin_unlock(&timeri->timer->lock);
15574 + }
15575 + spin_unlock_irqrestore(&slave_active_lock, flags);
15576 +diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
15577 +index 8632301489fa..b67de2bb06a2 100644
15578 +--- a/sound/core/vmaster.c
15579 ++++ b/sound/core/vmaster.c
15580 +@@ -68,10 +68,13 @@ static int slave_update(struct link_slave *slave)
15581 + return -ENOMEM;
15582 + uctl->id = slave->slave.id;
15583 + err = slave->slave.get(&slave->slave, uctl);
15584 ++ if (err < 0)
15585 ++ goto error;
15586 + for (ch = 0; ch < slave->info.count; ch++)
15587 + slave->vals[ch] = uctl->value.integer.value[ch];
15588 ++ error:
15589 + kfree(uctl);
15590 +- return 0;
15591 ++ return err < 0 ? err : 0;
15592 + }
15593 +
15594 + /* get the slave ctl info and save the initial values */
15595 +diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
15596 +index 457a1521f32f..785f4e95148c 100644
15597 +--- a/tools/hv/hv_fcopy_daemon.c
15598 ++++ b/tools/hv/hv_fcopy_daemon.c
15599 +@@ -23,13 +23,14 @@
15600 + #include <unistd.h>
15601 + #include <errno.h>
15602 + #include <linux/hyperv.h>
15603 ++#include <linux/limits.h>
15604 + #include <syslog.h>
15605 + #include <sys/stat.h>
15606 + #include <fcntl.h>
15607 + #include <getopt.h>
15608 +
15609 + static int target_fd;
15610 +-static char target_fname[W_MAX_PATH];
15611 ++static char target_fname[PATH_MAX];
15612 + static unsigned long long filesize;
15613 +
15614 + static int hv_start_fcopy(struct hv_start_fcopy *smsg)
15615 +diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
15616 +index b2b4ebffab8c..34031a297f02 100644
15617 +--- a/tools/hv/hv_vss_daemon.c
15618 ++++ b/tools/hv/hv_vss_daemon.c
15619 +@@ -22,6 +22,7 @@
15620 + #include <sys/poll.h>
15621 + #include <sys/ioctl.h>
15622 + #include <sys/stat.h>
15623 ++#include <sys/sysmacros.h>
15624 + #include <fcntl.h>
15625 + #include <stdio.h>
15626 + #include <mntent.h>
15627 +diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
15628 +index 91ef44bfaf3e..2a858ea56a81 100644
15629 +--- a/tools/perf/Makefile.perf
15630 ++++ b/tools/perf/Makefile.perf
15631 +@@ -368,7 +368,8 @@ LIBS = -Wl,--whole-archive $(PERFLIBS) $(EXTRA_PERFLIBS) -Wl,--no-whole-archive
15632 +
15633 + ifeq ($(USE_CLANG), 1)
15634 + CLANGLIBS_LIST = AST Basic CodeGen Driver Frontend Lex Tooling Edit Sema Analysis Parse Serialization
15635 +- LIBCLANG = $(foreach l,$(CLANGLIBS_LIST),$(wildcard $(shell $(LLVM_CONFIG) --libdir)/libclang$(l).a))
15636 ++ CLANGLIBS_NOEXT_LIST = $(foreach l,$(CLANGLIBS_LIST),$(shell $(LLVM_CONFIG) --libdir)/libclang$(l))
15637 ++ LIBCLANG = $(foreach l,$(CLANGLIBS_NOEXT_LIST),$(wildcard $(l).a $(l).so))
15638 + LIBS += -Wl,--start-group $(LIBCLANG) -Wl,--end-group
15639 + endif
15640 +
15641 +diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
15642 +index b205c1340456..3b570e808b31 100644
15643 +--- a/tools/perf/builtin-record.c
15644 ++++ b/tools/perf/builtin-record.c
15645 +@@ -926,6 +926,15 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
15646 + }
15647 + }
15648 +
15649 ++ /*
15650 ++ * If we have just a single event and are sending data
15651 ++ * through a pipe, we need to force the id allocation,
15652 ++ * because we synthesize the event name through the pipe
15653 ++ * and need the id for that.
15654 ++ */
15655 ++ if (data->is_pipe && rec->evlist->nr_entries == 1)
15656 ++ rec->opts.sample_id = true;
15657 ++
15658 + if (record__open(rec) != 0) {
15659 + err = -1;
15660 + goto out_child;
15661 +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
15662 +index 9df0af17e9c2..52486c90ab93 100644
15663 +--- a/tools/perf/builtin-stat.c
15664 ++++ b/tools/perf/builtin-stat.c
15665 +@@ -2185,11 +2185,16 @@ static int add_default_attributes(void)
15666 + return 0;
15667 +
15668 + if (transaction_run) {
15669 ++ struct parse_events_error errinfo;
15670 ++
15671 + if (pmu_have_event("cpu", "cycles-ct") &&
15672 + pmu_have_event("cpu", "el-start"))
15673 +- err = parse_events(evsel_list, transaction_attrs, NULL);
15674 ++ err = parse_events(evsel_list, transaction_attrs,
15675 ++ &errinfo);
15676 + else
15677 +- err = parse_events(evsel_list, transaction_limited_attrs, NULL);
15678 ++ err = parse_events(evsel_list,
15679 ++ transaction_limited_attrs,
15680 ++ &errinfo);
15681 + if (err) {
15682 + fprintf(stderr, "Cannot set up transaction events\n");
15683 + return -1;
15684 +diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
15685 +index dd57978b2096..3103a33c13a8 100644
15686 +--- a/tools/perf/builtin-top.c
15687 ++++ b/tools/perf/builtin-top.c
15688 +@@ -1080,8 +1080,10 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset)
15689 +
15690 + static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
15691 + {
15692 +- if (!strcmp(var, "top.call-graph"))
15693 +- var = "call-graph.record-mode"; /* fall-through */
15694 ++ if (!strcmp(var, "top.call-graph")) {
15695 ++ var = "call-graph.record-mode";
15696 ++ return perf_default_config(var, value, cb);
15697 ++ }
15698 + if (!strcmp(var, "top.children")) {
15699 + symbol_conf.cumulate_callchain = perf_config_bool(var, value);
15700 + return 0;
15701 +diff --git a/tools/perf/perf.h b/tools/perf/perf.h
15702 +index 55086389fc06..de1debcd3ee7 100644
15703 +--- a/tools/perf/perf.h
15704 ++++ b/tools/perf/perf.h
15705 +@@ -61,6 +61,7 @@ struct record_opts {
15706 + bool tail_synthesize;
15707 + bool overwrite;
15708 + bool ignore_missing_thread;
15709 ++ bool sample_id;
15710 + unsigned int freq;
15711 + unsigned int mmap_pages;
15712 + unsigned int auxtrace_mmap_pages;
15713 +diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
15714 +index 260418969120..2f008067d989 100644
15715 +--- a/tools/perf/tests/dwarf-unwind.c
15716 ++++ b/tools/perf/tests/dwarf-unwind.c
15717 +@@ -37,6 +37,19 @@ static int init_live_machine(struct machine *machine)
15718 + mmap_handler, machine, true, 500);
15719 + }
15720 +
15721 ++/*
15722 ++ * We need to keep these functions global, despite the
15723 ++ * fact that they are used only locally in this object,
15724 ++ * in order to keep them around even if the binary is
15725 ++ * stripped. If they are gone, the unwind check for
15726 ++ * the symbol fails.
15727 ++ */
15728 ++int test_dwarf_unwind__thread(struct thread *thread);
15729 ++int test_dwarf_unwind__compare(void *p1, void *p2);
15730 ++int test_dwarf_unwind__krava_3(struct thread *thread);
15731 ++int test_dwarf_unwind__krava_2(struct thread *thread);
15732 ++int test_dwarf_unwind__krava_1(struct thread *thread);
15733 ++
15734 + #define MAX_STACK 8
15735 +
15736 + static int unwind_entry(struct unwind_entry *entry, void *arg)
15737 +@@ -45,12 +58,12 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
15738 + char *symbol = entry->sym ? entry->sym->name : NULL;
15739 + static const char *funcs[MAX_STACK] = {
15740 + "test__arch_unwind_sample",
15741 +- "unwind_thread",
15742 +- "compare",
15743 ++ "test_dwarf_unwind__thread",
15744 ++ "test_dwarf_unwind__compare",
15745 + "bsearch",
15746 +- "krava_3",
15747 +- "krava_2",
15748 +- "krava_1",
15749 ++ "test_dwarf_unwind__krava_3",
15750 ++ "test_dwarf_unwind__krava_2",
15751 ++ "test_dwarf_unwind__krava_1",
15752 + "test__dwarf_unwind"
15753 + };
15754 + /*
15755 +@@ -77,7 +90,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
15756 + return strcmp((const char *) symbol, funcs[idx]);
15757 + }
15758 +
15759 +-static noinline int unwind_thread(struct thread *thread)
15760 ++noinline int test_dwarf_unwind__thread(struct thread *thread)
15761 + {
15762 + struct perf_sample sample;
15763 + unsigned long cnt = 0;
15764 +@@ -108,7 +121,7 @@ static noinline int unwind_thread(struct thread *thread)
15765 +
15766 + static int global_unwind_retval = -INT_MAX;
15767 +
15768 +-static noinline int compare(void *p1, void *p2)
15769 ++noinline int test_dwarf_unwind__compare(void *p1, void *p2)
15770 + {
15771 + /* Any possible value should be 'thread' */
15772 + struct thread *thread = *(struct thread **)p1;
15773 +@@ -117,17 +130,17 @@ static noinline int compare(void *p1, void *p2)
15774 + /* Call unwinder twice for both callchain orders. */
15775 + callchain_param.order = ORDER_CALLER;
15776 +
15777 +- global_unwind_retval = unwind_thread(thread);
15778 ++ global_unwind_retval = test_dwarf_unwind__thread(thread);
15779 + if (!global_unwind_retval) {
15780 + callchain_param.order = ORDER_CALLEE;
15781 +- global_unwind_retval = unwind_thread(thread);
15782 ++ global_unwind_retval = test_dwarf_unwind__thread(thread);
15783 + }
15784 + }
15785 +
15786 + return p1 - p2;
15787 + }
15788 +
15789 +-static noinline int krava_3(struct thread *thread)
15790 ++noinline int test_dwarf_unwind__krava_3(struct thread *thread)
15791 + {
15792 + struct thread *array[2] = {thread, thread};
15793 + void *fp = &bsearch;
15794 +@@ -141,18 +154,19 @@ static noinline int krava_3(struct thread *thread)
15795 + size_t, int (*)(void *, void *));
15796 +
15797 + _bsearch = fp;
15798 +- _bsearch(array, &thread, 2, sizeof(struct thread **), compare);
15799 ++ _bsearch(array, &thread, 2, sizeof(struct thread **),
15800 ++ test_dwarf_unwind__compare);
15801 + return global_unwind_retval;
15802 + }
15803 +
15804 +-static noinline int krava_2(struct thread *thread)
15805 ++noinline int test_dwarf_unwind__krava_2(struct thread *thread)
15806 + {
15807 +- return krava_3(thread);
15808 ++ return test_dwarf_unwind__krava_3(thread);
15809 + }
15810 +
15811 +-static noinline int krava_1(struct thread *thread)
15812 ++noinline int test_dwarf_unwind__krava_1(struct thread *thread)
15813 + {
15814 +- return krava_2(thread);
15815 ++ return test_dwarf_unwind__krava_2(thread);
15816 + }
15817 +
15818 + int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unused)
15819 +@@ -189,7 +203,7 @@ int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unu
15820 + goto out;
15821 + }
15822 +
15823 +- err = krava_1(thread);
15824 ++ err = test_dwarf_unwind__krava_1(thread);
15825 + thread__put(thread);
15826 +
15827 + out:
15828 +diff --git a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
15829 +index a2f757da49d9..73bea00f590f 100755
15830 +--- a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
15831 ++++ b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
15832 +@@ -21,12 +21,12 @@ trace_libc_inet_pton_backtrace() {
15833 + expected[3]=".*packets transmitted.*"
15834 + expected[4]="rtt min.*"
15835 + expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)"
15836 +- expected[6]=".*inet_pton[[:space:]]\($libc\)$"
15837 ++ expected[6]=".*inet_pton[[:space:]]\($libc|inlined\)$"
15838 + case "$(uname -m)" in
15839 + s390x)
15840 + eventattr='call-graph=dwarf'
15841 +- expected[7]="gaih_inet[[:space:]]\(inlined\)$"
15842 +- expected[8]="__GI_getaddrinfo[[:space:]]\(inlined\)$"
15843 ++ expected[7]="gaih_inet.*[[:space:]]\($libc|inlined\)$"
15844 ++ expected[8]="__GI_getaddrinfo[[:space:]]\($libc|inlined\)$"
15845 + expected[9]="main[[:space:]]\(.*/bin/ping.*\)$"
15846 + expected[10]="__libc_start_main[[:space:]]\($libc\)$"
15847 + expected[11]="_start[[:space:]]\(.*/bin/ping.*\)$"
15848 +diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
15849 +index f6789fb029d6..884cad122acf 100644
15850 +--- a/tools/perf/tests/vmlinux-kallsyms.c
15851 ++++ b/tools/perf/tests/vmlinux-kallsyms.c
15852 +@@ -125,7 +125,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
15853 +
15854 + if (pair && UM(pair->start) == mem_start) {
15855 + next_pair:
15856 +- if (strcmp(sym->name, pair->name) == 0) {
15857 ++ if (arch__compare_symbol_names(sym->name, pair->name) == 0) {
15858 + /*
15859 + * kallsyms don't have the symbol end, so we
15860 + * set that by using the next symbol start - 1,
15861 +diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
15862 +index 8f7f59d1a2b5..0c486d2683c4 100644
15863 +--- a/tools/perf/ui/browsers/annotate.c
15864 ++++ b/tools/perf/ui/browsers/annotate.c
15865 +@@ -312,6 +312,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
15866 + struct map_symbol *ms = ab->b.priv;
15867 + struct symbol *sym = ms->sym;
15868 + u8 pcnt_width = annotate_browser__pcnt_width(ab);
15869 ++ int width = 0;
15870 +
15871 + /* PLT symbols contain external offsets */
15872 + if (strstr(sym->name, "@plt"))
15873 +@@ -335,13 +336,17 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
15874 + to = (u64)btarget->idx;
15875 + }
15876 +
15877 ++ if (ab->have_cycles)
15878 ++ width = IPC_WIDTH + CYCLES_WIDTH;
15879 ++
15880 + ui_browser__set_color(browser, HE_COLORSET_JUMP_ARROWS);
15881 +- __ui_browser__line_arrow(browser, pcnt_width + 2 + ab->addr_width,
15882 ++ __ui_browser__line_arrow(browser,
15883 ++ pcnt_width + 2 + ab->addr_width + width,
15884 + from, to);
15885 +
15886 + if (is_fused(ab, cursor)) {
15887 + ui_browser__mark_fused(browser,
15888 +- pcnt_width + 3 + ab->addr_width,
15889 ++ pcnt_width + 3 + ab->addr_width + width,
15890 + from - 1,
15891 + to > from ? true : false);
15892 + }
15893 +diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp
15894 +index 1bfc946e37dc..bf31ceab33bd 100644
15895 +--- a/tools/perf/util/c++/clang.cpp
15896 ++++ b/tools/perf/util/c++/clang.cpp
15897 +@@ -9,6 +9,7 @@
15898 + * Copyright (C) 2016 Huawei Inc.
15899 + */
15900 +
15901 ++#include "clang/Basic/Version.h"
15902 + #include "clang/CodeGen/CodeGenAction.h"
15903 + #include "clang/Frontend/CompilerInvocation.h"
15904 + #include "clang/Frontend/CompilerInstance.h"
15905 +@@ -58,7 +59,8 @@ createCompilerInvocation(llvm::opt::ArgStringList CFlags, StringRef& Path,
15906 +
15907 + FrontendOptions& Opts = CI->getFrontendOpts();
15908 + Opts.Inputs.clear();
15909 +- Opts.Inputs.emplace_back(Path, IK_C);
15910 ++ Opts.Inputs.emplace_back(Path,
15911 ++ FrontendOptions::getInputKindForExtension("c"));
15912 + return CI;
15913 + }
15914 +
15915 +@@ -71,10 +73,17 @@ getModuleFromSource(llvm::opt::ArgStringList CFlags,
15916 +
15917 + Clang.setVirtualFileSystem(&*VFS);
15918 +
15919 ++#if CLANG_VERSION_MAJOR < 4
15920 + IntrusiveRefCntPtr<CompilerInvocation> CI =
15921 + createCompilerInvocation(std::move(CFlags), Path,
15922 + Clang.getDiagnostics());
15923 + Clang.setInvocation(&*CI);
15924 ++#else
15925 ++ std::shared_ptr<CompilerInvocation> CI(
15926 ++ createCompilerInvocation(std::move(CFlags), Path,
15927 ++ Clang.getDiagnostics()));
15928 ++ Clang.setInvocation(CI);
15929 ++#endif
15930 +
15931 + std::unique_ptr<CodeGenAction> Act(new EmitLLVMOnlyAction(&*LLVMCtx));
15932 + if (!Clang.ExecuteAction(*Act))
15933 +diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
15934 +index 097473600d94..5d420209505e 100644
15935 +--- a/tools/perf/util/hist.c
15936 ++++ b/tools/perf/util/hist.c
15937 +@@ -878,7 +878,7 @@ iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
15938 + * cumulated only one time to prevent entries more than 100%
15939 + * overhead.
15940 + */
15941 +- he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
15942 ++ he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
15943 + if (he_cache == NULL)
15944 + return -ENOMEM;
15945 +
15946 +@@ -1043,8 +1043,6 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
15947 + if (err)
15948 + return err;
15949 +
15950 +- iter->max_stack = max_stack_depth;
15951 +-
15952 + err = iter->ops->prepare_entry(iter, al);
15953 + if (err)
15954 + goto out;
15955 +diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
15956 +index f6630cb95eff..b99d68943f25 100644
15957 +--- a/tools/perf/util/hist.h
15958 ++++ b/tools/perf/util/hist.h
15959 +@@ -107,7 +107,6 @@ struct hist_entry_iter {
15960 + int curr;
15961 +
15962 + bool hide_unresolved;
15963 +- int max_stack;
15964 +
15965 + struct perf_evsel *evsel;
15966 + struct perf_sample *sample;
15967 +diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
15968 +index 1e97937b03a9..6f09e4962dad 100644
15969 +--- a/tools/perf/util/record.c
15970 ++++ b/tools/perf/util/record.c
15971 +@@ -137,6 +137,7 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
15972 + struct perf_evsel *evsel;
15973 + bool use_sample_identifier = false;
15974 + bool use_comm_exec;
15975 ++ bool sample_id = opts->sample_id;
15976 +
15977 + /*
15978 + * Set the evsel leader links before we configure attributes,
15979 +@@ -163,8 +164,7 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
15980 + * match the id.
15981 + */
15982 + use_sample_identifier = perf_can_sample_identifier();
15983 +- evlist__for_each_entry(evlist, evsel)
15984 +- perf_evsel__set_sample_id(evsel, use_sample_identifier);
15985 ++ sample_id = true;
15986 + } else if (evlist->nr_entries > 1) {
15987 + struct perf_evsel *first = perf_evlist__first(evlist);
15988 +
15989 +@@ -174,6 +174,10 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
15990 + use_sample_identifier = perf_can_sample_identifier();
15991 + break;
15992 + }
15993 ++ sample_id = true;
15994 ++ }
15995 ++
15996 ++ if (sample_id) {
15997 + evlist__for_each_entry(evlist, evsel)
15998 + perf_evsel__set_sample_id(evsel, use_sample_identifier);
15999 + }
16000 +diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
16001 +index 30cd0b296f1a..8e61aad0ca3f 100644
16002 +--- a/tools/testing/radix-tree/idr-test.c
16003 ++++ b/tools/testing/radix-tree/idr-test.c
16004 +@@ -202,6 +202,13 @@ void idr_checks(void)
16005 + idr_remove(&idr, 3);
16006 + idr_remove(&idr, 0);
16007 +
16008 ++ assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == 0);
16009 ++ idr_remove(&idr, 1);
16010 ++ for (i = 1; i < RADIX_TREE_MAP_SIZE; i++)
16011 ++ assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == i);
16012 ++ idr_remove(&idr, 1 << 30);
16013 ++ idr_destroy(&idr);
16014 ++
16015 + for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) {
16016 + struct item *item = item_create(i, 0);
16017 + assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
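The new assertions pin down idr_alloc()'s lowest-free-id behaviour across a radix-tree node boundary. A shorter restatement of the same invariant, assuming the harness definitions already used by idr-test.c (DEFINE_IDR, DUMMY_PTR, GFP_KERNEL):

DEFINE_IDR(idr);
int i;

for (i = 0; i < 3; i++)	/* fresh idr: ids come out 0, 1, 2, ... */
	assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == i);
idr_remove(&idr, 1);
/* the freed id is the lowest hole, so it is handed out again */
assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == 1);
idr_destroy(&idr);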
16018 +diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
16019 +index 3c9c0bbe7dbb..ea300e7818a7 100644
16020 +--- a/tools/testing/selftests/Makefile
16021 ++++ b/tools/testing/selftests/Makefile
16022 +@@ -122,6 +122,7 @@ ifdef INSTALL_PATH
16023 + BUILD_TARGET=$$BUILD/$$TARGET; \
16024 + echo "echo ; echo Running tests in $$TARGET" >> $(ALL_SCRIPT); \
16025 + echo "echo ========================================" >> $(ALL_SCRIPT); \
16026 ++ echo "[ -w /dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \
16027 + echo "cd $$TARGET" >> $(ALL_SCRIPT); \
16028 + make -s --no-print-directory OUTPUT=$$BUILD_TARGET -C $$TARGET emit_tests >> $(ALL_SCRIPT); \
16029 + echo "cd \$$ROOT" >> $(ALL_SCRIPT); \
16030 +diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
16031 +index 8b9470b5af6d..96c6238a4a1f 100644
16032 +--- a/tools/testing/selftests/bpf/test_maps.c
16033 ++++ b/tools/testing/selftests/bpf/test_maps.c
16034 +@@ -126,6 +126,8 @@ static void test_hashmap_sizes(int task, void *data)
16035 + fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j,
16036 + 2, map_flags);
16037 + if (fd < 0) {
16038 ++ if (errno == ENOMEM)
16039 ++ return;
16040 + printf("Failed to create hashmap key=%d value=%d '%s'\n",
16041 + i, j, strerror(errno));
16042 + exit(1);
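The test_maps fix distinguishes an expected resource limit from a real failure: when probing ever-larger key/value sizes, ENOMEM from the kernel is a stopping point, not a bug. Generic shape of that pattern, with create_map() as a hypothetical stand-in:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int create_map(int size);	/* hypothetical helper under test */

static void probe_sizes(void)
{
	for (int size = 8; size > 0; size *= 2) {
		int fd = create_map(size);

		if (fd < 0) {
			if (errno == ENOMEM)
				return;		/* limit reached: not a failure */
			perror("create_map");
			exit(1);		/* anything else is a real error */
		}
		close(fd);
	}
}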
16043 +diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
16044 +new file mode 100644
16045 +index 000000000000..5ba73035e1d9
16046 +--- /dev/null
16047 ++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
16048 +@@ -0,0 +1,46 @@
16049 ++#!/bin/sh
16050 ++# SPDX-License-Identifier: GPL-2.0
16051 ++# description: Kprobe event string type argument
16052 ++
16053 ++[ -f kprobe_events ] || exit_unsupported # this is configurable
16054 ++
16055 ++echo 0 > events/enable
16056 ++echo > kprobe_events
16057 ++
16058 ++case `uname -m` in
16059 ++x86_64)
16060 ++ ARG2=%si
16061 ++ OFFS=8
16062 ++;;
16063 ++i[3456]86)
16064 ++ ARG2=%cx
16065 ++ OFFS=4
16066 ++;;
16067 ++aarch64)
16068 ++ ARG2=%x1
16069 ++ OFFS=8
16070 ++;;
16071 ++arm*)
16072 ++ ARG2=%r1
16073 ++ OFFS=4
16074 ++;;
16075 ++*)
16076 ++ echo "Please implement other architecture here"
16077 ++ exit_untested
16078 ++esac
16079 ++
16080 ++: "Test get argument (1)"
16081 ++echo "p:testprobe create_trace_kprobe arg1=+0(+0(${ARG2})):string" > kprobe_events
16082 ++echo 1 > events/kprobes/testprobe/enable
16083 ++! echo test >> kprobe_events
16084 ++tail -n 1 trace | grep -qe "testprobe.* arg1=\"test\""
16085 ++
16086 ++echo 0 > events/kprobes/testprobe/enable
16087 ++: "Test get argument (2)"
16088 ++echo "p:testprobe create_trace_kprobe arg1=+0(+0(${ARG2})):string arg2=+0(+${OFFS}(${ARG2})):string" > kprobe_events
16089 ++echo 1 > events/kprobes/testprobe/enable
16090 ++! echo test1 test2 >> kprobe_events
16091 ++tail -n 1 trace | grep -qe "testprobe.* arg1=\"test1\" arg2=\"test2\""
16092 ++
16093 ++echo 0 > events/enable
16094 ++echo > kprobe_events
16095 +diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
16096 +new file mode 100644
16097 +index 000000000000..231bcd2c4eb5
16098 +--- /dev/null
16099 ++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
16100 +@@ -0,0 +1,97 @@
16101 ++#!/bin/sh
16102 ++# SPDX-License-Identifier: GPL-2.0
16103 ++# description: Kprobe event argument syntax
16104 ++
16105 ++[ -f kprobe_events ] || exit_unsupported # this is configurable
16106 ++
16107 ++grep "x8/16/32/64" README > /dev/null || exit_unsupported # version issue
16108 ++
16109 ++echo 0 > events/enable
16110 ++echo > kprobe_events
16111 ++
16112 ++PROBEFUNC="vfs_read"
16113 ++GOODREG=
16114 ++BADREG=
16115 ++GOODSYM="_sdata"
16116 ++if ! grep -qw ${GOODSYM} /proc/kallsyms ; then
16117 ++ GOODSYM=$PROBEFUNC
16118 ++fi
16119 ++BADSYM="deaqswdefr"
16120 ++SYMADDR=0x`grep -w ${GOODSYM} /proc/kallsyms | cut -f 1 -d " "`
16121 ++GOODTYPE="x16"
16122 ++BADTYPE="y16"
16123 ++
16124 ++case `uname -m` in
16125 ++x86_64|i[3456]86)
16126 ++ GOODREG=%ax
16127 ++ BADREG=%ex
16128 ++;;
16129 ++aarch64)
16130 ++ GOODREG=%x0
16131 ++ BADREG=%ax
16132 ++;;
16133 ++arm*)
16134 ++ GOODREG=%r0
16135 ++ BADREG=%ax
16136 ++;;
16137 ++esac
16138 ++
16139 ++test_goodarg() # Good-args
16140 ++{
16141 ++ while [ "$1" ]; do
16142 ++ echo "p ${PROBEFUNC} $1" > kprobe_events
16143 ++ shift 1
16144 ++ done;
16145 ++}
16146 ++
16147 ++test_badarg() # Bad-args
16148 ++{
16149 ++ while [ "$1" ]; do
16150 ++ ! echo "p ${PROBEFUNC} $1" > kprobe_events
16151 ++ shift 1
16152 ++ done;
16153 ++}
16154 ++
16155 ++echo > kprobe_events
16156 ++
16157 ++: "Register access"
16158 ++test_goodarg ${GOODREG}
16159 ++test_badarg ${BADREG}
16160 ++
16161 ++: "Symbol access"
16162 ++test_goodarg "@${GOODSYM}" "@${SYMADDR}" "@${GOODSYM}+10" "@${GOODSYM}-10"
16163 ++test_badarg "@" "@${BADSYM}" "@${GOODSYM}*10" "@${GOODSYM}/10" \
16164 ++ "@${GOODSYM}%10" "@${GOODSYM}&10" "@${GOODSYM}|10"
16165 ++
16166 ++: "Stack access"
16167 ++test_goodarg "\$stack" "\$stack0" "\$stack1"
16168 ++test_badarg "\$stackp" "\$stack0+10" "\$stack1-10"
16169 ++
16170 ++: "Retval access"
16171 ++echo "r ${PROBEFUNC} \$retval" > kprobe_events
16172 ++! echo "p ${PROBEFUNC} \$retval" > kprobe_events
16173 ++
16174 ++: "Comm access"
16175 ++test_goodarg "\$comm"
16176 ++
16177 ++: "Indirect memory access"
16178 ++test_goodarg "+0(${GOODREG})" "-0(${GOODREG})" "+10(\$stack)" \
16179 ++ "+0(\$stack1)" "+10(@${GOODSYM}-10)" "+0(+10(+20(\$stack)))"
16180 ++test_badarg "+(${GOODREG})" "(${GOODREG}+10)" "-(${GOODREG})" "(${GOODREG})" \
16181 ++ "+10(\$comm)" "+0(${GOODREG})+10"
16182 ++
16183 ++: "Name assignment"
16184 ++test_goodarg "varname=${GOODREG}"
16185 ++test_badarg "varname=varname2=${GOODREG}"
16186 ++
16187 ++: "Type syntax"
16188 ++test_goodarg "${GOODREG}:${GOODTYPE}"
16189 ++test_badarg "${GOODREG}::${GOODTYPE}" "${GOODREG}:${BADTYPE}" \
16190 ++ "${GOODTYPE}:${GOODREG}"
16191 ++
16192 ++: "Combination check"
16193 ++
16194 ++test_goodarg "\$comm:string" "+0(\$stack):string"
16195 ++test_badarg "\$comm:x64" "\$stack:string" "${GOODREG}:string"
16196 ++
16197 ++echo > kprobe_events
16198 +diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc
16199 +new file mode 100644
16200 +index 000000000000..4fda01a08da4
16201 +--- /dev/null
16202 ++++ b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc
16203 +@@ -0,0 +1,43 @@
16204 ++#!/bin/sh
16205 ++# SPDX-License-Identifier: GPL-2.0
16206 ++# description: Kprobe events - probe points
16207 ++
16208 ++[ -f kprobe_events ] || exit_unsupported # this is configurable
16209 ++
16210 ++TARGET_FUNC=create_trace_kprobe
16211 ++
16212 ++dec_addr() { # hexaddr
16213 ++ printf "%d" "0x"`echo $1 | tail -c 8`
16214 ++}
16215 ++
16216 ++set_offs() { # prev target next
16217 ++ A1=`dec_addr $1`
16218 ++ A2=`dec_addr $2`
16219 ++ A3=`dec_addr $3`
16220 ++ TARGET="0x$2" # an address
16221 ++ PREV=`expr $A1 - $A2` # offset to previous symbol
16222 ++ NEXT=+`expr $A3 - $A2` # offset to next symbol
16223 ++ OVERFLOW=+`printf "0x%x" ${PREV}` # overflow offset to previous symbol
16224 ++}
16225 ++
16226 ++# We have to decode symbol addresses to get correct offsets.
16227 ++# If the offset is not an instruction boundary, it causes -EILSEQ.
16228 ++set_offs `grep -A1 -B1 ${TARGET_FUNC} /proc/kallsyms | cut -f 1 -d " " | xargs`
16229 ++
16230 ++UINT_TEST=no
16231 ++# printf "%x" -1 returns (unsigned long)-1.
16232 ++if [ `printf "%x" -1 | wc -c` != 9 ]; then
16233 ++ UINT_TEST=yes
16234 ++fi
16235 ++
16236 ++echo 0 > events/enable
16237 ++echo > kprobe_events
16238 ++echo "p:testprobe ${TARGET_FUNC}" > kprobe_events
16239 ++echo "p:testprobe ${TARGET}" > kprobe_events
16240 ++echo "p:testprobe ${TARGET_FUNC}${NEXT}" > kprobe_events
16241 ++! echo "p:testprobe ${TARGET_FUNC}${PREV}" > kprobe_events
16242 ++if [ "${UINT_TEST}" = yes ]; then
16243 ++! echo "p:testprobe ${TARGET_FUNC}${OVERFLOW}" > kprobe_events
16244 ++fi
16245 ++echo > kprobe_events
16246 ++clear_trace
16247 +diff --git a/tools/testing/selftests/futex/Makefile b/tools/testing/selftests/futex/Makefile
16248 +index cea4adcd42b8..a63e8453984d 100644
16249 +--- a/tools/testing/selftests/futex/Makefile
16250 ++++ b/tools/testing/selftests/futex/Makefile
16251 +@@ -12,9 +12,9 @@ all:
16252 + BUILD_TARGET=$(OUTPUT)/$$DIR; \
16253 + mkdir $$BUILD_TARGET -p; \
16254 + make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
16255 +- if [ -e $$DIR/$(TEST_PROGS) ]; then
16256 +- rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/;
16257 +- fi
16258 ++ if [ -e $$DIR/$(TEST_PROGS) ]; then \
16259 ++ rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/; \
16260 ++ fi \
16261 + done
16262 +
16263 + override define RUN_TESTS
16264 +diff --git a/tools/testing/selftests/memfd/Makefile b/tools/testing/selftests/memfd/Makefile
16265 +index 3926a0409dda..36409cb7288c 100644
16266 +--- a/tools/testing/selftests/memfd/Makefile
16267 ++++ b/tools/testing/selftests/memfd/Makefile
16268 +@@ -5,6 +5,7 @@ CFLAGS += -I../../../../include/
16269 + CFLAGS += -I../../../../usr/include/
16270 +
16271 + TEST_PROGS := run_tests.sh
16272 ++TEST_FILES := run_fuse_test.sh
16273 + TEST_GEN_FILES := memfd_test fuse_mnt fuse_test
16274 +
16275 + fuse_mnt.o: CFLAGS += $(shell pkg-config fuse --cflags)
16276 +diff --git a/tools/testing/selftests/memfd/config b/tools/testing/selftests/memfd/config
16277 +new file mode 100644
16278 +index 000000000000..835c7f4dadcd
16279 +--- /dev/null
16280 ++++ b/tools/testing/selftests/memfd/config
16281 +@@ -0,0 +1 @@
16282 ++CONFIG_FUSE_FS=m
16283 +diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
16284 +index 989f917068d1..d4346b16b2c1 100644
16285 +--- a/tools/testing/selftests/net/psock_fanout.c
16286 ++++ b/tools/testing/selftests/net/psock_fanout.c
16287 +@@ -128,6 +128,8 @@ static void sock_fanout_getopts(int fd, uint16_t *typeflags, uint16_t *group_id)
16288 +
16289 + static void sock_fanout_set_ebpf(int fd)
16290 + {
16291 ++ static char log_buf[65536];
16292 ++
16293 + const int len_off = __builtin_offsetof(struct __sk_buff, len);
16294 + struct bpf_insn prog[] = {
16295 + { BPF_ALU64 | BPF_MOV | BPF_X, 6, 1, 0, 0 },
16296 +@@ -140,7 +142,6 @@ static void sock_fanout_set_ebpf(int fd)
16297 + { BPF_ALU | BPF_MOV | BPF_K, 0, 0, 0, 0 },
16298 + { BPF_JMP | BPF_EXIT, 0, 0, 0, 0 }
16299 + };
16300 +- char log_buf[512];
16301 + union bpf_attr attr;
16302 + int pfd;
16303 +
16304 +diff --git a/tools/testing/selftests/powerpc/mm/subpage_prot.c b/tools/testing/selftests/powerpc/mm/subpage_prot.c
16305 +index 35ade7406dcd..3ae77ba93208 100644
16306 +--- a/tools/testing/selftests/powerpc/mm/subpage_prot.c
16307 ++++ b/tools/testing/selftests/powerpc/mm/subpage_prot.c
16308 +@@ -135,6 +135,16 @@ static int run_test(void *addr, unsigned long size)
16309 + return 0;
16310 + }
16311 +
16312 ++static int syscall_available(void)
16313 ++{
16314 ++ int rc;
16315 ++
16316 ++ errno = 0;
16317 ++ rc = syscall(__NR_subpage_prot, 0, 0, 0);
16318 ++
16319 ++ return rc == 0 || (errno != ENOENT && errno != ENOSYS);
16320 ++}
16321 ++
16322 + int test_anon(void)
16323 + {
16324 + unsigned long align;
16325 +@@ -145,6 +155,8 @@ int test_anon(void)
16326 + void *mallocblock;
16327 + unsigned long mallocsize;
16328 +
16329 ++ SKIP_IF(!syscall_available());
16330 ++
16331 + if (getpagesize() != 0x10000) {
16332 + fprintf(stderr, "Kernel page size must be 64K!\n");
16333 + return 1;
16334 +@@ -180,6 +192,8 @@ int test_file(void)
16335 + off_t filesize;
16336 + int fd;
16337 +
16338 ++ SKIP_IF(!syscall_available());
16339 ++
16340 + fd = open(file_name, O_RDWR);
16341 + if (fd == -1) {
16342 + perror("failed to open file");
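syscall_available() above probes for the subpage_prot syscall so the test can skip rather than fail on kernels without it; ENOENT is this syscall's quirk, ENOSYS the generic "not implemented" answer. The pattern generalizes:

#include <errno.h>
#include <sys/syscall.h>
#include <unistd.h>

/* For syscalls where an all-zero-argument probe is known to be safe,
 * treat "not implemented" errors as absence; any other outcome means
 * the syscall exists. */
static int have_syscall(long nr)
{
	long rc;

	errno = 0;
	rc = syscall(nr, 0, 0, 0);

	return rc == 0 || (errno != ENOENT && errno != ENOSYS);
}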
16343 +diff --git a/tools/testing/selftests/pstore/config b/tools/testing/selftests/pstore/config
16344 +index 6a8e5a9bfc10..d148f9f89fb6 100644
16345 +--- a/tools/testing/selftests/pstore/config
16346 ++++ b/tools/testing/selftests/pstore/config
16347 +@@ -2,3 +2,4 @@ CONFIG_MISC_FILESYSTEMS=y
16348 + CONFIG_PSTORE=y
16349 + CONFIG_PSTORE_PMSG=y
16350 + CONFIG_PSTORE_CONSOLE=y
16351 ++CONFIG_PSTORE_RAM=m
16352 +diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
16353 +index 194759ec9e70..e350cf3d4f90 100644
16354 +--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
16355 ++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
16356 +@@ -145,6 +145,15 @@ struct seccomp_data {
16357 + #define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
16358 + #endif
16359 +
16360 ++#ifndef PTRACE_SECCOMP_GET_METADATA
16361 ++#define PTRACE_SECCOMP_GET_METADATA 0x420d
16362 ++
16363 ++struct seccomp_metadata {
16364 ++ __u64 filter_off; /* Input: which filter */
16365 ++ __u64 flags; /* Output: filter's flags */
16366 ++};
16367 ++#endif
16368 ++
16369 + #ifndef seccomp
16370 + int seccomp(unsigned int op, unsigned int flags, void *args)
16371 + {
16372 +@@ -2861,6 +2870,58 @@ TEST(get_action_avail)
16373 + EXPECT_EQ(errno, EOPNOTSUPP);
16374 + }
16375 +
16376 ++TEST(get_metadata)
16377 ++{
16378 ++ pid_t pid;
16379 ++ int pipefd[2];
16380 ++ char buf;
16381 ++ struct seccomp_metadata md;
16382 ++
16383 ++ ASSERT_EQ(0, pipe(pipefd));
16384 ++
16385 ++ pid = fork();
16386 ++ ASSERT_GE(pid, 0);
16387 ++ if (pid == 0) {
16388 ++ struct sock_filter filter[] = {
16389 ++ BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
16390 ++ };
16391 ++ struct sock_fprog prog = {
16392 ++ .len = (unsigned short)ARRAY_SIZE(filter),
16393 ++ .filter = filter,
16394 ++ };
16395 ++
16396 ++ /* one with log, one without */
16397 ++ ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER,
16398 ++ SECCOMP_FILTER_FLAG_LOG, &prog));
16399 ++ ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog));
16400 ++
16401 ++ ASSERT_EQ(0, close(pipefd[0]));
16402 ++ ASSERT_EQ(1, write(pipefd[1], "1", 1));
16403 ++ ASSERT_EQ(0, close(pipefd[1]));
16404 ++
16405 ++ while (1)
16406 ++ sleep(100);
16407 ++ }
16408 ++
16409 ++ ASSERT_EQ(0, close(pipefd[1]));
16410 ++ ASSERT_EQ(1, read(pipefd[0], &buf, 1));
16411 ++
16412 ++ ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid));
16413 ++ ASSERT_EQ(pid, waitpid(pid, NULL, 0));
16414 ++
16415 ++ md.filter_off = 0;
16416 ++ ASSERT_EQ(sizeof(md), ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md));
16417 ++ EXPECT_EQ(md.flags, SECCOMP_FILTER_FLAG_LOG);
16418 ++ EXPECT_EQ(md.filter_off, 0);
16419 ++
16420 ++ md.filter_off = 1;
16421 ++ ASSERT_EQ(sizeof(md), ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md));
16422 ++ EXPECT_EQ(md.flags, 0);
16423 ++ EXPECT_EQ(md.filter_off, 1);
16424 ++
16425 ++ ASSERT_EQ(0, kill(pid, SIGKILL));
16426 ++}
16427 ++
16428 + /*
16429 + * TODO:
16430 + * - add microbenchmarks
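The #ifndef block near the top of this diff is the standard selftest recipe for staying buildable against older uapi headers: define the request number and structure locally, matching the kernel's definitions, and let newer headers take precedence when they already provide them. Generic shape, with hypothetical names and value:

#include <linux/types.h>

#ifndef PTRACE_SOME_NEW_REQUEST
#define PTRACE_SOME_NEW_REQUEST 0x42ff	/* hypothetical request number */

struct some_new_args {
	__u64 in;	/* Input */
	__u64 out;	/* Output */
};
#endif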
16431 +diff --git a/tools/testing/selftests/sync/Makefile b/tools/testing/selftests/sync/Makefile
16432 +index b3c8ba3cb668..d0121a8a3523 100644
16433 +--- a/tools/testing/selftests/sync/Makefile
16434 ++++ b/tools/testing/selftests/sync/Makefile
16435 +@@ -30,7 +30,7 @@ $(TEST_CUSTOM_PROGS): $(TESTS) $(OBJS)
16436 + $(CC) -o $(TEST_CUSTOM_PROGS) $(OBJS) $(TESTS) $(CFLAGS) $(LDFLAGS)
16437 +
16438 + $(OBJS): $(OUTPUT)/%.o: %.c
16439 +- $(CC) -c $^ -o $@
16440 ++ $(CC) -c $^ -o $@ $(CFLAGS)
16441 +
16442 + $(TESTS): $(OUTPUT)/%.o: %.c
16443 + $(CC) -c $^ -o $@
16444 +diff --git a/tools/testing/selftests/vDSO/Makefile b/tools/testing/selftests/vDSO/Makefile
16445 +index 3d5a62ff7d31..f5d7a7851e21 100644
16446 +--- a/tools/testing/selftests/vDSO/Makefile
16447 ++++ b/tools/testing/selftests/vDSO/Makefile
16448 +@@ -1,4 +1,6 @@
16449 + # SPDX-License-Identifier: GPL-2.0
16450 ++include ../lib.mk
16451 ++
16452 + ifndef CROSS_COMPILE
16453 + CFLAGS := -std=gnu99
16454 + CFLAGS_vdso_standalone_test_x86 := -nostdlib -fno-asynchronous-unwind-tables -fno-stack-protector
16455 +@@ -6,16 +8,14 @@ ifeq ($(CONFIG_X86_32),y)
16456 + LDLIBS += -lgcc_s
16457 + endif
16458 +
16459 +-TEST_PROGS := vdso_test vdso_standalone_test_x86
16460 ++TEST_PROGS := $(OUTPUT)/vdso_test $(OUTPUT)/vdso_standalone_test_x86
16461 +
16462 + all: $(TEST_PROGS)
16463 +-vdso_test: parse_vdso.c vdso_test.c
16464 +-vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c
16465 ++$(OUTPUT)/vdso_test: parse_vdso.c vdso_test.c
16466 ++$(OUTPUT)/vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c
16467 + $(CC) $(CFLAGS) $(CFLAGS_vdso_standalone_test_x86) \
16468 + vdso_standalone_test_x86.c parse_vdso.c \
16469 +- -o vdso_standalone_test_x86
16470 ++ -o $@
16471 +
16472 +-include ../lib.mk
16473 +-clean:
16474 +- rm -fr $(TEST_PROGS)
16475 ++EXTRA_CLEAN := $(TEST_PROGS)
16476 + endif
16477 +diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
16478 +index cc826326de87..45708aa3ce47 100755
16479 +--- a/tools/testing/selftests/vm/run_vmtests
16480 ++++ b/tools/testing/selftests/vm/run_vmtests
16481 +@@ -2,25 +2,33 @@
16482 + # SPDX-License-Identifier: GPL-2.0
16483 + #please run as root
16484 +
16485 +-#we need 256M, below is the size in kB
16486 +-needmem=262144
16487 + mnt=./huge
16488 + exitcode=0
16489 +
16490 +-#get pagesize and freepages from /proc/meminfo
16491 ++#get huge pagesize and freepages from /proc/meminfo
16492 + while read name size unit; do
16493 + if [ "$name" = "HugePages_Free:" ]; then
16494 + freepgs=$size
16495 + fi
16496 + if [ "$name" = "Hugepagesize:" ]; then
16497 +- pgsize=$size
16498 ++ hpgsize_KB=$size
16499 + fi
16500 + done < /proc/meminfo
16501 +
16502 ++# Simple hugetlbfs tests have a hardcoded minimum requirement of
16503 ++# huge pages totaling 256MB (262144KB) in size. The userfaultfd
16504 ++# hugetlb test requires a minimum of 2 * nr_cpus huge pages. Take
16505 ++# both of these requirements into account and attempt to increase
16506 ++# the number of huge pages available.
16507 ++nr_cpus=$(nproc)
16508 ++hpgsize_MB=$((hpgsize_KB / 1024))
16509 ++half_ufd_size_MB=$((((nr_cpus * hpgsize_MB + 127) / 128) * 128))
16510 ++needmem_KB=$((half_ufd_size_MB * 2 * 1024))
16511 ++
16512 + #set proper nr_hugepages
16513 +-if [ -n "$freepgs" ] && [ -n "$pgsize" ]; then
16514 ++if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
16515 + nr_hugepgs=`cat /proc/sys/vm/nr_hugepages`
16516 +- needpgs=`expr $needmem / $pgsize`
16517 ++ needpgs=$((needmem_KB / hpgsize_KB))
16518 + tries=2
16519 + while [ $tries -gt 0 ] && [ $freepgs -lt $needpgs ]; do
16520 + lackpgs=$(( $needpgs - $freepgs ))
16521 +@@ -107,8 +115,9 @@ fi
16522 + echo "---------------------------"
16523 + echo "running userfaultfd_hugetlb"
16524 + echo "---------------------------"
16525 +-# 256MB total huge pages == 128MB src and 128MB dst
16526 +-./userfaultfd hugetlb 128 32 $mnt/ufd_test_file
16527 ++# Test requires source and destination huge pages. Size of source
16528 ++# (half_ufd_size_MB) is passed as argument to test.
16529 ++./userfaultfd hugetlb $half_ufd_size_MB 32 $mnt/ufd_test_file
16530 + if [ $? -ne 0 ]; then
16531 + echo "[FAIL]"
16532 + exitcode=1
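The rewritten arithmetic rounds the userfaultfd requirement (nr_cpus huge pages) up to the next 128MB multiple and then doubles it for source plus destination, so the old hardcoded 256MB floor falls out naturally. The same computation in C with a worked example:

static unsigned long round_up_mb(unsigned long mb, unsigned long step)
{
	return ((mb + step - 1) / step) * step;
}

/* e.g. 8 CPUs x 2MB pages: round_up_mb(16, 128) == 128, and
 * needmem_KB = 128 * 2 * 1024 == 262144, matching the old 256MB. */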
16533 +diff --git a/tools/thermal/tmon/sysfs.c b/tools/thermal/tmon/sysfs.c
16534 +index 1c12536f2081..18f523557983 100644
16535 +--- a/tools/thermal/tmon/sysfs.c
16536 ++++ b/tools/thermal/tmon/sysfs.c
16537 +@@ -486,6 +486,7 @@ int zone_instance_to_index(int zone_inst)
16538 + int update_thermal_data()
16539 + {
16540 + int i;
16541 ++ int next_thermal_record = cur_thermal_record + 1;
16542 + char tz_name[256];
16543 + static unsigned long samples;
16544 +
16545 +@@ -495,9 +496,9 @@ int update_thermal_data()
16546 + }
16547 +
16548 + /* circular buffer for keeping historic data */
16549 +- if (cur_thermal_record >= NR_THERMAL_RECORDS)
16550 +- cur_thermal_record = 0;
16551 +- gettimeofday(&trec[cur_thermal_record].tv, NULL);
16552 ++ if (next_thermal_record >= NR_THERMAL_RECORDS)
16553 ++ next_thermal_record = 0;
16554 ++ gettimeofday(&trec[next_thermal_record].tv, NULL);
16555 + if (tmon_log) {
16556 + fprintf(tmon_log, "%lu ", ++samples);
16557 + fprintf(tmon_log, "%3.1f ", p_param.t_target);
16558 +@@ -507,11 +508,12 @@ int update_thermal_data()
16559 + snprintf(tz_name, 256, "%s/%s%d", THERMAL_SYSFS, TZONE,
16560 + ptdata.tzi[i].instance);
16561 + sysfs_get_ulong(tz_name, "temp",
16562 +- &trec[cur_thermal_record].temp[i]);
16563 ++ &trec[next_thermal_record].temp[i]);
16564 + if (tmon_log)
16565 + fprintf(tmon_log, "%lu ",
16566 +- trec[cur_thermal_record].temp[i]/1000);
16567 ++ trec[next_thermal_record].temp[i] / 1000);
16568 + }
16569 ++ cur_thermal_record = next_thermal_record;
16570 + for (i = 0; i < ptdata.nr_cooling_dev; i++) {
16571 + char cdev_name[256];
16572 + unsigned long val;
16573 +diff --git a/tools/thermal/tmon/tmon.c b/tools/thermal/tmon/tmon.c
16574 +index 9aa19652e8e8..b43138f8b862 100644
16575 +--- a/tools/thermal/tmon/tmon.c
16576 ++++ b/tools/thermal/tmon/tmon.c
16577 +@@ -336,7 +336,6 @@ int main(int argc, char **argv)
16578 + show_data_w();
16579 + show_cooling_device();
16580 + }
16581 +- cur_thermal_record++;
16582 + time_elapsed += ticktime;
16583 + controller_handler(trec[0].temp[target_tz_index] / 1000,
16584 + &yk);
16585 +diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
16586 +index c1e4bdd66131..b4c5baf4af45 100644
16587 +--- a/virt/kvm/arm/vgic/vgic-mmio.c
16588 ++++ b/virt/kvm/arm/vgic/vgic-mmio.c
16589 +@@ -110,9 +110,12 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
16590 + /* Loop over all IRQs affected by this read */
16591 + for (i = 0; i < len * 8; i++) {
16592 + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
16593 ++ unsigned long flags;
16594 +
16595 ++ spin_lock_irqsave(&irq->irq_lock, flags);
16596 + if (irq_is_pending(irq))
16597 + value |= (1U << i);
16598 ++ spin_unlock_irqrestore(&irq->irq_lock, flags);
16599 +
16600 + vgic_put_irq(vcpu->kvm, irq);
16601 + }
16602 +diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
16603 +index f7450dc41ab3..21a2240164f3 100644
16604 +--- a/virt/kvm/arm/vgic/vgic.h
16605 ++++ b/virt/kvm/arm/vgic/vgic.h
16606 +@@ -96,6 +96,7 @@
16607 + /* we only support 64 kB translation table page size */
16608 + #define KVM_ITS_L1E_ADDR_MASK GENMASK_ULL(51, 16)
16609 +
16610 ++/* Requires the irq_lock to be held by the caller. */
16611 + static inline bool irq_is_pending(struct vgic_irq *irq)
16612 + {
16613 + if (irq->config == VGIC_CONFIG_EDGE)
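The vgic hunks take irq->irq_lock around the pending check, since for level-triggered interrupts the state irq_is_pending() reads can change under the reader, and the vgic.h comment documents the new rule for callers. A common way to make such a rule self-enforcing (not part of this patch) is a lockdep assertion in a checked wrapper:

static inline bool irq_is_pending_locked(struct vgic_irq *irq)
{
	lockdep_assert_held(&irq->irq_lock);
	return irq_is_pending(irq);	/* the helper shown above */
}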
16614
16615 diff --git a/1045_linux-4.14.46.patch b/1045_linux-4.14.46.patch
16616 new file mode 100644
16617 index 0000000..1414cad
16618 --- /dev/null
16619 +++ b/1045_linux-4.14.46.patch
16620 @@ -0,0 +1,850 @@
16621 +diff --git a/Makefile b/Makefile
16622 +index f3ea74e7a516..3b1845f2b8f8 100644
16623 +--- a/Makefile
16624 ++++ b/Makefile
16625 +@@ -1,7 +1,7 @@
16626 + # SPDX-License-Identifier: GPL-2.0
16627 + VERSION = 4
16628 + PATCHLEVEL = 14
16629 +-SUBLEVEL = 45
16630 ++SUBLEVEL = 46
16631 + EXTRAVERSION =
16632 + NAME = Petit Gorille
16633 +
16634 +diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
16635 +index 1f57bbe82b6f..df24fc8da1bc 100644
16636 +--- a/tools/arch/arm/include/uapi/asm/kvm.h
16637 ++++ b/tools/arch/arm/include/uapi/asm/kvm.h
16638 +@@ -180,6 +180,12 @@ struct kvm_arch_memory_slot {
16639 + #define KVM_REG_ARM_VFP_FPINST 0x1009
16640 + #define KVM_REG_ARM_VFP_FPINST2 0x100A
16641 +
16642 ++/* KVM-as-firmware specific pseudo-registers */
16643 ++#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
16644 ++#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \
16645 ++ KVM_REG_ARM_FW | ((r) & 0xffff))
16646 ++#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
16647 ++
16648 + /* Device Control API: ARM VGIC */
16649 + #define KVM_DEV_ARM_VGIC_GRP_ADDR 0
16650 + #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
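KVM_REG_ARM_PSCI_VERSION and friends are pseudo-registers, so userspace reads and writes them through the ordinary one-reg ioctls. A hedged sketch of querying a vCPU's PSCI version (vcpu_fd assumed to be an open KVM vCPU descriptor):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int get_psci_version(int vcpu_fd, __u64 *version)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_PSCI_VERSION,
		.addr = (__u64)(unsigned long)version,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	/* 0 on success */
}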
16651 +diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
16652 +index 51149ec75fe4..9f74ce5899f0 100644
16653 +--- a/tools/arch/arm64/include/uapi/asm/kvm.h
16654 ++++ b/tools/arch/arm64/include/uapi/asm/kvm.h
16655 +@@ -200,6 +200,12 @@ struct kvm_arch_memory_slot {
16656 + #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
16657 + #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
16658 +
16659 ++/* KVM-as-firmware specific pseudo-registers */
16660 ++#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
16661 ++#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
16662 ++ KVM_REG_ARM_FW | ((r) & 0xffff))
16663 ++#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
16664 ++
16665 + /* Device Control API: ARM VGIC */
16666 + #define KVM_DEV_ARM_VGIC_GRP_ADDR 0
16667 + #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
16668 +diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h
16669 +index 61d6049f4c1e..8aaec831053a 100644
16670 +--- a/tools/arch/powerpc/include/uapi/asm/kvm.h
16671 ++++ b/tools/arch/powerpc/include/uapi/asm/kvm.h
16672 +@@ -607,6 +607,8 @@ struct kvm_ppc_rmmu_info {
16673 + #define KVM_REG_PPC_TIDR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbc)
16674 + #define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
16675 +
16676 ++#define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
16677 ++
16678 + /* Transactional Memory checkpointed state:
16679 + * This is all GPRs, all VSX regs and a subset of SPRs
16680 + */
16681 +diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
16682 +index 9ad172dcd912..a3938db010f7 100644
16683 +--- a/tools/arch/s390/include/uapi/asm/kvm.h
16684 ++++ b/tools/arch/s390/include/uapi/asm/kvm.h
16685 +@@ -228,6 +228,7 @@ struct kvm_guest_debug_arch {
16686 + #define KVM_SYNC_RICCB (1UL << 7)
16687 + #define KVM_SYNC_FPRS (1UL << 8)
16688 + #define KVM_SYNC_GSCB (1UL << 9)
16689 ++#define KVM_SYNC_BPBC (1UL << 10)
16690 + /* length and alignment of the sdnx as a power of two */
16691 + #define SDNXC 8
16692 + #define SDNXL (1UL << SDNXC)
16693 +@@ -251,7 +252,9 @@ struct kvm_sync_regs {
16694 + };
16695 + __u8 reserved[512]; /* for future vector expansion */
16696 + __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
16697 +- __u8 padding1[52]; /* riccb needs to be 64byte aligned */
16698 ++ __u8 bpbc : 1; /* bp mode */
16699 ++ __u8 reserved2 : 7;
16700 ++ __u8 padding1[51]; /* riccb needs to be 64byte aligned */
16701 + __u8 riccb[64]; /* runtime instrumentation controls block */
16702 + __u8 padding2[192]; /* sdnx needs to be 256byte aligned */
16703 + union {
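The s390 hunk carves the new bpbc bit out of existing padding: one flag bit plus seven reserved bits occupy exactly one byte, so padding1 shrinks from 52 to 51 bytes and riccb keeps its 64-byte-aligned offset. A stand-alone model of that layout-preserving carve-out (hypothetical structs, not the uapi ones):

struct before { unsigned char padding[52]; unsigned char riccb[64]; };

struct after {
	unsigned char flag : 1;		/* the new bit */
	unsigned char rsvd : 7;		/* rest of the byte stays reserved */
	unsigned char padding[51];
	unsigned char riccb[64];
};

_Static_assert(sizeof(struct before) == sizeof(struct after),
	       "carving the flag out of padding preserves the layout");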
16704 +diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
16705 +index 793690fbda36..403e97d5e243 100644
16706 +--- a/tools/arch/x86/include/asm/cpufeatures.h
16707 ++++ b/tools/arch/x86/include/asm/cpufeatures.h
16708 +@@ -13,173 +13,176 @@
16709 + /*
16710 + * Defines x86 CPU feature bits
16711 + */
16712 +-#define NCAPINTS 18 /* N 32-bit words worth of info */
16713 +-#define NBUGINTS 1 /* N 32-bit bug flags */
16714 ++#define NCAPINTS 19 /* N 32-bit words worth of info */
16715 ++#define NBUGINTS 1 /* N 32-bit bug flags */
16716 +
16717 + /*
16718 + * Note: If the comment begins with a quoted string, that string is used
16719 + * in /proc/cpuinfo instead of the macro name. If the string is "",
16720 + * this feature bit is not displayed in /proc/cpuinfo at all.
16721 ++ *
16722 ++ * When adding new features here that depend on other features,
16723 ++ * please update the table in kernel/cpu/cpuid-deps.c as well.
16724 + */
16725 +
16726 +-/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
16727 +-#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
16728 +-#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
16729 +-#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
16730 +-#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
16731 +-#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
16732 +-#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
16733 +-#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
16734 +-#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
16735 +-#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
16736 +-#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
16737 +-#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
16738 +-#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
16739 +-#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
16740 +-#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
16741 +-#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */
16742 +- /* (plus FCMOVcc, FCOMI with FPU) */
16743 +-#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
16744 +-#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
16745 +-#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
16746 +-#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
16747 +-#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
16748 +-#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
16749 +-#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
16750 +-#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
16751 +-#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
16752 +-#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
16753 +-#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
16754 +-#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
16755 +-#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
16756 +-#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
16757 +-#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
16758 ++/* Intel-defined CPU features, CPUID level 0x00000001 (EDX), word 0 */
16759 ++#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
16760 ++#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
16761 ++#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
16762 ++#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
16763 ++#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
16764 ++#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
16765 ++#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
16766 ++#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
16767 ++#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
16768 ++#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
16769 ++#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
16770 ++#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
16771 ++#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
16772 ++#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
16773 ++#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */
16774 ++#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
16775 ++#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
16776 ++#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
16777 ++#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
16778 ++#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
16779 ++#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
16780 ++#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
16781 ++#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
16782 ++#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
16783 ++#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
16784 ++#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
16785 ++#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
16786 ++#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
16787 ++#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
16788 ++#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
16789 +
16790 + /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
16791 + /* Don't duplicate feature flags which are redundant with Intel! */
16792 +-#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
16793 +-#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */
16794 +-#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
16795 +-#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
16796 +-#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
16797 +-#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
16798 +-#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
16799 +-#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */
16800 +-#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */
16801 +-#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */
16802 ++#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
16803 ++#define X86_FEATURE_MP ( 1*32+19) /* MP Capable */
16804 ++#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
16805 ++#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
16806 ++#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
16807 ++#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
16808 ++#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
16809 ++#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64, 64-bit support) */
16810 ++#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow extensions */
16811 ++#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow */
16812 +
16813 + /* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
16814 +-#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
16815 +-#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
16816 +-#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
16817 ++#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
16818 ++#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
16819 ++#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
16820 +
16821 + /* Other features, Linux-defined mapping, word 3 */
16822 + /* This range is used for feature bits which conflict or are synthesized */
16823 +-#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
16824 +-#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
16825 +-#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
16826 +-#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
16827 +-/* cpu types for specific tunings: */
16828 +-#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
16829 +-#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
16830 +-#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
16831 +-#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
16832 +-#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
16833 +-#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */
16834 +-#define X86_FEATURE_ART ( 3*32+10) /* Platform has always running timer (ART) */
16835 +-#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
16836 +-#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
16837 +-#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
16838 +-#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */
16839 +-#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
16840 +-#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */
16841 +-#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
16842 +-#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
16843 +-#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
16844 +-#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
16845 +-#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
16846 +-#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */
16847 +-#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
16848 +-#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
16849 +-#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */
16850 +-#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
16851 +-#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
16852 +-#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
16853 +-#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
16854 +-#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
16855 ++#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
16856 ++#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
16857 ++#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
16858 ++#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
16859 ++
16860 ++/* CPU types for specific tunings: */
16861 ++#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
16862 ++#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
16863 ++#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
16864 ++#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
16865 ++#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
16866 ++#define X86_FEATURE_UP ( 3*32+ 9) /* SMP kernel running on UP */
16867 ++#define X86_FEATURE_ART ( 3*32+10) /* Always running timer (ART) */
16868 ++#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
16869 ++#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
16870 ++#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
16871 ++#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
16872 ++#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
16873 ++#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
16874 ++#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" MFENCE synchronizes RDTSC */
16875 ++#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
16876 ++#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
16877 ++#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
16878 ++#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
16879 ++#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* CPU topology enum extensions */
16880 ++#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
16881 ++#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
16882 ++#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */
16883 ++#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */
16884 ++#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */
16885 ++#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
16886 ++#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
16887 ++#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
16888 +
16889 +-/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
16890 +-#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
16891 +-#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
16892 +-#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
16893 +-#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
16894 +-#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
16895 +-#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
16896 +-#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */
16897 +-#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
16898 +-#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
16899 +-#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
16900 +-#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
16901 +-#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
16902 +-#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
16903 +-#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
16904 +-#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
16905 +-#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */
16906 +-#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
16907 +-#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
16908 +-#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
16909 +-#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
16910 +-#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */
16911 +-#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
16912 +-#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
16913 +-#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
16914 +-#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
16915 +-#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
16916 +-#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */
16917 +-#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
16918 +-#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */
16919 +-#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */
16920 +-#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
16921 ++/* Intel-defined CPU features, CPUID level 0x00000001 (ECX), word 4 */
16922 ++#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
16923 ++#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
16924 ++#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
16925 ++#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" MONITOR/MWAIT support */
16926 ++#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */
16927 ++#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
16928 ++#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer Mode eXtensions */
16929 ++#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
16930 ++#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
16931 ++#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
16932 ++#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
16933 ++#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
16934 ++#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
16935 ++#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B instruction */
16936 ++#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
16937 ++#define X86_FEATURE_PDCM ( 4*32+15) /* Perf/Debug Capabilities MSR */
16938 ++#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
16939 ++#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
16940 ++#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
16941 ++#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
16942 ++#define X86_FEATURE_X2APIC ( 4*32+21) /* X2APIC */
16943 ++#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
16944 ++#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
16945 ++#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* TSC deadline timer */
16946 ++#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
16947 ++#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */
16948 ++#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE instruction enabled in the OS */
16949 ++#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
16950 ++#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit FP conversions */
16951 ++#define X86_FEATURE_RDRAND ( 4*32+30) /* RDRAND instruction */
16952 ++#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
16953 +
16954 + /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
16955 +-#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
16956 +-#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
16957 +-#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
16958 +-#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
16959 +-#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
16960 +-#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
16961 +-#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
16962 +-#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
16963 +-#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
16964 +-#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
16965 ++#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
16966 ++#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
16967 ++#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
16968 ++#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
16969 ++#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
16970 ++#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
16971 ++#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
16972 ++#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
16973 ++#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
16974 ++#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
16975 +
16976 +-/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
16977 +-#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
16978 +-#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
16979 +-#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */
16980 +-#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
16981 +-#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
16982 +-#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
16983 +-#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
16984 +-#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
16985 +-#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
16986 +-#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
16987 +-#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
16988 +-#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
16989 +-#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
16990 +-#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
16991 +-#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
16992 +-#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
16993 +-#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */
16994 +-#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
16995 +-#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */
16996 +-#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
16997 +-#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
16998 +-#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
16999 +-#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
17000 +-#define X86_FEATURE_PTSC ( 6*32+27) /* performance time-stamp counter */
17001 +-#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */
17002 +-#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
17003 ++/* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */
17004 ++#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
17005 ++#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
17006 ++#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure Virtual Machine */
17007 ++#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
17008 ++#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
17009 ++#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
17010 ++#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
17011 ++#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
17012 ++#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
17013 ++#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
17014 ++#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
17015 ++#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
17016 ++#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
17017 ++#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
17018 ++#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
17019 ++#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
17020 ++#define X86_FEATURE_TCE ( 6*32+17) /* Translation Cache Extension */
17021 ++#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
17022 ++#define X86_FEATURE_TBM ( 6*32+21) /* Trailing Bit Manipulations */
17023 ++#define X86_FEATURE_TOPOEXT ( 6*32+22) /* Topology extensions CPUID leafs */
17024 ++#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* Core performance counter extensions */
17025 ++#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
17026 ++#define X86_FEATURE_BPEXT ( 6*32+26) /* Data breakpoint extension */
17027 ++#define X86_FEATURE_PTSC ( 6*32+27) /* Performance time-stamp counter */
17028 ++#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */
17029 ++#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX instructions) */
17030 +
17031 + /*
17032 + * Auxiliary flags: Linux defined - For features scattered in various
17033 +@@ -187,146 +190,185 @@
17034 + *
17035 + * Reuse free bits when adding new feature flags!
17036 + */
17037 +-#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */
17038 +-#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */
17039 +-#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
17040 +-#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
17041 +-#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
17042 +-#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
17043 +-#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
17044 +-
17045 +-#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
17046 +-#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
17047 +-#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
17048 ++#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT instructions */
17049 ++#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */
17050 ++#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
17051 ++#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
17052 ++#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
17053 ++#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
17054 ++#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
17055 ++#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
17056 ++#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
17057 ++#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
17058 ++#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
17059 ++#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
17060 ++#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
17061 ++#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
17062 ++#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
17063 +
17064 +-#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
17065 +-#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
17066 +-#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
17067 +-#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
17068 ++#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
17069 ++#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
17070 ++#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
17071 ++#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
17072 +
17073 +-#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
17074 ++#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
17075 ++#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
17076 ++#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
17077 ++#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */
17078 ++#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
17079 ++#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
17080 ++#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
17081 ++#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
17082 +
17083 + /* Virtualization flags: Linux defined, word 8 */
17084 +-#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
17085 +-#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
17086 +-#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
17087 +-#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
17088 +-#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
17089 ++#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
17090 ++#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
17091 ++#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
17092 ++#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
17093 ++#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
17094 +
17095 +-#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
17096 +-#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
17097 ++#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */
17098 ++#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
17099 +
17100 +
17101 +-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
17102 +-#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
17103 +-#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
17104 +-#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
17105 +-#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
17106 +-#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
17107 +-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
17108 +-#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
17109 +-#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
17110 +-#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
17111 +-#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
17112 +-#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
17113 +-#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
17114 +-#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
17115 +-#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
17116 +-#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
17117 +-#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
17118 +-#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
17119 +-#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
17120 +-#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
17121 +-#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
17122 +-#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
17123 +-#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
17124 +-#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
17125 +-#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
17126 +-#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
17127 +-#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
17128 +-#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
17129 ++/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
17130 ++#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
17131 ++#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3B */
17132 ++#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
17133 ++#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
17134 ++#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
17135 ++#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
17136 ++#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
17137 ++#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */
17138 ++#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
17139 ++#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
17140 ++#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
17141 ++#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
17142 ++#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
17143 ++#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
17144 ++#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
17145 ++#define X86_FEATURE_RDSEED ( 9*32+18) /* RDSEED instruction */
17146 ++#define X86_FEATURE_ADX ( 9*32+19) /* ADCX and ADOX instructions */
17147 ++#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
17148 ++#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
17149 ++#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
17150 ++#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
17151 ++#define X86_FEATURE_INTEL_PT ( 9*32+25) /* Intel Processor Trace */
17152 ++#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
17153 ++#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
17154 ++#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
17155 ++#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
17156 ++#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
17157 ++#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
17158 +
17159 +-/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
17160 +-#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
17161 +-#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */
17162 +-#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
17163 +-#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
17164 ++/* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */
17165 ++#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT instruction */
17166 ++#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */
17167 ++#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */
17168 ++#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */
17169 +
17170 +-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
17171 +-#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
17172 ++/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */
17173 ++#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
17174 +
17175 +-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
17176 +-#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
17177 +-#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
17178 +-#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */
17179 ++/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */
17180 ++#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring */
17181 ++#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
17182 ++#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */
17183 +
17184 +-/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
17185 +-#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
17186 +-#define X86_FEATURE_IRPERF (13*32+1) /* Instructions Retired Count */
17187 ++/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
17188 ++#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
17189 ++#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
17190 ++#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
17191 ++#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
17192 ++#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
17193 ++#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
17194 ++#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
17195 +
17196 +-/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
17197 +-#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
17198 +-#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
17199 +-#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
17200 +-#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
17201 +-#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
17202 +-#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
17203 +-#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
17204 +-#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
17205 +-#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
17206 +-#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
17207 ++/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
17208 ++#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
17209 ++#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
17210 ++#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
17211 ++#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
17212 ++#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
17213 ++#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
17214 ++#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
17215 ++#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
17216 ++#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
17217 ++#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
17218 +
17219 +-/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
17220 +-#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
17221 +-#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
17222 +-#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
17223 +-#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
17224 +-#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
17225 +-#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
17226 +-#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
17227 +-#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
17228 +-#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
17229 +-#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
17230 +-#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
17231 +-#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
17232 +-#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
17233 ++/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */
17234 ++#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
17235 ++#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
17236 ++#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
17237 ++#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
17238 ++#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
17239 ++#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
17240 ++#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
17241 ++#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
17242 ++#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
17243 ++#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
17244 ++#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
17245 ++#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
17246 ++#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
17247 +
17248 +-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
17249 +-#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
17250 +-#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
17251 +-#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
17252 +-#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
17253 +-#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
17254 +-#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
17255 ++/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
17256 ++#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
17257 ++#define X86_FEATURE_UMIP (16*32+ 2) /* User Mode Instruction Protection */
17258 ++#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
17259 ++#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
17260 ++#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */
17261 ++#define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */
17262 ++#define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */
17263 ++#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */
17264 ++#define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */
17265 ++#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
17266 ++#define X86_FEATURE_TME (16*32+13) /* Intel Total Memory Encryption */
17267 ++#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
17268 ++#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
17269 ++#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
17270 +
17271 +-/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
17272 +-#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
17273 +-#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */
17274 +-#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */
17275 ++/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
17276 ++#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
17277 ++#define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */
17278 ++#define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */
17279 ++
17280 ++/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
17281 ++#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
17282 ++#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
17283 ++#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
17284 ++#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
17285 ++#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
17286 ++#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
17287 ++#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
17288 +
17289 + /*
17290 + * BUG word(s)
17291 + */
17292 +-#define X86_BUG(x) (NCAPINTS*32 + (x))
17293 ++#define X86_BUG(x) (NCAPINTS*32 + (x))
17294 +
17295 +-#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
17296 +-#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
17297 +-#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
17298 +-#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
17299 +-#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
17300 +-#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
17301 +-#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
17302 +-#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
17303 +-#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
17304 ++#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
17305 ++#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
17306 ++#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
17307 ++#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
17308 ++#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
17309 ++#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
17310 ++#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
17311 ++#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
17312 ++#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
17313 + #ifdef CONFIG_X86_32
17314 + /*
17315 + * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional
17316 + * to avoid confusion.
17317 + */
17318 +-#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
17319 ++#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
17320 + #endif
17321 +-#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
17322 +-#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
17323 +-#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
17324 +-#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
17325 ++#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
17326 ++#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
17327 ++#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
17328 ++#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
17329 ++#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
17330 ++#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
17331 ++#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
17332 ++#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
17333 ++
17334 + #endif /* _ASM_X86_CPUFEATURES_H */
17335 +diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
17336 +index c10c9128f54e..c6a3af198294 100644
17337 +--- a/tools/arch/x86/include/asm/disabled-features.h
17338 ++++ b/tools/arch/x86/include/asm/disabled-features.h
17339 +@@ -44,6 +44,12 @@
17340 + # define DISABLE_LA57 (1<<(X86_FEATURE_LA57 & 31))
17341 + #endif
17342 +
17343 ++#ifdef CONFIG_PAGE_TABLE_ISOLATION
17344 ++# define DISABLE_PTI 0
17345 ++#else
17346 ++# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
17347 ++#endif
17348 ++
17349 + /*
17350 + * Make sure to add features to the correct mask
17351 + */
17352 +@@ -54,7 +60,7 @@
17353 + #define DISABLED_MASK4 (DISABLE_PCID)
17354 + #define DISABLED_MASK5 0
17355 + #define DISABLED_MASK6 0
17356 +-#define DISABLED_MASK7 0
17357 ++#define DISABLED_MASK7 (DISABLE_PTI)
17358 + #define DISABLED_MASK8 0
17359 + #define DISABLED_MASK9 (DISABLE_MPX)
17360 + #define DISABLED_MASK10 0
17361 +@@ -65,6 +71,7 @@
17362 + #define DISABLED_MASK15 0
17363 + #define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57)
17364 + #define DISABLED_MASK17 0
17365 +-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
17366 ++#define DISABLED_MASK18 0
17367 ++#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
17368 +
17369 + #endif /* _ASM_X86_DISABLED_FEATURES_H */
17370 +diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h
17371 +index d91ba04dd007..fb3a6de7440b 100644
17372 +--- a/tools/arch/x86/include/asm/required-features.h
17373 ++++ b/tools/arch/x86/include/asm/required-features.h
17374 +@@ -106,6 +106,7 @@
17375 + #define REQUIRED_MASK15 0
17376 + #define REQUIRED_MASK16 (NEED_LA57)
17377 + #define REQUIRED_MASK17 0
17378 +-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
17379 ++#define REQUIRED_MASK18 0
17380 ++#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
17381 +
17382 + #endif /* _ASM_X86_REQUIRED_FEATURES_H */
17383 +diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
17384 +index 7e99999d6236..857bad91c454 100644
17385 +--- a/tools/include/uapi/linux/kvm.h
17386 ++++ b/tools/include/uapi/linux/kvm.h
17387 +@@ -931,6 +931,7 @@ struct kvm_ppc_resize_hpt {
17388 + #define KVM_CAP_PPC_SMT_POSSIBLE 147
17389 + #define KVM_CAP_HYPERV_SYNIC2 148
17390 + #define KVM_CAP_HYPERV_VP_INDEX 149
17391 ++#define KVM_CAP_S390_BPB 152
17392 +
17393 + #ifdef KVM_CAP_IRQ_ROUTING
17394 +
17395 +diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
17396 +index 643cc4ba6872..3e5135dded16 100644
17397 +--- a/tools/perf/.gitignore
17398 ++++ b/tools/perf/.gitignore
17399 +@@ -31,5 +31,6 @@ config.mak.autogen
17400 + .config-detected
17401 + util/intel-pt-decoder/inat-tables.c
17402 + arch/*/include/generated/
17403 ++trace/beauty/generated/
17404 + pmu-events/pmu-events.c
17405 + pmu-events/jevents
17406 +diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
17407 +index 3b570e808b31..b205c1340456 100644
17408 +--- a/tools/perf/builtin-record.c
17409 ++++ b/tools/perf/builtin-record.c
17410 +@@ -926,15 +926,6 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
17411 + }
17412 + }
17413 +
17414 +- /*
17415 +- * If we have just single event and are sending data
17416 +- * through pipe, we need to force the ids allocation,
17417 +- * because we synthesize event name through the pipe
17418 +- * and need the id for that.
17419 +- */
17420 +- if (data->is_pipe && rec->evlist->nr_entries == 1)
17421 +- rec->opts.sample_id = true;
17422 +-
17423 + if (record__open(rec) != 0) {
17424 + err = -1;
17425 + goto out_child;
17426 +diff --git a/tools/perf/perf.h b/tools/perf/perf.h
17427 +index de1debcd3ee7..55086389fc06 100644
17428 +--- a/tools/perf/perf.h
17429 ++++ b/tools/perf/perf.h
17430 +@@ -61,7 +61,6 @@ struct record_opts {
17431 + bool tail_synthesize;
17432 + bool overwrite;
17433 + bool ignore_missing_thread;
17434 +- bool sample_id;
17435 + unsigned int freq;
17436 + unsigned int mmap_pages;
17437 + unsigned int auxtrace_mmap_pages;
17438 +diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
17439 +index 6f09e4962dad..1e97937b03a9 100644
17440 +--- a/tools/perf/util/record.c
17441 ++++ b/tools/perf/util/record.c
17442 +@@ -137,7 +137,6 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
17443 + struct perf_evsel *evsel;
17444 + bool use_sample_identifier = false;
17445 + bool use_comm_exec;
17446 +- bool sample_id = opts->sample_id;
17447 +
17448 + /*
17449 + * Set the evsel leader links before we configure attributes,
17450 +@@ -164,7 +163,8 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
17451 + * match the id.
17452 + */
17453 + use_sample_identifier = perf_can_sample_identifier();
17454 +- sample_id = true;
17455 ++ evlist__for_each_entry(evlist, evsel)
17456 ++ perf_evsel__set_sample_id(evsel, use_sample_identifier);
17457 + } else if (evlist->nr_entries > 1) {
17458 + struct perf_evsel *first = perf_evlist__first(evlist);
17459 +
17460 +@@ -174,10 +174,6 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
17461 + use_sample_identifier = perf_can_sample_identifier();
17462 + break;
17463 + }
17464 +- sample_id = true;
17465 +- }
17466 +-
17467 +- if (sample_id) {
17468 + evlist__for_each_entry(evlist, evsel)
17469 + perf_evsel__set_sample_id(evsel, use_sample_identifier);
17470 + }