Gentoo Archives: gentoo-commits

From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Sun, 25 Feb 2018 13:40:20
Message-Id: 1519566006.b6564907ab32c1a8c890c671c1c1e8b83e4967bc.alicef@gentoo
1 commit: b6564907ab32c1a8c890c671c1c1e8b83e4967bc
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Sun Feb 25 13:40:06 2018 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Sun Feb 25 13:40:06 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b6564907
7
8 linux kernel 4.14.22
9
10 0000_README | 4 +
11 1021_linux-4.14.22.patch | 5047 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 5051 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index f9abc2d..d7b4bf6 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -127,6 +127,10 @@ Patch: 1020_linux-4.14.21.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.14.21
21
22 +Patch: 1021_linux-4.14.22.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.14.22
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1021_linux-4.14.22.patch b/1021_linux-4.14.22.patch
31 new file mode 100644
32 index 0000000..e7dd4a5
33 --- /dev/null
34 +++ b/1021_linux-4.14.22.patch
35 @@ -0,0 +1,5047 @@
36 +diff --git a/Makefile b/Makefile
37 +index 68d70485b088..03d41143900c 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,7 +1,7 @@
41 + # SPDX-License-Identifier: GPL-2.0
42 + VERSION = 4
43 + PATCHLEVEL = 14
44 +-SUBLEVEL = 21
45 ++SUBLEVEL = 22
46 + EXTRAVERSION =
47 + NAME = Petit Gorille
48 +
49 +diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
50 +index e5b061469bf8..4714a59fd86d 100644
51 +--- a/arch/arm/boot/dts/am4372.dtsi
52 ++++ b/arch/arm/boot/dts/am4372.dtsi
53 +@@ -927,7 +927,8 @@
54 + reg = <0x48038000 0x2000>,
55 + <0x46000000 0x400000>;
56 + reg-names = "mpu", "dat";
57 +- interrupts = <80>, <81>;
58 ++ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>,
59 ++ <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
60 + interrupt-names = "tx", "rx";
61 + status = "disabled";
62 + dmas = <&edma 8 2>,
63 +@@ -941,7 +942,8 @@
64 + reg = <0x4803C000 0x2000>,
65 + <0x46400000 0x400000>;
66 + reg-names = "mpu", "dat";
67 +- interrupts = <82>, <83>;
68 ++ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>,
69 ++ <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
70 + interrupt-names = "tx", "rx";
71 + status = "disabled";
72 + dmas = <&edma 10 2>,
73 +diff --git a/arch/arm/boot/dts/am437x-cm-t43.dts b/arch/arm/boot/dts/am437x-cm-t43.dts
74 +index 9e92d480576b..3b9a94c274a7 100644
75 +--- a/arch/arm/boot/dts/am437x-cm-t43.dts
76 ++++ b/arch/arm/boot/dts/am437x-cm-t43.dts
77 +@@ -301,8 +301,8 @@
78 + status = "okay";
79 + pinctrl-names = "default";
80 + pinctrl-0 = <&spi0_pins>;
81 +- dmas = <&edma 16
82 +- &edma 17>;
83 ++ dmas = <&edma 16 0
84 ++ &edma 17 0>;
85 + dma-names = "tx0", "rx0";
86 +
87 + flash: w25q64cvzpig@0 {
88 +diff --git a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
89 +index 38faa90007d7..2fa5eb4bd402 100644
90 +--- a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
91 ++++ b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
92 +@@ -72,7 +72,8 @@
93 + };
94 +
95 + &gpmc {
96 +- ranges = <1 0 0x08000000 0x1000000>; /* CS1: 16MB for LAN9221 */
97 ++ ranges = <0 0 0x30000000 0x1000000 /* CS0: 16MB for NAND */
98 ++ 1 0 0x2c000000 0x1000000>; /* CS1: 16MB for LAN9221 */
99 +
100 + ethernet@gpmc {
101 + pinctrl-names = "default";
102 +diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
103 +index 26cce4d18405..4f2c5ec75714 100644
104 +--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
105 ++++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
106 +@@ -37,7 +37,7 @@
107 + };
108 +
109 + &gpmc {
110 +- ranges = <0 0 0x00000000 0x1000000>; /* CS0: 16MB for NAND */
111 ++ ranges = <0 0 0x30000000 0x1000000>; /* CS0: 16MB for NAND */
112 +
113 + nand@0,0 {
114 + compatible = "ti,omap2-nand";
115 +@@ -121,7 +121,7 @@
116 +
117 + &mmc3 {
118 + interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>;
119 +- pinctrl-0 = <&mmc3_pins>;
120 ++ pinctrl-0 = <&mmc3_pins &wl127x_gpio>;
121 + pinctrl-names = "default";
122 + vmmc-supply = <&wl12xx_vmmc>;
123 + non-removable;
124 +@@ -132,8 +132,8 @@
125 + wlcore: wlcore@2 {
126 + compatible = "ti,wl1273";
127 + reg = <2>;
128 +- interrupt-parent = <&gpio5>;
129 +- interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; /* gpio 152 */
130 ++ interrupt-parent = <&gpio1>;
131 ++ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>; /* gpio 2 */
132 + ref-clock-frequency = <26000000>;
133 + };
134 + };
135 +@@ -157,8 +157,6 @@
136 + OMAP3_CORE1_IOPAD(0x2166, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat5.sdmmc3_dat1 */
137 + OMAP3_CORE1_IOPAD(0x2168, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat6.sdmmc3_dat2 */
138 + OMAP3_CORE1_IOPAD(0x216a, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat6.sdmmc3_dat3 */
139 +- OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT_PULLUP | MUX_MODE4) /* mcbsp4_clkx.gpio_152 */
140 +- OMAP3_CORE1_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4) /* sys_boot1.gpio_3 */
141 + OMAP3_CORE1_IOPAD(0x21d0, PIN_INPUT_PULLUP | MUX_MODE3) /* mcspi1_cs1.sdmmc3_cmd */
142 + OMAP3_CORE1_IOPAD(0x21d2, PIN_INPUT_PULLUP | MUX_MODE3) /* mcspi1_cs2.sdmmc_clk */
143 + >;
144 +@@ -228,6 +226,12 @@
145 + OMAP3_WKUP_IOPAD(0x2a0e, PIN_OUTPUT | MUX_MODE4) /* sys_boot2.gpio_4 */
146 + >;
147 + };
148 ++ wl127x_gpio: pinmux_wl127x_gpio_pin {
149 ++ pinctrl-single,pins = <
150 ++ OMAP3_WKUP_IOPAD(0x2a0c, PIN_INPUT | MUX_MODE4) /* sys_boot0.gpio_2 */
151 ++ OMAP3_WKUP_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4) /* sys_boot1.gpio_3 */
152 ++ >;
153 ++ };
154 + };
155 +
156 + &omap3_pmx_core2 {
157 +diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
158 +index 64d00f5893a6..28d10abd8b04 100644
159 +--- a/arch/arm/boot/dts/omap4.dtsi
160 ++++ b/arch/arm/boot/dts/omap4.dtsi
161 +@@ -354,7 +354,7 @@
162 + elm: elm@48078000 {
163 + compatible = "ti,am3352-elm";
164 + reg = <0x48078000 0x2000>;
165 +- interrupts = <4>;
166 ++ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
167 + ti,hwmods = "elm";
168 + status = "disabled";
169 + };
170 +@@ -861,14 +861,12 @@
171 + usbhsohci: ohci@4a064800 {
172 + compatible = "ti,ohci-omap3";
173 + reg = <0x4a064800 0x400>;
174 +- interrupt-parent = <&gic>;
175 + interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
176 + };
177 +
178 + usbhsehci: ehci@4a064c00 {
179 + compatible = "ti,ehci-omap";
180 + reg = <0x4a064c00 0x400>;
181 +- interrupt-parent = <&gic>;
182 + interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
183 + };
184 + };
185 +diff --git a/arch/arm/common/bL_switcher_dummy_if.c b/arch/arm/common/bL_switcher_dummy_if.c
186 +index 4c10c6452678..f4dc1714a79e 100644
187 +--- a/arch/arm/common/bL_switcher_dummy_if.c
188 ++++ b/arch/arm/common/bL_switcher_dummy_if.c
189 +@@ -57,3 +57,7 @@ static struct miscdevice bL_switcher_device = {
190 + &bL_switcher_fops
191 + };
192 + module_misc_device(bL_switcher_device);
193 ++
194 ++MODULE_AUTHOR("Nicolas Pitre <nico@××××××.org>");
195 ++MODULE_LICENSE("GPL v2");
196 ++MODULE_DESCRIPTION("big.LITTLE switcher dummy user interface");
197 +diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c
198 +index 5ac122e88f67..9ff92050053c 100644
199 +--- a/arch/arm/mach-omap2/omap-secure.c
200 ++++ b/arch/arm/mach-omap2/omap-secure.c
201 +@@ -73,6 +73,25 @@ phys_addr_t omap_secure_ram_mempool_base(void)
202 + return omap_secure_memblock_base;
203 + }
204 +
205 ++u32 omap3_save_secure_ram(void __iomem *addr, int size)
206 ++{
207 ++ u32 ret;
208 ++ u32 param[5];
209 ++
210 ++ if (size != OMAP3_SAVE_SECURE_RAM_SZ)
211 ++ return OMAP3_SAVE_SECURE_RAM_SZ;
212 ++
213 ++ param[0] = 4; /* Number of arguments */
214 ++ param[1] = __pa(addr); /* Physical address for saving */
215 ++ param[2] = 0;
216 ++ param[3] = 1;
217 ++ param[4] = 1;
218 ++
219 ++ ret = save_secure_ram_context(__pa(param));
220 ++
221 ++ return ret;
222 ++}
223 ++
224 + /**
225 + * rx51_secure_dispatcher: Routine to dispatch secure PPA API calls
226 + * @idx: The PPA API index
227 +diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h
228 +index bae263fba640..c509cde71f93 100644
229 +--- a/arch/arm/mach-omap2/omap-secure.h
230 ++++ b/arch/arm/mach-omap2/omap-secure.h
231 +@@ -31,6 +31,8 @@
232 + /* Maximum Secure memory storage size */
233 + #define OMAP_SECURE_RAM_STORAGE (88 * SZ_1K)
234 +
235 ++#define OMAP3_SAVE_SECURE_RAM_SZ 0x803F
236 ++
237 + /* Secure low power HAL API index */
238 + #define OMAP4_HAL_SAVESECURERAM_INDEX 0x1a
239 + #define OMAP4_HAL_SAVEHW_INDEX 0x1b
240 +@@ -65,6 +67,8 @@ extern u32 omap_smc2(u32 id, u32 falg, u32 pargs);
241 + extern u32 omap_smc3(u32 id, u32 process, u32 flag, u32 pargs);
242 + extern phys_addr_t omap_secure_ram_mempool_base(void);
243 + extern int omap_secure_ram_reserve_memblock(void);
244 ++extern u32 save_secure_ram_context(u32 args_pa);
245 ++extern u32 omap3_save_secure_ram(void __iomem *save_regs, int size);
246 +
247 + extern u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs,
248 + u32 arg1, u32 arg2, u32 arg3, u32 arg4);
249 +diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
250 +index b668719b9b25..8e30772cfe32 100644
251 +--- a/arch/arm/mach-omap2/pm.h
252 ++++ b/arch/arm/mach-omap2/pm.h
253 +@@ -81,10 +81,6 @@ extern unsigned int omap3_do_wfi_sz;
254 + /* ... and its pointer from SRAM after copy */
255 + extern void (*omap3_do_wfi_sram)(void);
256 +
257 +-/* save_secure_ram_context function pointer and size, for copy to SRAM */
258 +-extern int save_secure_ram_context(u32 *addr);
259 +-extern unsigned int save_secure_ram_context_sz;
260 +-
261 + extern void omap3_save_scratchpad_contents(void);
262 +
263 + #define PM_RTA_ERRATUM_i608 (1 << 0)
264 +diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
265 +index 841ba19d64a6..36c55547137c 100644
266 +--- a/arch/arm/mach-omap2/pm34xx.c
267 ++++ b/arch/arm/mach-omap2/pm34xx.c
268 +@@ -48,6 +48,7 @@
269 + #include "prm3xxx.h"
270 + #include "pm.h"
271 + #include "sdrc.h"
272 ++#include "omap-secure.h"
273 + #include "sram.h"
274 + #include "control.h"
275 + #include "vc.h"
276 +@@ -66,7 +67,6 @@ struct power_state {
277 +
278 + static LIST_HEAD(pwrst_list);
279 +
280 +-static int (*_omap_save_secure_sram)(u32 *addr);
281 + void (*omap3_do_wfi_sram)(void);
282 +
283 + static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
284 +@@ -121,8 +121,8 @@ static void omap3_save_secure_ram_context(void)
285 + * will hang the system.
286 + */
287 + pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
288 +- ret = _omap_save_secure_sram((u32 *)(unsigned long)
289 +- __pa(omap3_secure_ram_storage));
290 ++ ret = omap3_save_secure_ram(omap3_secure_ram_storage,
291 ++ OMAP3_SAVE_SECURE_RAM_SZ);
292 + pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
293 + /* Following is for error tracking, it should not happen */
294 + if (ret) {
295 +@@ -434,15 +434,10 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
296 + *
297 + * The minimum set of functions is pushed to SRAM for execution:
298 + * - omap3_do_wfi for erratum i581 WA,
299 +- * - save_secure_ram_context for security extensions.
300 + */
301 + void omap_push_sram_idle(void)
302 + {
303 + omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
304 +-
305 +- if (omap_type() != OMAP2_DEVICE_TYPE_GP)
306 +- _omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
307 +- save_secure_ram_context_sz);
308 + }
309 +
310 + static void __init pm_errata_configure(void)
311 +@@ -553,7 +548,7 @@ int __init omap3_pm_init(void)
312 + clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
313 + if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
314 + omap3_secure_ram_storage =
315 +- kmalloc(0x803F, GFP_KERNEL);
316 ++ kmalloc(OMAP3_SAVE_SECURE_RAM_SZ, GFP_KERNEL);
317 + if (!omap3_secure_ram_storage)
318 + pr_err("Memory allocation failed when allocating for secure sram context\n");
319 +
320 +diff --git a/arch/arm/mach-omap2/prm33xx.c b/arch/arm/mach-omap2/prm33xx.c
321 +index d2c5bcabdbeb..ebaf80d72a10 100644
322 +--- a/arch/arm/mach-omap2/prm33xx.c
323 ++++ b/arch/arm/mach-omap2/prm33xx.c
324 +@@ -176,17 +176,6 @@ static int am33xx_pwrdm_read_pwrst(struct powerdomain *pwrdm)
325 + return v;
326 + }
327 +
328 +-static int am33xx_pwrdm_read_prev_pwrst(struct powerdomain *pwrdm)
329 +-{
330 +- u32 v;
331 +-
332 +- v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstst_offs);
333 +- v &= AM33XX_LASTPOWERSTATEENTERED_MASK;
334 +- v >>= AM33XX_LASTPOWERSTATEENTERED_SHIFT;
335 +-
336 +- return v;
337 +-}
338 +-
339 + static int am33xx_pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm)
340 + {
341 + am33xx_prm_rmw_reg_bits(AM33XX_LOWPOWERSTATECHANGE_MASK,
342 +@@ -357,7 +346,6 @@ struct pwrdm_ops am33xx_pwrdm_operations = {
343 + .pwrdm_set_next_pwrst = am33xx_pwrdm_set_next_pwrst,
344 + .pwrdm_read_next_pwrst = am33xx_pwrdm_read_next_pwrst,
345 + .pwrdm_read_pwrst = am33xx_pwrdm_read_pwrst,
346 +- .pwrdm_read_prev_pwrst = am33xx_pwrdm_read_prev_pwrst,
347 + .pwrdm_set_logic_retst = am33xx_pwrdm_set_logic_retst,
348 + .pwrdm_read_logic_pwrst = am33xx_pwrdm_read_logic_pwrst,
349 + .pwrdm_read_logic_retst = am33xx_pwrdm_read_logic_retst,
350 +diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
351 +index fa5fd24f524c..22daf4efed68 100644
352 +--- a/arch/arm/mach-omap2/sleep34xx.S
353 ++++ b/arch/arm/mach-omap2/sleep34xx.S
354 +@@ -93,20 +93,13 @@ ENTRY(enable_omap3630_toggle_l2_on_restore)
355 + ENDPROC(enable_omap3630_toggle_l2_on_restore)
356 +
357 + /*
358 +- * Function to call rom code to save secure ram context. This gets
359 +- * relocated to SRAM, so it can be all in .data section. Otherwise
360 +- * we need to initialize api_params separately.
361 ++ * Function to call rom code to save secure ram context.
362 ++ *
363 ++ * r0 = physical address of the parameters
364 + */
365 +- .data
366 +- .align 3
367 + ENTRY(save_secure_ram_context)
368 + stmfd sp!, {r4 - r11, lr} @ save registers on stack
369 +- adr r3, api_params @ r3 points to parameters
370 +- str r0, [r3,#0x4] @ r0 has sdram address
371 +- ldr r12, high_mask
372 +- and r3, r3, r12
373 +- ldr r12, sram_phy_addr_mask
374 +- orr r3, r3, r12
375 ++ mov r3, r0 @ physical address of parameters
376 + mov r0, #25 @ set service ID for PPA
377 + mov r12, r0 @ copy secure service ID in r12
378 + mov r1, #0 @ set task id for ROM code in r1
379 +@@ -120,18 +113,7 @@ ENTRY(save_secure_ram_context)
380 + nop
381 + nop
382 + ldmfd sp!, {r4 - r11, pc}
383 +- .align
384 +-sram_phy_addr_mask:
385 +- .word SRAM_BASE_P
386 +-high_mask:
387 +- .word 0xffff
388 +-api_params:
389 +- .word 0x4, 0x0, 0x0, 0x1, 0x1
390 + ENDPROC(save_secure_ram_context)
391 +-ENTRY(save_secure_ram_context_sz)
392 +- .word . - save_secure_ram_context
393 +-
394 +- .text
395 +
396 + /*
397 + * ======================
398 +diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
399 +index b99a27372965..da64e1cab233 100644
400 +--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
401 ++++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
402 +@@ -81,6 +81,7 @@
403 + reg = <0x000>;
404 + enable-method = "psci";
405 + cpu-idle-states = <&CPU_SLEEP_0>;
406 ++ #cooling-cells = <2>;
407 + };
408 +
409 + cpu1: cpu@1 {
410 +@@ -97,6 +98,7 @@
411 + reg = <0x100>;
412 + enable-method = "psci";
413 + cpu-idle-states = <&CPU_SLEEP_0>;
414 ++ #cooling-cells = <2>;
415 + };
416 +
417 + cpu3: cpu@101 {
418 +diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds
419 +index 3aa571a513b5..cf6edda38971 100644
420 +--- a/arch/m68k/kernel/vmlinux-nommu.lds
421 ++++ b/arch/m68k/kernel/vmlinux-nommu.lds
422 +@@ -45,6 +45,8 @@ SECTIONS {
423 + .text : {
424 + HEAD_TEXT
425 + TEXT_TEXT
426 ++ IRQENTRY_TEXT
427 ++ SOFTIRQENTRY_TEXT
428 + SCHED_TEXT
429 + CPUIDLE_TEXT
430 + LOCK_TEXT
431 +diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
432 +index 89172b8974b9..625a5785804f 100644
433 +--- a/arch/m68k/kernel/vmlinux-std.lds
434 ++++ b/arch/m68k/kernel/vmlinux-std.lds
435 +@@ -16,6 +16,8 @@ SECTIONS
436 + .text : {
437 + HEAD_TEXT
438 + TEXT_TEXT
439 ++ IRQENTRY_TEXT
440 ++ SOFTIRQENTRY_TEXT
441 + SCHED_TEXT
442 + CPUIDLE_TEXT
443 + LOCK_TEXT
444 +diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
445 +index 293990efc917..9868270b0984 100644
446 +--- a/arch/m68k/kernel/vmlinux-sun3.lds
447 ++++ b/arch/m68k/kernel/vmlinux-sun3.lds
448 +@@ -16,6 +16,8 @@ SECTIONS
449 + .text : {
450 + HEAD_TEXT
451 + TEXT_TEXT
452 ++ IRQENTRY_TEXT
453 ++ SOFTIRQENTRY_TEXT
454 + SCHED_TEXT
455 + CPUIDLE_TEXT
456 + LOCK_TEXT
457 +diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
458 +index b4209a68b85d..fce545774d50 100644
459 +--- a/arch/powerpc/perf/core-book3s.c
460 ++++ b/arch/powerpc/perf/core-book3s.c
461 +@@ -1419,7 +1419,7 @@ static int collect_events(struct perf_event *group, int max_count,
462 + int n = 0;
463 + struct perf_event *event;
464 +
465 +- if (!is_software_event(group)) {
466 ++ if (group->pmu->task_ctx_nr == perf_hw_context) {
467 + if (n >= max_count)
468 + return -1;
469 + ctrs[n] = group;
470 +@@ -1427,7 +1427,7 @@ static int collect_events(struct perf_event *group, int max_count,
471 + events[n++] = group->hw.config;
472 + }
473 + list_for_each_entry(event, &group->sibling_list, group_entry) {
474 +- if (!is_software_event(event) &&
475 ++ if (event->pmu->task_ctx_nr == perf_hw_context &&
476 + event->state != PERF_EVENT_STATE_OFF) {
477 + if (n >= max_count)
478 + return -1;
479 +diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
480 +index cf64e16f92c2..da6ba9ba73ed 100644
481 +--- a/arch/powerpc/perf/imc-pmu.c
482 ++++ b/arch/powerpc/perf/imc-pmu.c
483 +@@ -308,6 +308,19 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu)
484 + if (!cpumask_test_and_clear_cpu(cpu, &nest_imc_cpumask))
485 + return 0;
486 +
487 ++ /*
488 ++ * Check whether nest_imc is registered. We could end up here if the
489 ++ * cpuhotplug callback registration fails. i.e, callback invokes the
490 ++ * offline path for all successfully registered nodes. At this stage,
491 ++ * nest_imc pmu will not be registered and we should return here.
492 ++ *
493 ++ * We return with a zero since this is not an offline failure. And
494 ++ * cpuhp_setup_state() returns the actual failure reason to the caller,
495 ++ * which in turn will call the cleanup routine.
496 ++ */
497 ++ if (!nest_pmus)
498 ++ return 0;
499 ++
500 + /*
501 + * Now that this cpu is one of the designated,
502 + * find a next cpu a) which is online and b) in same chip.
503 +diff --git a/arch/s390/include/uapi/asm/virtio-ccw.h b/arch/s390/include/uapi/asm/virtio-ccw.h
504 +index 967aad390105..9e62587d9472 100644
505 +--- a/arch/s390/include/uapi/asm/virtio-ccw.h
506 ++++ b/arch/s390/include/uapi/asm/virtio-ccw.h
507 +@@ -1,4 +1,4 @@
508 +-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
509 ++/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
510 + /*
511 + * Definitions for virtio-ccw devices.
512 + *
513 +diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
514 +index 1c3b7ceb36d2..e7273a606a07 100644
515 +--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
516 ++++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
517 +@@ -55,29 +55,31 @@
518 + #define RAB1bl %bl
519 + #define RAB2bl %cl
520 +
521 ++#define CD0 0x0(%rsp)
522 ++#define CD1 0x8(%rsp)
523 ++#define CD2 0x10(%rsp)
524 ++
525 ++# used only before/after all rounds
526 + #define RCD0 %r8
527 + #define RCD1 %r9
528 + #define RCD2 %r10
529 +
530 +-#define RCD0d %r8d
531 +-#define RCD1d %r9d
532 +-#define RCD2d %r10d
533 +-
534 +-#define RX0 %rbp
535 +-#define RX1 %r11
536 +-#define RX2 %r12
537 ++# used only during rounds
538 ++#define RX0 %r8
539 ++#define RX1 %r9
540 ++#define RX2 %r10
541 +
542 +-#define RX0d %ebp
543 +-#define RX1d %r11d
544 +-#define RX2d %r12d
545 ++#define RX0d %r8d
546 ++#define RX1d %r9d
547 ++#define RX2d %r10d
548 +
549 +-#define RY0 %r13
550 +-#define RY1 %r14
551 +-#define RY2 %r15
552 ++#define RY0 %r11
553 ++#define RY1 %r12
554 ++#define RY2 %r13
555 +
556 +-#define RY0d %r13d
557 +-#define RY1d %r14d
558 +-#define RY2d %r15d
559 ++#define RY0d %r11d
560 ++#define RY1d %r12d
561 ++#define RY2d %r13d
562 +
563 + #define RT0 %rdx
564 + #define RT1 %rsi
565 +@@ -85,6 +87,8 @@
566 + #define RT0d %edx
567 + #define RT1d %esi
568 +
569 ++#define RT1bl %sil
570 ++
571 + #define do16bit_ror(rot, op1, op2, T0, T1, tmp1, tmp2, ab, dst) \
572 + movzbl ab ## bl, tmp2 ## d; \
573 + movzbl ab ## bh, tmp1 ## d; \
574 +@@ -92,6 +96,11 @@
575 + op1##l T0(CTX, tmp2, 4), dst ## d; \
576 + op2##l T1(CTX, tmp1, 4), dst ## d;
577 +
578 ++#define swap_ab_with_cd(ab, cd, tmp) \
579 ++ movq cd, tmp; \
580 ++ movq ab, cd; \
581 ++ movq tmp, ab;
582 ++
583 + /*
584 + * Combined G1 & G2 function. Reordered with help of rotates to have moves
585 + * at begining.
586 +@@ -110,15 +119,15 @@
587 + /* G1,2 && G2,2 */ \
588 + do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 0, x ## 0); \
589 + do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 0, y ## 0); \
590 +- xchgq cd ## 0, ab ## 0; \
591 ++ swap_ab_with_cd(ab ## 0, cd ## 0, RT0); \
592 + \
593 + do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 1, x ## 1); \
594 + do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 1, y ## 1); \
595 +- xchgq cd ## 1, ab ## 1; \
596 ++ swap_ab_with_cd(ab ## 1, cd ## 1, RT0); \
597 + \
598 + do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 2, x ## 2); \
599 + do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 2, y ## 2); \
600 +- xchgq cd ## 2, ab ## 2;
601 ++ swap_ab_with_cd(ab ## 2, cd ## 2, RT0);
602 +
603 + #define enc_round_end(ab, x, y, n) \
604 + addl y ## d, x ## d; \
605 +@@ -168,6 +177,16 @@
606 + decrypt_round3(ba, dc, (n*2)+1); \
607 + decrypt_round3(ba, dc, (n*2));
608 +
609 ++#define push_cd() \
610 ++ pushq RCD2; \
611 ++ pushq RCD1; \
612 ++ pushq RCD0;
613 ++
614 ++#define pop_cd() \
615 ++ popq RCD0; \
616 ++ popq RCD1; \
617 ++ popq RCD2;
618 ++
619 + #define inpack3(in, n, xy, m) \
620 + movq 4*(n)(in), xy ## 0; \
621 + xorq w+4*m(CTX), xy ## 0; \
622 +@@ -223,11 +242,8 @@ ENTRY(__twofish_enc_blk_3way)
623 + * %rdx: src, RIO
624 + * %rcx: bool, if true: xor output
625 + */
626 +- pushq %r15;
627 +- pushq %r14;
628 + pushq %r13;
629 + pushq %r12;
630 +- pushq %rbp;
631 + pushq %rbx;
632 +
633 + pushq %rcx; /* bool xor */
634 +@@ -235,40 +251,36 @@ ENTRY(__twofish_enc_blk_3way)
635 +
636 + inpack_enc3();
637 +
638 +- encrypt_cycle3(RAB, RCD, 0);
639 +- encrypt_cycle3(RAB, RCD, 1);
640 +- encrypt_cycle3(RAB, RCD, 2);
641 +- encrypt_cycle3(RAB, RCD, 3);
642 +- encrypt_cycle3(RAB, RCD, 4);
643 +- encrypt_cycle3(RAB, RCD, 5);
644 +- encrypt_cycle3(RAB, RCD, 6);
645 +- encrypt_cycle3(RAB, RCD, 7);
646 ++ push_cd();
647 ++ encrypt_cycle3(RAB, CD, 0);
648 ++ encrypt_cycle3(RAB, CD, 1);
649 ++ encrypt_cycle3(RAB, CD, 2);
650 ++ encrypt_cycle3(RAB, CD, 3);
651 ++ encrypt_cycle3(RAB, CD, 4);
652 ++ encrypt_cycle3(RAB, CD, 5);
653 ++ encrypt_cycle3(RAB, CD, 6);
654 ++ encrypt_cycle3(RAB, CD, 7);
655 ++ pop_cd();
656 +
657 + popq RIO; /* dst */
658 +- popq %rbp; /* bool xor */
659 ++ popq RT1; /* bool xor */
660 +
661 +- testb %bpl, %bpl;
662 ++ testb RT1bl, RT1bl;
663 + jnz .L__enc_xor3;
664 +
665 + outunpack_enc3(mov);
666 +
667 + popq %rbx;
668 +- popq %rbp;
669 + popq %r12;
670 + popq %r13;
671 +- popq %r14;
672 +- popq %r15;
673 + ret;
674 +
675 + .L__enc_xor3:
676 + outunpack_enc3(xor);
677 +
678 + popq %rbx;
679 +- popq %rbp;
680 + popq %r12;
681 + popq %r13;
682 +- popq %r14;
683 +- popq %r15;
684 + ret;
685 + ENDPROC(__twofish_enc_blk_3way)
686 +
687 +@@ -278,35 +290,31 @@ ENTRY(twofish_dec_blk_3way)
688 + * %rsi: dst
689 + * %rdx: src, RIO
690 + */
691 +- pushq %r15;
692 +- pushq %r14;
693 + pushq %r13;
694 + pushq %r12;
695 +- pushq %rbp;
696 + pushq %rbx;
697 +
698 + pushq %rsi; /* dst */
699 +
700 + inpack_dec3();
701 +
702 +- decrypt_cycle3(RAB, RCD, 7);
703 +- decrypt_cycle3(RAB, RCD, 6);
704 +- decrypt_cycle3(RAB, RCD, 5);
705 +- decrypt_cycle3(RAB, RCD, 4);
706 +- decrypt_cycle3(RAB, RCD, 3);
707 +- decrypt_cycle3(RAB, RCD, 2);
708 +- decrypt_cycle3(RAB, RCD, 1);
709 +- decrypt_cycle3(RAB, RCD, 0);
710 ++ push_cd();
711 ++ decrypt_cycle3(RAB, CD, 7);
712 ++ decrypt_cycle3(RAB, CD, 6);
713 ++ decrypt_cycle3(RAB, CD, 5);
714 ++ decrypt_cycle3(RAB, CD, 4);
715 ++ decrypt_cycle3(RAB, CD, 3);
716 ++ decrypt_cycle3(RAB, CD, 2);
717 ++ decrypt_cycle3(RAB, CD, 1);
718 ++ decrypt_cycle3(RAB, CD, 0);
719 ++ pop_cd();
720 +
721 + popq RIO; /* dst */
722 +
723 + outunpack_dec3();
724 +
725 + popq %rbx;
726 +- popq %rbp;
727 + popq %r12;
728 + popq %r13;
729 +- popq %r14;
730 +- popq %r15;
731 + ret;
732 + ENDPROC(twofish_dec_blk_3way)
733 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
734 +index dd35c6c50516..5ffde16253cb 100644
735 +--- a/arch/x86/kvm/vmx.c
736 ++++ b/arch/x86/kvm/vmx.c
737 +@@ -6950,7 +6950,6 @@ static __init int hardware_setup(void)
738 + goto out;
739 + }
740 +
741 +- vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
742 + memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
743 + memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
744 +
745 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
746 +index f97358423f9c..0dcd7bf45dc1 100644
747 +--- a/arch/x86/kvm/x86.c
748 ++++ b/arch/x86/kvm/x86.c
749 +@@ -2926,6 +2926,12 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
750 + kvm_x86_ops->vcpu_put(vcpu);
751 + kvm_put_guest_fpu(vcpu);
752 + vcpu->arch.last_host_tsc = rdtsc();
753 ++ /*
754 ++ * If userspace has set any breakpoints or watchpoints, dr6 is restored
755 ++ * on every vmexit, but if not, we might have a stale dr6 from the
756 ++ * guest. do_debug expects dr6 to be cleared after it runs, do the same.
757 ++ */
758 ++ set_debugreg(0, 6);
759 + }
760 +
761 + static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
762 +@@ -7474,6 +7480,29 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
763 + }
764 + EXPORT_SYMBOL_GPL(kvm_task_switch);
765 +
766 ++int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
767 ++{
768 ++ if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG_BIT)) {
769 ++ /*
770 ++ * When EFER.LME and CR0.PG are set, the processor is in
771 ++ * 64-bit mode (though maybe in a 32-bit code segment).
772 ++ * CR4.PAE and EFER.LMA must be set.
773 ++ */
774 ++ if (!(sregs->cr4 & X86_CR4_PAE_BIT)
775 ++ || !(sregs->efer & EFER_LMA))
776 ++ return -EINVAL;
777 ++ } else {
778 ++ /*
779 ++ * Not in 64-bit mode: EFER.LMA is clear and the code
780 ++ * segment cannot be 64-bit.
781 ++ */
782 ++ if (sregs->efer & EFER_LMA || sregs->cs.l)
783 ++ return -EINVAL;
784 ++ }
785 ++
786 ++ return 0;
787 ++}
788 ++
789 + int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
790 + struct kvm_sregs *sregs)
791 + {
792 +@@ -7486,6 +7515,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
793 + (sregs->cr4 & X86_CR4_OSXSAVE))
794 + return -EINVAL;
795 +
796 ++ if (kvm_valid_sregs(vcpu, sregs))
797 ++ return -EINVAL;
798 ++
799 + apic_base_msr.data = sregs->apic_base;
800 + apic_base_msr.host_initiated = true;
801 + if (kvm_set_apic_base(vcpu, &apic_base_msr))
802 +diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
803 +index bb120e59c597..7bebdd0273d3 100644
804 +--- a/arch/x86/mm/ioremap.c
805 ++++ b/arch/x86/mm/ioremap.c
806 +@@ -349,11 +349,11 @@ void iounmap(volatile void __iomem *addr)
807 + return;
808 + }
809 +
810 ++ mmiotrace_iounmap(addr);
811 ++
812 + addr = (volatile void __iomem *)
813 + (PAGE_MASK & (unsigned long __force)addr);
814 +
815 +- mmiotrace_iounmap(addr);
816 +-
817 + /* Use the vm area unlocked, assuming the caller
818 + ensures there isn't another iounmap for the same address
819 + in parallel. Reuse of the virtual address is prevented by
820 +diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
821 +index aa44c3aa4cd5..7c8686709636 100644
822 +--- a/arch/x86/mm/kmmio.c
823 ++++ b/arch/x86/mm/kmmio.c
824 +@@ -435,17 +435,18 @@ int register_kmmio_probe(struct kmmio_probe *p)
825 + unsigned long flags;
826 + int ret = 0;
827 + unsigned long size = 0;
828 ++ unsigned long addr = p->addr & PAGE_MASK;
829 + const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
830 + unsigned int l;
831 + pte_t *pte;
832 +
833 + spin_lock_irqsave(&kmmio_lock, flags);
834 +- if (get_kmmio_probe(p->addr)) {
835 ++ if (get_kmmio_probe(addr)) {
836 + ret = -EEXIST;
837 + goto out;
838 + }
839 +
840 +- pte = lookup_address(p->addr, &l);
841 ++ pte = lookup_address(addr, &l);
842 + if (!pte) {
843 + ret = -EINVAL;
844 + goto out;
845 +@@ -454,7 +455,7 @@ int register_kmmio_probe(struct kmmio_probe *p)
846 + kmmio_count++;
847 + list_add_rcu(&p->list, &kmmio_probes);
848 + while (size < size_lim) {
849 +- if (add_kmmio_fault_page(p->addr + size))
850 ++ if (add_kmmio_fault_page(addr + size))
851 + pr_err("Unable to set page fault.\n");
852 + size += page_level_size(l);
853 + }
854 +@@ -528,19 +529,20 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
855 + {
856 + unsigned long flags;
857 + unsigned long size = 0;
858 ++ unsigned long addr = p->addr & PAGE_MASK;
859 + const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
860 + struct kmmio_fault_page *release_list = NULL;
861 + struct kmmio_delayed_release *drelease;
862 + unsigned int l;
863 + pte_t *pte;
864 +
865 +- pte = lookup_address(p->addr, &l);
866 ++ pte = lookup_address(addr, &l);
867 + if (!pte)
868 + return;
869 +
870 + spin_lock_irqsave(&kmmio_lock, flags);
871 + while (size < size_lim) {
872 +- release_kmmio_fault_page(p->addr + size, &release_list);
873 ++ release_kmmio_fault_page(addr + size, &release_list);
874 + size += page_level_size(l);
875 + }
876 + list_del_rcu(&p->list);
877 +diff --git a/block/blk-map.c b/block/blk-map.c
878 +index 368daa02714e..e31be14da8ea 100644
879 +--- a/block/blk-map.c
880 ++++ b/block/blk-map.c
881 +@@ -126,7 +126,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
882 + unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
883 + struct bio *bio = NULL;
884 + struct iov_iter i;
885 +- int ret;
886 ++ int ret = -EINVAL;
887 +
888 + if (!iter_is_iovec(iter))
889 + goto fail;
890 +@@ -155,7 +155,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
891 + __blk_rq_unmap_user(bio);
892 + fail:
893 + rq->bio = NULL;
894 +- return -EINVAL;
895 ++ return ret;
896 + }
897 + EXPORT_SYMBOL(blk_rq_map_user_iov);
898 +
899 +diff --git a/drivers/Makefile b/drivers/Makefile
900 +index d242d3514d30..5f5ccdbad21a 100644
901 +--- a/drivers/Makefile
902 ++++ b/drivers/Makefile
903 +@@ -105,6 +105,7 @@ obj-$(CONFIG_TC) += tc/
904 + obj-$(CONFIG_UWB) += uwb/
905 + obj-$(CONFIG_USB_PHY) += usb/
906 + obj-$(CONFIG_USB) += usb/
907 ++obj-$(CONFIG_USB_SUPPORT) += usb/
908 + obj-$(CONFIG_PCI) += usb/
909 + obj-$(CONFIG_USB_GADGET) += usb/
910 + obj-$(CONFIG_OF) += usb/
911 +diff --git a/drivers/android/binder.c b/drivers/android/binder.c
912 +index 2ef8bd29e188..b7efdc8badee 100644
913 +--- a/drivers/android/binder.c
914 ++++ b/drivers/android/binder.c
915 +@@ -1933,8 +1933,14 @@ static void binder_send_failed_reply(struct binder_transaction *t,
916 + &target_thread->todo);
917 + wake_up_interruptible(&target_thread->wait);
918 + } else {
919 +- WARN(1, "Unexpected reply error: %u\n",
920 +- target_thread->reply_error.cmd);
921 ++ /*
922 ++ * Cannot get here for normal operation, but
923 ++ * we can if multiple synchronous transactions
924 ++ * are sent without blocking for responses.
925 ++ * Just ignore the 2nd error in this case.
926 ++ */
927 ++ pr_warn("Unexpected reply error: %u\n",
928 ++ target_thread->reply_error.cmd);
929 + }
930 + binder_inner_proc_unlock(target_thread->proc);
931 + binder_thread_dec_tmpref(target_thread);
932 +@@ -2135,7 +2141,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
933 + int debug_id = buffer->debug_id;
934 +
935 + binder_debug(BINDER_DEBUG_TRANSACTION,
936 +- "%d buffer release %d, size %zd-%zd, failed at %p\n",
937 ++ "%d buffer release %d, size %zd-%zd, failed at %pK\n",
938 + proc->pid, buffer->debug_id,
939 + buffer->data_size, buffer->offsets_size, failed_at);
940 +
941 +@@ -3647,7 +3653,7 @@ static int binder_thread_write(struct binder_proc *proc,
942 + }
943 + }
944 + binder_debug(BINDER_DEBUG_DEAD_BINDER,
945 +- "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
946 ++ "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
947 + proc->pid, thread->pid, (u64)cookie,
948 + death);
949 + if (death == NULL) {
950 +@@ -4316,6 +4322,15 @@ static int binder_thread_release(struct binder_proc *proc,
951 +
952 + binder_inner_proc_unlock(thread->proc);
953 +
954 ++ /*
955 ++ * This is needed to avoid races between wake_up_poll() above and
956 ++ * and ep_remove_waitqueue() called for other reasons (eg the epoll file
957 ++ * descriptor being closed); ep_remove_waitqueue() holds an RCU read
958 ++ * lock, so we can be sure it's done after calling synchronize_rcu().
959 ++ */
960 ++ if (thread->looper & BINDER_LOOPER_STATE_POLL)
961 ++ synchronize_rcu();
962 ++
963 + if (send_reply)
964 + binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
965 + binder_release_work(proc, &thread->todo);
966 +@@ -4331,6 +4346,8 @@ static unsigned int binder_poll(struct file *filp,
967 + bool wait_for_proc_work;
968 +
969 + thread = binder_get_thread(proc);
970 ++ if (!thread)
971 ++ return POLLERR;
972 +
973 + binder_inner_proc_lock(thread->proc);
974 + thread->looper |= BINDER_LOOPER_STATE_POLL;
975 +@@ -4974,7 +4991,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
976 + spin_lock(&t->lock);
977 + to_proc = t->to_proc;
978 + seq_printf(m,
979 +- "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
980 ++ "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
981 + prefix, t->debug_id, t,
982 + t->from ? t->from->proc->pid : 0,
983 + t->from ? t->from->pid : 0,
984 +@@ -4998,7 +5015,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
985 + }
986 + if (buffer->target_node)
987 + seq_printf(m, " node %d", buffer->target_node->debug_id);
988 +- seq_printf(m, " size %zd:%zd data %p\n",
989 ++ seq_printf(m, " size %zd:%zd data %pK\n",
990 + buffer->data_size, buffer->offsets_size,
991 + buffer->data);
992 + }
993 +diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
994 +index 7bcf80fa9ada..b2b1eece0db1 100644
995 +--- a/drivers/base/power/runtime.c
996 ++++ b/drivers/base/power/runtime.c
997 +@@ -276,7 +276,8 @@ static int rpm_get_suppliers(struct device *dev)
998 + continue;
999 +
1000 + retval = pm_runtime_get_sync(link->supplier);
1001 +- if (retval < 0) {
1002 ++ /* Ignore suppliers with disabled runtime PM. */
1003 ++ if (retval < 0 && retval != -EACCES) {
1004 + pm_runtime_put_noidle(link->supplier);
1005 + return retval;
1006 + }
1007 +diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
1008 +index 7ac657f46d15..aec66159566d 100644
1009 +--- a/drivers/crypto/s5p-sss.c
1010 ++++ b/drivers/crypto/s5p-sss.c
1011 +@@ -601,15 +601,21 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
1012 + uint32_t aes_control;
1013 + unsigned long flags;
1014 + int err;
1015 ++ u8 *iv;
1016 +
1017 + aes_control = SSS_AES_KEY_CHANGE_MODE;
1018 + if (mode & FLAGS_AES_DECRYPT)
1019 + aes_control |= SSS_AES_MODE_DECRYPT;
1020 +
1021 +- if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC)
1022 ++ if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
1023 + aes_control |= SSS_AES_CHAIN_MODE_CBC;
1024 +- else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR)
1025 ++ iv = req->info;
1026 ++ } else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
1027 + aes_control |= SSS_AES_CHAIN_MODE_CTR;
1028 ++ iv = req->info;
1029 ++ } else {
1030 ++ iv = NULL; /* AES_ECB */
1031 ++ }
1032 +
1033 + if (dev->ctx->keylen == AES_KEYSIZE_192)
1034 + aes_control |= SSS_AES_KEY_SIZE_192;
1035 +@@ -640,7 +646,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
1036 + goto outdata_error;
1037 +
1038 + SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
1039 +- s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
1040 ++ s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);
1041 +
1042 + s5p_set_dma_indata(dev, dev->sg_src);
1043 + s5p_set_dma_outdata(dev, dev->sg_dst);
1044 +diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
1045 +index a19b5d0300a9..ceae25112acd 100644
1046 +--- a/drivers/crypto/talitos.c
1047 ++++ b/drivers/crypto/talitos.c
1048 +@@ -1124,6 +1124,11 @@ int talitos_sg_map(struct device *dev, struct scatterlist *src,
1049 + struct talitos_private *priv = dev_get_drvdata(dev);
1050 + bool is_sec1 = has_ftr_sec1(priv);
1051 +
1052 ++ if (!src) {
1053 ++ *ptr = zero_entry;
1054 ++ return 1;
1055 ++ }
1056 ++
1057 + to_talitos_ptr_len(ptr, len, is_sec1);
1058 + to_talitos_ptr_ext_set(ptr, 0, is_sec1);
1059 +
1060 +diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
1061 +index fbab271b3bf9..a861b5b4d443 100644
1062 +--- a/drivers/dma/at_hdmac.c
1063 ++++ b/drivers/dma/at_hdmac.c
1064 +@@ -708,7 +708,7 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
1065 + unsigned long flags)
1066 + {
1067 + struct at_dma_chan *atchan = to_at_dma_chan(chan);
1068 +- struct data_chunk *first = xt->sgl;
1069 ++ struct data_chunk *first;
1070 + struct at_desc *desc = NULL;
1071 + size_t xfer_count;
1072 + unsigned int dwidth;
1073 +@@ -720,6 +720,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
1074 + if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
1075 + return NULL;
1076 +
1077 ++ first = xt->sgl;
1078 ++
1079 + dev_info(chan2dev(chan),
1080 + "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
1081 + __func__, &xt->src_start, &xt->dst_start, xt->numf,
1082 +diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
1083 +index d50273fed715..afd5e10f8927 100644
1084 +--- a/drivers/dma/dma-jz4740.c
1085 ++++ b/drivers/dma/dma-jz4740.c
1086 +@@ -555,7 +555,7 @@ static int jz4740_dma_probe(struct platform_device *pdev)
1087 +
1088 + ret = dma_async_device_register(dd);
1089 + if (ret)
1090 +- return ret;
1091 ++ goto err_clk;
1092 +
1093 + irq = platform_get_irq(pdev, 0);
1094 + ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev);
1095 +@@ -568,6 +568,8 @@ static int jz4740_dma_probe(struct platform_device *pdev)
1096 +
1097 + err_unregister:
1098 + dma_async_device_unregister(dd);
1099 ++err_clk:
1100 ++ clk_disable_unprepare(dmadev->clk);
1101 + return ret;
1102 + }
1103 +
1104 +diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
1105 +index 93e006c3441d..854deb0da07c 100644
1106 +--- a/drivers/dma/ioat/init.c
1107 ++++ b/drivers/dma/ioat/init.c
1108 +@@ -390,7 +390,7 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
1109 + if (memcmp(src, dest, IOAT_TEST_SIZE)) {
1110 + dev_err(dev, "Self-test copy failed compare, disabling\n");
1111 + err = -ENODEV;
1112 +- goto free_resources;
1113 ++ goto unmap_dma;
1114 + }
1115 +
1116 + unmap_dma:
1117 +diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
1118 +index 6b535ec858cc..15a1f4b348c4 100644
1119 +--- a/drivers/gpio/gpio-74x164.c
1120 ++++ b/drivers/gpio/gpio-74x164.c
1121 +@@ -23,6 +23,7 @@
1122 + struct gen_74x164_chip {
1123 + struct gpio_chip gpio_chip;
1124 + struct mutex lock;
1125 ++ struct gpio_desc *gpiod_oe;
1126 + u32 registers;
1127 + /*
1128 + * Since the registers are chained, every byte sent will make
1129 +@@ -31,8 +32,7 @@ struct gen_74x164_chip {
1130 + * register at the end of the transfer. So, to have a logical
1131 + * numbering, store the bytes in reverse order.
1132 + */
1133 +- u8 buffer[0];
1134 +- struct gpio_desc *gpiod_oe;
1135 ++ u8 buffer[];
1136 + };
1137 +
1138 + static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
1139 +diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
1140 +index f75d8443ecaf..e4b3d7db68c9 100644
1141 +--- a/drivers/gpio/gpio-davinci.c
1142 ++++ b/drivers/gpio/gpio-davinci.c
1143 +@@ -383,7 +383,7 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger)
1144 + u32 mask;
1145 +
1146 + d = (struct davinci_gpio_controller *)irq_data_get_irq_handler_data(data);
1147 +- g = (struct davinci_gpio_regs __iomem *)d->regs;
1148 ++ g = (struct davinci_gpio_regs __iomem *)d->regs[0];
1149 + mask = __gpio_mask(data->irq - d->base_irq);
1150 +
1151 + if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
1152 +diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
1153 +index 2a4d163ac76f..79ce877bf45f 100644
1154 +--- a/drivers/gpu/drm/armada/armada_crtc.c
1155 ++++ b/drivers/gpu/drm/armada/armada_crtc.c
1156 +@@ -1225,17 +1225,13 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1157 +
1158 + ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
1159 + dcrtc);
1160 +- if (ret < 0) {
1161 +- kfree(dcrtc);
1162 +- return ret;
1163 +- }
1164 ++ if (ret < 0)
1165 ++ goto err_crtc;
1166 +
1167 + if (dcrtc->variant->init) {
1168 + ret = dcrtc->variant->init(dcrtc, dev);
1169 +- if (ret) {
1170 +- kfree(dcrtc);
1171 +- return ret;
1172 +- }
1173 ++ if (ret)
1174 ++ goto err_crtc;
1175 + }
1176 +
1177 + /* Ensure AXI pipeline is enabled */
1178 +@@ -1246,13 +1242,15 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1179 + dcrtc->crtc.port = port;
1180 +
1181 + primary = kzalloc(sizeof(*primary), GFP_KERNEL);
1182 +- if (!primary)
1183 +- return -ENOMEM;
1184 ++ if (!primary) {
1185 ++ ret = -ENOMEM;
1186 ++ goto err_crtc;
1187 ++ }
1188 +
1189 + ret = armada_drm_plane_init(primary);
1190 + if (ret) {
1191 + kfree(primary);
1192 +- return ret;
1193 ++ goto err_crtc;
1194 + }
1195 +
1196 + ret = drm_universal_plane_init(drm, &primary->base, 0,
1197 +@@ -1263,7 +1261,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1198 + DRM_PLANE_TYPE_PRIMARY, NULL);
1199 + if (ret) {
1200 + kfree(primary);
1201 +- return ret;
1202 ++ goto err_crtc;
1203 + }
1204 +
1205 + ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
1206 +@@ -1282,6 +1280,9 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1207 +
1208 + err_crtc_init:
1209 + primary->base.funcs->destroy(&primary->base);
1210 ++err_crtc:
1211 ++ kfree(dcrtc);
1212 ++
1213 + return ret;
1214 + }
1215 +
1216 +diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
1217 +index af4e906c630d..56b9f9b1c3ae 100644
1218 +--- a/drivers/gpu/drm/drm_modeset_lock.c
1219 ++++ b/drivers/gpu/drm/drm_modeset_lock.c
1220 +@@ -88,7 +88,7 @@ void drm_modeset_lock_all(struct drm_device *dev)
1221 + struct drm_modeset_acquire_ctx *ctx;
1222 + int ret;
1223 +
1224 +- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1225 ++ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
1226 + if (WARN_ON(!ctx))
1227 + return;
1228 +
1229 +diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
1230 +index 10ae9681f02d..589905aab185 100644
1231 +--- a/drivers/gpu/drm/i915/intel_drv.h
1232 ++++ b/drivers/gpu/drm/i915/intel_drv.h
1233 +@@ -1708,7 +1708,7 @@ extern struct drm_display_mode *intel_find_panel_downclock(
1234 + int intel_backlight_device_register(struct intel_connector *connector);
1235 + void intel_backlight_device_unregister(struct intel_connector *connector);
1236 + #else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
1237 +-static int intel_backlight_device_register(struct intel_connector *connector)
1238 ++static inline int intel_backlight_device_register(struct intel_connector *connector)
1239 + {
1240 + return 0;
1241 + }
1242 +diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
1243 +index c50debb1986f..d31b3d0c9955 100644
1244 +--- a/drivers/gpu/drm/vc4/vc4_gem.c
1245 ++++ b/drivers/gpu/drm/vc4/vc4_gem.c
1246 +@@ -829,8 +829,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
1247 + /* If we got force-completed because of GPU reset rather than
1248 + * through our IRQ handler, signal the fence now.
1249 + */
1250 +- if (exec->fence)
1251 ++ if (exec->fence) {
1252 + dma_fence_signal(exec->fence);
1253 ++ dma_fence_put(exec->fence);
1254 ++ }
1255 +
1256 + if (exec->bo) {
1257 + for (i = 0; i < exec->bo_count; i++)
1258 +diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
1259 +index 521addec831e..3dd62d75f531 100644
1260 +--- a/drivers/gpu/drm/vc4/vc4_irq.c
1261 ++++ b/drivers/gpu/drm/vc4/vc4_irq.c
1262 +@@ -139,6 +139,7 @@ vc4_irq_finish_render_job(struct drm_device *dev)
1263 + list_move_tail(&exec->head, &vc4->job_done_list);
1264 + if (exec->fence) {
1265 + dma_fence_signal_locked(exec->fence);
1266 ++ dma_fence_put(exec->fence);
1267 + exec->fence = NULL;
1268 + }
1269 + vc4_submit_next_render_job(dev);
1270 +diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
1271 +index a47428b4d31b..e565fd4fc414 100644
1272 +--- a/drivers/iio/industrialio-core.c
1273 ++++ b/drivers/iio/industrialio-core.c
1274 +@@ -631,7 +631,7 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
1275 + * iio_format_value() - Formats a IIO value into its string representation
1276 + * @buf: The buffer to which the formatted value gets written
1277 + * which is assumed to be big enough (i.e. PAGE_SIZE).
1278 +- * @type: One of the IIO_VAL_... constants. This decides how the val
1279 ++ * @type: One of the IIO_VAL_* constants. This decides how the val
1280 + * and val2 parameters are formatted.
1281 + * @size: Number of IIO value entries contained in vals
1282 + * @vals: Pointer to the values, exact meaning depends on the
1283 +@@ -639,7 +639,7 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
1284 + *
1285 + * Return: 0 by default, a negative number on failure or the
1286 + * total number of characters written for a type that belongs
1287 +- * to the IIO_VAL_... constant.
1288 ++ * to the IIO_VAL_* constant.
1289 + */
1290 + ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
1291 + {
1292 +diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
1293 +index f42b3a1c75ff..dba796c06ba6 100644
1294 +--- a/drivers/iio/proximity/sx9500.c
1295 ++++ b/drivers/iio/proximity/sx9500.c
1296 +@@ -871,6 +871,7 @@ static int sx9500_init_device(struct iio_dev *indio_dev)
1297 + static void sx9500_gpio_probe(struct i2c_client *client,
1298 + struct sx9500_data *data)
1299 + {
1300 ++ struct gpio_desc *gpiod_int;
1301 + struct device *dev;
1302 +
1303 + if (!client)
1304 +@@ -878,6 +879,14 @@ static void sx9500_gpio_probe(struct i2c_client *client,
1305 +
1306 + dev = &client->dev;
1307 +
1308 ++ if (client->irq <= 0) {
1309 ++ gpiod_int = devm_gpiod_get(dev, SX9500_GPIO_INT, GPIOD_IN);
1310 ++ if (IS_ERR(gpiod_int))
1311 ++ dev_err(dev, "gpio get irq failed\n");
1312 ++ else
1313 ++ client->irq = gpiod_to_irq(gpiod_int);
1314 ++ }
1315 ++
1316 + data->gpiod_rst = devm_gpiod_get(dev, SX9500_GPIO_RESET, GPIOD_OUT_HIGH);
1317 + if (IS_ERR(data->gpiod_rst)) {
1318 + dev_warn(dev, "gpio get reset pin failed\n");
1319 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
1320 +index fa79c7076ccd..e457dface2d2 100644
1321 +--- a/drivers/infiniband/core/cma.c
1322 ++++ b/drivers/infiniband/core/cma.c
1323 +@@ -801,6 +801,7 @@ struct rdma_cm_id *rdma_create_id(struct net *net,
1324 + INIT_LIST_HEAD(&id_priv->mc_list);
1325 + get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
1326 + id_priv->id.route.addr.dev_addr.net = get_net(net);
1327 ++ id_priv->seq_num &= 0x00ffffff;
1328 +
1329 + return &id_priv->id;
1330 + }
1331 +@@ -4461,7 +4462,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
1332 + return skb->len;
1333 + }
1334 +
1335 +-static const struct rdma_nl_cbs cma_cb_table[] = {
1336 ++static const struct rdma_nl_cbs cma_cb_table[RDMA_NL_RDMA_CM_NUM_OPS] = {
1337 + [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats},
1338 + };
1339 +
1340 +diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
1341 +index ebfdb5503701..b4b28ff8b7dc 100644
1342 +--- a/drivers/infiniband/core/device.c
1343 ++++ b/drivers/infiniband/core/device.c
1344 +@@ -1154,7 +1154,7 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
1345 + }
1346 + EXPORT_SYMBOL(ib_get_net_dev_by_params);
1347 +
1348 +-static const struct rdma_nl_cbs ibnl_ls_cb_table[] = {
1349 ++static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
1350 + [RDMA_NL_LS_OP_RESOLVE] = {
1351 + .doit = ib_nl_handle_resolve_resp,
1352 + .flags = RDMA_NL_ADMIN_PERM,
1353 +@@ -1261,5 +1261,5 @@ static void __exit ib_core_cleanup(void)
1354 +
1355 + MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);
1356 +
1357 +-module_init(ib_core_init);
1358 ++subsys_initcall(ib_core_init);
1359 + module_exit(ib_core_cleanup);
1360 +diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
1361 +index fcf42f6bb82a..30d7277249b8 100644
1362 +--- a/drivers/infiniband/core/iwcm.c
1363 ++++ b/drivers/infiniband/core/iwcm.c
1364 +@@ -80,7 +80,7 @@ const char *__attribute_const__ iwcm_reject_msg(int reason)
1365 + }
1366 + EXPORT_SYMBOL(iwcm_reject_msg);
1367 +
1368 +-static struct rdma_nl_cbs iwcm_nl_cb_table[] = {
1369 ++static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = {
1370 + [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
1371 + [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
1372 + [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
1373 +diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
1374 +index 2fae850a3eff..9a05245a1acf 100644
1375 +--- a/drivers/infiniband/core/nldev.c
1376 ++++ b/drivers/infiniband/core/nldev.c
1377 +@@ -303,7 +303,7 @@ out: cb->args[0] = idx;
1378 + return skb->len;
1379 + }
1380 +
1381 +-static const struct rdma_nl_cbs nldev_cb_table[] = {
1382 ++static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
1383 + [RDMA_NLDEV_CMD_GET] = {
1384 + .doit = nldev_get_doit,
1385 + .dump = nldev_get_dumpit,
1386 +diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
1387 +index e1cf0c08ca6f..84c6a6ff4a67 100644
1388 +--- a/drivers/infiniband/hw/hfi1/rc.c
1389 ++++ b/drivers/infiniband/hw/hfi1/rc.c
1390 +@@ -815,7 +815,7 @@ static inline void hfi1_make_rc_ack_16B(struct rvt_qp *qp,
1391 + struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1392 + struct hfi1_16b_header *hdr = &opa_hdr->opah;
1393 + struct ib_other_headers *ohdr;
1394 +- u32 bth0, bth1;
1395 ++ u32 bth0, bth1 = 0;
1396 + u16 len, pkey;
1397 + u8 becn = !!is_fecn;
1398 + u8 l4 = OPA_16B_L4_IB_LOCAL;
1399 +diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
1400 +index 5230dd3c938c..d6a1a308c6a0 100644
1401 +--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
1402 ++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
1403 +@@ -1043,7 +1043,7 @@ static int i40iw_parse_mpa(struct i40iw_cm_node *cm_node, u8 *buffer, u32 *type,
1404 + * i40iw_schedule_cm_timer
1405 + * @@cm_node: connection's node
1406 + * @sqbuf: buffer to send
1407 +- * @type: if it es send ot close
1408 ++ * @type: if it is send or close
1409 + * @send_retrans: if rexmits to be done
1410 + * @close_when_complete: is cm_node to be removed
1411 + *
1412 +@@ -1067,7 +1067,8 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
1413 +
1414 + new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
1415 + if (!new_send) {
1416 +- i40iw_free_sqbuf(vsi, (void *)sqbuf);
1417 ++ if (type != I40IW_TIMER_TYPE_CLOSE)
1418 ++ i40iw_free_sqbuf(vsi, (void *)sqbuf);
1419 + return -ENOMEM;
1420 + }
1421 + new_send->retrycount = I40IW_DEFAULT_RETRYS;
1422 +@@ -1082,7 +1083,6 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
1423 + new_send->timetosend += (HZ / 10);
1424 + if (cm_node->close_entry) {
1425 + kfree(new_send);
1426 +- i40iw_free_sqbuf(vsi, (void *)sqbuf);
1427 + i40iw_pr_err("already close entry\n");
1428 + return -EINVAL;
1429 + }
1430 +diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
1431 +index 42ca5346777d..d86f3e670804 100644
1432 +--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
1433 ++++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
1434 +@@ -506,7 +506,7 @@ static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
1435 +
1436 + ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
1437 + &cqp->sdbuf,
1438 +- 128,
1439 ++ I40IW_UPDATE_SD_BUF_SIZE * cqp->sq_size,
1440 + I40IW_SD_BUF_ALIGNMENT);
1441 +
1442 + if (ret_code)
1443 +@@ -589,14 +589,15 @@ void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
1444 + }
1445 +
1446 + /**
1447 +- * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
1448 +- * @cqp: struct for cqp hw
1449 +- * @wqe_idx: we index of cqp ring
1450 ++ * i40iw_sc_cqp_get_next_send_wqe_idx - get next WQE on CQP SQ and pass back the index
1451 ++ * @cqp: pointer to CQP structure
1452 ++ * @scratch: private data for CQP WQE
1453 ++ * @wqe_idx: WQE index for next WQE on CQP SQ
1454 + */
1455 +-u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
1456 ++static u64 *i40iw_sc_cqp_get_next_send_wqe_idx(struct i40iw_sc_cqp *cqp,
1457 ++ u64 scratch, u32 *wqe_idx)
1458 + {
1459 + u64 *wqe = NULL;
1460 +- u32 wqe_idx;
1461 + enum i40iw_status_code ret_code;
1462 +
1463 + if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
1464 +@@ -609,20 +610,32 @@ u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
1465 + cqp->sq_ring.size);
1466 + return NULL;
1467 + }
1468 +- I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
1469 ++ I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
1470 + cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
1471 + if (ret_code)
1472 + return NULL;
1473 +- if (!wqe_idx)
1474 ++ if (!*wqe_idx)
1475 + cqp->polarity = !cqp->polarity;
1476 +
1477 +- wqe = cqp->sq_base[wqe_idx].elem;
1478 +- cqp->scratch_array[wqe_idx] = scratch;
1479 ++ wqe = cqp->sq_base[*wqe_idx].elem;
1480 ++ cqp->scratch_array[*wqe_idx] = scratch;
1481 + I40IW_CQP_INIT_WQE(wqe);
1482 +
1483 + return wqe;
1484 + }
1485 +
1486 ++/**
1487 ++ * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
1488 ++ * @cqp: struct for cqp hw
1489 ++ * @scratch: private data for CQP WQE
1490 ++ */
1491 ++u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
1492 ++{
1493 ++ u32 wqe_idx;
1494 ++
1495 ++ return i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
1496 ++}
1497 ++
1498 + /**
1499 + * i40iw_sc_cqp_destroy - destroy cqp during close
1500 + * @cqp: struct for cqp hw
1501 +@@ -3534,8 +3547,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
1502 + u64 *wqe;
1503 + int mem_entries, wqe_entries;
1504 + struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;
1505 ++ u64 offset;
1506 ++ u32 wqe_idx;
1507 +
1508 +- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1509 ++ wqe = i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
1510 + if (!wqe)
1511 + return I40IW_ERR_RING_FULL;
1512 +
1513 +@@ -3548,8 +3563,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
1514 + LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);
1515 +
1516 + if (mem_entries) {
1517 +- memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
1518 +- data = sdbuf->pa;
1519 ++ offset = wqe_idx * I40IW_UPDATE_SD_BUF_SIZE;
1520 ++ memcpy((char *)sdbuf->va + offset, &info->entry[3],
1521 ++ mem_entries << 4);
1522 ++ data = (u64)sdbuf->pa + offset;
1523 + } else {
1524 + data = 0;
1525 + }
1526 +diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
1527 +index 2ebaadbed379..24eabcad5e40 100644
1528 +--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
1529 ++++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
1530 +@@ -1109,7 +1109,7 @@
1531 + #define I40IWQPC_VLANTAG_MASK (0xffffULL << I40IWQPC_VLANTAG_SHIFT)
1532 +
1533 + #define I40IWQPC_ARPIDX_SHIFT 48
1534 +-#define I40IWQPC_ARPIDX_MASK (0xfffULL << I40IWQPC_ARPIDX_SHIFT)
1535 ++#define I40IWQPC_ARPIDX_MASK (0xffffULL << I40IWQPC_ARPIDX_SHIFT)
1536 +
1537 + #define I40IWQPC_FLOWLABEL_SHIFT 0
1538 + #define I40IWQPC_FLOWLABEL_MASK (0xfffffUL << I40IWQPC_FLOWLABEL_SHIFT)
1539 +@@ -1516,7 +1516,7 @@ enum i40iw_alignment {
1540 + I40IW_AEQ_ALIGNMENT = 0x100,
1541 + I40IW_CEQ_ALIGNMENT = 0x100,
1542 + I40IW_CQ0_ALIGNMENT = 0x100,
1543 +- I40IW_SD_BUF_ALIGNMENT = 0x100
1544 ++ I40IW_SD_BUF_ALIGNMENT = 0x80
1545 + };
1546 +
1547 + #define I40IW_WQE_SIZE_64 64
1548 +@@ -1524,6 +1524,8 @@ enum i40iw_alignment {
1549 + #define I40IW_QP_WQE_MIN_SIZE 32
1550 + #define I40IW_QP_WQE_MAX_SIZE 128
1551 +
1552 ++#define I40IW_UPDATE_SD_BUF_SIZE 128
1553 ++
1554 + #define I40IW_CQE_QTYPE_RQ 0
1555 + #define I40IW_CQE_QTYPE_SQ 1
1556 +
1557 +diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
1558 +index fcfa08747899..9354fec8efe7 100644
1559 +--- a/drivers/infiniband/hw/mlx4/qp.c
1560 ++++ b/drivers/infiniband/hw/mlx4/qp.c
1561 +@@ -666,6 +666,19 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
1562 + return (-EOPNOTSUPP);
1563 + }
1564 +
1565 ++ if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 |
1566 ++ MLX4_IB_RX_HASH_DST_IPV4 |
1567 ++ MLX4_IB_RX_HASH_SRC_IPV6 |
1568 ++ MLX4_IB_RX_HASH_DST_IPV6 |
1569 ++ MLX4_IB_RX_HASH_SRC_PORT_TCP |
1570 ++ MLX4_IB_RX_HASH_DST_PORT_TCP |
1571 ++ MLX4_IB_RX_HASH_SRC_PORT_UDP |
1572 ++ MLX4_IB_RX_HASH_DST_PORT_UDP)) {
1573 ++ pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
1574 ++ ucmd->rx_hash_fields_mask);
1575 ++ return (-EOPNOTSUPP);
1576 ++ }
1577 ++
1578 + if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) &&
1579 + (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
1580 + rss_ctx->flags = MLX4_RSS_IPV4;
1581 +@@ -691,11 +704,11 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
1582 + return (-EOPNOTSUPP);
1583 + }
1584 +
1585 +- if (rss_ctx->flags & MLX4_RSS_IPV4) {
1586 ++ if (rss_ctx->flags & MLX4_RSS_IPV4)
1587 + rss_ctx->flags |= MLX4_RSS_UDP_IPV4;
1588 +- } else if (rss_ctx->flags & MLX4_RSS_IPV6) {
1589 ++ if (rss_ctx->flags & MLX4_RSS_IPV6)
1590 + rss_ctx->flags |= MLX4_RSS_UDP_IPV6;
1591 +- } else {
1592 ++ if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
1593 + pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n");
1594 + return (-EOPNOTSUPP);
1595 + }
1596 +@@ -707,15 +720,14 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
1597 +
1598 + if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) &&
1599 + (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
1600 +- if (rss_ctx->flags & MLX4_RSS_IPV4) {
1601 ++ if (rss_ctx->flags & MLX4_RSS_IPV4)
1602 + rss_ctx->flags |= MLX4_RSS_TCP_IPV4;
1603 +- } else if (rss_ctx->flags & MLX4_RSS_IPV6) {
1604 ++ if (rss_ctx->flags & MLX4_RSS_IPV6)
1605 + rss_ctx->flags |= MLX4_RSS_TCP_IPV6;
1606 +- } else {
1607 ++ if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
1608 + pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n");
1609 + return (-EOPNOTSUPP);
1610 + }
1611 +-
1612 + } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) ||
1613 + (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
1614 + pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n");
1615 +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
1616 +index 5a8216b50e38..788fc0800465 100644
1617 +--- a/drivers/md/raid1.c
1618 ++++ b/drivers/md/raid1.c
1619 +@@ -810,11 +810,15 @@ static void flush_pending_writes(struct r1conf *conf)
1620 + spin_lock_irq(&conf->device_lock);
1621 +
1622 + if (conf->pending_bio_list.head) {
1623 ++ struct blk_plug plug;
1624 + struct bio *bio;
1625 ++
1626 + bio = bio_list_get(&conf->pending_bio_list);
1627 + conf->pending_count = 0;
1628 + spin_unlock_irq(&conf->device_lock);
1629 ++ blk_start_plug(&plug);
1630 + flush_bio_list(conf, bio);
1631 ++ blk_finish_plug(&plug);
1632 + } else
1633 + spin_unlock_irq(&conf->device_lock);
1634 + }
1635 +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
1636 +index 374df5796649..0d18d3b95201 100644
1637 +--- a/drivers/md/raid10.c
1638 ++++ b/drivers/md/raid10.c
1639 +@@ -890,10 +890,13 @@ static void flush_pending_writes(struct r10conf *conf)
1640 + spin_lock_irq(&conf->device_lock);
1641 +
1642 + if (conf->pending_bio_list.head) {
1643 ++ struct blk_plug plug;
1644 + struct bio *bio;
1645 ++
1646 + bio = bio_list_get(&conf->pending_bio_list);
1647 + conf->pending_count = 0;
1648 + spin_unlock_irq(&conf->device_lock);
1649 ++ blk_start_plug(&plug);
1650 + /* flush any pending bitmap writes to disk
1651 + * before proceeding w/ I/O */
1652 + bitmap_unplug(conf->mddev->bitmap);
1653 +@@ -914,6 +917,7 @@ static void flush_pending_writes(struct r10conf *conf)
1654 + generic_make_request(bio);
1655 + bio = next;
1656 + }
1657 ++ blk_finish_plug(&plug);
1658 + } else
1659 + spin_unlock_irq(&conf->device_lock);
1660 + }
1661 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1662 +index c406f16f5295..7ec822ced80b 100644
1663 +--- a/drivers/md/raid5.c
1664 ++++ b/drivers/md/raid5.c
1665 +@@ -2678,13 +2678,13 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
1666 + pr_debug("raid456: error called\n");
1667 +
1668 + spin_lock_irqsave(&conf->device_lock, flags);
1669 ++ set_bit(Faulty, &rdev->flags);
1670 + clear_bit(In_sync, &rdev->flags);
1671 + mddev->degraded = raid5_calc_degraded(conf);
1672 + spin_unlock_irqrestore(&conf->device_lock, flags);
1673 + set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1674 +
1675 + set_bit(Blocked, &rdev->flags);
1676 +- set_bit(Faulty, &rdev->flags);
1677 + set_mask_bits(&mddev->sb_flags, 0,
1678 + BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1679 + pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
1680 +diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
1681 +index 94153895fcd4..3bdc34deae7b 100644
1682 +--- a/drivers/media/i2c/Kconfig
1683 ++++ b/drivers/media/i2c/Kconfig
1684 +@@ -660,6 +660,7 @@ config VIDEO_OV13858
1685 + tristate "OmniVision OV13858 sensor support"
1686 + depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
1687 + depends on MEDIA_CAMERA_SUPPORT
1688 ++ select V4L2_FWNODE
1689 + ---help---
1690 + This is a Video4Linux2 sensor-level driver for the OmniVision
1691 + OV13858 camera.
1692 +diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
1693 +index 9fd254a8e20d..13c10b5e2b45 100644
1694 +--- a/drivers/media/i2c/s5k6aa.c
1695 ++++ b/drivers/media/i2c/s5k6aa.c
1696 +@@ -421,6 +421,7 @@ static int s5k6aa_set_ahb_address(struct i2c_client *client)
1697 +
1698 + /**
1699 + * s5k6aa_configure_pixel_clock - apply ISP main clock/PLL configuration
1700 ++ * @s5k6aa: pointer to &struct s5k6aa describing the device
1701 + *
1702 + * Configure the internal ISP PLL for the required output frequency.
1703 + * Locking: called with s5k6aa.lock mutex held.
1704 +@@ -669,6 +670,7 @@ static int s5k6aa_set_input_params(struct s5k6aa *s5k6aa)
1705 +
1706 + /**
1707 + * s5k6aa_configure_video_bus - configure the video output interface
1708 ++ * @s5k6aa: pointer to &struct s5k6aa describing the device
1709 + * @bus_type: video bus type: parallel or MIPI-CSI
1710 + * @nlanes: number of MIPI lanes to be used (MIPI-CSI only)
1711 + *
1712 +@@ -724,6 +726,8 @@ static int s5k6aa_new_config_sync(struct i2c_client *client, int timeout,
1713 +
1714 + /**
1715 + * s5k6aa_set_prev_config - write user preview register set
1716 ++ * @s5k6aa: pointer to &struct s5k6aa describing the device
1717 ++ * @preset: s5kaa preset to be applied
1718 + *
1719 + * Configure output resolution and color fromat, pixel clock
1720 + * frequency range, device frame rate type and frame period range.
1721 +@@ -777,6 +781,7 @@ static int s5k6aa_set_prev_config(struct s5k6aa *s5k6aa,
1722 +
1723 + /**
1724 + * s5k6aa_initialize_isp - basic ISP MCU initialization
1725 ++ * @sd: pointer to V4L2 sub-device descriptor
1726 + *
1727 + * Configure AHB addresses for registers read/write; configure PLLs for
1728 + * required output pixel clock. The ISP power supply needs to be already
1729 +diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
1730 +index ad5b25b89699..44975061b953 100644
1731 +--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
1732 ++++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
1733 +@@ -3642,6 +3642,12 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
1734 + hdw);
1735 + hdw->ctl_write_urb->actual_length = 0;
1736 + hdw->ctl_write_pend_flag = !0;
1737 ++ if (usb_urb_ep_type_check(hdw->ctl_write_urb)) {
1738 ++ pvr2_trace(
1739 ++ PVR2_TRACE_ERROR_LEGS,
1740 ++ "Invalid write control endpoint");
1741 ++ return -EINVAL;
1742 ++ }
1743 + status = usb_submit_urb(hdw->ctl_write_urb,GFP_KERNEL);
1744 + if (status < 0) {
1745 + pvr2_trace(PVR2_TRACE_ERROR_LEGS,
1746 +@@ -3666,6 +3672,12 @@ status);
1747 + hdw);
1748 + hdw->ctl_read_urb->actual_length = 0;
1749 + hdw->ctl_read_pend_flag = !0;
1750 ++ if (usb_urb_ep_type_check(hdw->ctl_read_urb)) {
1751 ++ pvr2_trace(
1752 ++ PVR2_TRACE_ERROR_LEGS,
1753 ++ "Invalid read control endpoint");
1754 ++ return -EINVAL;
1755 ++ }
1756 + status = usb_submit_urb(hdw->ctl_read_urb,GFP_KERNEL);
1757 + if (status < 0) {
1758 + pvr2_trace(PVR2_TRACE_ERROR_LEGS,
1759 +diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
1760 +index 0ccccbaf530d..e4b10b2d1a08 100644
1761 +--- a/drivers/misc/mei/hw-me-regs.h
1762 ++++ b/drivers/misc/mei/hw-me-regs.h
1763 +@@ -132,6 +132,11 @@
1764 + #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
1765 + #define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
1766 +
1767 ++#define MEI_DEV_ID_CNP_LP 0x9DE0 /* Cannon Point LP */
1768 ++#define MEI_DEV_ID_CNP_LP_4 0x9DE4 /* Cannon Point LP 4 (iTouch) */
1769 ++#define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */
1770 ++#define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */
1771 ++
1772 + /*
1773 + * MEI HW Section
1774 + */
1775 +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
1776 +index d46cb1f0868f..c77e08cbbfd1 100644
1777 +--- a/drivers/misc/mei/pci-me.c
1778 ++++ b/drivers/misc/mei/pci-me.c
1779 +@@ -98,6 +98,11 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
1780 + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
1781 + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
1782 +
1783 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH8_CFG)},
1784 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_4, MEI_ME_PCH8_CFG)},
1785 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH8_CFG)},
1786 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
1787 ++
1788 + /* required last entry */
1789 + {0, }
1790 + };
1791 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
1792 +index d74c7335c512..eebda5ec9676 100644
1793 +--- a/drivers/net/dsa/mv88e6xxx/chip.c
1794 ++++ b/drivers/net/dsa/mv88e6xxx/chip.c
1795 +@@ -339,7 +339,7 @@ static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
1796 + u16 mask;
1797 +
1798 + mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &mask);
1799 +- mask |= GENMASK(chip->g1_irq.nirqs, 0);
1800 ++ mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
1801 + mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
1802 +
1803 + free_irq(chip->irq, chip);
1804 +@@ -395,7 +395,7 @@ static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
1805 + return 0;
1806 +
1807 + out_disable:
1808 +- mask |= GENMASK(chip->g1_irq.nirqs, 0);
1809 ++ mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
1810 + mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
1811 +
1812 + out_mapping:
1813 +@@ -2153,6 +2153,19 @@ static const struct of_device_id mv88e6xxx_mdio_external_match[] = {
1814 + { },
1815 + };
1816 +
1817 ++static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
1818 ++
1819 ++{
1820 ++ struct mv88e6xxx_mdio_bus *mdio_bus;
1821 ++ struct mii_bus *bus;
1822 ++
1823 ++ list_for_each_entry(mdio_bus, &chip->mdios, list) {
1824 ++ bus = mdio_bus->bus;
1825 ++
1826 ++ mdiobus_unregister(bus);
1827 ++ }
1828 ++}
1829 ++
1830 + static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
1831 + struct device_node *np)
1832 + {
1833 +@@ -2177,27 +2190,16 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
1834 + match = of_match_node(mv88e6xxx_mdio_external_match, child);
1835 + if (match) {
1836 + err = mv88e6xxx_mdio_register(chip, child, true);
1837 +- if (err)
1838 ++ if (err) {
1839 ++ mv88e6xxx_mdios_unregister(chip);
1840 + return err;
1841 ++ }
1842 + }
1843 + }
1844 +
1845 + return 0;
1846 + }
1847 +
1848 +-static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
1849 +-
1850 +-{
1851 +- struct mv88e6xxx_mdio_bus *mdio_bus;
1852 +- struct mii_bus *bus;
1853 +-
1854 +- list_for_each_entry(mdio_bus, &chip->mdios, list) {
1855 +- bus = mdio_bus->bus;
1856 +-
1857 +- mdiobus_unregister(bus);
1858 +- }
1859 +-}
1860 +-
1861 + static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
1862 + {
1863 + struct mv88e6xxx_chip *chip = ds->priv;
1864 +diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
1865 +index e278e3d96ee0..c6163874e4e7 100644
1866 +--- a/drivers/net/ethernet/arc/emac_rockchip.c
1867 ++++ b/drivers/net/ethernet/arc/emac_rockchip.c
1868 +@@ -220,9 +220,11 @@ static int emac_rockchip_probe(struct platform_device *pdev)
1869 +
1870 + /* RMII TX/RX needs always a rate of 25MHz */
1871 + err = clk_set_rate(priv->macclk, 25000000);
1872 +- if (err)
1873 ++ if (err) {
1874 + dev_err(dev,
1875 + "failed to change mac clock rate (%d)\n", err);
1876 ++ goto out_clk_disable_macclk;
1877 ++ }
1878 + }
1879 +
1880 + err = arc_emac_probe(ndev, interface);
1881 +@@ -232,7 +234,8 @@ static int emac_rockchip_probe(struct platform_device *pdev)
1882 + }
1883 +
1884 + return 0;
1885 +-
1886 ++out_clk_disable_macclk:
1887 ++ clk_disable_unprepare(priv->macclk);
1888 + out_regulator_disable:
1889 + if (priv->regulator)
1890 + regulator_disable(priv->regulator);
1891 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1892 +index aa764c5e3c6b..b66689a6eac0 100644
1893 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1894 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1895 +@@ -8218,8 +8218,9 @@ static void bnxt_shutdown(struct pci_dev *pdev)
1896 + if (netif_running(dev))
1897 + dev_close(dev);
1898 +
1899 ++ bnxt_ulp_shutdown(bp);
1900 ++
1901 + if (system_state == SYSTEM_POWER_OFF) {
1902 +- bnxt_ulp_shutdown(bp);
1903 + bnxt_clear_int_mode(bp);
1904 + pci_wake_from_d3(pdev, bp->wol);
1905 + pci_set_power_state(pdev, PCI_D3hot);
1906 +diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
1907 +index 5be52d89b182..7f837006bb6a 100644
1908 +--- a/drivers/net/ethernet/freescale/gianfar.c
1909 ++++ b/drivers/net/ethernet/freescale/gianfar.c
1910 +@@ -1378,9 +1378,11 @@ static int gfar_probe(struct platform_device *ofdev)
1911 +
1912 + gfar_init_addr_hash_table(priv);
1913 +
1914 +- /* Insert receive time stamps into padding alignment bytes */
1915 ++ /* Insert receive time stamps into padding alignment bytes, and
1916 ++ * plus 2 bytes padding to ensure the cpu alignment.
1917 ++ */
1918 + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1919 +- priv->padding = 8;
1920 ++ priv->padding = 8 + DEFAULT_PADDING;
1921 +
1922 + if (dev->features & NETIF_F_IP_CSUM ||
1923 + priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1924 +@@ -1790,6 +1792,7 @@ static int init_phy(struct net_device *dev)
1925 + GFAR_SUPPORTED_GBIT : 0;
1926 + phy_interface_t interface;
1927 + struct phy_device *phydev;
1928 ++ struct ethtool_eee edata;
1929 +
1930 + priv->oldlink = 0;
1931 + priv->oldspeed = 0;
1932 +@@ -1814,6 +1817,10 @@ static int init_phy(struct net_device *dev)
1933 + /* Add support for flow control, but don't advertise it by default */
1934 + phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1935 +
1936 ++ /* disable EEE autoneg, EEE not supported by eTSEC */
1937 ++ memset(&edata, 0, sizeof(struct ethtool_eee));
1938 ++ phy_ethtool_set_eee(phydev, &edata);
1939 ++
1940 + return 0;
1941 + }
1942 +
1943 +diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
1944 +index 06f3fe429d82..529be74f609d 100644
1945 +--- a/drivers/net/ethernet/marvell/mvpp2.c
1946 ++++ b/drivers/net/ethernet/marvell/mvpp2.c
1947 +@@ -5399,7 +5399,7 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
1948 + u32 txq_dma;
1949 +
1950 + /* Allocate memory for TX descriptors */
1951 +- aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
1952 ++ aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
1953 + MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
1954 + &aggr_txq->descs_dma, GFP_KERNEL);
1955 + if (!aggr_txq->descs)
1956 +diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
1957 +index 1c43aca8162d..9a7655560629 100644
1958 +--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
1959 ++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
1960 +@@ -84,16 +84,13 @@ nfp_repr_phy_port_get_stats64(struct nfp_port *port,
1961 + {
1962 + u8 __iomem *mem = port->eth_stats;
1963 +
1964 +- /* TX and RX stats are flipped as we are returning the stats as seen
1965 +- * at the switch port corresponding to the phys port.
1966 +- */
1967 +- stats->tx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
1968 +- stats->tx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
1969 +- stats->tx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
1970 ++ stats->tx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK);
1971 ++ stats->tx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS);
1972 ++ stats->tx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS);
1973 +
1974 +- stats->rx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK);
1975 +- stats->rx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS);
1976 +- stats->rx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS);
1977 ++ stats->rx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
1978 ++ stats->rx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
1979 ++ stats->rx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
1980 + }
1981 +
1982 + static void
1983 +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
1984 +index 540c7622dcb1..929fb8d96ec0 100644
1985 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
1986 ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
1987 +@@ -166,12 +166,12 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
1988 +
1989 + if (skb_headroom(skb) < required_headroom) {
1990 + if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
1991 +- return RMNET_MAP_CONSUMED;
1992 ++ goto fail;
1993 + }
1994 +
1995 + map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
1996 + if (!map_header)
1997 +- return RMNET_MAP_CONSUMED;
1998 ++ goto fail;
1999 +
2000 + if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
2001 + if (ep->mux_id == 0xff)
2002 +@@ -183,6 +183,10 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
2003 + skb->protocol = htons(ETH_P_MAP);
2004 +
2005 + return RMNET_MAP_SUCCESS;
2006 ++
2007 ++fail:
2008 ++ kfree_skb(skb);
2009 ++ return RMNET_MAP_CONSUMED;
2010 + }
2011 +
2012 + /* Ingress / Egress Entry Points */
2013 +diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
2014 +index 71ddadbf2368..d7ba2b813eff 100644
2015 +--- a/drivers/net/hippi/rrunner.c
2016 ++++ b/drivers/net/hippi/rrunner.c
2017 +@@ -1381,8 +1381,8 @@ static int rr_close(struct net_device *dev)
2018 + rrpriv->info_dma);
2019 + rrpriv->info = NULL;
2020 +
2021 +- free_irq(pdev->irq, dev);
2022 + spin_unlock_irqrestore(&rrpriv->lock, flags);
2023 ++ free_irq(pdev->irq, dev);
2024 +
2025 + return 0;
2026 + }
2027 +diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
2028 +index 8feb84fd4ca7..c23dea48ad0f 100644
2029 +--- a/drivers/net/ipvlan/ipvlan_core.c
2030 ++++ b/drivers/net/ipvlan/ipvlan_core.c
2031 +@@ -375,6 +375,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
2032 + .flowi4_oif = dev->ifindex,
2033 + .flowi4_tos = RT_TOS(ip4h->tos),
2034 + .flowi4_flags = FLOWI_FLAG_ANYSRC,
2035 ++ .flowi4_mark = skb->mark,
2036 + .daddr = ip4h->daddr,
2037 + .saddr = ip4h->saddr,
2038 + };
2039 +diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
2040 +index cb85307f125b..1b2fe74a44ea 100644
2041 +--- a/drivers/net/phy/phylink.c
2042 ++++ b/drivers/net/phy/phylink.c
2043 +@@ -772,6 +772,7 @@ void phylink_stop(struct phylink *pl)
2044 + sfp_upstream_stop(pl->sfp_bus);
2045 +
2046 + set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
2047 ++ queue_work(system_power_efficient_wq, &pl->resolve);
2048 + flush_work(&pl->resolve);
2049 + }
2050 + EXPORT_SYMBOL_GPL(phylink_stop);
2051 +diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
2052 +index baee371bf767..8bf10aba7452 100644
2053 +--- a/drivers/net/phy/sfp.c
2054 ++++ b/drivers/net/phy/sfp.c
2055 +@@ -358,7 +358,7 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
2056 + * SFP_OPTIONS_LOS_NORMAL are set? For now, we assume
2057 + * the same as SFP_OPTIONS_LOS_NORMAL set.
2058 + */
2059 +- if (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED)
2060 ++ if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED))
2061 + los ^= SFP_F_LOS;
2062 +
2063 + if (los)
2064 +@@ -583,7 +583,8 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
2065 + if (event == SFP_E_TX_FAULT)
2066 + sfp_sm_fault(sfp, true);
2067 + else if (event ==
2068 +- (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ?
2069 ++ (sfp->id.ext.options &
2070 ++ cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) ?
2071 + SFP_E_LOS_HIGH : SFP_E_LOS_LOW))
2072 + sfp_sm_link_up(sfp);
2073 + break;
2074 +@@ -593,7 +594,8 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
2075 + sfp_sm_link_down(sfp);
2076 + sfp_sm_fault(sfp, true);
2077 + } else if (event ==
2078 +- (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ?
2079 ++ (sfp->id.ext.options &
2080 ++ cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) ?
2081 + SFP_E_LOS_LOW : SFP_E_LOS_HIGH)) {
2082 + sfp_sm_link_down(sfp);
2083 + sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
2084 +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
2085 +index 511f8339fa96..7927e28f5336 100644
2086 +--- a/drivers/net/virtio_net.c
2087 ++++ b/drivers/net/virtio_net.c
2088 +@@ -714,7 +714,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
2089 + int num_skb_frags;
2090 +
2091 + buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
2092 +- if (unlikely(!ctx)) {
2093 ++ if (unlikely(!buf)) {
2094 + pr_debug("%s: rx error: %d buffers out of %d missing\n",
2095 + dev->name, num_buf,
2096 + virtio16_to_cpu(vi->vdev,
2097 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2098 +index b3fa8ae80465..eccd25febfe6 100644
2099 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2100 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2101 +@@ -2064,7 +2064,7 @@ static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
2102 + return head_pad;
2103 + }
2104 +
2105 +-/**
2106 ++/*
2107 + * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
2108 + * bus layer usage.
2109 + */
2110 +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
2111 +index a59b54328c07..052e67bce6b3 100644
2112 +--- a/drivers/net/wireless/mac80211_hwsim.c
2113 ++++ b/drivers/net/wireless/mac80211_hwsim.c
2114 +@@ -3119,6 +3119,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
2115 + if (info->attrs[HWSIM_ATTR_CHANNELS])
2116 + param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
2117 +
2118 ++ if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
2119 ++ GENL_SET_ERR_MSG(info, "too many channels specified");
2120 ++ return -EINVAL;
2121 ++ }
2122 ++
2123 + if (info->attrs[HWSIM_ATTR_NO_VIF])
2124 + param.no_vif = true;
2125 +
2126 +diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
2127 +index 4e0b25d09b0c..f09ff4789bb5 100644
2128 +--- a/drivers/pci/host/pcie-rcar.c
2129 ++++ b/drivers/pci/host/pcie-rcar.c
2130 +@@ -1146,12 +1146,12 @@ static int rcar_pcie_probe(struct platform_device *pdev)
2131 + err = rcar_pcie_get_resources(pcie);
2132 + if (err < 0) {
2133 + dev_err(dev, "failed to request resources: %d\n", err);
2134 +- goto err_free_bridge;
2135 ++ goto err_free_resource_list;
2136 + }
2137 +
2138 + err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
2139 + if (err)
2140 +- goto err_free_bridge;
2141 ++ goto err_free_resource_list;
2142 +
2143 + pm_runtime_enable(dev);
2144 + err = pm_runtime_get_sync(dev);
2145 +@@ -1194,9 +1194,9 @@ static int rcar_pcie_probe(struct platform_device *pdev)
2146 + err_pm_disable:
2147 + pm_runtime_disable(dev);
2148 +
2149 +-err_free_bridge:
2150 +- pci_free_host_bridge(bridge);
2151 ++err_free_resource_list:
2152 + pci_free_resource_list(&pcie->resources);
2153 ++ pci_free_host_bridge(bridge);
2154 +
2155 + return err;
2156 + }
2157 +diff --git a/drivers/pinctrl/intel/pinctrl-denverton.c b/drivers/pinctrl/intel/pinctrl-denverton.c
2158 +index 4500880240f2..6572550cfe78 100644
2159 +--- a/drivers/pinctrl/intel/pinctrl-denverton.c
2160 ++++ b/drivers/pinctrl/intel/pinctrl-denverton.c
2161 +@@ -207,7 +207,7 @@ static const unsigned int dnv_uart0_pins[] = { 60, 61, 64, 65 };
2162 + static const unsigned int dnv_uart0_modes[] = { 2, 3, 1, 1 };
2163 + static const unsigned int dnv_uart1_pins[] = { 94, 95, 96, 97 };
2164 + static const unsigned int dnv_uart2_pins[] = { 60, 61, 62, 63 };
2165 +-static const unsigned int dnv_uart2_modes[] = { 1, 1, 2, 2 };
2166 ++static const unsigned int dnv_uart2_modes[] = { 1, 2, 2, 2 };
2167 + static const unsigned int dnv_emmc_pins[] = {
2168 + 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152,
2169 + };
2170 +diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c
2171 +index 4f2a726bbaeb..f5f77432ce6f 100644
2172 +--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c
2173 ++++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c
2174 +@@ -428,7 +428,7 @@ static const struct sunxi_desc_pin a64_pins[] = {
2175 + SUNXI_FUNCTION(0x0, "gpio_in"),
2176 + SUNXI_FUNCTION(0x1, "gpio_out"),
2177 + SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
2178 +- SUNXI_FUNCTION(0x4, "uart0")), /* RX */
2179 ++ SUNXI_FUNCTION(0x3, "uart0")), /* RX */
2180 + SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
2181 + SUNXI_FUNCTION(0x0, "gpio_in"),
2182 + SUNXI_FUNCTION(0x1, "gpio_out"),
2183 +diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
2184 +index bc14e954d7a2..b7ca9a40cc66 100644
2185 +--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
2186 ++++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
2187 +@@ -145,19 +145,19 @@ static const struct sunxi_desc_pin sun9i_a80_pins[] = {
2188 + SUNXI_FUNCTION(0x0, "gpio_in"),
2189 + SUNXI_FUNCTION(0x1, "gpio_out"),
2190 + SUNXI_FUNCTION(0x3, "mcsi"), /* MCLK */
2191 +- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), /* PB_EINT14 */
2192 ++ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 14)), /* PB_EINT14 */
2193 + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 15),
2194 + SUNXI_FUNCTION(0x0, "gpio_in"),
2195 + SUNXI_FUNCTION(0x1, "gpio_out"),
2196 + SUNXI_FUNCTION(0x3, "mcsi"), /* SCK */
2197 + SUNXI_FUNCTION(0x4, "i2c4"), /* SCK */
2198 +- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PB_EINT15 */
2199 ++ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 15)), /* PB_EINT15 */
2200 + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 16),
2201 + SUNXI_FUNCTION(0x0, "gpio_in"),
2202 + SUNXI_FUNCTION(0x1, "gpio_out"),
2203 + SUNXI_FUNCTION(0x3, "mcsi"), /* SDA */
2204 + SUNXI_FUNCTION(0x4, "i2c4"), /* SDA */
2205 +- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PB_EINT16 */
2206 ++ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 16)), /* PB_EINT16 */
2207 +
2208 + /* Hole */
2209 + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
2210 +diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
2211 +index f42159fd2031..7424e53157b0 100644
2212 +--- a/drivers/platform/x86/dell-laptop.c
2213 ++++ b/drivers/platform/x86/dell-laptop.c
2214 +@@ -49,6 +49,7 @@
2215 +
2216 + struct quirk_entry {
2217 + u8 touchpad_led;
2218 ++ u8 kbd_led_levels_off_1;
2219 +
2220 + int needs_kbd_timeouts;
2221 + /*
2222 +@@ -79,6 +80,10 @@ static struct quirk_entry quirk_dell_xps13_9333 = {
2223 + .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
2224 + };
2225 +
2226 ++static struct quirk_entry quirk_dell_latitude_e6410 = {
2227 ++ .kbd_led_levels_off_1 = 1,
2228 ++};
2229 ++
2230 + static struct platform_driver platform_driver = {
2231 + .driver = {
2232 + .name = "dell-laptop",
2233 +@@ -280,6 +285,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
2234 + },
2235 + .driver_data = &quirk_dell_xps13_9333,
2236 + },
2237 ++ {
2238 ++ .callback = dmi_matched,
2239 ++ .ident = "Dell Latitude E6410",
2240 ++ .matches = {
2241 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
2242 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6410"),
2243 ++ },
2244 ++ .driver_data = &quirk_dell_latitude_e6410,
2245 ++ },
2246 + { }
2247 + };
2248 +
2249 +@@ -1200,6 +1214,9 @@ static int kbd_get_info(struct kbd_info *info)
2250 + units = (buffer->output[2] >> 8) & 0xFF;
2251 + info->levels = (buffer->output[2] >> 16) & 0xFF;
2252 +
2253 ++ if (quirks && quirks->kbd_led_levels_off_1 && info->levels)
2254 ++ info->levels--;
2255 ++
2256 + if (units & BIT(0))
2257 + info->seconds = (buffer->output[3] >> 0) & 0xFF;
2258 + if (units & BIT(1))
2259 +diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
2260 +index 8eafcd5fa004..5ede251c52ca 100644
2261 +--- a/drivers/s390/block/dasd_eckd.c
2262 ++++ b/drivers/s390/block/dasd_eckd.c
2263 +@@ -530,10 +530,12 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
2264 + pfxdata->validity.define_extent = 1;
2265 +
2266 + /* private uid is kept up to date, conf_data may be outdated */
2267 +- if (startpriv->uid.type != UA_BASE_DEVICE) {
2268 ++ if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
2269 + pfxdata->validity.verify_base = 1;
2270 +- if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
2271 +- pfxdata->validity.hyper_pav = 1;
2272 ++
2273 ++ if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
2274 ++ pfxdata->validity.verify_base = 1;
2275 ++ pfxdata->validity.hyper_pav = 1;
2276 + }
2277 +
2278 + rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);
2279 +@@ -3414,10 +3416,12 @@ static int prepare_itcw(struct itcw *itcw,
2280 + pfxdata.validity.define_extent = 1;
2281 +
2282 + /* private uid is kept up to date, conf_data may be outdated */
2283 +- if (startpriv->uid.type != UA_BASE_DEVICE) {
2284 ++ if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
2285 ++ pfxdata.validity.verify_base = 1;
2286 ++
2287 ++ if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
2288 + pfxdata.validity.verify_base = 1;
2289 +- if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
2290 +- pfxdata.validity.hyper_pav = 1;
2291 ++ pfxdata.validity.hyper_pav = 1;
2292 + }
2293 +
2294 + switch (cmd) {
2295 +diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
2296 +index b2e8c0dfc79c..1aa46d0763a0 100644
2297 +--- a/drivers/scsi/bfa/bfad_bsg.c
2298 ++++ b/drivers/scsi/bfa/bfad_bsg.c
2299 +@@ -3135,7 +3135,8 @@ bfad_im_bsg_vendor_request(struct bsg_job *job)
2300 + struct fc_bsg_request *bsg_request = job->request;
2301 + struct fc_bsg_reply *bsg_reply = job->reply;
2302 + uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
2303 +- struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
2304 ++ struct Scsi_Host *shost = fc_bsg_to_shost(job);
2305 ++ struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
2306 + struct bfad_s *bfad = im_port->bfad;
2307 + struct request_queue *request_q = job->req->q;
2308 + void *payload_kbuf;
2309 +@@ -3357,7 +3358,8 @@ int
2310 + bfad_im_bsg_els_ct_request(struct bsg_job *job)
2311 + {
2312 + struct bfa_bsg_data *bsg_data;
2313 +- struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
2314 ++ struct Scsi_Host *shost = fc_bsg_to_shost(job);
2315 ++ struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
2316 + struct bfad_s *bfad = im_port->bfad;
2317 + bfa_bsg_fcpt_t *bsg_fcpt;
2318 + struct bfad_fcxp *drv_fcxp;
2319 +diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
2320 +index 24e657a4ec80..c05d6e91e4bd 100644
2321 +--- a/drivers/scsi/bfa/bfad_im.c
2322 ++++ b/drivers/scsi/bfa/bfad_im.c
2323 +@@ -546,6 +546,7 @@ int
2324 + bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
2325 + struct device *dev)
2326 + {
2327 ++ struct bfad_im_port_pointer *im_portp;
2328 + int error = 1;
2329 +
2330 + mutex_lock(&bfad_mutex);
2331 +@@ -564,7 +565,8 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
2332 + goto out_free_idr;
2333 + }
2334 +
2335 +- im_port->shost->hostdata[0] = (unsigned long)im_port;
2336 ++ im_portp = shost_priv(im_port->shost);
2337 ++ im_portp->p = im_port;
2338 + im_port->shost->unique_id = im_port->idr_id;
2339 + im_port->shost->this_id = -1;
2340 + im_port->shost->max_id = MAX_FCP_TARGET;
2341 +@@ -748,7 +750,7 @@ bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
2342 +
2343 + sht->sg_tablesize = bfad->cfg_data.io_max_sge;
2344 +
2345 +- return scsi_host_alloc(sht, sizeof(unsigned long));
2346 ++ return scsi_host_alloc(sht, sizeof(struct bfad_im_port_pointer));
2347 + }
2348 +
2349 + void
2350 +diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
2351 +index c81ec2a77ef5..06ce4ba2b7bc 100644
2352 +--- a/drivers/scsi/bfa/bfad_im.h
2353 ++++ b/drivers/scsi/bfa/bfad_im.h
2354 +@@ -69,6 +69,16 @@ struct bfad_im_port_s {
2355 + struct fc_vport *fc_vport;
2356 + };
2357 +
2358 ++struct bfad_im_port_pointer {
2359 ++ struct bfad_im_port_s *p;
2360 ++};
2361 ++
2362 ++static inline struct bfad_im_port_s *bfad_get_im_port(struct Scsi_Host *host)
2363 ++{
2364 ++ struct bfad_im_port_pointer *im_portp = shost_priv(host);
2365 ++ return im_portp->p;
2366 ++}
2367 ++
2368 + enum bfad_itnim_state {
2369 + ITNIM_STATE_NONE,
2370 + ITNIM_STATE_ONLINE,
2371 +diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
2372 +index 56faeb049b4a..87c08ff37ddd 100644
2373 +--- a/drivers/scsi/lpfc/lpfc_mem.c
2374 ++++ b/drivers/scsi/lpfc/lpfc_mem.c
2375 +@@ -753,12 +753,12 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
2376 + drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
2377 + rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
2378 + if (rc < 0) {
2379 +- (rqbp->rqb_free_buffer)(phba, rqb_entry);
2380 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2381 + "6409 Cannot post to RQ %d: %x %x\n",
2382 + rqb_entry->hrq->queue_id,
2383 + rqb_entry->hrq->host_index,
2384 + rqb_entry->hrq->hba_index);
2385 ++ (rqbp->rqb_free_buffer)(phba, rqb_entry);
2386 + } else {
2387 + list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
2388 + rqbp->buffer_count++;
2389 +diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c
2390 +index 89f4cf507be6..f2d8c3c53ea4 100644
2391 +--- a/drivers/soc/amlogic/meson-gx-socinfo.c
2392 ++++ b/drivers/soc/amlogic/meson-gx-socinfo.c
2393 +@@ -20,8 +20,8 @@
2394 + #define AO_SEC_SOCINFO_OFFSET AO_SEC_SD_CFG8
2395 +
2396 + #define SOCINFO_MAJOR GENMASK(31, 24)
2397 +-#define SOCINFO_MINOR GENMASK(23, 16)
2398 +-#define SOCINFO_PACK GENMASK(15, 8)
2399 ++#define SOCINFO_PACK GENMASK(23, 16)
2400 ++#define SOCINFO_MINOR GENMASK(15, 8)
2401 + #define SOCINFO_MISC GENMASK(7, 0)
2402 +
2403 + static const struct meson_gx_soc_id {
2404 +diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
2405 +index c5cd635c28f3..41410031f8e9 100644
2406 +--- a/drivers/spi/spi-sun4i.c
2407 ++++ b/drivers/spi/spi-sun4i.c
2408 +@@ -525,7 +525,7 @@ static int sun4i_spi_probe(struct platform_device *pdev)
2409 +
2410 + static int sun4i_spi_remove(struct platform_device *pdev)
2411 + {
2412 +- pm_runtime_disable(&pdev->dev);
2413 ++ pm_runtime_force_suspend(&pdev->dev);
2414 +
2415 + return 0;
2416 + }
2417 +diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
2418 +index 372ce9913e6d..e7541dc90473 100644
2419 +--- a/drivers/staging/android/ashmem.c
2420 ++++ b/drivers/staging/android/ashmem.c
2421 +@@ -710,30 +710,32 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
2422 + size_t pgstart, pgend;
2423 + int ret = -EINVAL;
2424 +
2425 ++ mutex_lock(&ashmem_mutex);
2426 ++
2427 + if (unlikely(!asma->file))
2428 +- return -EINVAL;
2429 ++ goto out_unlock;
2430 +
2431 +- if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
2432 +- return -EFAULT;
2433 ++ if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) {
2434 ++ ret = -EFAULT;
2435 ++ goto out_unlock;
2436 ++ }
2437 +
2438 + /* per custom, you can pass zero for len to mean "everything onward" */
2439 + if (!pin.len)
2440 + pin.len = PAGE_ALIGN(asma->size) - pin.offset;
2441 +
2442 + if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
2443 +- return -EINVAL;
2444 ++ goto out_unlock;
2445 +
2446 + if (unlikely(((__u32)-1) - pin.offset < pin.len))
2447 +- return -EINVAL;
2448 ++ goto out_unlock;
2449 +
2450 + if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
2451 +- return -EINVAL;
2452 ++ goto out_unlock;
2453 +
2454 + pgstart = pin.offset / PAGE_SIZE;
2455 + pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
2456 +
2457 +- mutex_lock(&ashmem_mutex);
2458 +-
2459 + switch (cmd) {
2460 + case ASHMEM_PIN:
2461 + ret = ashmem_pin(asma, pgstart, pgend);
2462 +@@ -746,6 +748,7 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
2463 + break;
2464 + }
2465 +
2466 ++out_unlock:
2467 + mutex_unlock(&ashmem_mutex);
2468 +
2469 + return ret;
2470 +diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c
2471 +index d9f8b1424da1..021a956db1a8 100644
2472 +--- a/drivers/staging/android/ion/ion-ioctl.c
2473 ++++ b/drivers/staging/android/ion/ion-ioctl.c
2474 +@@ -71,8 +71,10 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2475 + return -EFAULT;
2476 +
2477 + ret = validate_ioctl_arg(cmd, &data);
2478 +- if (WARN_ON_ONCE(ret))
2479 ++ if (ret) {
2480 ++ pr_warn_once("%s: ioctl validate failed\n", __func__);
2481 + return ret;
2482 ++ }
2483 +
2484 + if (!(dir & _IOC_WRITE))
2485 + memset(&data, 0, sizeof(data));
2486 +diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
2487 +index 4dc5d7a589c2..b6ece18e6a88 100644
2488 +--- a/drivers/staging/android/ion/ion_system_heap.c
2489 ++++ b/drivers/staging/android/ion/ion_system_heap.c
2490 +@@ -371,7 +371,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
2491 + unsigned long i;
2492 + int ret;
2493 +
2494 +- page = alloc_pages(low_order_gfp_flags, order);
2495 ++ page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order);
2496 + if (!page)
2497 + return -ENOMEM;
2498 +
2499 +diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c
2500 +index f72ca485c86f..e266a70a1b32 100644
2501 +--- a/drivers/staging/ccree/ssi_hash.c
2502 ++++ b/drivers/staging/ccree/ssi_hash.c
2503 +@@ -1781,7 +1781,7 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
2504 + struct device *dev = &ctx->drvdata->plat_dev->dev;
2505 + struct ahash_req_ctx *state = ahash_request_ctx(req);
2506 + u32 tmp;
2507 +- int rc;
2508 ++ int rc = 0;
2509 +
2510 + memcpy(&tmp, in, sizeof(u32));
2511 + if (tmp != CC_EXPORT_MAGIC) {
2512 +diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig
2513 +index 504c987447f2..eee1c1b277fa 100644
2514 +--- a/drivers/staging/fsl-mc/bus/Kconfig
2515 ++++ b/drivers/staging/fsl-mc/bus/Kconfig
2516 +@@ -8,7 +8,7 @@
2517 +
2518 + config FSL_MC_BUS
2519 + bool "QorIQ DPAA2 fsl-mc bus driver"
2520 +- depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86 || PPC)))
2521 ++ depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86_LOCAL_APIC || PPC)))
2522 + select GENERIC_MSI_IRQ_DOMAIN
2523 + help
2524 + Driver to enable the bus infrastructure for the QorIQ DPAA2
2525 +diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
2526 +index 6150d2780e22..31a195d1bf05 100644
2527 +--- a/drivers/staging/iio/adc/ad7192.c
2528 ++++ b/drivers/staging/iio/adc/ad7192.c
2529 +@@ -141,6 +141,8 @@
2530 + #define AD7192_GPOCON_P1DAT BIT(1) /* P1 state */
2531 + #define AD7192_GPOCON_P0DAT BIT(0) /* P0 state */
2532 +
2533 ++#define AD7192_EXT_FREQ_MHZ_MIN 2457600
2534 ++#define AD7192_EXT_FREQ_MHZ_MAX 5120000
2535 + #define AD7192_INT_FREQ_MHZ 4915200
2536 +
2537 + /* NOTE:
2538 +@@ -217,6 +219,12 @@ static int ad7192_calibrate_all(struct ad7192_state *st)
2539 + ARRAY_SIZE(ad7192_calib_arr));
2540 + }
2541 +
2542 ++static inline bool ad7192_valid_external_frequency(u32 freq)
2543 ++{
2544 ++ return (freq >= AD7192_EXT_FREQ_MHZ_MIN &&
2545 ++ freq <= AD7192_EXT_FREQ_MHZ_MAX);
2546 ++}
2547 ++
2548 + static int ad7192_setup(struct ad7192_state *st,
2549 + const struct ad7192_platform_data *pdata)
2550 + {
2551 +@@ -242,17 +250,20 @@ static int ad7192_setup(struct ad7192_state *st,
2552 + id);
2553 +
2554 + switch (pdata->clock_source_sel) {
2555 +- case AD7192_CLK_EXT_MCLK1_2:
2556 +- case AD7192_CLK_EXT_MCLK2:
2557 +- st->mclk = AD7192_INT_FREQ_MHZ;
2558 +- break;
2559 + case AD7192_CLK_INT:
2560 + case AD7192_CLK_INT_CO:
2561 +- if (pdata->ext_clk_hz)
2562 +- st->mclk = pdata->ext_clk_hz;
2563 +- else
2564 +- st->mclk = AD7192_INT_FREQ_MHZ;
2565 ++ st->mclk = AD7192_INT_FREQ_MHZ;
2566 + break;
2567 ++ case AD7192_CLK_EXT_MCLK1_2:
2568 ++ case AD7192_CLK_EXT_MCLK2:
2569 ++ if (ad7192_valid_external_frequency(pdata->ext_clk_hz)) {
2570 ++ st->mclk = pdata->ext_clk_hz;
2571 ++ break;
2572 ++ }
2573 ++ dev_err(&st->sd.spi->dev, "Invalid frequency setting %u\n",
2574 ++ pdata->ext_clk_hz);
2575 ++ ret = -EINVAL;
2576 ++ goto out;
2577 + default:
2578 + ret = -EINVAL;
2579 + goto out;
2580 +diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
2581 +index 3d539eeb0e26..6d31001d1825 100644
2582 +--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
2583 ++++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
2584 +@@ -649,8 +649,6 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
2585 + /* Ring buffer functions - here trigger setup related */
2586 + indio_dev->setup_ops = &ad5933_ring_setup_ops;
2587 +
2588 +- indio_dev->modes |= INDIO_BUFFER_HARDWARE;
2589 +-
2590 + return 0;
2591 + }
2592 +
2593 +@@ -763,7 +761,7 @@ static int ad5933_probe(struct i2c_client *client,
2594 + indio_dev->dev.parent = &client->dev;
2595 + indio_dev->info = &ad5933_info;
2596 + indio_dev->name = id->name;
2597 +- indio_dev->modes = INDIO_DIRECT_MODE;
2598 ++ indio_dev->modes = (INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE);
2599 + indio_dev->channels = ad5933_channels;
2600 + indio_dev->num_channels = ARRAY_SIZE(ad5933_channels);
2601 +
2602 +diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
2603 +index 47903d510955..8b800e34407b 100644
2604 +--- a/drivers/usb/core/urb.c
2605 ++++ b/drivers/usb/core/urb.c
2606 +@@ -187,6 +187,31 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb);
2607 +
2608 + /*-------------------------------------------------------------------*/
2609 +
2610 ++static const int pipetypes[4] = {
2611 ++ PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
2612 ++};
2613 ++
2614 ++/**
2615 ++ * usb_urb_ep_type_check - sanity check of endpoint in the given urb
2616 ++ * @urb: urb to be checked
2617 ++ *
2618 ++ * This performs a light-weight sanity check for the endpoint in the
2619 ++ * given urb. It returns 0 if the urb contains a valid endpoint, otherwise
2620 ++ * a negative error code.
2621 ++ */
2622 ++int usb_urb_ep_type_check(const struct urb *urb)
2623 ++{
2624 ++ const struct usb_host_endpoint *ep;
2625 ++
2626 ++ ep = usb_pipe_endpoint(urb->dev, urb->pipe);
2627 ++ if (!ep)
2628 ++ return -EINVAL;
2629 ++ if (usb_pipetype(urb->pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
2630 ++ return -EINVAL;
2631 ++ return 0;
2632 ++}
2633 ++EXPORT_SYMBOL_GPL(usb_urb_ep_type_check);
2634 ++
2635 + /**
2636 + * usb_submit_urb - issue an asynchronous transfer request for an endpoint
2637 + * @urb: pointer to the urb describing the request
2638 +@@ -326,9 +351,6 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb);
2639 + */
2640 + int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
2641 + {
2642 +- static int pipetypes[4] = {
2643 +- PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
2644 +- };
2645 + int xfertype, max;
2646 + struct usb_device *dev;
2647 + struct usb_host_endpoint *ep;
2648 +@@ -444,7 +466,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
2649 + */
2650 +
2651 + /* Check that the pipe's type matches the endpoint's type */
2652 +- if (usb_pipetype(urb->pipe) != pipetypes[xfertype])
2653 ++ if (usb_urb_ep_type_check(urb))
2654 + dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
2655 + usb_pipetype(urb->pipe), pipetypes[xfertype]);
2656 +
2657 +diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
2658 +index a26d1fde0f5e..fbfc09ebd2ec 100644
2659 +--- a/drivers/usb/dwc3/dwc3-of-simple.c
2660 ++++ b/drivers/usb/dwc3/dwc3-of-simple.c
2661 +@@ -57,8 +57,10 @@ static int dwc3_of_simple_clk_init(struct dwc3_of_simple *simple, int count)
2662 +
2663 + clk = of_clk_get(np, i);
2664 + if (IS_ERR(clk)) {
2665 +- while (--i >= 0)
2666 ++ while (--i >= 0) {
2667 ++ clk_disable_unprepare(simple->clks[i]);
2668 + clk_put(simple->clks[i]);
2669 ++ }
2670 + return PTR_ERR(clk);
2671 + }
2672 +
2673 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
2674 +index f064f1549333..97e52c0d1a72 100644
2675 +--- a/drivers/usb/dwc3/gadget.c
2676 ++++ b/drivers/usb/dwc3/gadget.c
2677 +@@ -267,7 +267,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
2678 + {
2679 + const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
2680 + struct dwc3 *dwc = dep->dwc;
2681 +- u32 timeout = 500;
2682 ++ u32 timeout = 1000;
2683 + u32 reg;
2684 +
2685 + int cmd_status = 0;
2686 +diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
2687 +index 720408d39f11..b8915513fc84 100644
2688 +--- a/drivers/usb/usbip/stub_dev.c
2689 ++++ b/drivers/usb/usbip/stub_dev.c
2690 +@@ -87,6 +87,7 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
2691 + goto err;
2692 +
2693 + sdev->ud.tcp_socket = socket;
2694 ++ sdev->ud.sockfd = sockfd;
2695 +
2696 + spin_unlock_irq(&sdev->ud.lock);
2697 +
2698 +@@ -186,6 +187,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
2699 + if (ud->tcp_socket) {
2700 + sockfd_put(ud->tcp_socket);
2701 + ud->tcp_socket = NULL;
2702 ++ ud->sockfd = -1;
2703 + }
2704 +
2705 + /* 3. free used data */
2706 +@@ -280,6 +282,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev)
2707 + sdev->ud.status = SDEV_ST_AVAILABLE;
2708 + spin_lock_init(&sdev->ud.lock);
2709 + sdev->ud.tcp_socket = NULL;
2710 ++ sdev->ud.sockfd = -1;
2711 +
2712 + INIT_LIST_HEAD(&sdev->priv_init);
2713 + INIT_LIST_HEAD(&sdev->priv_tx);
2714 +diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
2715 +index 692cfdef667e..89858aeed647 100644
2716 +--- a/drivers/usb/usbip/vhci_hcd.c
2717 ++++ b/drivers/usb/usbip/vhci_hcd.c
2718 +@@ -998,6 +998,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
2719 + if (vdev->ud.tcp_socket) {
2720 + sockfd_put(vdev->ud.tcp_socket);
2721 + vdev->ud.tcp_socket = NULL;
2722 ++ vdev->ud.sockfd = -1;
2723 + }
2724 + pr_info("release socket\n");
2725 +
2726 +@@ -1044,6 +1045,7 @@ static void vhci_device_reset(struct usbip_device *ud)
2727 + if (ud->tcp_socket) {
2728 + sockfd_put(ud->tcp_socket);
2729 + ud->tcp_socket = NULL;
2730 ++ ud->sockfd = -1;
2731 + }
2732 + ud->status = VDEV_ST_NULL;
2733 +
2734 +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
2735 +index d6dbb28245e6..a827c1a684a9 100644
2736 +--- a/drivers/vhost/vhost.c
2737 ++++ b/drivers/vhost/vhost.c
2738 +@@ -904,7 +904,7 @@ static void vhost_dev_lock_vqs(struct vhost_dev *d)
2739 + {
2740 + int i = 0;
2741 + for (i = 0; i < d->nvqs; ++i)
2742 +- mutex_lock(&d->vqs[i]->mutex);
2743 ++ mutex_lock_nested(&d->vqs[i]->mutex, i);
2744 + }
2745 +
2746 + static void vhost_dev_unlock_vqs(struct vhost_dev *d)
2747 +diff --git a/drivers/video/fbdev/mmp/core.c b/drivers/video/fbdev/mmp/core.c
2748 +index a0f496049db7..3a6bb6561ba0 100644
2749 +--- a/drivers/video/fbdev/mmp/core.c
2750 ++++ b/drivers/video/fbdev/mmp/core.c
2751 +@@ -23,6 +23,7 @@
2752 + #include <linux/slab.h>
2753 + #include <linux/dma-mapping.h>
2754 + #include <linux/export.h>
2755 ++#include <linux/module.h>
2756 + #include <video/mmp_disp.h>
2757 +
2758 + static struct mmp_overlay *path_get_overlay(struct mmp_path *path,
2759 +@@ -249,3 +250,7 @@ void mmp_unregister_path(struct mmp_path *path)
2760 + mutex_unlock(&disp_lock);
2761 + }
2762 + EXPORT_SYMBOL_GPL(mmp_unregister_path);
2763 ++
2764 ++MODULE_AUTHOR("Zhou Zhu <zzhu3@×××××××.com>");
2765 ++MODULE_DESCRIPTION("Marvell MMP display framework");
2766 ++MODULE_LICENSE("GPL");
2767 +diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
2768 +index 4545561954ee..30187b86729b 100644
2769 +--- a/drivers/xen/Kconfig
2770 ++++ b/drivers/xen/Kconfig
2771 +@@ -258,7 +258,7 @@ config XEN_ACPI_HOTPLUG_CPU
2772 +
2773 + config XEN_ACPI_PROCESSOR
2774 + tristate "Xen ACPI processor"
2775 +- depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ
2776 ++ depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
2777 + default m
2778 + help
2779 + This ACPI processor uploads Power Management information to the Xen
2780 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
2781 +index dfdab849037b..167ce43cabe8 100644
2782 +--- a/fs/btrfs/disk-io.c
2783 ++++ b/fs/btrfs/disk-io.c
2784 +@@ -3391,6 +3391,7 @@ static int write_dev_supers(struct btrfs_device *device,
2785 + int errors = 0;
2786 + u32 crc;
2787 + u64 bytenr;
2788 ++ int op_flags;
2789 +
2790 + if (max_mirrors == 0)
2791 + max_mirrors = BTRFS_SUPER_MIRROR_MAX;
2792 +@@ -3433,13 +3434,10 @@ static int write_dev_supers(struct btrfs_device *device,
2793 + * we fua the first super. The others we allow
2794 + * to go down lazy.
2795 + */
2796 +- if (i == 0) {
2797 +- ret = btrfsic_submit_bh(REQ_OP_WRITE,
2798 +- REQ_SYNC | REQ_FUA | REQ_META | REQ_PRIO, bh);
2799 +- } else {
2800 +- ret = btrfsic_submit_bh(REQ_OP_WRITE,
2801 +- REQ_SYNC | REQ_META | REQ_PRIO, bh);
2802 +- }
2803 ++ op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
2804 ++ if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
2805 ++ op_flags |= REQ_FUA;
2806 ++ ret = btrfsic_submit_bh(REQ_OP_WRITE, op_flags, bh);
2807 + if (ret)
2808 + errors++;
2809 + }
2810 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
2811 +index 1ae61f82e54b..59a01a0844c9 100644
2812 +--- a/fs/btrfs/inode.c
2813 ++++ b/fs/btrfs/inode.c
2814 +@@ -3016,6 +3016,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2815 + compress_type = ordered_extent->compress_type;
2816 + if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
2817 + BUG_ON(compress_type);
2818 ++ btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
2819 ++ ordered_extent->len);
2820 + ret = btrfs_mark_extent_written(trans, BTRFS_I(inode),
2821 + ordered_extent->file_offset,
2822 + ordered_extent->file_offset +
2823 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
2824 +index 1f1338d52303..2763f3184ac5 100644
2825 +--- a/fs/btrfs/ioctl.c
2826 ++++ b/fs/btrfs/ioctl.c
2827 +@@ -2221,7 +2221,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
2828 + if (!path)
2829 + return -ENOMEM;
2830 +
2831 +- ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX];
2832 ++ ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1];
2833 +
2834 + key.objectid = tree_id;
2835 + key.type = BTRFS_ROOT_ITEM_KEY;
2836 +diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
2837 +index cba328315929..63a1ca4b9dee 100644
2838 +--- a/fs/notify/dnotify/dnotify.c
2839 ++++ b/fs/notify/dnotify/dnotify.c
2840 +@@ -319,7 +319,11 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
2841 + dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
2842 + spin_lock(&fsn_mark->lock);
2843 + } else {
2844 +- fsnotify_add_mark_locked(new_fsn_mark, inode, NULL, 0);
2845 ++ error = fsnotify_add_mark_locked(new_fsn_mark, inode, NULL, 0);
2846 ++ if (error) {
2847 ++ mutex_unlock(&dnotify_group->mark_mutex);
2848 ++ goto out_err;
2849 ++ }
2850 + spin_lock(&new_fsn_mark->lock);
2851 + fsn_mark = new_fsn_mark;
2852 + dn_mark = new_dn_mark;
2853 +@@ -345,6 +349,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
2854 + */
2855 + if (dn_mark == new_dn_mark)
2856 + destroy = 1;
2857 ++ error = 0;
2858 + goto out;
2859 + }
2860 +
2861 +diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
2862 +index 6866df4f31b5..35d125569e68 100644
2863 +--- a/include/linux/ptr_ring.h
2864 ++++ b/include/linux/ptr_ring.h
2865 +@@ -445,9 +445,14 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
2866 + __PTR_RING_PEEK_CALL_v; \
2867 + })
2868 +
2869 ++/* Not all gfp_t flags (besides GFP_KERNEL) are allowed. See
2870 ++ * documentation for vmalloc for which of them are legal.
2871 ++ */
2872 + static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
2873 + {
2874 +- return kcalloc(size, sizeof(void *), gfp);
2875 ++ if (size * sizeof(void *) > KMALLOC_MAX_SIZE)
2876 ++ return NULL;
2877 ++ return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO);
2878 + }
2879 +
2880 + static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
2881 +@@ -580,7 +585,7 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
2882 + spin_unlock(&(r)->producer_lock);
2883 + spin_unlock_irqrestore(&(r)->consumer_lock, flags);
2884 +
2885 +- kfree(old);
2886 ++ kvfree(old);
2887 +
2888 + return 0;
2889 + }
2890 +@@ -620,7 +625,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
2891 + }
2892 +
2893 + for (i = 0; i < nrings; ++i)
2894 +- kfree(queues[i]);
2895 ++ kvfree(queues[i]);
2896 +
2897 + kfree(queues);
2898 +
2899 +@@ -628,7 +633,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
2900 +
2901 + nomem:
2902 + while (--i >= 0)
2903 +- kfree(queues[i]);
2904 ++ kvfree(queues[i]);
2905 +
2906 + kfree(queues);
2907 +
2908 +@@ -643,7 +648,7 @@ static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
2909 + if (destroy)
2910 + while ((ptr = ptr_ring_consume(r)))
2911 + destroy(ptr);
2912 +- kfree(r->queue);
2913 ++ kvfree(r->queue);
2914 + }
2915 +
2916 + #endif /* _LINUX_PTR_RING_H */
2917 +diff --git a/include/linux/serdev.h b/include/linux/serdev.h
2918 +index e69402d4a8ae..d609e6dc5bad 100644
2919 +--- a/include/linux/serdev.h
2920 ++++ b/include/linux/serdev.h
2921 +@@ -184,7 +184,7 @@ static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
2922 + struct serdev_device *serdev = ctrl->serdev;
2923 +
2924 + if (!serdev || !serdev->ops->receive_buf)
2925 +- return -EINVAL;
2926 ++ return 0;
2927 +
2928 + return serdev->ops->receive_buf(serdev, data, count);
2929 + }
2930 +diff --git a/include/linux/usb.h b/include/linux/usb.h
2931 +index 9c63792a8134..4192a1755ccb 100644
2932 +--- a/include/linux/usb.h
2933 ++++ b/include/linux/usb.h
2934 +@@ -1729,6 +1729,8 @@ static inline int usb_urb_dir_out(struct urb *urb)
2935 + return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_OUT;
2936 + }
2937 +
2938 ++int usb_urb_ep_type_check(const struct urb *urb);
2939 ++
2940 + void *usb_alloc_coherent(struct usb_device *dev, size_t size,
2941 + gfp_t mem_flags, dma_addr_t *dma);
2942 + void usb_free_coherent(struct usb_device *dev, size_t size,
2943 +diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
2944 +index f12fa5245a45..ea0ed58db97e 100644
2945 +--- a/include/net/cfg80211.h
2946 ++++ b/include/net/cfg80211.h
2947 +@@ -815,6 +815,8 @@ struct cfg80211_csa_settings {
2948 + u8 count;
2949 + };
2950 +
2951 ++#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
2952 ++
2953 + /**
2954 + * struct iface_combination_params - input parameters for interface combinations
2955 + *
2956 +diff --git a/include/net/red.h b/include/net/red.h
2957 +index 9a9347710701..9665582c4687 100644
2958 +--- a/include/net/red.h
2959 ++++ b/include/net/red.h
2960 +@@ -168,6 +168,17 @@ static inline void red_set_vars(struct red_vars *v)
2961 + v->qcount = -1;
2962 + }
2963 +
2964 ++static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog)
2965 ++{
2966 ++ if (fls(qth_min) + Wlog > 32)
2967 ++ return false;
2968 ++ if (fls(qth_max) + Wlog > 32)
2969 ++ return false;
2970 ++ if (qth_max < qth_min)
2971 ++ return false;
2972 ++ return true;
2973 ++}
2974 ++
2975 + static inline void red_set_parms(struct red_parms *p,
2976 + u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
2977 + u8 Scell_log, u8 *stab, u32 max_P)
2978 +@@ -179,7 +190,7 @@ static inline void red_set_parms(struct red_parms *p,
2979 + p->qth_max = qth_max << Wlog;
2980 + p->Wlog = Wlog;
2981 + p->Plog = Plog;
2982 +- if (delta < 0)
2983 ++ if (delta <= 0)
2984 + delta = 1;
2985 + p->qth_delta = delta;
2986 + if (!max_P) {
2987 +diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
2988 +index d7d8cba01469..749a42882437 100644
2989 +--- a/include/net/sctp/sctp.h
2990 ++++ b/include/net/sctp/sctp.h
2991 +@@ -444,7 +444,8 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
2992 + if (asoc->user_frag)
2993 + frag = min_t(int, frag, asoc->user_frag);
2994 +
2995 +- frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN));
2996 ++ frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN -
2997 ++ sizeof(struct sctp_data_chunk)));
2998 +
2999 + return frag;
3000 + }
3001 +diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h
3002 +index 758607226bfd..2cd449328aee 100644
3003 +--- a/include/trace/events/clk.h
3004 ++++ b/include/trace/events/clk.h
3005 +@@ -134,12 +134,12 @@ DECLARE_EVENT_CLASS(clk_parent,
3006 +
3007 + TP_STRUCT__entry(
3008 + __string( name, core->name )
3009 +- __string( pname, parent->name )
3010 ++ __string( pname, parent ? parent->name : "none" )
3011 + ),
3012 +
3013 + TP_fast_assign(
3014 + __assign_str(name, core->name);
3015 +- __assign_str(pname, parent->name);
3016 ++ __assign_str(pname, parent ? parent->name : "none");
3017 + ),
3018 +
3019 + TP_printk("%s %s", __get_str(name), __get_str(pname))
3020 +diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
3021 +index 810e94160c12..f7c73ae62b7a 100644
3022 +--- a/include/trace/events/xdp.h
3023 ++++ b/include/trace/events/xdp.h
3024 +@@ -8,6 +8,7 @@
3025 + #include <linux/netdevice.h>
3026 + #include <linux/filter.h>
3027 + #include <linux/tracepoint.h>
3028 ++#include <linux/bpf.h>
3029 +
3030 + #define __XDP_ACT_MAP(FN) \
3031 + FN(ABORTED) \
3032 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
3033 +index f9339c3219bc..b5ae6488b890 100644
3034 +--- a/kernel/bpf/verifier.c
3035 ++++ b/kernel/bpf/verifier.c
3036 +@@ -1865,15 +1865,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3037 +
3038 + dst_reg = &regs[dst];
3039 +
3040 +- if (WARN_ON_ONCE(known && (smin_val != smax_val))) {
3041 +- print_verifier_state(&env->cur_state);
3042 +- verbose("verifier internal error: known but bad sbounds\n");
3043 +- return -EINVAL;
3044 +- }
3045 +- if (WARN_ON_ONCE(known && (umin_val != umax_val))) {
3046 +- print_verifier_state(&env->cur_state);
3047 +- verbose("verifier internal error: known but bad ubounds\n");
3048 +- return -EINVAL;
3049 ++ if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
3050 ++ smin_val > smax_val || umin_val > umax_val) {
3051 ++ /* Taint dst register if offset had invalid bounds derived from
3052 ++ * e.g. dead branches.
3053 ++ */
3054 ++ __mark_reg_unknown(dst_reg);
3055 ++ return 0;
3056 + }
3057 +
3058 + if (BPF_CLASS(insn->code) != BPF_ALU64) {
3059 +@@ -2075,6 +2073,15 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3060 + src_known = tnum_is_const(src_reg.var_off);
3061 + dst_known = tnum_is_const(dst_reg->var_off);
3062 +
3063 ++ if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
3064 ++ smin_val > smax_val || umin_val > umax_val) {
3065 ++ /* Taint dst register if offset had invalid bounds derived from
3066 ++ * e.g. dead branches.
3067 ++ */
3068 ++ __mark_reg_unknown(dst_reg);
3069 ++ return 0;
3070 ++ }
3071 ++
3072 + if (!src_known &&
3073 + opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
3074 + __mark_reg_unknown(dst_reg);
3075 +diff --git a/kernel/events/core.c b/kernel/events/core.c
3076 +index 8c20af8738ac..f42f7c7a8f84 100644
3077 +--- a/kernel/events/core.c
3078 ++++ b/kernel/events/core.c
3079 +@@ -6719,6 +6719,7 @@ static void perf_event_namespaces_output(struct perf_event *event,
3080 + struct perf_namespaces_event *namespaces_event = data;
3081 + struct perf_output_handle handle;
3082 + struct perf_sample_data sample;
3083 ++ u16 header_size = namespaces_event->event_id.header.size;
3084 + int ret;
3085 +
3086 + if (!perf_event_namespaces_match(event))
3087 +@@ -6729,7 +6730,7 @@ static void perf_event_namespaces_output(struct perf_event *event,
3088 + ret = perf_output_begin(&handle, event,
3089 + namespaces_event->event_id.header.size);
3090 + if (ret)
3091 +- return;
3092 ++ goto out;
3093 +
3094 + namespaces_event->event_id.pid = perf_event_pid(event,
3095 + namespaces_event->task);
3096 +@@ -6741,6 +6742,8 @@ static void perf_event_namespaces_output(struct perf_event *event,
3097 + perf_event__output_id_sample(event, &handle, &sample);
3098 +
3099 + perf_output_end(&handle);
3100 ++out:
3101 ++ namespaces_event->event_id.header.size = header_size;
3102 + }
3103 +
3104 + static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
3105 +diff --git a/kernel/kcov.c b/kernel/kcov.c
3106 +index fc6af9e1308b..b11ef6e51f7e 100644
3107 +--- a/kernel/kcov.c
3108 ++++ b/kernel/kcov.c
3109 +@@ -225,9 +225,9 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
3110 + if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
3111 + kcov->area == NULL)
3112 + return -EINVAL;
3113 +- if (kcov->t != NULL)
3114 +- return -EBUSY;
3115 + t = current;
3116 ++ if (kcov->t != NULL || t->kcov != NULL)
3117 ++ return -EBUSY;
3118 + /* Cache in task struct for performance. */
3119 + t->kcov_size = kcov->size;
3120 + t->kcov_area = kcov->area;
3121 +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
3122 +index 4d362d3e4571..2f0f5720b123 100644
3123 +--- a/kernel/locking/lockdep.c
3124 ++++ b/kernel/locking/lockdep.c
3125 +@@ -4777,7 +4777,8 @@ void lockdep_invariant_state(bool force)
3126 + * Verify the former, enforce the latter.
3127 + */
3128 + WARN_ON_ONCE(!force && current->lockdep_depth);
3129 +- invalidate_xhlock(&xhlock(current->xhlock_idx));
3130 ++ if (current->xhlocks)
3131 ++ invalidate_xhlock(&xhlock(current->xhlock_idx));
3132 + }
3133 +
3134 + static int cross_lock(struct lockdep_map *lock)
3135 +diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
3136 +index 45a3928544ce..e73dcab8e9f0 100644
3137 +--- a/kernel/trace/blktrace.c
3138 ++++ b/kernel/trace/blktrace.c
3139 +@@ -66,7 +66,8 @@ static struct tracer_flags blk_tracer_flags = {
3140 + };
3141 +
3142 + /* Global reference count of probes */
3143 +-static atomic_t blk_probes_ref = ATOMIC_INIT(0);
3144 ++static DEFINE_MUTEX(blk_probe_mutex);
3145 ++static int blk_probes_ref;
3146 +
3147 + static void blk_register_tracepoints(void);
3148 + static void blk_unregister_tracepoints(void);
3149 +@@ -329,11 +330,26 @@ static void blk_trace_free(struct blk_trace *bt)
3150 + kfree(bt);
3151 + }
3152 +
3153 ++static void get_probe_ref(void)
3154 ++{
3155 ++ mutex_lock(&blk_probe_mutex);
3156 ++ if (++blk_probes_ref == 1)
3157 ++ blk_register_tracepoints();
3158 ++ mutex_unlock(&blk_probe_mutex);
3159 ++}
3160 ++
3161 ++static void put_probe_ref(void)
3162 ++{
3163 ++ mutex_lock(&blk_probe_mutex);
3164 ++ if (!--blk_probes_ref)
3165 ++ blk_unregister_tracepoints();
3166 ++ mutex_unlock(&blk_probe_mutex);
3167 ++}
3168 ++
3169 + static void blk_trace_cleanup(struct blk_trace *bt)
3170 + {
3171 + blk_trace_free(bt);
3172 +- if (atomic_dec_and_test(&blk_probes_ref))
3173 +- blk_unregister_tracepoints();
3174 ++ put_probe_ref();
3175 + }
3176 +
3177 + int blk_trace_remove(struct request_queue *q)
3178 +@@ -538,8 +554,7 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
3179 + if (cmpxchg(&q->blk_trace, NULL, bt))
3180 + goto err;
3181 +
3182 +- if (atomic_inc_return(&blk_probes_ref) == 1)
3183 +- blk_register_tracepoints();
3184 ++ get_probe_ref();
3185 +
3186 + ret = 0;
3187 + err:
3188 +@@ -1558,9 +1573,7 @@ static int blk_trace_remove_queue(struct request_queue *q)
3189 + if (bt == NULL)
3190 + return -EINVAL;
3191 +
3192 +- if (atomic_dec_and_test(&blk_probes_ref))
3193 +- blk_unregister_tracepoints();
3194 +-
3195 ++ put_probe_ref();
3196 + blk_trace_free(bt);
3197 + return 0;
3198 + }
3199 +@@ -1591,8 +1604,7 @@ static int blk_trace_setup_queue(struct request_queue *q,
3200 + if (cmpxchg(&q->blk_trace, NULL, bt))
3201 + goto free_bt;
3202 +
3203 +- if (atomic_inc_return(&blk_probes_ref) == 1)
3204 +- blk_register_tracepoints();
3205 ++ get_probe_ref();
3206 + return 0;
3207 +
3208 + free_bt:
3209 +diff --git a/lib/oid_registry.c b/lib/oid_registry.c
3210 +index 41b9e50711a7..b5f7d9986be1 100644
3211 +--- a/lib/oid_registry.c
3212 ++++ b/lib/oid_registry.c
3213 +@@ -116,7 +116,7 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
3214 + int count;
3215 +
3216 + if (v >= end)
3217 +- return -EBADMSG;
3218 ++ goto bad;
3219 +
3220 + n = *v++;
3221 + ret = count = snprintf(buffer, bufsize, "%u.%u", n / 40, n % 40);
3222 +@@ -134,7 +134,7 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
3223 + num = n & 0x7f;
3224 + do {
3225 + if (v >= end)
3226 +- return -EBADMSG;
3227 ++ goto bad;
3228 + n = *v++;
3229 + num <<= 7;
3230 + num |= n & 0x7f;
3231 +@@ -148,6 +148,10 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
3232 + }
3233 +
3234 + return ret;
3235 ++
3236 ++bad:
3237 ++ snprintf(buffer, bufsize, "(bad)");
3238 ++ return -EBADMSG;
3239 + }
3240 + EXPORT_SYMBOL_GPL(sprint_oid);
3241 +
3242 +diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c
3243 +index d04ac1ec0559..1826f191e72c 100644
3244 +--- a/mm/early_ioremap.c
3245 ++++ b/mm/early_ioremap.c
3246 +@@ -111,7 +111,7 @@ __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
3247 + enum fixed_addresses idx;
3248 + int i, slot;
3249 +
3250 +- WARN_ON(system_state != SYSTEM_BOOTING);
3251 ++ WARN_ON(system_state >= SYSTEM_RUNNING);
3252 +
3253 + slot = -1;
3254 + for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
3255 +diff --git a/mm/vmalloc.c b/mm/vmalloc.c
3256 +index 673942094328..ebff729cc956 100644
3257 +--- a/mm/vmalloc.c
3258 ++++ b/mm/vmalloc.c
3259 +@@ -1943,11 +1943,15 @@ void *vmalloc_exec(unsigned long size)
3260 + }
3261 +
3262 + #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
3263 +-#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
3264 ++#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3265 + #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
3266 +-#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
3267 ++#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
3268 + #else
3269 +-#define GFP_VMALLOC32 GFP_KERNEL
3270 ++/*
3271 ++ * 64b systems should always have either DMA or DMA32 zones. For others
3272 ++ * GFP_DMA32 should do the right thing and use the normal zone.
3273 ++ */
3274 ++#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
3275 + #endif
3276 +
3277 + /**
3278 +diff --git a/mm/vmscan.c b/mm/vmscan.c
3279 +index eb2f0315b8c0..441f346fb5e2 100644
3280 +--- a/mm/vmscan.c
3281 ++++ b/mm/vmscan.c
3282 +@@ -297,10 +297,13 @@ EXPORT_SYMBOL(register_shrinker);
3283 + */
3284 + void unregister_shrinker(struct shrinker *shrinker)
3285 + {
3286 ++ if (!shrinker->nr_deferred)
3287 ++ return;
3288 + down_write(&shrinker_rwsem);
3289 + list_del(&shrinker->list);
3290 + up_write(&shrinker_rwsem);
3291 + kfree(shrinker->nr_deferred);
3292 ++ shrinker->nr_deferred = NULL;
3293 + }
3294 + EXPORT_SYMBOL(unregister_shrinker);
3295 +
3296 +diff --git a/net/core/dev.c b/net/core/dev.c
3297 +index ffee085f0357..d33bbed640b1 100644
3298 +--- a/net/core/dev.c
3299 ++++ b/net/core/dev.c
3300 +@@ -2792,7 +2792,7 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3301 +
3302 + segs = skb_mac_gso_segment(skb, features);
3303 +
3304 +- if (unlikely(skb_needs_check(skb, tx_path)))
3305 ++ if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
3306 + skb_warn_bad_offload(skb);
3307 +
3308 + return segs;
3309 +diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
3310 +index 7c1ffd6f9501..00ecec4891f3 100644
3311 +--- a/net/core/gen_estimator.c
3312 ++++ b/net/core/gen_estimator.c
3313 +@@ -159,7 +159,11 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
3314 + est->intvl_log = intvl_log;
3315 + est->cpu_bstats = cpu_bstats;
3316 +
3317 ++ if (stats_lock)
3318 ++ local_bh_disable();
3319 + est_fetch_counters(est, &b);
3320 ++ if (stats_lock)
3321 ++ local_bh_enable();
3322 + est->last_bytes = b.bytes;
3323 + est->last_packets = b.packets;
3324 + old = rcu_dereference_protected(*rate_est, 1);
3325 +diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
3326 +index 73a0399dc7a2..8dbfcd388633 100644
3327 +--- a/net/decnet/af_decnet.c
3328 ++++ b/net/decnet/af_decnet.c
3329 +@@ -1339,6 +1339,12 @@ static int dn_setsockopt(struct socket *sock, int level, int optname, char __use
3330 + lock_sock(sk);
3331 + err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
3332 + release_sock(sk);
3333 ++#ifdef CONFIG_NETFILTER
3334 ++ /* we need to exclude all possible ENOPROTOOPTs except default case */
3335 ++ if (err == -ENOPROTOOPT && optname != DSO_LINKINFO &&
3336 ++ optname != DSO_STREAM && optname != DSO_SEQPACKET)
3337 ++ err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
3338 ++#endif
3339 +
3340 + return err;
3341 + }
3342 +@@ -1446,15 +1452,6 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
3343 + dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
3344 + break;
3345 +
3346 +- default:
3347 +-#ifdef CONFIG_NETFILTER
3348 +- return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
3349 +-#endif
3350 +- case DSO_LINKINFO:
3351 +- case DSO_STREAM:
3352 +- case DSO_SEQPACKET:
3353 +- return -ENOPROTOOPT;
3354 +-
3355 + case DSO_MAXWINDOW:
3356 + if (optlen != sizeof(unsigned long))
3357 + return -EINVAL;
3358 +@@ -1502,6 +1499,12 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
3359 + return -EINVAL;
3360 + scp->info_loc = u.info;
3361 + break;
3362 ++
3363 ++ case DSO_LINKINFO:
3364 ++ case DSO_STREAM:
3365 ++ case DSO_SEQPACKET:
3366 ++ default:
3367 ++ return -ENOPROTOOPT;
3368 + }
3369 +
3370 + return 0;
3371 +@@ -1515,6 +1518,20 @@ static int dn_getsockopt(struct socket *sock, int level, int optname, char __use
3372 + lock_sock(sk);
3373 + err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
3374 + release_sock(sk);
3375 ++#ifdef CONFIG_NETFILTER
3376 ++ if (err == -ENOPROTOOPT && optname != DSO_STREAM &&
3377 ++ optname != DSO_SEQPACKET && optname != DSO_CONACCEPT &&
3378 ++ optname != DSO_CONREJECT) {
3379 ++ int len;
3380 ++
3381 ++ if (get_user(len, optlen))
3382 ++ return -EFAULT;
3383 ++
3384 ++ err = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
3385 ++ if (err >= 0)
3386 ++ err = put_user(len, optlen);
3387 ++ }
3388 ++#endif
3389 +
3390 + return err;
3391 + }
3392 +@@ -1580,26 +1597,6 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
3393 + r_data = &link;
3394 + break;
3395 +
3396 +- default:
3397 +-#ifdef CONFIG_NETFILTER
3398 +- {
3399 +- int ret, len;
3400 +-
3401 +- if (get_user(len, optlen))
3402 +- return -EFAULT;
3403 +-
3404 +- ret = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
3405 +- if (ret >= 0)
3406 +- ret = put_user(len, optlen);
3407 +- return ret;
3408 +- }
3409 +-#endif
3410 +- case DSO_STREAM:
3411 +- case DSO_SEQPACKET:
3412 +- case DSO_CONACCEPT:
3413 +- case DSO_CONREJECT:
3414 +- return -ENOPROTOOPT;
3415 +-
3416 + case DSO_MAXWINDOW:
3417 + if (r_len > sizeof(unsigned long))
3418 + r_len = sizeof(unsigned long);
3419 +@@ -1631,6 +1628,13 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
3420 + r_len = sizeof(unsigned char);
3421 + r_data = &scp->info_rem;
3422 + break;
3423 ++
3424 ++ case DSO_STREAM:
3425 ++ case DSO_SEQPACKET:
3426 ++ case DSO_CONACCEPT:
3427 ++ case DSO_CONREJECT:
3428 ++ default:
3429 ++ return -ENOPROTOOPT;
3430 + }
3431 +
3432 + if (r_data) {
3433 +diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
3434 +index 56c49623bb9d..29b333a62ab0 100644
3435 +--- a/net/ipv4/esp4_offload.c
3436 ++++ b/net/ipv4/esp4_offload.c
3437 +@@ -38,7 +38,8 @@ static struct sk_buff **esp4_gro_receive(struct sk_buff **head,
3438 + __be32 spi;
3439 + int err;
3440 +
3441 +- skb_pull(skb, offset);
3442 ++ if (!pskb_pull(skb, offset))
3443 ++ return NULL;
3444 +
3445 + if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
3446 + goto out;
3447 +diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
3448 +index 60fb1eb7d7d8..c7df4969f80a 100644
3449 +--- a/net/ipv4/ip_sockglue.c
3450 ++++ b/net/ipv4/ip_sockglue.c
3451 +@@ -1251,11 +1251,8 @@ int ip_setsockopt(struct sock *sk, int level,
3452 + if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
3453 + optname != IP_IPSEC_POLICY &&
3454 + optname != IP_XFRM_POLICY &&
3455 +- !ip_mroute_opt(optname)) {
3456 +- lock_sock(sk);
3457 ++ !ip_mroute_opt(optname))
3458 + err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
3459 +- release_sock(sk);
3460 +- }
3461 + #endif
3462 + return err;
3463 + }
3464 +@@ -1280,12 +1277,9 @@ int compat_ip_setsockopt(struct sock *sk, int level, int optname,
3465 + if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
3466 + optname != IP_IPSEC_POLICY &&
3467 + optname != IP_XFRM_POLICY &&
3468 +- !ip_mroute_opt(optname)) {
3469 +- lock_sock(sk);
3470 +- err = compat_nf_setsockopt(sk, PF_INET, optname,
3471 +- optval, optlen);
3472 +- release_sock(sk);
3473 +- }
3474 ++ !ip_mroute_opt(optname))
3475 ++ err = compat_nf_setsockopt(sk, PF_INET, optname, optval,
3476 ++ optlen);
3477 + #endif
3478 + return err;
3479 + }
3480 +diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
3481 +index 17b4ca562944..24a8c2e63e3d 100644
3482 +--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
3483 ++++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
3484 +@@ -431,7 +431,7 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
3485 + struct ipt_clusterip_tgt_info *cipinfo = par->targinfo;
3486 + const struct ipt_entry *e = par->entryinfo;
3487 + struct clusterip_config *config;
3488 +- int ret;
3489 ++ int ret, i;
3490 +
3491 + if (par->nft_compat) {
3492 + pr_err("cannot use CLUSTERIP target from nftables compat\n");
3493 +@@ -450,8 +450,18 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
3494 + pr_info("Please specify destination IP\n");
3495 + return -EINVAL;
3496 + }
3497 +-
3498 +- /* FIXME: further sanity checks */
3499 ++ if (cipinfo->num_local_nodes > ARRAY_SIZE(cipinfo->local_nodes)) {
3500 ++ pr_info("bad num_local_nodes %u\n", cipinfo->num_local_nodes);
3501 ++ return -EINVAL;
3502 ++ }
3503 ++ for (i = 0; i < cipinfo->num_local_nodes; i++) {
3504 ++ if (cipinfo->local_nodes[i] - 1 >=
3505 ++ sizeof(config->local_nodes) * 8) {
3506 ++ pr_info("bad local_nodes[%d] %u\n",
3507 ++ i, cipinfo->local_nodes[i]);
3508 ++ return -EINVAL;
3509 ++ }
3510 ++ }
3511 +
3512 + config = clusterip_config_find_get(par->net, e->ip.dst.s_addr, 1);
3513 + if (!config) {
3514 +diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
3515 +index fe374da4bc13..997a96896f1a 100644
3516 +--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
3517 ++++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
3518 +@@ -218,15 +218,19 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
3519 + struct nf_conntrack_tuple tuple;
3520 +
3521 + memset(&tuple, 0, sizeof(tuple));
3522 ++
3523 ++ lock_sock(sk);
3524 + tuple.src.u3.ip = inet->inet_rcv_saddr;
3525 + tuple.src.u.tcp.port = inet->inet_sport;
3526 + tuple.dst.u3.ip = inet->inet_daddr;
3527 + tuple.dst.u.tcp.port = inet->inet_dport;
3528 + tuple.src.l3num = PF_INET;
3529 + tuple.dst.protonum = sk->sk_protocol;
3530 ++ release_sock(sk);
3531 +
3532 + /* We only do TCP and SCTP at the moment: is there a better way? */
3533 +- if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP) {
3534 ++ if (tuple.dst.protonum != IPPROTO_TCP &&
3535 ++ tuple.dst.protonum != IPPROTO_SCTP) {
3536 + pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n");
3537 + return -ENOPROTOOPT;
3538 + }
3539 +diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
3540 +index 1ea9d794447e..f52c314d4c97 100644
3541 +--- a/net/ipv6/esp6_offload.c
3542 ++++ b/net/ipv6/esp6_offload.c
3543 +@@ -60,7 +60,8 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
3544 + int nhoff;
3545 + int err;
3546 +
3547 +- skb_pull(skb, offset);
3548 ++ if (!pskb_pull(skb, offset))
3549 ++ return NULL;
3550 +
3551 + if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
3552 + goto out;
3553 +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
3554 +index 3b251760cb8c..24b69384bdbf 100644
3555 +--- a/net/ipv6/ipv6_sockglue.c
3556 ++++ b/net/ipv6/ipv6_sockglue.c
3557 +@@ -915,12 +915,8 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname,
3558 + #ifdef CONFIG_NETFILTER
3559 + /* we need to exclude all possible ENOPROTOOPTs except default case */
3560 + if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY &&
3561 +- optname != IPV6_XFRM_POLICY) {
3562 +- lock_sock(sk);
3563 +- err = nf_setsockopt(sk, PF_INET6, optname, optval,
3564 +- optlen);
3565 +- release_sock(sk);
3566 +- }
3567 ++ optname != IPV6_XFRM_POLICY)
3568 ++ err = nf_setsockopt(sk, PF_INET6, optname, optval, optlen);
3569 + #endif
3570 + return err;
3571 + }
3572 +@@ -950,12 +946,9 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
3573 + #ifdef CONFIG_NETFILTER
3574 + /* we need to exclude all possible ENOPROTOOPTs except default case */
3575 + if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY &&
3576 +- optname != IPV6_XFRM_POLICY) {
3577 +- lock_sock(sk);
3578 +- err = compat_nf_setsockopt(sk, PF_INET6, optname,
3579 +- optval, optlen);
3580 +- release_sock(sk);
3581 +- }
3582 ++ optname != IPV6_XFRM_POLICY)
3583 ++ err = compat_nf_setsockopt(sk, PF_INET6, optname, optval,
3584 ++ optlen);
3585 + #endif
3586 + return err;
3587 + }
3588 +diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
3589 +index fe01dc953c56..b807478c4f7f 100644
3590 +--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
3591 ++++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
3592 +@@ -226,20 +226,27 @@ static const struct nf_hook_ops ipv6_conntrack_ops[] = {
3593 + static int
3594 + ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
3595 + {
3596 +- const struct inet_sock *inet = inet_sk(sk);
3597 ++ struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
3598 + const struct ipv6_pinfo *inet6 = inet6_sk(sk);
3599 ++ const struct inet_sock *inet = inet_sk(sk);
3600 + const struct nf_conntrack_tuple_hash *h;
3601 + struct sockaddr_in6 sin6;
3602 +- struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
3603 + struct nf_conn *ct;
3604 ++ __be32 flow_label;
3605 ++ int bound_dev_if;
3606 +
3607 ++ lock_sock(sk);
3608 + tuple.src.u3.in6 = sk->sk_v6_rcv_saddr;
3609 + tuple.src.u.tcp.port = inet->inet_sport;
3610 + tuple.dst.u3.in6 = sk->sk_v6_daddr;
3611 + tuple.dst.u.tcp.port = inet->inet_dport;
3612 + tuple.dst.protonum = sk->sk_protocol;
3613 ++ bound_dev_if = sk->sk_bound_dev_if;
3614 ++ flow_label = inet6->flow_label;
3615 ++ release_sock(sk);
3616 +
3617 +- if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP)
3618 ++ if (tuple.dst.protonum != IPPROTO_TCP &&
3619 ++ tuple.dst.protonum != IPPROTO_SCTP)
3620 + return -ENOPROTOOPT;
3621 +
3622 + if (*len < 0 || (unsigned int) *len < sizeof(sin6))
3623 +@@ -257,14 +264,13 @@ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
3624 +
3625 + sin6.sin6_family = AF_INET6;
3626 + sin6.sin6_port = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port;
3627 +- sin6.sin6_flowinfo = inet6->flow_label & IPV6_FLOWINFO_MASK;
3628 ++ sin6.sin6_flowinfo = flow_label & IPV6_FLOWINFO_MASK;
3629 + memcpy(&sin6.sin6_addr,
3630 + &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6,
3631 + sizeof(sin6.sin6_addr));
3632 +
3633 + nf_ct_put(ct);
3634 +- sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr,
3635 +- sk->sk_bound_dev_if);
3636 ++ sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr, bound_dev_if);
3637 + return copy_to_user(user, &sin6, sizeof(sin6)) ? -EFAULT : 0;
3638 + }
3639 +
3640 +diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
3641 +index c5fa634e63ca..58d53b907d53 100644
3642 +--- a/net/kcm/kcmsock.c
3643 ++++ b/net/kcm/kcmsock.c
3644 +@@ -1387,8 +1387,13 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
3645 + if (!csk)
3646 + return -EINVAL;
3647 +
3648 +- /* We must prevent loops or risk deadlock ! */
3649 +- if (csk->sk_family == PF_KCM)
3650 ++ /* Only allow TCP sockets to be attached for now */
3651 ++ if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
3652 ++ csk->sk_protocol != IPPROTO_TCP)
3653 ++ return -EOPNOTSUPP;
3654 ++
3655 ++ /* Don't allow listeners or closed sockets */
3656 ++ if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE)
3657 + return -EOPNOTSUPP;
3658 +
3659 + psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
3660 +@@ -1405,9 +1410,18 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
3661 + return err;
3662 + }
3663 +
3664 +- sock_hold(csk);
3665 +-
3666 + write_lock_bh(&csk->sk_callback_lock);
3667 ++
3668 ++ /* Check if sk_user_data is aready by KCM or someone else.
3669 ++ * Must be done under lock to prevent race conditions.
3670 ++ */
3671 ++ if (csk->sk_user_data) {
3672 ++ write_unlock_bh(&csk->sk_callback_lock);
3673 ++ strp_done(&psock->strp);
3674 ++ kmem_cache_free(kcm_psockp, psock);
3675 ++ return -EALREADY;
3676 ++ }
3677 ++
3678 + psock->save_data_ready = csk->sk_data_ready;
3679 + psock->save_write_space = csk->sk_write_space;
3680 + psock->save_state_change = csk->sk_state_change;
3681 +@@ -1415,8 +1429,11 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
3682 + csk->sk_data_ready = psock_data_ready;
3683 + csk->sk_write_space = psock_write_space;
3684 + csk->sk_state_change = psock_state_change;
3685 ++
3686 + write_unlock_bh(&csk->sk_callback_lock);
3687 +
3688 ++ sock_hold(csk);
3689 ++
3690 + /* Finished initialization, now add the psock to the MUX. */
3691 + spin_lock_bh(&mux->lock);
3692 + head = &mux->psocks;
3693 +diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
3694 +index d8571f414208..60c92158a2cd 100644
3695 +--- a/net/netfilter/x_tables.c
3696 ++++ b/net/netfilter/x_tables.c
3697 +@@ -39,7 +39,6 @@ MODULE_LICENSE("GPL");
3698 + MODULE_AUTHOR("Harald Welte <laforge@×××××××××.org>");
3699 + MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
3700 +
3701 +-#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
3702 + #define XT_PCPU_BLOCK_SIZE 4096
3703 +
3704 + struct compat_delta {
3705 +@@ -210,6 +209,9 @@ xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
3706 + {
3707 + struct xt_match *match;
3708 +
3709 ++ if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
3710 ++ return ERR_PTR(-EINVAL);
3711 ++
3712 + match = xt_find_match(nfproto, name, revision);
3713 + if (IS_ERR(match)) {
3714 + request_module("%st_%s", xt_prefix[nfproto], name);
3715 +@@ -252,6 +254,9 @@ struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
3716 + {
3717 + struct xt_target *target;
3718 +
3719 ++ if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
3720 ++ return ERR_PTR(-EINVAL);
3721 ++
3722 + target = xt_find_target(af, name, revision);
3723 + if (IS_ERR(target)) {
3724 + request_module("%st_%s", xt_prefix[af], name);
3725 +@@ -1000,7 +1005,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
3726 + return NULL;
3727 +
3728 + /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
3729 +- if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
3730 ++ if ((size >> PAGE_SHIFT) + 2 > totalram_pages)
3731 + return NULL;
3732 +
3733 + info = kvmalloc(sz, GFP_KERNEL);
3734 +diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
3735 +index 498b54fd04d7..141c295191f6 100644
3736 +--- a/net/netfilter/xt_RATEEST.c
3737 ++++ b/net/netfilter/xt_RATEEST.c
3738 +@@ -39,23 +39,31 @@ static void xt_rateest_hash_insert(struct xt_rateest *est)
3739 + hlist_add_head(&est->list, &rateest_hash[h]);
3740 + }
3741 +
3742 +-struct xt_rateest *xt_rateest_lookup(const char *name)
3743 ++static struct xt_rateest *__xt_rateest_lookup(const char *name)
3744 + {
3745 + struct xt_rateest *est;
3746 + unsigned int h;
3747 +
3748 + h = xt_rateest_hash(name);
3749 +- mutex_lock(&xt_rateest_mutex);
3750 + hlist_for_each_entry(est, &rateest_hash[h], list) {
3751 + if (strcmp(est->name, name) == 0) {
3752 + est->refcnt++;
3753 +- mutex_unlock(&xt_rateest_mutex);
3754 + return est;
3755 + }
3756 + }
3757 +- mutex_unlock(&xt_rateest_mutex);
3758 ++
3759 + return NULL;
3760 + }
3761 ++
3762 ++struct xt_rateest *xt_rateest_lookup(const char *name)
3763 ++{
3764 ++ struct xt_rateest *est;
3765 ++
3766 ++ mutex_lock(&xt_rateest_mutex);
3767 ++ est = __xt_rateest_lookup(name);
3768 ++ mutex_unlock(&xt_rateest_mutex);
3769 ++ return est;
3770 ++}
3771 + EXPORT_SYMBOL_GPL(xt_rateest_lookup);
3772 +
3773 + void xt_rateest_put(struct xt_rateest *est)
3774 +@@ -100,8 +108,10 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
3775 +
3776 + net_get_random_once(&jhash_rnd, sizeof(jhash_rnd));
3777 +
3778 +- est = xt_rateest_lookup(info->name);
3779 ++ mutex_lock(&xt_rateest_mutex);
3780 ++ est = __xt_rateest_lookup(info->name);
3781 + if (est) {
3782 ++ mutex_unlock(&xt_rateest_mutex);
3783 + /*
3784 + * If estimator parameters are specified, they must match the
3785 + * existing estimator.
3786 +@@ -139,11 +149,13 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
3787 +
3788 + info->est = est;
3789 + xt_rateest_hash_insert(est);
3790 ++ mutex_unlock(&xt_rateest_mutex);
3791 + return 0;
3792 +
3793 + err2:
3794 + kfree(est);
3795 + err1:
3796 ++ mutex_unlock(&xt_rateest_mutex);
3797 + return ret;
3798 + }
3799 +
3800 +diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
3801 +index 29123934887b..5185ff0f8f58 100644
3802 +--- a/net/netfilter/xt_bpf.c
3803 ++++ b/net/netfilter/xt_bpf.c
3804 +@@ -27,6 +27,9 @@ static int __bpf_mt_check_bytecode(struct sock_filter *insns, __u16 len,
3805 + {
3806 + struct sock_fprog_kern program;
3807 +
3808 ++ if (len > XT_BPF_MAX_NUM_INSTR)
3809 ++ return -EINVAL;
3810 ++
3811 + program.len = len;
3812 + program.filter = insns;
3813 +
3814 +@@ -55,6 +58,9 @@ static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
3815 + mm_segment_t oldfs = get_fs();
3816 + int retval, fd;
3817 +
3818 ++ if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX)
3819 ++ return -EINVAL;
3820 ++
3821 + set_fs(KERNEL_DS);
3822 + fd = bpf_obj_get_user(path);
3823 + set_fs(oldfs);
3824 +diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c
3825 +index 1db1ce59079f..891f4e7e8ea7 100644
3826 +--- a/net/netfilter/xt_cgroup.c
3827 ++++ b/net/netfilter/xt_cgroup.c
3828 +@@ -52,6 +52,7 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par)
3829 + return -EINVAL;
3830 + }
3831 +
3832 ++ info->priv = NULL;
3833 + if (info->has_path) {
3834 + cgrp = cgroup_get_from_path(info->path);
3835 + if (IS_ERR(cgrp)) {
3836 +diff --git a/net/rds/connection.c b/net/rds/connection.c
3837 +index 7ee2d5d68b78..9efc82c665b5 100644
3838 +--- a/net/rds/connection.c
3839 ++++ b/net/rds/connection.c
3840 +@@ -366,6 +366,8 @@ void rds_conn_shutdown(struct rds_conn_path *cp)
3841 + * to the conn hash, so we never trigger a reconnect on this
3842 + * conn - the reconnect is always triggered by the active peer. */
3843 + cancel_delayed_work_sync(&cp->cp_conn_w);
3844 ++ if (conn->c_destroy_in_prog)
3845 ++ return;
3846 + rcu_read_lock();
3847 + if (!hlist_unhashed(&conn->c_hash_node)) {
3848 + rcu_read_unlock();
3849 +@@ -445,7 +447,6 @@ void rds_conn_destroy(struct rds_connection *conn)
3850 + */
3851 + rds_cong_remove_conn(conn);
3852 +
3853 +- put_net(conn->c_net);
3854 + kfree(conn->c_path);
3855 + kmem_cache_free(rds_conn_slab, conn);
3856 +
3857 +diff --git a/net/rds/rds.h b/net/rds/rds.h
3858 +index c349c71babff..d09f6c1facb4 100644
3859 +--- a/net/rds/rds.h
3860 ++++ b/net/rds/rds.h
3861 +@@ -150,7 +150,7 @@ struct rds_connection {
3862 +
3863 + /* Protocol version */
3864 + unsigned int c_version;
3865 +- struct net *c_net;
3866 ++ possible_net_t c_net;
3867 +
3868 + struct list_head c_map_item;
3869 + unsigned long c_map_queued;
3870 +@@ -165,13 +165,13 @@ struct rds_connection {
3871 + static inline
3872 + struct net *rds_conn_net(struct rds_connection *conn)
3873 + {
3874 +- return conn->c_net;
3875 ++ return read_pnet(&conn->c_net);
3876 + }
3877 +
3878 + static inline
3879 + void rds_conn_net_set(struct rds_connection *conn, struct net *net)
3880 + {
3881 +- conn->c_net = get_net(net);
3882 ++ write_pnet(&conn->c_net, net);
3883 + }
3884 +
3885 + #define RDS_FLAG_CONG_BITMAP 0x01
3886 +diff --git a/net/rds/tcp.c b/net/rds/tcp.c
3887 +index 6b7ee71f40c6..2a08bf75d008 100644
3888 +--- a/net/rds/tcp.c
3889 ++++ b/net/rds/tcp.c
3890 +@@ -306,7 +306,8 @@ static void rds_tcp_conn_free(void *arg)
3891 + rdsdebug("freeing tc %p\n", tc);
3892 +
3893 + spin_lock_irqsave(&rds_tcp_conn_lock, flags);
3894 +- list_del(&tc->t_tcp_node);
3895 ++ if (!tc->t_tcp_node_detached)
3896 ++ list_del(&tc->t_tcp_node);
3897 + spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
3898 +
3899 + kmem_cache_free(rds_tcp_conn_slab, tc);
3900 +@@ -527,12 +528,16 @@ static void rds_tcp_kill_sock(struct net *net)
3901 + rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
3902 + spin_lock_irq(&rds_tcp_conn_lock);
3903 + list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
3904 +- struct net *c_net = tc->t_cpath->cp_conn->c_net;
3905 ++ struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
3906 +
3907 + if (net != c_net || !tc->t_sock)
3908 + continue;
3909 +- if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn))
3910 ++ if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
3911 + list_move_tail(&tc->t_tcp_node, &tmp_list);
3912 ++ } else {
3913 ++ list_del(&tc->t_tcp_node);
3914 ++ tc->t_tcp_node_detached = true;
3915 ++ }
3916 + }
3917 + spin_unlock_irq(&rds_tcp_conn_lock);
3918 + list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
3919 +@@ -586,7 +591,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
3920 +
3921 + spin_lock_irq(&rds_tcp_conn_lock);
3922 + list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
3923 +- struct net *c_net = tc->t_cpath->cp_conn->c_net;
3924 ++ struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
3925 +
3926 + if (net != c_net || !tc->t_sock)
3927 + continue;
3928 +diff --git a/net/rds/tcp.h b/net/rds/tcp.h
3929 +index 1aafbf7c3011..e7858ee8ed8b 100644
3930 +--- a/net/rds/tcp.h
3931 ++++ b/net/rds/tcp.h
3932 +@@ -12,6 +12,7 @@ struct rds_tcp_incoming {
3933 + struct rds_tcp_connection {
3934 +
3935 + struct list_head t_tcp_node;
3936 ++ bool t_tcp_node_detached;
3937 + struct rds_conn_path *t_cpath;
3938 + /* t_conn_path_lock synchronizes the connection establishment between
3939 + * rds_tcp_accept_one and rds_tcp_conn_path_connect
3940 +diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
3941 +index b30a2c70bd48..531250fceb9e 100644
3942 +--- a/net/sched/sch_choke.c
3943 ++++ b/net/sched/sch_choke.c
3944 +@@ -369,6 +369,9 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
3945 +
3946 + ctl = nla_data(tb[TCA_CHOKE_PARMS]);
3947 +
3948 ++ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
3949 ++ return -EINVAL;
3950 ++
3951 + if (ctl->limit > CHOKE_MAX_QUEUE)
3952 + return -EINVAL;
3953 +
3954 +diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
3955 +index 17c7130454bd..bc30f9186ac6 100644
3956 +--- a/net/sched/sch_gred.c
3957 ++++ b/net/sched/sch_gred.c
3958 +@@ -356,6 +356,9 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
3959 + struct gred_sched *table = qdisc_priv(sch);
3960 + struct gred_sched_data *q = table->tab[dp];
3961 +
3962 ++ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
3963 ++ return -EINVAL;
3964 ++
3965 + if (!q) {
3966 + table->tab[dp] = q = *prealloc;
3967 + *prealloc = NULL;
3968 +diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
3969 +index 93b9d70a9b28..d87c41e82917 100644
3970 +--- a/net/sched/sch_red.c
3971 ++++ b/net/sched/sch_red.c
3972 +@@ -184,6 +184,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
3973 + max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
3974 +
3975 + ctl = nla_data(tb[TCA_RED_PARMS]);
3976 ++ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
3977 ++ return -EINVAL;
3978 +
3979 + if (ctl->limit > 0) {
3980 + child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
3981 +diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
3982 +index 74ea863b8240..3fbf20126045 100644
3983 +--- a/net/sched/sch_sfq.c
3984 ++++ b/net/sched/sch_sfq.c
3985 +@@ -637,6 +637,9 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
3986 + if (ctl->divisor &&
3987 + (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
3988 + return -EINVAL;
3989 ++ if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
3990 ++ ctl_v1->Wlog))
3991 ++ return -EINVAL;
3992 + if (ctl_v1 && ctl_v1->qth_min) {
3993 + p = kmalloc(sizeof(*p), GFP_KERNEL);
3994 + if (!p)
3995 +diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
3996 +index 2966ff400755..39ac91060bc2 100644
3997 +--- a/net/sctp/outqueue.c
3998 ++++ b/net/sctp/outqueue.c
3999 +@@ -368,7 +368,8 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
4000 + asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
4001 + streamout->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
4002 +
4003 +- if (!chk->tsn_gap_acked) {
4004 ++ if (queue != &asoc->outqueue.retransmit &&
4005 ++ !chk->tsn_gap_acked) {
4006 + if (chk->transport)
4007 + chk->transport->flight_size -=
4008 + sctp_data_size(chk);
4009 +@@ -1429,7 +1430,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
4010 + /* If this chunk has not been acked, stop
4011 + * considering it as 'outstanding'.
4012 + */
4013 +- if (!tchunk->tsn_gap_acked) {
4014 ++ if (transmitted_queue != &q->retransmit &&
4015 ++ !tchunk->tsn_gap_acked) {
4016 + if (tchunk->transport)
4017 + tchunk->transport->flight_size -=
4018 + sctp_data_size(tchunk);
4019 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
4020 +index 1c08d86efe94..3c8b92667866 100644
4021 +--- a/net/sctp/socket.c
4022 ++++ b/net/sctp/socket.c
4023 +@@ -3136,9 +3136,9 @@ static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsign
4024 + */
4025 + static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
4026 + {
4027 ++ struct sctp_sock *sp = sctp_sk(sk);
4028 + struct sctp_assoc_value params;
4029 + struct sctp_association *asoc;
4030 +- struct sctp_sock *sp = sctp_sk(sk);
4031 + int val;
4032 +
4033 + if (optlen == sizeof(int)) {
4034 +@@ -3154,26 +3154,35 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
4035 + if (copy_from_user(&params, optval, optlen))
4036 + return -EFAULT;
4037 + val = params.assoc_value;
4038 +- } else
4039 ++ } else {
4040 + return -EINVAL;
4041 ++ }
4042 +
4043 +- if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN)))
4044 +- return -EINVAL;
4045 ++ if (val) {
4046 ++ int min_len, max_len;
4047 +
4048 +- asoc = sctp_id2assoc(sk, params.assoc_id);
4049 +- if (!asoc && params.assoc_id && sctp_style(sk, UDP))
4050 +- return -EINVAL;
4051 ++ min_len = SCTP_DEFAULT_MINSEGMENT - sp->pf->af->net_header_len;
4052 ++ min_len -= sizeof(struct sctphdr) +
4053 ++ sizeof(struct sctp_data_chunk);
4054 ++
4055 ++ max_len = SCTP_MAX_CHUNK_LEN - sizeof(struct sctp_data_chunk);
4056 +
4057 ++ if (val < min_len || val > max_len)
4058 ++ return -EINVAL;
4059 ++ }
4060 ++
4061 ++ asoc = sctp_id2assoc(sk, params.assoc_id);
4062 + if (asoc) {
4063 + if (val == 0) {
4064 +- val = asoc->pathmtu;
4065 +- val -= sp->pf->af->net_header_len;
4066 ++ val = asoc->pathmtu - sp->pf->af->net_header_len;
4067 + val -= sizeof(struct sctphdr) +
4068 +- sizeof(struct sctp_data_chunk);
4069 ++ sizeof(struct sctp_data_chunk);
4070 + }
4071 + asoc->user_frag = val;
4072 + asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
4073 + } else {
4074 ++ if (params.assoc_id && sctp_style(sk, UDP))
4075 ++ return -EINVAL;
4076 + sp->user_frag = val;
4077 + }
4078 +
4079 +diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
4080 +index e21991fe883a..2c63f7b169b5 100644
4081 +--- a/net/vmw_vsock/hyperv_transport.c
4082 ++++ b/net/vmw_vsock/hyperv_transport.c
4083 +@@ -488,7 +488,7 @@ static void hvs_release(struct vsock_sock *vsk)
4084 +
4085 + lock_sock(sk);
4086 +
4087 +- sk->sk_state = SS_DISCONNECTING;
4088 ++ sk->sk_state = TCP_CLOSING;
4089 + vsock_remove_sock(vsk);
4090 +
4091 + release_sock(sk);
4092 +diff --git a/net/wireless/core.c b/net/wireless/core.c
4093 +index 7b33e8c366bc..33ce0484b2a0 100644
4094 +--- a/net/wireless/core.c
4095 ++++ b/net/wireless/core.c
4096 +@@ -439,6 +439,8 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
4097 + if (rv)
4098 + goto use_default_name;
4099 + } else {
4100 ++ int rv;
4101 ++
4102 + use_default_name:
4103 + /* NOTE: This is *probably* safe w/out holding rtnl because of
4104 + * the restrictions on phy names. Probably this call could
4105 +@@ -446,7 +448,11 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
4106 + * phyX. But, might should add some locking and check return
4107 + * value, and use a different name if this one exists?
4108 + */
4109 +- dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
4110 ++ rv = dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
4111 ++ if (rv < 0) {
4112 ++ kfree(rdev);
4113 ++ return NULL;
4114 ++ }
4115 + }
4116 +
4117 + INIT_LIST_HEAD(&rdev->wiphy.wdev_list);
4118 +diff --git a/net/wireless/core.h b/net/wireless/core.h
4119 +index 705835047f98..90f90c7d8bf9 100644
4120 +--- a/net/wireless/core.h
4121 ++++ b/net/wireless/core.h
4122 +@@ -502,8 +502,6 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
4123 + void cfg80211_stop_nan(struct cfg80211_registered_device *rdev,
4124 + struct wireless_dev *wdev);
4125 +
4126 +-#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
4127 +-
4128 + #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
4129 + #define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
4130 + #else
4131 +diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
4132 +index 347ab31574d5..da6447389ffb 100644
4133 +--- a/net/xfrm/xfrm_input.c
4134 ++++ b/net/xfrm/xfrm_input.c
4135 +@@ -207,7 +207,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
4136 + xfrm_address_t *daddr;
4137 + struct xfrm_mode *inner_mode;
4138 + u32 mark = skb->mark;
4139 +- unsigned int family;
4140 ++ unsigned int family = AF_UNSPEC;
4141 + int decaps = 0;
4142 + int async = 0;
4143 + bool xfrm_gro = false;
4144 +@@ -216,6 +216,16 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
4145 +
4146 + if (encap_type < 0) {
4147 + x = xfrm_input_state(skb);
4148 ++
4149 ++ if (unlikely(x->km.state != XFRM_STATE_VALID)) {
4150 ++ if (x->km.state == XFRM_STATE_ACQ)
4151 ++ XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
4152 ++ else
4153 ++ XFRM_INC_STATS(net,
4154 ++ LINUX_MIB_XFRMINSTATEINVALID);
4155 ++ goto drop;
4156 ++ }
4157 ++
4158 + family = x->outer_mode->afinfo->family;
4159 +
4160 + /* An encap_type of -1 indicates async resumption. */
4161 +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
4162 +index 688ed34f0671..22f5da66357b 100644
4163 +--- a/net/xfrm/xfrm_policy.c
4164 ++++ b/net/xfrm/xfrm_policy.c
4165 +@@ -610,7 +610,8 @@ static void xfrm_hash_rebuild(struct work_struct *work)
4166 +
4167 + /* re-insert all policies by order of creation */
4168 + list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
4169 +- if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
4170 ++ if (policy->walk.dead ||
4171 ++ xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
4172 + /* skip socket policies */
4173 + continue;
4174 + }
4175 +@@ -975,8 +976,6 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
4176 + }
4177 + if (!cnt)
4178 + err = -ESRCH;
4179 +- else
4180 +- xfrm_policy_cache_flush();
4181 + out:
4182 + spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4183 + return err;
4184 +@@ -1169,9 +1168,15 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
4185 + again:
4186 + pol = rcu_dereference(sk->sk_policy[dir]);
4187 + if (pol != NULL) {
4188 +- bool match = xfrm_selector_match(&pol->selector, fl, family);
4189 ++ bool match;
4190 + int err = 0;
4191 +
4192 ++ if (pol->family != family) {
4193 ++ pol = NULL;
4194 ++ goto out;
4195 ++ }
4196 ++
4197 ++ match = xfrm_selector_match(&pol->selector, fl, family);
4198 + if (match) {
4199 + if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
4200 + pol = NULL;
4201 +@@ -1738,6 +1743,8 @@ void xfrm_policy_cache_flush(void)
4202 + bool found = 0;
4203 + int cpu;
4204 +
4205 ++ might_sleep();
4206 ++
4207 + local_bh_disable();
4208 + rcu_read_lock();
4209 + for_each_possible_cpu(cpu) {
4210 +diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
4211 +index 1f5cee2269af..58be0e7f4c7d 100644
4212 +--- a/net/xfrm/xfrm_state.c
4213 ++++ b/net/xfrm/xfrm_state.c
4214 +@@ -313,13 +313,14 @@ xfrm_get_type_offload(u8 proto, unsigned short family, bool try_load)
4215 + if ((type && !try_module_get(type->owner)))
4216 + type = NULL;
4217 +
4218 ++ rcu_read_unlock();
4219 ++
4220 + if (!type && try_load) {
4221 + request_module("xfrm-offload-%d-%d", family, proto);
4222 + try_load = 0;
4223 + goto retry;
4224 + }
4225 +
4226 +- rcu_read_unlock();
4227 + return type;
4228 + }
4229 +
4230 +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
4231 +index e44a0fed48dd..0edf38d2afd9 100644
4232 +--- a/net/xfrm/xfrm_user.c
4233 ++++ b/net/xfrm/xfrm_user.c
4234 +@@ -1417,11 +1417,14 @@ static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
4235 +
4236 + static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
4237 + {
4238 ++ u16 prev_family;
4239 + int i;
4240 +
4241 + if (nr > XFRM_MAX_DEPTH)
4242 + return -EINVAL;
4243 +
4244 ++ prev_family = family;
4245 ++
4246 + for (i = 0; i < nr; i++) {
4247 + /* We never validated the ut->family value, so many
4248 + * applications simply leave it at zero. The check was
4249 +@@ -1433,6 +1436,12 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
4250 + if (!ut[i].family)
4251 + ut[i].family = family;
4252 +
4253 ++ if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
4254 ++ (ut[i].family != prev_family))
4255 ++ return -EINVAL;
4256 ++
4257 ++ prev_family = ut[i].family;
4258 ++
4259 + switch (ut[i].family) {
4260 + case AF_INET:
4261 + break;
4262 +@@ -1443,6 +1452,21 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
4263 + default:
4264 + return -EINVAL;
4265 + }
4266 ++
4267 ++ switch (ut[i].id.proto) {
4268 ++ case IPPROTO_AH:
4269 ++ case IPPROTO_ESP:
4270 ++ case IPPROTO_COMP:
4271 ++#if IS_ENABLED(CONFIG_IPV6)
4272 ++ case IPPROTO_ROUTING:
4273 ++ case IPPROTO_DSTOPTS:
4274 ++#endif
4275 ++ case IPSEC_PROTO_ANY:
4276 ++ break;
4277 ++ default:
4278 ++ return -EINVAL;
4279 ++ }
4280 ++
4281 + }
4282 +
4283 + return 0;
4284 +diff --git a/scripts/kernel-doc b/scripts/kernel-doc
4285 +index 8323ff9dec71..8bcf90407929 100755
4286 +--- a/scripts/kernel-doc
4287 ++++ b/scripts/kernel-doc
4288 +@@ -3216,4 +3216,4 @@ if ($verbose && $warnings) {
4289 + print STDERR "$warnings warnings\n";
4290 + }
4291 +
4292 +-exit($errors);
4293 ++exit($output_mode eq "none" ? 0 : $errors);
4294 +diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
4295 +index e4a1c0dc561a..c9c031e3d1ae 100644
4296 +--- a/security/selinux/ss/services.c
4297 ++++ b/security/selinux/ss/services.c
4298 +@@ -867,6 +867,9 @@ int security_bounded_transition(u32 old_sid, u32 new_sid)
4299 + int index;
4300 + int rc;
4301 +
4302 ++ if (!ss_initialized)
4303 ++ return 0;
4304 ++
4305 + read_lock(&policy_rwlock);
4306 +
4307 + rc = -EINVAL;
4308 +@@ -1413,27 +1416,25 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
4309 + if (!scontext_len)
4310 + return -EINVAL;
4311 +
4312 ++ /* Copy the string to allow changes and ensure a NUL terminator */
4313 ++ scontext2 = kmemdup_nul(scontext, scontext_len, gfp_flags);
4314 ++ if (!scontext2)
4315 ++ return -ENOMEM;
4316 ++
4317 + if (!ss_initialized) {
4318 + int i;
4319 +
4320 + for (i = 1; i < SECINITSID_NUM; i++) {
4321 +- if (!strcmp(initial_sid_to_string[i], scontext)) {
4322 ++ if (!strcmp(initial_sid_to_string[i], scontext2)) {
4323 + *sid = i;
4324 +- return 0;
4325 ++ goto out;
4326 + }
4327 + }
4328 + *sid = SECINITSID_KERNEL;
4329 +- return 0;
4330 ++ goto out;
4331 + }
4332 + *sid = SECSID_NULL;
4333 +
4334 +- /* Copy the string so that we can modify the copy as we parse it. */
4335 +- scontext2 = kmalloc(scontext_len + 1, gfp_flags);
4336 +- if (!scontext2)
4337 +- return -ENOMEM;
4338 +- memcpy(scontext2, scontext, scontext_len);
4339 +- scontext2[scontext_len] = 0;
4340 +-
4341 + if (force) {
4342 + /* Save another copy for storing in uninterpreted form */
4343 + rc = -ENOMEM;
4344 +diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c
4345 +index ee5055d47d13..a89fe9b6463b 100644
4346 +--- a/sound/soc/rockchip/rockchip_spdif.c
4347 ++++ b/sound/soc/rockchip/rockchip_spdif.c
4348 +@@ -322,26 +322,30 @@ static int rk_spdif_probe(struct platform_device *pdev)
4349 + spdif->mclk = devm_clk_get(&pdev->dev, "mclk");
4350 + if (IS_ERR(spdif->mclk)) {
4351 + dev_err(&pdev->dev, "Can't retrieve rk_spdif master clock\n");
4352 +- return PTR_ERR(spdif->mclk);
4353 ++ ret = PTR_ERR(spdif->mclk);
4354 ++ goto err_disable_hclk;
4355 + }
4356 +
4357 + ret = clk_prepare_enable(spdif->mclk);
4358 + if (ret) {
4359 + dev_err(spdif->dev, "clock enable failed %d\n", ret);
4360 +- return ret;
4361 ++ goto err_disable_clocks;
4362 + }
4363 +
4364 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4365 + regs = devm_ioremap_resource(&pdev->dev, res);
4366 +- if (IS_ERR(regs))
4367 +- return PTR_ERR(regs);
4368 ++ if (IS_ERR(regs)) {
4369 ++ ret = PTR_ERR(regs);
4370 ++ goto err_disable_clocks;
4371 ++ }
4372 +
4373 + spdif->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "hclk", regs,
4374 + &rk_spdif_regmap_config);
4375 + if (IS_ERR(spdif->regmap)) {
4376 + dev_err(&pdev->dev,
4377 + "Failed to initialise managed register map\n");
4378 +- return PTR_ERR(spdif->regmap);
4379 ++ ret = PTR_ERR(spdif->regmap);
4380 ++ goto err_disable_clocks;
4381 + }
4382 +
4383 + spdif->playback_dma_data.addr = res->start + SPDIF_SMPDR;
4384 +@@ -373,6 +377,10 @@ static int rk_spdif_probe(struct platform_device *pdev)
4385 +
4386 + err_pm_runtime:
4387 + pm_runtime_disable(&pdev->dev);
4388 ++err_disable_clocks:
4389 ++ clk_disable_unprepare(spdif->mclk);
4390 ++err_disable_hclk:
4391 ++ clk_disable_unprepare(spdif->hclk);
4392 +
4393 + return ret;
4394 + }
4395 +diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
4396 +index 2aef7c00cca1..f0fb85fda42d 100644
4397 +--- a/sound/soc/sh/rcar/ssi.c
4398 ++++ b/sound/soc/sh/rcar/ssi.c
4399 +@@ -449,25 +449,29 @@ static bool rsnd_ssi_pointer_update(struct rsnd_mod *mod,
4400 + int byte)
4401 + {
4402 + struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
4403 ++ bool ret = false;
4404 ++ int byte_pos;
4405 +
4406 +- ssi->byte_pos += byte;
4407 ++ byte_pos = ssi->byte_pos + byte;
4408 +
4409 +- if (ssi->byte_pos >= ssi->next_period_byte) {
4410 ++ if (byte_pos >= ssi->next_period_byte) {
4411 + struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
4412 +
4413 + ssi->period_pos++;
4414 + ssi->next_period_byte += ssi->byte_per_period;
4415 +
4416 + if (ssi->period_pos >= runtime->periods) {
4417 +- ssi->byte_pos = 0;
4418 ++ byte_pos = 0;
4419 + ssi->period_pos = 0;
4420 + ssi->next_period_byte = ssi->byte_per_period;
4421 + }
4422 +
4423 +- return true;
4424 ++ ret = true;
4425 + }
4426 +
4427 +- return false;
4428 ++ WRITE_ONCE(ssi->byte_pos, byte_pos);
4429 ++
4430 ++ return ret;
4431 + }
4432 +
4433 + /*
4434 +@@ -838,7 +842,7 @@ static int rsnd_ssi_pointer(struct rsnd_mod *mod,
4435 + struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
4436 + struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
4437 +
4438 +- *pointer = bytes_to_frames(runtime, ssi->byte_pos);
4439 ++ *pointer = bytes_to_frames(runtime, READ_ONCE(ssi->byte_pos));
4440 +
4441 + return 0;
4442 + }
4443 +diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c
4444 +index 070a6880980e..c60a57797640 100644
4445 +--- a/sound/soc/ux500/mop500.c
4446 ++++ b/sound/soc/ux500/mop500.c
4447 +@@ -163,3 +163,7 @@ static struct platform_driver snd_soc_mop500_driver = {
4448 + };
4449 +
4450 + module_platform_driver(snd_soc_mop500_driver);
4451 ++
4452 ++MODULE_LICENSE("GPL v2");
4453 ++MODULE_DESCRIPTION("ASoC MOP500 board driver");
4454 ++MODULE_AUTHOR("Ola Lilja");
4455 +diff --git a/sound/soc/ux500/ux500_pcm.c b/sound/soc/ux500/ux500_pcm.c
4456 +index f12c01dddc8d..d35ba7700f46 100644
4457 +--- a/sound/soc/ux500/ux500_pcm.c
4458 ++++ b/sound/soc/ux500/ux500_pcm.c
4459 +@@ -165,3 +165,8 @@ int ux500_pcm_unregister_platform(struct platform_device *pdev)
4460 + return 0;
4461 + }
4462 + EXPORT_SYMBOL_GPL(ux500_pcm_unregister_platform);
4463 ++
4464 ++MODULE_AUTHOR("Ola Lilja");
4465 ++MODULE_AUTHOR("Roger Nilsson");
4466 ++MODULE_DESCRIPTION("ASoC UX500 driver");
4467 ++MODULE_LICENSE("GPL v2");
4468 +diff --git a/sound/usb/bcd2000/bcd2000.c b/sound/usb/bcd2000/bcd2000.c
4469 +index 7371e5b06035..a6408209d7f1 100644
4470 +--- a/sound/usb/bcd2000/bcd2000.c
4471 ++++ b/sound/usb/bcd2000/bcd2000.c
4472 +@@ -342,6 +342,13 @@ static int bcd2000_init_midi(struct bcd2000 *bcd2k)
4473 + bcd2k->midi_out_buf, BUFSIZE,
4474 + bcd2000_output_complete, bcd2k, 1);
4475 +
4476 ++ /* sanity checks of EPs before actually submitting */
4477 ++ if (usb_urb_ep_type_check(bcd2k->midi_in_urb) ||
4478 ++ usb_urb_ep_type_check(bcd2k->midi_out_urb)) {
4479 ++ dev_err(&bcd2k->dev->dev, "invalid MIDI EP\n");
4480 ++ return -EINVAL;
4481 ++ }
4482 ++
4483 + bcd2000_init_device(bcd2k);
4484 +
4485 + return 0;
4486 +diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
4487 +index d8409d9ae55b..d55ca48de3ea 100644
4488 +--- a/sound/usb/caiaq/device.c
4489 ++++ b/sound/usb/caiaq/device.c
4490 +@@ -461,6 +461,13 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
4491 + cdev->midi_out_buf, EP1_BUFSIZE,
4492 + snd_usb_caiaq_midi_output_done, cdev);
4493 +
4494 ++ /* sanity checks of EPs before actually submitting */
4495 ++ if (usb_urb_ep_type_check(&cdev->ep1_in_urb) ||
4496 ++ usb_urb_ep_type_check(&cdev->midi_out_urb)) {
4497 ++ dev_err(dev, "invalid EPs\n");
4498 ++ return -EINVAL;
4499 ++ }
4500 ++
4501 + init_waitqueue_head(&cdev->ep1_wait_queue);
4502 + init_waitqueue_head(&cdev->prepare_wait_queue);
4503 +
4504 +diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
4505 +index c8f723c3a033..167aebf8276e 100644
4506 +--- a/sound/usb/line6/driver.c
4507 ++++ b/sound/usb/line6/driver.c
4508 +@@ -78,6 +78,13 @@ static int line6_start_listen(struct usb_line6 *line6)
4509 + line6->buffer_listen, LINE6_BUFSIZE_LISTEN,
4510 + line6_data_received, line6);
4511 + }
4512 ++
4513 ++ /* sanity checks of EP before actually submitting */
4514 ++ if (usb_urb_ep_type_check(line6->urb_listen)) {
4515 ++ dev_err(line6->ifcdev, "invalid control EP\n");
4516 ++ return -EINVAL;
4517 ++ }
4518 ++
4519 + line6->urb_listen->actual_length = 0;
4520 + err = usb_submit_urb(line6->urb_listen, GFP_ATOMIC);
4521 + return err;
4522 +diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
4523 +index d95fdcc26f4b..944070e98a2c 100644
4524 +--- a/tools/perf/bench/numa.c
4525 ++++ b/tools/perf/bench/numa.c
4526 +@@ -216,6 +216,47 @@ static const char * const numa_usage[] = {
4527 + NULL
4528 + };
4529 +
4530 ++/*
4531 ++ * To get number of numa nodes present.
4532 ++ */
4533 ++static int nr_numa_nodes(void)
4534 ++{
4535 ++ int i, nr_nodes = 0;
4536 ++
4537 ++ for (i = 0; i < g->p.nr_nodes; i++) {
4538 ++ if (numa_bitmask_isbitset(numa_nodes_ptr, i))
4539 ++ nr_nodes++;
4540 ++ }
4541 ++
4542 ++ return nr_nodes;
4543 ++}
4544 ++
4545 ++/*
4546 ++ * To check if given numa node is present.
4547 ++ */
4548 ++static int is_node_present(int node)
4549 ++{
4550 ++ return numa_bitmask_isbitset(numa_nodes_ptr, node);
4551 ++}
4552 ++
4553 ++/*
4554 ++ * To check given numa node has cpus.
4555 ++ */
4556 ++static bool node_has_cpus(int node)
4557 ++{
4558 ++ struct bitmask *cpu = numa_allocate_cpumask();
4559 ++ unsigned int i;
4560 ++
4561 ++ if (cpu && !numa_node_to_cpus(node, cpu)) {
4562 ++ for (i = 0; i < cpu->size; i++) {
4563 ++ if (numa_bitmask_isbitset(cpu, i))
4564 ++ return true;
4565 ++ }
4566 ++ }
4567 ++
4568 ++ return false; /* lets fall back to nocpus safely */
4569 ++}
4570 ++
4571 + static cpu_set_t bind_to_cpu(int target_cpu)
4572 + {
4573 + cpu_set_t orig_mask, mask;
4574 +@@ -244,12 +285,12 @@ static cpu_set_t bind_to_cpu(int target_cpu)
4575 +
4576 + static cpu_set_t bind_to_node(int target_node)
4577 + {
4578 +- int cpus_per_node = g->p.nr_cpus/g->p.nr_nodes;
4579 ++ int cpus_per_node = g->p.nr_cpus / nr_numa_nodes();
4580 + cpu_set_t orig_mask, mask;
4581 + int cpu;
4582 + int ret;
4583 +
4584 +- BUG_ON(cpus_per_node*g->p.nr_nodes != g->p.nr_cpus);
4585 ++ BUG_ON(cpus_per_node * nr_numa_nodes() != g->p.nr_cpus);
4586 + BUG_ON(!cpus_per_node);
4587 +
4588 + ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
4589 +@@ -649,7 +690,7 @@ static int parse_setup_node_list(void)
4590 + int i;
4591 +
4592 + for (i = 0; i < mul; i++) {
4593 +- if (t >= g->p.nr_tasks) {
4594 ++ if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
4595 + printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
4596 + goto out;
4597 + }
4598 +@@ -964,6 +1005,8 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
4599 + sum = 0;
4600 +
4601 + for (node = 0; node < g->p.nr_nodes; node++) {
4602 ++ if (!is_node_present(node))
4603 ++ continue;
4604 + nr = nodes[node];
4605 + nr_min = min(nr, nr_min);
4606 + nr_max = max(nr, nr_max);
4607 +@@ -984,8 +1027,11 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
4608 + process_groups = 0;
4609 +
4610 + for (node = 0; node < g->p.nr_nodes; node++) {
4611 +- int processes = count_node_processes(node);
4612 ++ int processes;
4613 +
4614 ++ if (!is_node_present(node))
4615 ++ continue;
4616 ++ processes = count_node_processes(node);
4617 + nr = nodes[node];
4618 + tprintf(" %2d/%-2d", nr, processes);
4619 +
4620 +@@ -1291,7 +1337,7 @@ static void print_summary(void)
4621 +
4622 + printf("\n ###\n");
4623 + printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
4624 +- g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", g->p.nr_nodes, g->p.nr_cpus);
4625 ++ g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus);
4626 + printf(" # %5dx %5ldMB global shared mem operations\n",
4627 + g->p.nr_loops, g->p.bytes_global/1024/1024);
4628 + printf(" # %5dx %5ldMB process shared mem operations\n",
4629 +diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
4630 +index bd1fedef3d1c..a0f7ed2b869b 100644
4631 +--- a/tools/perf/builtin-help.c
4632 ++++ b/tools/perf/builtin-help.c
4633 +@@ -284,7 +284,7 @@ static int perf_help_config(const char *var, const char *value, void *cb)
4634 + add_man_viewer(value);
4635 + return 0;
4636 + }
4637 +- if (!strstarts(var, "man."))
4638 ++ if (strstarts(var, "man."))
4639 + return add_man_viewer_info(var, value);
4640 +
4641 + return 0;
4642 +@@ -314,7 +314,7 @@ static const char *cmd_to_page(const char *perf_cmd)
4643 +
4644 + if (!perf_cmd)
4645 + return "perf";
4646 +- else if (!strstarts(perf_cmd, "perf"))
4647 ++ else if (strstarts(perf_cmd, "perf"))
4648 + return perf_cmd;
4649 +
4650 + return asprintf(&s, "perf-%s", perf_cmd) < 0 ? NULL : s;
4651 +diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
4652 +index ee954bde7e3e..dd57978b2096 100644
4653 +--- a/tools/perf/builtin-top.c
4654 ++++ b/tools/perf/builtin-top.c
4655 +@@ -77,6 +77,7 @@
4656 + #include "sane_ctype.h"
4657 +
4658 + static volatile int done;
4659 ++static volatile int resize;
4660 +
4661 + #define HEADER_LINE_NR 5
4662 +
4663 +@@ -86,10 +87,13 @@ static void perf_top__update_print_entries(struct perf_top *top)
4664 + }
4665 +
4666 + static void perf_top__sig_winch(int sig __maybe_unused,
4667 +- siginfo_t *info __maybe_unused, void *arg)
4668 ++ siginfo_t *info __maybe_unused, void *arg __maybe_unused)
4669 + {
4670 +- struct perf_top *top = arg;
4671 ++ resize = 1;
4672 ++}
4673 +
4674 ++static void perf_top__resize(struct perf_top *top)
4675 ++{
4676 + get_term_dimensions(&top->winsize);
4677 + perf_top__update_print_entries(top);
4678 + }
4679 +@@ -477,7 +481,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
4680 + .sa_sigaction = perf_top__sig_winch,
4681 + .sa_flags = SA_SIGINFO,
4682 + };
4683 +- perf_top__sig_winch(SIGWINCH, NULL, top);
4684 ++ perf_top__resize(top);
4685 + sigaction(SIGWINCH, &act, NULL);
4686 + } else {
4687 + signal(SIGWINCH, SIG_DFL);
4688 +@@ -1022,6 +1026,11 @@ static int __cmd_top(struct perf_top *top)
4689 +
4690 + if (hits == top->samples)
4691 + ret = perf_evlist__poll(top->evlist, 100);
4692 ++
4693 ++ if (resize) {
4694 ++ perf_top__resize(top);
4695 ++ resize = 0;
4696 ++ }
4697 + }
4698 +
4699 + ret = 0;
4700 +diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
4701 +index 2e68c5f120da..2a9ef080efd0 100755
4702 +--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
4703 ++++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
4704 +@@ -17,8 +17,10 @@ skip_if_no_perf_probe || exit 2
4705 + file=$(mktemp /tmp/temporary_file.XXXXX)
4706 +
4707 + trace_open_vfs_getname() {
4708 +- perf trace -e open touch $file 2>&1 | \
4709 +- egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open\(filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
4710 ++ test "$(uname -m)" = s390x && { svc="openat"; txt="dfd: +CWD, +"; }
4711 ++
4712 ++ perf trace -e ${svc:-open} touch $file 2>&1 | \
4713 ++ egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ ${svc:-open}\(${txt}filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
4714 + }
4715 +
4716 +
4717 +diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
4718 +index bc4a7344e274..89c8e1604ca7 100644
4719 +--- a/tools/perf/tests/task-exit.c
4720 ++++ b/tools/perf/tests/task-exit.c
4721 +@@ -84,7 +84,11 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
4722 +
4723 + evsel = perf_evlist__first(evlist);
4724 + evsel->attr.task = 1;
4725 ++#ifdef __s390x__
4726 ++ evsel->attr.sample_freq = 1000000;
4727 ++#else
4728 + evsel->attr.sample_freq = 1;
4729 ++#endif
4730 + evsel->attr.inherit = 0;
4731 + evsel->attr.watermark = 0;
4732 + evsel->attr.wakeup_events = 1;
4733 +diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
4734 +index aa66791b1bfc..41d415707264 100644
4735 +--- a/tools/perf/util/annotate.c
4736 ++++ b/tools/perf/util/annotate.c
4737 +@@ -166,7 +166,7 @@ static void ins__delete(struct ins_operands *ops)
4738 + static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
4739 + struct ins_operands *ops)
4740 + {
4741 +- return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->raw);
4742 ++ return scnprintf(bf, size, "%-6s %s", ins->name, ops->raw);
4743 + }
4744 +
4745 + int ins__scnprintf(struct ins *ins, char *bf, size_t size,
4746 +@@ -231,12 +231,12 @@ static int call__scnprintf(struct ins *ins, char *bf, size_t size,
4747 + struct ins_operands *ops)
4748 + {
4749 + if (ops->target.name)
4750 +- return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name);
4751 ++ return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.name);
4752 +
4753 + if (ops->target.addr == 0)
4754 + return ins__raw_scnprintf(ins, bf, size, ops);
4755 +
4756 +- return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr);
4757 ++ return scnprintf(bf, size, "%-6s *%" PRIx64, ins->name, ops->target.addr);
4758 + }
4759 +
4760 + static struct ins_ops call_ops = {
4761 +@@ -300,7 +300,7 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
4762 + c++;
4763 + }
4764 +
4765 +- return scnprintf(bf, size, "%-6.6s %.*s%" PRIx64,
4766 ++ return scnprintf(bf, size, "%-6s %.*s%" PRIx64,
4767 + ins->name, c ? c - ops->raw : 0, ops->raw,
4768 + ops->target.offset);
4769 + }
4770 +@@ -373,7 +373,7 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
4771 + if (ops->locked.ins.ops == NULL)
4772 + return ins__raw_scnprintf(ins, bf, size, ops);
4773 +
4774 +- printed = scnprintf(bf, size, "%-6.6s ", ins->name);
4775 ++ printed = scnprintf(bf, size, "%-6s ", ins->name);
4776 + return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
4777 + size - printed, ops->locked.ops);
4778 + }
4779 +@@ -449,7 +449,7 @@ static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map *m
4780 + static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
4781 + struct ins_operands *ops)
4782 + {
4783 +- return scnprintf(bf, size, "%-6.6s %s,%s", ins->name,
4784 ++ return scnprintf(bf, size, "%-6s %s,%s", ins->name,
4785 + ops->source.name ?: ops->source.raw,
4786 + ops->target.name ?: ops->target.raw);
4787 + }
4788 +@@ -489,7 +489,7 @@ static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops
4789 + static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
4790 + struct ins_operands *ops)
4791 + {
4792 +- return scnprintf(bf, size, "%-6.6s %s", ins->name,
4793 ++ return scnprintf(bf, size, "%-6s %s", ins->name,
4794 + ops->target.name ?: ops->target.raw);
4795 + }
4796 +
4797 +@@ -501,7 +501,7 @@ static struct ins_ops dec_ops = {
4798 + static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
4799 + struct ins_operands *ops __maybe_unused)
4800 + {
4801 +- return scnprintf(bf, size, "%-6.6s", "nop");
4802 ++ return scnprintf(bf, size, "%-6s", "nop");
4803 + }
4804 +
4805 + static struct ins_ops nop_ops = {
4806 +@@ -925,7 +925,7 @@ void disasm_line__free(struct disasm_line *dl)
4807 + int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
4808 + {
4809 + if (raw || !dl->ins.ops)
4810 +- return scnprintf(bf, size, "%-6.6s %s", dl->ins.name, dl->ops.raw);
4811 ++ return scnprintf(bf, size, "%-6s %s", dl->ins.name, dl->ops.raw);
4812 +
4813 + return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
4814 + }
4815 +diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
4816 +index 0dccdb89572c..1f6beb3d0c68 100644
4817 +--- a/tools/perf/util/evsel.c
4818 ++++ b/tools/perf/util/evsel.c
4819 +@@ -733,12 +733,16 @@ static void apply_config_terms(struct perf_evsel *evsel,
4820 + list_for_each_entry(term, config_terms, list) {
4821 + switch (term->type) {
4822 + case PERF_EVSEL__CONFIG_TERM_PERIOD:
4823 +- attr->sample_period = term->val.period;
4824 +- attr->freq = 0;
4825 ++ if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
4826 ++ attr->sample_period = term->val.period;
4827 ++ attr->freq = 0;
4828 ++ }
4829 + break;
4830 + case PERF_EVSEL__CONFIG_TERM_FREQ:
4831 +- attr->sample_freq = term->val.freq;
4832 +- attr->freq = 1;
4833 ++ if (!(term->weak && opts->user_freq != UINT_MAX)) {
4834 ++ attr->sample_freq = term->val.freq;
4835 ++ attr->freq = 1;
4836 ++ }
4837 + break;
4838 + case PERF_EVSEL__CONFIG_TERM_TIME:
4839 + if (term->val.time)
4840 +diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
4841 +index b4df79d72329..3ed0e9b42378 100644
4842 +--- a/tools/perf/util/evsel.h
4843 ++++ b/tools/perf/util/evsel.h
4844 +@@ -67,6 +67,7 @@ struct perf_evsel_config_term {
4845 + bool overwrite;
4846 + char *branch;
4847 + } val;
4848 ++ bool weak;
4849 + };
4850 +
4851 + /** struct perf_evsel - event selector
4852 +diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
4853 +index 56694e3409ea..b25635e945f3 100644
4854 +--- a/tools/perf/util/parse-events.c
4855 ++++ b/tools/perf/util/parse-events.c
4856 +@@ -1115,6 +1115,7 @@ do { \
4857 + INIT_LIST_HEAD(&__t->list); \
4858 + __t->type = PERF_EVSEL__CONFIG_TERM_ ## __type; \
4859 + __t->val.__name = __val; \
4860 ++ __t->weak = term->weak; \
4861 + list_add_tail(&__t->list, head_terms); \
4862 + } while (0)
4863 +
4864 +@@ -2395,6 +2396,7 @@ static int new_term(struct parse_events_term **_term,
4865 +
4866 + *term = *temp;
4867 + INIT_LIST_HEAD(&term->list);
4868 ++ term->weak = false;
4869 +
4870 + switch (term->type_val) {
4871 + case PARSE_EVENTS__TERM_TYPE_NUM:
4872 +diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
4873 +index eed50b54bab3..458b72225a0a 100644
4874 +--- a/tools/perf/util/parse-events.h
4875 ++++ b/tools/perf/util/parse-events.h
4876 +@@ -101,6 +101,9 @@ struct parse_events_term {
4877 + /* error string indexes for within parsed string */
4878 + int err_term;
4879 + int err_val;
4880 ++
4881 ++ /* Coming from implicit alias */
4882 ++ bool weak;
4883 + };
4884 +
4885 + struct parse_events_error {
4886 +diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
4887 +index b10b35a63138..9dff41bcc776 100644
4888 +--- a/tools/perf/util/pmu.c
4889 ++++ b/tools/perf/util/pmu.c
4890 +@@ -404,6 +404,11 @@ static int pmu_alias_terms(struct perf_pmu_alias *alias,
4891 + parse_events_terms__purge(&list);
4892 + return ret;
4893 + }
4894 ++ /*
4895 ++ * Weak terms don't override command line options,
4896 ++ * which we don't want for implicit terms in aliases.
4897 ++ */
4898 ++ cloned->weak = true;
4899 + list_add_tail(&cloned->list, &list);
4900 + }
4901 + list_splice(&list, terms);
4902 +diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
4903 +index 16299939d3ff..c55d265489ca 100644
4904 +--- a/tools/testing/selftests/bpf/test_verifier.c
4905 ++++ b/tools/testing/selftests/bpf/test_verifier.c
4906 +@@ -6534,7 +6534,7 @@ static struct bpf_test tests[] = {
4907 + BPF_JMP_IMM(BPF_JA, 0, 0, -7),
4908 + },
4909 + .fixup_map1 = { 4 },
4910 +- .errstr = "unbounded min value",
4911 ++ .errstr = "R0 invalid mem access 'inv'",
4912 + .result = REJECT,
4913 + },
4914 + {
4915 +@@ -7714,6 +7714,127 @@ static struct bpf_test tests[] = {
4916 + .result = REJECT,
4917 + .prog_type = BPF_PROG_TYPE_XDP,
4918 + },
4919 ++ {
4920 ++ "check deducing bounds from const, 1",
4921 ++ .insns = {
4922 ++ BPF_MOV64_IMM(BPF_REG_0, 1),
4923 ++ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
4924 ++ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
4925 ++ BPF_EXIT_INSN(),
4926 ++ },
4927 ++ .result = REJECT,
4928 ++ .errstr = "R0 tried to subtract pointer from scalar",
4929 ++ },
4930 ++ {
4931 ++ "check deducing bounds from const, 2",
4932 ++ .insns = {
4933 ++ BPF_MOV64_IMM(BPF_REG_0, 1),
4934 ++ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
4935 ++ BPF_EXIT_INSN(),
4936 ++ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
4937 ++ BPF_EXIT_INSN(),
4938 ++ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
4939 ++ BPF_EXIT_INSN(),
4940 ++ },
4941 ++ .result = ACCEPT,
4942 ++ },
4943 ++ {
4944 ++ "check deducing bounds from const, 3",
4945 ++ .insns = {
4946 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
4947 ++ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
4948 ++ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
4949 ++ BPF_EXIT_INSN(),
4950 ++ },
4951 ++ .result = REJECT,
4952 ++ .errstr = "R0 tried to subtract pointer from scalar",
4953 ++ },
4954 ++ {
4955 ++ "check deducing bounds from const, 4",
4956 ++ .insns = {
4957 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
4958 ++ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
4959 ++ BPF_EXIT_INSN(),
4960 ++ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
4961 ++ BPF_EXIT_INSN(),
4962 ++ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
4963 ++ BPF_EXIT_INSN(),
4964 ++ },
4965 ++ .result = ACCEPT,
4966 ++ },
4967 ++ {
4968 ++ "check deducing bounds from const, 5",
4969 ++ .insns = {
4970 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
4971 ++ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
4972 ++ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
4973 ++ BPF_EXIT_INSN(),
4974 ++ },
4975 ++ .result = REJECT,
4976 ++ .errstr = "R0 tried to subtract pointer from scalar",
4977 ++ },
4978 ++ {
4979 ++ "check deducing bounds from const, 6",
4980 ++ .insns = {
4981 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
4982 ++ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
4983 ++ BPF_EXIT_INSN(),
4984 ++ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
4985 ++ BPF_EXIT_INSN(),
4986 ++ },
4987 ++ .result = REJECT,
4988 ++ .errstr = "R0 tried to subtract pointer from scalar",
4989 ++ },
4990 ++ {
4991 ++ "check deducing bounds from const, 7",
4992 ++ .insns = {
4993 ++ BPF_MOV64_IMM(BPF_REG_0, ~0),
4994 ++ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
4995 ++ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
4996 ++ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4997 ++ offsetof(struct __sk_buff, mark)),
4998 ++ BPF_EXIT_INSN(),
4999 ++ },
5000 ++ .result = REJECT,
5001 ++ .errstr = "dereference of modified ctx ptr",
5002 ++ },
5003 ++ {
5004 ++ "check deducing bounds from const, 8",
5005 ++ .insns = {
5006 ++ BPF_MOV64_IMM(BPF_REG_0, ~0),
5007 ++ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
5008 ++ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
5009 ++ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5010 ++ offsetof(struct __sk_buff, mark)),
5011 ++ BPF_EXIT_INSN(),
5012 ++ },
5013 ++ .result = REJECT,
5014 ++ .errstr = "dereference of modified ctx ptr",
5015 ++ },
5016 ++ {
5017 ++ "check deducing bounds from const, 9",
5018 ++ .insns = {
5019 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
5020 ++ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
5021 ++ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
5022 ++ BPF_EXIT_INSN(),
5023 ++ },
5024 ++ .result = REJECT,
5025 ++ .errstr = "R0 tried to subtract pointer from scalar",
5026 ++ },
5027 ++ {
5028 ++ "check deducing bounds from const, 10",
5029 ++ .insns = {
5030 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
5031 ++ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
5032 ++ /* Marks reg as unknown. */
5033 ++ BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
5034 ++ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
5035 ++ BPF_EXIT_INSN(),
5036 ++ },
5037 ++ .result = REJECT,
5038 ++ .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
5039 ++ },
5040 + {
5041 + "XDP pkt read, pkt_end <= pkt_data', bad access 2",
5042 + .insns = {
5043 +diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
5044 +index 8b6c42dc1aa9..1366462a3ab2 100644
5045 +--- a/virt/kvm/arm/arm.c
5046 ++++ b/virt/kvm/arm/arm.c
5047 +@@ -1453,7 +1453,7 @@ int kvm_arch_init(void *opaque)
5048 + bool in_hyp_mode;
5049 +
5050 + if (!is_hyp_mode_available()) {
5051 +- kvm_err("HYP mode not available\n");
5052 ++ kvm_info("HYP mode not available\n");
5053 + return -ENODEV;
5054 + }
5055 +
5056 +diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
5057 +index fed717e07938..da53c6e7d688 100644
5058 +--- a/virt/kvm/arm/vgic/vgic.c
5059 ++++ b/virt/kvm/arm/vgic/vgic.c
5060 +@@ -454,6 +454,7 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
5061 + int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
5062 + {
5063 + struct vgic_irq *irq;
5064 ++ unsigned long flags;
5065 + int ret = 0;
5066 +
5067 + if (!vgic_initialized(vcpu->kvm))
5068 +@@ -464,12 +465,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
5069 + return -EINVAL;
5070 +
5071 + irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
5072 +- spin_lock(&irq->irq_lock);
5073 ++ spin_lock_irqsave(&irq->irq_lock, flags);
5074 + if (irq->owner && irq->owner != owner)
5075 + ret = -EEXIST;
5076 + else
5077 + irq->owner = owner;
5078 +- spin_unlock(&irq->irq_lock);
5079 ++ spin_unlock_irqrestore(&irq->irq_lock, flags);
5080 +
5081 + return ret;
5082 + }