Gentoo Archives: gentoo-commits

From: "Anthony G. Basile" <blueness@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.18 commit in: /
Date: Wed, 28 Jan 2015 22:19:01
Message-Id: 1422483655.425d26faaf8ad9dcc54756902763a1388e5985d3.blueness@gentoo
1 commit: 425d26faaf8ad9dcc54756902763a1388e5985d3
2 Author: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
3 AuthorDate: Wed Jan 28 22:20:55 2015 +0000
4 Commit: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
5 CommitDate: Wed Jan 28 22:20:55 2015 +0000
6 URL: http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=425d26fa
7
8 Linux 3.18.4 patch
9
10 ---
11 0000_README | 4 +
12 1003_linux-3.18.4.patch | 7410 +++++++++++++++++++++++++++++++++++++++++++++++
13 2 files changed, 7414 insertions(+)
14
15 diff --git a/0000_README b/0000_README
16 index 0df447d..6e4c6e3 100644
17 --- a/0000_README
18 +++ b/0000_README
19 @@ -55,6 +55,10 @@ Patch: 1002_linux-3.18.3.patch
20 From: http://www.kernel.org
21 Desc: Linux 3.18.3
22
23 +Patch: 1003_linux-3.18.4.patch
24 +From: http://www.kernel.org
25 +Desc: Linux 3.18.4
26 +
27 Patch: 1500_XATTR_USER_PREFIX.patch
28 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
29 Desc: Support for namespace user.pax.* on tmpfs.
30
31 diff --git a/1003_linux-3.18.4.patch b/1003_linux-3.18.4.patch
32 new file mode 100644
33 index 0000000..23e256c
34 --- /dev/null
35 +++ b/1003_linux-3.18.4.patch
36 @@ -0,0 +1,7410 @@
37 +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
38 +index 479f33204a37..f4c71d4a9ba3 100644
39 +--- a/Documentation/kernel-parameters.txt
40 ++++ b/Documentation/kernel-parameters.txt
41 +@@ -1270,6 +1270,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
42 + i8042.notimeout [HW] Ignore timeout condition signalled by controller
43 + i8042.reset [HW] Reset the controller during init and cleanup
44 + i8042.unlock [HW] Unlock (ignore) the keylock
45 ++ i8042.kbdreset [HW] Reset device connected to KBD port
46 +
47 + i810= [HW,DRM]
48 +
49 +diff --git a/Makefile b/Makefile
50 +index 91cfe8d5ee06..4e9328491c1e 100644
51 +--- a/Makefile
52 ++++ b/Makefile
53 +@@ -1,6 +1,6 @@
54 + VERSION = 3
55 + PATCHLEVEL = 18
56 +-SUBLEVEL = 3
57 ++SUBLEVEL = 4
58 + EXTRAVERSION =
59 + NAME = Diseased Newt
60 +
61 +diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
62 +index cfaedd9c61c9..1c169dc74ad1 100644
63 +--- a/arch/arc/boot/dts/nsimosci.dts
64 ++++ b/arch/arc/boot/dts/nsimosci.dts
65 +@@ -20,7 +20,7 @@
66 + /* this is for console on PGU */
67 + /* bootargs = "console=tty0 consoleblank=0"; */
68 + /* this is for console on serial */
69 +- bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
70 ++ bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
71 + };
72 +
73 + aliases {
74 +@@ -41,9 +41,9 @@
75 + #interrupt-cells = <1>;
76 + };
77 +
78 +- uart0: serial@c0000000 {
79 ++ uart0: serial@f0000000 {
80 + compatible = "ns8250";
81 +- reg = <0xc0000000 0x2000>;
82 ++ reg = <0xf0000000 0x2000>;
83 + interrupts = <11>;
84 + clock-frequency = <3686400>;
85 + baud = <115200>;
86 +@@ -52,21 +52,21 @@
87 + no-loopback-test = <1>;
88 + };
89 +
90 +- pgu0: pgu@c9000000 {
91 ++ pgu0: pgu@f9000000 {
92 + compatible = "snps,arcpgufb";
93 +- reg = <0xc9000000 0x400>;
94 ++ reg = <0xf9000000 0x400>;
95 + };
96 +
97 +- ps2: ps2@c9001000 {
98 ++ ps2: ps2@f9001000 {
99 + compatible = "snps,arc_ps2";
100 +- reg = <0xc9000400 0x14>;
101 ++ reg = <0xf9000400 0x14>;
102 + interrupts = <13>;
103 + interrupt-names = "arc_ps2_irq";
104 + };
105 +
106 +- eth0: ethernet@c0003000 {
107 ++ eth0: ethernet@f0003000 {
108 + compatible = "snps,oscilan";
109 +- reg = <0xc0003000 0x44>;
110 ++ reg = <0xf0003000 0x44>;
111 + interrupts = <7>, <8>;
112 + interrupt-names = "rx", "tx";
113 + };
114 +diff --git a/arch/arm/boot/dts/berlin2q-marvell-dmp.dts b/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
115 +index ea1f99b8eed6..45ac1d04cf42 100644
116 +--- a/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
117 ++++ b/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
118 +@@ -30,6 +30,8 @@
119 + };
120 +
121 + &sdhci2 {
122 ++ broken-cd;
123 ++ bus-width = <8>;
124 + non-removable;
125 + status = "okay";
126 + };
127 +diff --git a/arch/arm/boot/dts/berlin2q.dtsi b/arch/arm/boot/dts/berlin2q.dtsi
128 +index 891d56b03922..b805e19ed390 100644
129 +--- a/arch/arm/boot/dts/berlin2q.dtsi
130 ++++ b/arch/arm/boot/dts/berlin2q.dtsi
131 +@@ -83,7 +83,8 @@
132 + compatible = "mrvl,pxav3-mmc";
133 + reg = <0xab1000 0x200>;
134 + interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
135 +- clocks = <&chip CLKID_SDIO1XIN>;
136 ++ clocks = <&chip CLKID_NFC_ECC>, <&chip CLKID_NFC>;
137 ++ clock-names = "io", "core";
138 + status = "disabled";
139 + };
140 +
141 +@@ -314,36 +315,6 @@
142 + interrupt-parent = <&gic>;
143 + interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
144 + };
145 +-
146 +- gpio4: gpio@5000 {
147 +- compatible = "snps,dw-apb-gpio";
148 +- reg = <0x5000 0x400>;
149 +- #address-cells = <1>;
150 +- #size-cells = <0>;
151 +-
152 +- porte: gpio-port@4 {
153 +- compatible = "snps,dw-apb-gpio-port";
154 +- gpio-controller;
155 +- #gpio-cells = <2>;
156 +- snps,nr-gpios = <32>;
157 +- reg = <0>;
158 +- };
159 +- };
160 +-
161 +- gpio5: gpio@c000 {
162 +- compatible = "snps,dw-apb-gpio";
163 +- reg = <0xc000 0x400>;
164 +- #address-cells = <1>;
165 +- #size-cells = <0>;
166 +-
167 +- portf: gpio-port@5 {
168 +- compatible = "snps,dw-apb-gpio-port";
169 +- gpio-controller;
170 +- #gpio-cells = <2>;
171 +- snps,nr-gpios = <32>;
172 +- reg = <0>;
173 +- };
174 +- };
175 + };
176 +
177 + chip: chip-control@ea0000 {
178 +@@ -372,6 +343,21 @@
179 + ranges = <0 0xfc0000 0x10000>;
180 + interrupt-parent = <&sic>;
181 +
182 ++ sm_gpio1: gpio@5000 {
183 ++ compatible = "snps,dw-apb-gpio";
184 ++ reg = <0x5000 0x400>;
185 ++ #address-cells = <1>;
186 ++ #size-cells = <0>;
187 ++
188 ++ portf: gpio-port@5 {
189 ++ compatible = "snps,dw-apb-gpio-port";
190 ++ gpio-controller;
191 ++ #gpio-cells = <2>;
192 ++ snps,nr-gpios = <32>;
193 ++ reg = <0>;
194 ++ };
195 ++ };
196 ++
197 + i2c2: i2c@7000 {
198 + compatible = "snps,designware-i2c";
199 + #address-cells = <1>;
200 +@@ -422,6 +408,21 @@
201 + status = "disabled";
202 + };
203 +
204 ++ sm_gpio0: gpio@c000 {
205 ++ compatible = "snps,dw-apb-gpio";
206 ++ reg = <0xc000 0x400>;
207 ++ #address-cells = <1>;
208 ++ #size-cells = <0>;
209 ++
210 ++ porte: gpio-port@4 {
211 ++ compatible = "snps,dw-apb-gpio-port";
212 ++ gpio-controller;
213 ++ #gpio-cells = <2>;
214 ++ snps,nr-gpios = <32>;
215 ++ reg = <0>;
216 ++ };
217 ++ };
218 ++
219 + sysctrl: pin-controller@d000 {
220 + compatible = "marvell,berlin2q-system-ctrl";
221 + reg = <0xd000 0x100>;
222 +diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
223 +index c6ce6258434f..1bd6c79f445e 100644
224 +--- a/arch/arm/boot/dts/dra7-evm.dts
225 ++++ b/arch/arm/boot/dts/dra7-evm.dts
226 +@@ -399,23 +399,23 @@
227 + };
228 + partition@5 {
229 + label = "QSPI.u-boot-spl-os";
230 +- reg = <0x00140000 0x00010000>;
231 ++ reg = <0x00140000 0x00080000>;
232 + };
233 + partition@6 {
234 + label = "QSPI.u-boot-env";
235 +- reg = <0x00150000 0x00010000>;
236 ++ reg = <0x001c0000 0x00010000>;
237 + };
238 + partition@7 {
239 + label = "QSPI.u-boot-env.backup1";
240 +- reg = <0x00160000 0x0010000>;
241 ++ reg = <0x001d0000 0x0010000>;
242 + };
243 + partition@8 {
244 + label = "QSPI.kernel";
245 +- reg = <0x00170000 0x0800000>;
246 ++ reg = <0x001e0000 0x0800000>;
247 + };
248 + partition@9 {
249 + label = "QSPI.file-system";
250 +- reg = <0x00970000 0x01690000>;
251 ++ reg = <0x009e0000 0x01620000>;
252 + };
253 + };
254 + };
255 +diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
256 +index 58d3c3cf2923..d238676a9107 100644
257 +--- a/arch/arm/boot/dts/imx25.dtsi
258 ++++ b/arch/arm/boot/dts/imx25.dtsi
259 +@@ -162,7 +162,7 @@
260 + #size-cells = <0>;
261 + compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
262 + reg = <0x43fa4000 0x4000>;
263 +- clocks = <&clks 62>, <&clks 62>;
264 ++ clocks = <&clks 78>, <&clks 78>;
265 + clock-names = "ipg", "per";
266 + interrupts = <14>;
267 + status = "disabled";
268 +diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
269 +index 56569cecaa78..649befeb2cf9 100644
270 +--- a/arch/arm/boot/dts/imx51-babbage.dts
271 ++++ b/arch/arm/boot/dts/imx51-babbage.dts
272 +@@ -127,24 +127,12 @@
273 + #address-cells = <1>;
274 + #size-cells = <0>;
275 +
276 +- reg_usbh1_vbus: regulator@0 {
277 +- compatible = "regulator-fixed";
278 +- pinctrl-names = "default";
279 +- pinctrl-0 = <&pinctrl_usbh1reg>;
280 +- reg = <0>;
281 +- regulator-name = "usbh1_vbus";
282 +- regulator-min-microvolt = <5000000>;
283 +- regulator-max-microvolt = <5000000>;
284 +- gpio = <&gpio2 5 GPIO_ACTIVE_HIGH>;
285 +- enable-active-high;
286 +- };
287 +-
288 +- reg_usbotg_vbus: regulator@1 {
289 ++ reg_hub_reset: regulator@0 {
290 + compatible = "regulator-fixed";
291 + pinctrl-names = "default";
292 + pinctrl-0 = <&pinctrl_usbotgreg>;
293 +- reg = <1>;
294 +- regulator-name = "usbotg_vbus";
295 ++ reg = <0>;
296 ++ regulator-name = "hub_reset";
297 + regulator-min-microvolt = <5000000>;
298 + regulator-max-microvolt = <5000000>;
299 + gpio = <&gpio1 7 GPIO_ACTIVE_HIGH>;
300 +@@ -176,6 +164,7 @@
301 + reg = <0>;
302 + clocks = <&clks IMX5_CLK_DUMMY>;
303 + clock-names = "main_clk";
304 ++ reset-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>;
305 + };
306 + };
307 + };
308 +@@ -419,7 +408,7 @@
309 + &usbh1 {
310 + pinctrl-names = "default";
311 + pinctrl-0 = <&pinctrl_usbh1>;
312 +- vbus-supply = <&reg_usbh1_vbus>;
313 ++ vbus-supply = <&reg_hub_reset>;
314 + fsl,usbphy = <&usbh1phy>;
315 + phy_type = "ulpi";
316 + status = "okay";
317 +@@ -429,7 +418,6 @@
318 + dr_mode = "otg";
319 + disable-over-current;
320 + phy_type = "utmi_wide";
321 +- vbus-supply = <&reg_usbotg_vbus>;
322 + status = "okay";
323 + };
324 +
325 +diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
326 +index b3f86670d2eb..a0e51bb68b2d 100644
327 +--- a/arch/arm/configs/omap2plus_defconfig
328 ++++ b/arch/arm/configs/omap2plus_defconfig
329 +@@ -68,7 +68,7 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
330 + CONFIG_CPU_FREQ_GOV_POWERSAVE=y
331 + CONFIG_CPU_FREQ_GOV_USERSPACE=y
332 + CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
333 +-CONFIG_GENERIC_CPUFREQ_CPU0=y
334 ++CONFIG_CPUFREQ_DT=y
335 + # CONFIG_ARM_OMAP2PLUS_CPUFREQ is not set
336 + CONFIG_CPU_IDLE=y
337 + CONFIG_BINFMT_MISC=y
338 +diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
339 +index 4e79da7c5e30..2daef619d053 100644
340 +--- a/arch/arm/mach-imx/clk-imx6q.c
341 ++++ b/arch/arm/mach-imx/clk-imx6q.c
342 +@@ -144,8 +144,8 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
343 + post_div_table[1].div = 1;
344 + post_div_table[2].div = 1;
345 + video_div_table[1].div = 1;
346 +- video_div_table[2].div = 1;
347 +- };
348 ++ video_div_table[3].div = 1;
349 ++ }
350 +
351 + clk[IMX6QDL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
352 + clk[IMX6QDL_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
353 +diff --git a/arch/arm/mach-imx/clk-imx6sx.c b/arch/arm/mach-imx/clk-imx6sx.c
354 +index 17354a11356f..5a3e5a159e70 100644
355 +--- a/arch/arm/mach-imx/clk-imx6sx.c
356 ++++ b/arch/arm/mach-imx/clk-imx6sx.c
357 +@@ -558,6 +558,9 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
358 + clk_set_parent(clks[IMX6SX_CLK_GPU_CORE_SEL], clks[IMX6SX_CLK_PLL3_PFD0]);
359 + clk_set_parent(clks[IMX6SX_CLK_GPU_AXI_SEL], clks[IMX6SX_CLK_PLL3_PFD0]);
360 +
361 ++ clk_set_parent(clks[IMX6SX_CLK_QSPI1_SEL], clks[IMX6SX_CLK_PLL2_BUS]);
362 ++ clk_set_parent(clks[IMX6SX_CLK_QSPI2_SEL], clks[IMX6SX_CLK_PLL2_BUS]);
363 ++
364 + /* Set initial power mode */
365 + imx6q_set_lpm(WAIT_CLOCKED);
366 + }
367 +diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
368 +index 377eea849e7b..db57741c9c8a 100644
369 +--- a/arch/arm/mach-omap2/common.h
370 ++++ b/arch/arm/mach-omap2/common.h
371 +@@ -249,6 +249,7 @@ extern void omap4_cpu_die(unsigned int cpu);
372 + extern struct smp_operations omap4_smp_ops;
373 +
374 + extern void omap5_secondary_startup(void);
375 ++extern void omap5_secondary_hyp_startup(void);
376 + #endif
377 +
378 + #if defined(CONFIG_SMP) && defined(CONFIG_PM)
379 +diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
380 +index 4993d4bfe9b2..6d1dffca6c7b 100644
381 +--- a/arch/arm/mach-omap2/omap-headsmp.S
382 ++++ b/arch/arm/mach-omap2/omap-headsmp.S
383 +@@ -22,6 +22,7 @@
384 +
385 + /* Physical address needed since MMU not enabled yet on secondary core */
386 + #define AUX_CORE_BOOT0_PA 0x48281800
387 ++#define API_HYP_ENTRY 0x102
388 +
389 + /*
390 + * OMAP5 specific entry point for secondary CPU to jump from ROM
391 +@@ -41,6 +42,26 @@ wait: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
392 + b secondary_startup
393 + ENDPROC(omap5_secondary_startup)
394 + /*
395 ++ * Same as omap5_secondary_startup except we call into the ROM to
396 ++ * enable HYP mode first. This is called instead of
397 ++ * omap5_secondary_startup if the primary CPU was put into HYP mode by
398 ++ * the boot loader.
399 ++ */
400 ++ENTRY(omap5_secondary_hyp_startup)
401 ++wait_2: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
402 ++ ldr r0, [r2]
403 ++ mov r0, r0, lsr #5
404 ++ mrc p15, 0, r4, c0, c0, 5
405 ++ and r4, r4, #0x0f
406 ++ cmp r0, r4
407 ++ bne wait_2
408 ++ ldr r12, =API_HYP_ENTRY
409 ++ adr r0, hyp_boot
410 ++ smc #0
411 ++hyp_boot:
412 ++ b secondary_startup
413 ++ENDPROC(omap5_secondary_hyp_startup)
414 ++/*
415 + * OMAP4 specific entry point for secondary CPU to jump from ROM
416 + * code. This routine also provides a holding flag into which
417 + * secondary core is held until we're ready for it to initialise.
418 +diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
419 +index 256e84ef0f67..5305ec7341ec 100644
420 +--- a/arch/arm/mach-omap2/omap-smp.c
421 ++++ b/arch/arm/mach-omap2/omap-smp.c
422 +@@ -22,6 +22,7 @@
423 + #include <linux/irqchip/arm-gic.h>
424 +
425 + #include <asm/smp_scu.h>
426 ++#include <asm/virt.h>
427 +
428 + #include "omap-secure.h"
429 + #include "omap-wakeupgen.h"
430 +@@ -227,8 +228,16 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
431 + if (omap_secure_apis_support())
432 + omap_auxcoreboot_addr(virt_to_phys(startup_addr));
433 + else
434 +- writel_relaxed(virt_to_phys(omap5_secondary_startup),
435 +- base + OMAP_AUX_CORE_BOOT_1);
436 ++ /*
437 ++ * If the boot CPU is in HYP mode then start secondary
438 ++ * CPU in HYP mode as well.
439 ++ */
440 ++ if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
441 ++ writel_relaxed(virt_to_phys(omap5_secondary_hyp_startup),
442 ++ base + OMAP_AUX_CORE_BOOT_1);
443 ++ else
444 ++ writel_relaxed(virt_to_phys(omap5_secondary_startup),
445 ++ base + OMAP_AUX_CORE_BOOT_1);
446 +
447 + }
448 +
449 +diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
450 +index 4f61148ec168..fb0cb2b817a9 100644
451 +--- a/arch/arm/mach-omap2/timer.c
452 ++++ b/arch/arm/mach-omap2/timer.c
453 +@@ -513,11 +513,11 @@ static void __init realtime_counter_init(void)
454 + rate = clk_get_rate(sys_clk);
455 + /* Numerator/denumerator values refer TRM Realtime Counter section */
456 + switch (rate) {
457 +- case 1200000:
458 ++ case 12000000:
459 + num = 64;
460 + den = 125;
461 + break;
462 +- case 1300000:
463 ++ case 13000000:
464 + num = 768;
465 + den = 1625;
466 + break;
467 +@@ -529,11 +529,11 @@ static void __init realtime_counter_init(void)
468 + num = 192;
469 + den = 625;
470 + break;
471 +- case 2600000:
472 ++ case 26000000:
473 + num = 384;
474 + den = 1625;
475 + break;
476 +- case 2700000:
477 ++ case 27000000:
478 + num = 256;
479 + den = 1125;
480 + break;
481 +diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
482 +index 328657d011d5..1ba4f6357843 100644
483 +--- a/arch/arm/mach-shmobile/setup-sh73a0.c
484 ++++ b/arch/arm/mach-shmobile/setup-sh73a0.c
485 +@@ -598,6 +598,7 @@ static struct platform_device ipmmu_device = {
486 +
487 + static struct renesas_intc_irqpin_config irqpin0_platform_data = {
488 + .irq_base = irq_pin(0), /* IRQ0 -> IRQ7 */
489 ++ .control_parent = true,
490 + };
491 +
492 + static struct resource irqpin0_resources[] = {
493 +@@ -659,6 +660,7 @@ static struct platform_device irqpin1_device = {
494 +
495 + static struct renesas_intc_irqpin_config irqpin2_platform_data = {
496 + .irq_base = irq_pin(16), /* IRQ16 -> IRQ23 */
497 ++ .control_parent = true,
498 + };
499 +
500 + static struct resource irqpin2_resources[] = {
501 +@@ -689,6 +691,7 @@ static struct platform_device irqpin2_device = {
502 +
503 + static struct renesas_intc_irqpin_config irqpin3_platform_data = {
504 + .irq_base = irq_pin(24), /* IRQ24 -> IRQ31 */
505 ++ .control_parent = true,
506 + };
507 +
508 + static struct resource irqpin3_resources[] = {
509 +diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
510 +index 494297c698ca..fff81f02251c 100644
511 +--- a/arch/arm64/mm/init.c
512 ++++ b/arch/arm64/mm/init.c
513 +@@ -333,14 +333,8 @@ static int keep_initrd;
514 +
515 + void free_initrd_mem(unsigned long start, unsigned long end)
516 + {
517 +- if (!keep_initrd) {
518 +- if (start == initrd_start)
519 +- start = round_down(start, PAGE_SIZE);
520 +- if (end == initrd_end)
521 +- end = round_up(end, PAGE_SIZE);
522 +-
523 ++ if (!keep_initrd)
524 + free_reserved_area((void *)start, (void *)end, 0, "initrd");
525 +- }
526 + }
527 +
528 + static int __init keepinitrd_setup(char *__unused)
529 +diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
530 +index d2d11b7055ba..8121aa6db2ff 100644
531 +--- a/arch/parisc/include/asm/ldcw.h
532 ++++ b/arch/parisc/include/asm/ldcw.h
533 +@@ -33,11 +33,18 @@
534 +
535 + #endif /*!CONFIG_PA20*/
536 +
537 +-/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
538 ++/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
539 ++ We don't explicitly expose that "*a" may be written as reload
540 ++ fails to find a register in class R1_REGS when "a" needs to be
541 ++ reloaded when generating 64-bit PIC code. Instead, we clobber
542 ++ memory to indicate to the compiler that the assembly code reads
543 ++ or writes to items other than those listed in the input and output
544 ++ operands. This may pessimize the code somewhat but __ldcw is
545 ++ usually used within code blocks surrounded by memory barriors. */
546 + #define __ldcw(a) ({ \
547 + unsigned __ret; \
548 +- __asm__ __volatile__(__LDCW " 0(%2),%0" \
549 +- : "=r" (__ret), "+m" (*(a)) : "r" (a)); \
550 ++ __asm__ __volatile__(__LDCW " 0(%1),%0" \
551 ++ : "=r" (__ret) : "r" (a) : "memory"); \
552 + __ret; \
553 + })
554 +
555 +diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
556 +index feb549aa3eea..b67ea67eb71b 100644
557 +--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
558 ++++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
559 +@@ -40,7 +40,6 @@ BEGIN_FTR_SECTION; \
560 + b 1f; \
561 + END_FTR_SECTION(0, 1); \
562 + ld r12,opal_tracepoint_refcount@toc(r2); \
563 +- std r12,32(r1); \
564 + cmpdi r12,0; \
565 + bne- LABEL; \
566 + 1:
567 +diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
568 +index 87bc86821bc9..d195a87ca542 100644
569 +--- a/arch/um/Kconfig.common
570 ++++ b/arch/um/Kconfig.common
571 +@@ -3,6 +3,7 @@ config UML
572 + default y
573 + select HAVE_ARCH_AUDITSYSCALL
574 + select HAVE_UID16
575 ++ select HAVE_FUTEX_CMPXCHG if FUTEX
576 + select GENERIC_IRQ_SHOW
577 + select GENERIC_CPU_DEVICES
578 + select GENERIC_IO
579 +diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
580 +index 5b016e2498f3..3db07f30636f 100644
581 +--- a/arch/x86/boot/Makefile
582 ++++ b/arch/x86/boot/Makefile
583 +@@ -51,6 +51,7 @@ targets += cpustr.h
584 + $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
585 + $(call if_changed,cpustr)
586 + endif
587 ++clean-files += cpustr.h
588 +
589 + # ---------------------------------------------------------------------------
590 +
591 +diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
592 +index e27b49d7c922..80091ae54c2b 100644
593 +--- a/arch/x86/kernel/cpu/Makefile
594 ++++ b/arch/x86/kernel/cpu/Makefile
595 +@@ -66,3 +66,4 @@ targets += capflags.c
596 + $(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
597 + $(call if_changed,mkcapflags)
598 + endif
599 ++clean-files += capflags.c
600 +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
601 +index 67e6d19ef1be..93d2c04c6f8f 100644
602 +--- a/arch/x86/kernel/kprobes/core.c
603 ++++ b/arch/x86/kernel/kprobes/core.c
604 +@@ -1018,6 +1018,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
605 + regs->flags &= ~X86_EFLAGS_IF;
606 + trace_hardirqs_off();
607 + regs->ip = (unsigned long)(jp->entry);
608 ++
609 ++ /*
610 ++ * jprobes use jprobe_return() which skips the normal return
611 ++ * path of the function, and this messes up the accounting of the
612 ++ * function graph tracer to get messed up.
613 ++ *
614 ++ * Pause function graph tracing while performing the jprobe function.
615 ++ */
616 ++ pause_graph_tracing();
617 + return 1;
618 + }
619 + NOKPROBE_SYMBOL(setjmp_pre_handler);
620 +@@ -1046,24 +1055,25 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
621 + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
622 + u8 *addr = (u8 *) (regs->ip - 1);
623 + struct jprobe *jp = container_of(p, struct jprobe, kp);
624 ++ void *saved_sp = kcb->jprobe_saved_sp;
625 +
626 + if ((addr > (u8 *) jprobe_return) &&
627 + (addr < (u8 *) jprobe_return_end)) {
628 +- if (stack_addr(regs) != kcb->jprobe_saved_sp) {
629 ++ if (stack_addr(regs) != saved_sp) {
630 + struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
631 + printk(KERN_ERR
632 + "current sp %p does not match saved sp %p\n",
633 +- stack_addr(regs), kcb->jprobe_saved_sp);
634 ++ stack_addr(regs), saved_sp);
635 + printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
636 + show_regs(saved_regs);
637 + printk(KERN_ERR "Current registers\n");
638 + show_regs(regs);
639 + BUG();
640 + }
641 ++ /* It's OK to start function graph tracing again */
642 ++ unpause_graph_tracing();
643 + *regs = kcb->jprobe_saved_regs;
644 +- memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
645 +- kcb->jprobes_stack,
646 +- MIN_STACK_SIZE(kcb->jprobe_saved_sp));
647 ++ memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
648 + preempt_enable_no_resched();
649 + return 1;
650 + }
651 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
652 +index 3e556c68351b..ed7039465f16 100644
653 +--- a/arch/x86/kvm/vmx.c
654 ++++ b/arch/x86/kvm/vmx.c
655 +@@ -2377,12 +2377,12 @@ static __init void nested_vmx_setup_ctls_msrs(void)
656 + nested_vmx_secondary_ctls_low = 0;
657 + nested_vmx_secondary_ctls_high &=
658 + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
659 +- SECONDARY_EXEC_UNRESTRICTED_GUEST |
660 + SECONDARY_EXEC_WBINVD_EXITING;
661 +
662 + if (enable_ept) {
663 + /* nested EPT: emulate EPT also to L1 */
664 +- nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT;
665 ++ nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT |
666 ++ SECONDARY_EXEC_UNRESTRICTED_GUEST;
667 + nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
668 + VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
669 + VMX_EPT_INVEPT_BIT;
670 +diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
671 +index 531d4269e2e3..bd16d6c370ec 100644
672 +--- a/arch/x86/um/sys_call_table_32.c
673 ++++ b/arch/x86/um/sys_call_table_32.c
674 +@@ -34,7 +34,7 @@ typedef asmlinkage void (*sys_call_ptr_t)(void);
675 +
676 + extern asmlinkage void sys_ni_syscall(void);
677 +
678 +-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
679 ++const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
680 + /*
681 + * Smells like a compiler bug -- it doesn't work
682 + * when the & below is removed.
683 +diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
684 +index f2f0723070ca..95783087f0d3 100644
685 +--- a/arch/x86/um/sys_call_table_64.c
686 ++++ b/arch/x86/um/sys_call_table_64.c
687 +@@ -46,7 +46,7 @@ typedef void (*sys_call_ptr_t)(void);
688 +
689 + extern void sys_ni_syscall(void);
690 +
691 +-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
692 ++const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
693 + /*
694 + * Smells like a compiler bug -- it doesn't work
695 + * when the & below is removed.
696 +diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
697 +index 531ae591783b..17d86595951c 100644
698 +--- a/drivers/bus/omap_l3_noc.c
699 ++++ b/drivers/bus/omap_l3_noc.c
700 +@@ -222,10 +222,14 @@ static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
701 + }
702 +
703 + /* Error found so break the for loop */
704 +- break;
705 ++ return IRQ_HANDLED;
706 + }
707 + }
708 +- return IRQ_HANDLED;
709 ++
710 ++ dev_err(l3->dev, "L3 %s IRQ not handled!!\n",
711 ++ inttype ? "debug" : "application");
712 ++
713 ++ return IRQ_NONE;
714 + }
715 +
716 + static const struct of_device_id l3_noc_match[] = {
717 +@@ -296,11 +300,66 @@ static int omap_l3_probe(struct platform_device *pdev)
718 + return ret;
719 + }
720 +
721 ++#ifdef CONFIG_PM
722 ++
723 ++/**
724 ++ * l3_resume_noirq() - resume function for l3_noc
725 ++ * @dev: pointer to l3_noc device structure
726 ++ *
727 ++ * We only have the resume handler only since we
728 ++ * have already maintained the delta register
729 ++ * configuration as part of configuring the system
730 ++ */
731 ++static int l3_resume_noirq(struct device *dev)
732 ++{
733 ++ struct omap_l3 *l3 = dev_get_drvdata(dev);
734 ++ int i;
735 ++ struct l3_flagmux_data *flag_mux;
736 ++ void __iomem *base, *mask_regx = NULL;
737 ++ u32 mask_val;
738 ++
739 ++ for (i = 0; i < l3->num_modules; i++) {
740 ++ base = l3->l3_base[i];
741 ++ flag_mux = l3->l3_flagmux[i];
742 ++ if (!flag_mux->mask_app_bits && !flag_mux->mask_dbg_bits)
743 ++ continue;
744 ++
745 ++ mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
746 ++ (L3_APPLICATION_ERROR << 3);
747 ++ mask_val = readl_relaxed(mask_regx);
748 ++ mask_val &= ~(flag_mux->mask_app_bits);
749 ++
750 ++ writel_relaxed(mask_val, mask_regx);
751 ++ mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
752 ++ (L3_DEBUG_ERROR << 3);
753 ++ mask_val = readl_relaxed(mask_regx);
754 ++ mask_val &= ~(flag_mux->mask_dbg_bits);
755 ++
756 ++ writel_relaxed(mask_val, mask_regx);
757 ++ }
758 ++
759 ++ /* Dummy read to force OCP barrier */
760 ++ if (mask_regx)
761 ++ (void)readl(mask_regx);
762 ++
763 ++ return 0;
764 ++}
765 ++
766 ++static const struct dev_pm_ops l3_dev_pm_ops = {
767 ++ .resume_noirq = l3_resume_noirq,
768 ++};
769 ++
770 ++#define L3_DEV_PM_OPS (&l3_dev_pm_ops)
771 ++#else
772 ++#define L3_DEV_PM_OPS NULL
773 ++#endif
774 ++
775 + static struct platform_driver omap_l3_driver = {
776 + .probe = omap_l3_probe,
777 + .driver = {
778 + .name = "omap_l3_noc",
779 + .owner = THIS_MODULE,
780 ++ .pm = L3_DEV_PM_OPS,
781 + .of_match_table = of_match_ptr(l3_noc_match),
782 + },
783 + };
784 +diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c
785 +index 32f7c1b36204..2f13bd5246b5 100644
786 +--- a/drivers/clk/at91/clk-slow.c
787 ++++ b/drivers/clk/at91/clk-slow.c
788 +@@ -70,6 +70,7 @@ struct clk_sam9x5_slow {
789 +
790 + #define to_clk_sam9x5_slow(hw) container_of(hw, struct clk_sam9x5_slow, hw)
791 +
792 ++static struct clk *slow_clk;
793 +
794 + static int clk_slow_osc_prepare(struct clk_hw *hw)
795 + {
796 +@@ -357,6 +358,8 @@ at91_clk_register_sam9x5_slow(void __iomem *sckcr,
797 + clk = clk_register(NULL, &slowck->hw);
798 + if (IS_ERR(clk))
799 + kfree(slowck);
800 ++ else
801 ++ slow_clk = clk;
802 +
803 + return clk;
804 + }
805 +@@ -433,6 +436,8 @@ at91_clk_register_sam9260_slow(struct at91_pmc *pmc,
806 + clk = clk_register(NULL, &slowck->hw);
807 + if (IS_ERR(clk))
808 + kfree(slowck);
809 ++ else
810 ++ slow_clk = clk;
811 +
812 + return clk;
813 + }
814 +@@ -465,3 +470,25 @@ void __init of_at91sam9260_clk_slow_setup(struct device_node *np,
815 +
816 + of_clk_add_provider(np, of_clk_src_simple_get, clk);
817 + }
818 ++
819 ++/*
820 ++ * FIXME: All slow clk users are not properly claiming it (get + prepare +
821 ++ * enable) before using it.
822 ++ * If all users properly claiming this clock decide that they don't need it
823 ++ * anymore (or are removed), it is disabled while faulty users are still
824 ++ * requiring it, and the system hangs.
825 ++ * Prevent this clock from being disabled until all users are properly
826 ++ * requesting it.
827 ++ * Once this is done we should remove this function and the slow_clk variable.
828 ++ */
829 ++static int __init of_at91_clk_slow_retain(void)
830 ++{
831 ++ if (!slow_clk)
832 ++ return 0;
833 ++
834 ++ __clk_get(slow_clk);
835 ++ clk_prepare_enable(slow_clk);
836 ++
837 ++ return 0;
838 ++}
839 ++arch_initcall(of_at91_clk_slow_retain);
840 +diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
841 +index 21784e4eb3f0..440ef81ab15c 100644
842 +--- a/drivers/clk/berlin/bg2q.c
843 ++++ b/drivers/clk/berlin/bg2q.c
844 +@@ -285,7 +285,6 @@ static const struct berlin2_gate_data bg2q_gates[] __initconst = {
845 + { "pbridge", "perif", 15, CLK_IGNORE_UNUSED },
846 + { "sdio", "perif", 16, CLK_IGNORE_UNUSED },
847 + { "nfc", "perif", 18 },
848 +- { "smemc", "perif", 19 },
849 + { "pcie", "perif", 22 },
850 + };
851 +
852 +diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-ppc-corenet.c
853 +index 8e58edfeeb37..8b284be4efa4 100644
854 +--- a/drivers/clk/clk-ppc-corenet.c
855 ++++ b/drivers/clk/clk-ppc-corenet.c
856 +@@ -291,7 +291,7 @@ static const struct of_device_id ppc_clk_ids[] __initconst = {
857 + {}
858 + };
859 +
860 +-static struct platform_driver ppc_corenet_clk_driver __initdata = {
861 ++static struct platform_driver ppc_corenet_clk_driver = {
862 + .driver = {
863 + .name = "ppc_corenet_clock",
864 + .owner = THIS_MODULE,
865 +diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
866 +index 4896ae9e23da..26bed0889e97 100644
867 +--- a/drivers/clk/clk.c
868 ++++ b/drivers/clk/clk.c
869 +@@ -240,7 +240,6 @@ static const struct file_operations clk_dump_fops = {
870 + .release = single_release,
871 + };
872 +
873 +-/* caller must hold prepare_lock */
874 + static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
875 + {
876 + struct dentry *d;
877 +@@ -1944,7 +1943,6 @@ int __clk_init(struct device *dev, struct clk *clk)
878 + else
879 + clk->rate = 0;
880 +
881 +- clk_debug_register(clk);
882 + /*
883 + * walk the list of orphan clocks and reparent any that are children of
884 + * this clock
885 +@@ -1979,6 +1977,9 @@ int __clk_init(struct device *dev, struct clk *clk)
886 + out:
887 + clk_prepare_unlock();
888 +
889 ++ if (!ret)
890 ++ clk_debug_register(clk);
891 ++
892 + return ret;
893 + }
894 +
895 +@@ -2273,14 +2274,17 @@ int __clk_get(struct clk *clk)
896 +
897 + void __clk_put(struct clk *clk)
898 + {
899 ++ struct module *owner;
900 ++
901 + if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
902 + return;
903 +
904 + clk_prepare_lock();
905 ++ owner = clk->owner;
906 + kref_put(&clk->ref, __clk_release);
907 + clk_prepare_unlock();
908 +
909 +- module_put(clk->owner);
910 ++ module_put(owner);
911 + }
912 +
913 + /*** clk rate change notifiers ***/
914 +diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
915 +index beed49c79126..8088b384ce6e 100644
916 +--- a/drivers/clk/rockchip/clk-rk3188.c
917 ++++ b/drivers/clk/rockchip/clk-rk3188.c
918 +@@ -210,6 +210,17 @@ PNAME(mux_sclk_hsadc_p) = { "hsadc_src", "hsadc_frac", "ext_hsadc" };
919 + PNAME(mux_mac_p) = { "gpll", "dpll" };
920 + PNAME(mux_sclk_macref_p) = { "mac_src", "ext_rmii" };
921 +
922 ++static struct rockchip_pll_clock rk3066_pll_clks[] __initdata = {
923 ++ [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
924 ++ RK2928_MODE_CON, 0, 5, rk3188_pll_rates),
925 ++ [dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK2928_PLL_CON(4),
926 ++ RK2928_MODE_CON, 4, 4, NULL),
927 ++ [cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK2928_PLL_CON(8),
928 ++ RK2928_MODE_CON, 8, 6, rk3188_pll_rates),
929 ++ [gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK2928_PLL_CON(12),
930 ++ RK2928_MODE_CON, 12, 7, rk3188_pll_rates),
931 ++};
932 ++
933 + static struct rockchip_pll_clock rk3188_pll_clks[] __initdata = {
934 + [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
935 + RK2928_MODE_CON, 0, 6, rk3188_pll_rates),
936 +@@ -742,8 +753,8 @@ static void __init rk3188_common_clk_init(struct device_node *np)
937 + static void __init rk3066a_clk_init(struct device_node *np)
938 + {
939 + rk3188_common_clk_init(np);
940 +- rockchip_clk_register_plls(rk3188_pll_clks,
941 +- ARRAY_SIZE(rk3188_pll_clks),
942 ++ rockchip_clk_register_plls(rk3066_pll_clks,
943 ++ ARRAY_SIZE(rk3066_pll_clks),
944 + RK3066_GRF_SOC_STATUS);
945 + rockchip_clk_register_branches(rk3066a_clk_branches,
946 + ARRAY_SIZE(rk3066a_clk_branches));
947 +diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
948 +index 23278291da44..6dbc64759a58 100644
949 +--- a/drivers/clk/rockchip/clk-rk3288.c
950 ++++ b/drivers/clk/rockchip/clk-rk3288.c
951 +@@ -142,20 +142,20 @@ struct rockchip_pll_rate_table rk3288_pll_rates[] = {
952 + }
953 +
954 + static struct rockchip_cpuclk_rate_table rk3288_cpuclk_rates[] __initdata = {
955 +- RK3288_CPUCLK_RATE(1800000000, 2, 4, 2, 4, 4),
956 +- RK3288_CPUCLK_RATE(1704000000, 2, 4, 2, 4, 4),
957 +- RK3288_CPUCLK_RATE(1608000000, 2, 4, 2, 4, 4),
958 +- RK3288_CPUCLK_RATE(1512000000, 2, 4, 2, 4, 4),
959 +- RK3288_CPUCLK_RATE(1416000000, 2, 4, 2, 4, 4),
960 +- RK3288_CPUCLK_RATE(1200000000, 2, 4, 2, 4, 4),
961 +- RK3288_CPUCLK_RATE(1008000000, 2, 4, 2, 4, 4),
962 +- RK3288_CPUCLK_RATE( 816000000, 2, 4, 2, 4, 4),
963 +- RK3288_CPUCLK_RATE( 696000000, 2, 4, 2, 4, 4),
964 +- RK3288_CPUCLK_RATE( 600000000, 2, 4, 2, 4, 4),
965 +- RK3288_CPUCLK_RATE( 408000000, 2, 4, 2, 4, 4),
966 +- RK3288_CPUCLK_RATE( 312000000, 2, 4, 2, 4, 4),
967 +- RK3288_CPUCLK_RATE( 216000000, 2, 4, 2, 4, 4),
968 +- RK3288_CPUCLK_RATE( 126000000, 2, 4, 2, 4, 4),
969 ++ RK3288_CPUCLK_RATE(1800000000, 1, 3, 1, 3, 3),
970 ++ RK3288_CPUCLK_RATE(1704000000, 1, 3, 1, 3, 3),
971 ++ RK3288_CPUCLK_RATE(1608000000, 1, 3, 1, 3, 3),
972 ++ RK3288_CPUCLK_RATE(1512000000, 1, 3, 1, 3, 3),
973 ++ RK3288_CPUCLK_RATE(1416000000, 1, 3, 1, 3, 3),
974 ++ RK3288_CPUCLK_RATE(1200000000, 1, 3, 1, 3, 3),
975 ++ RK3288_CPUCLK_RATE(1008000000, 1, 3, 1, 3, 3),
976 ++ RK3288_CPUCLK_RATE( 816000000, 1, 3, 1, 3, 3),
977 ++ RK3288_CPUCLK_RATE( 696000000, 1, 3, 1, 3, 3),
978 ++ RK3288_CPUCLK_RATE( 600000000, 1, 3, 1, 3, 3),
979 ++ RK3288_CPUCLK_RATE( 408000000, 1, 3, 1, 3, 3),
980 ++ RK3288_CPUCLK_RATE( 312000000, 1, 3, 1, 3, 3),
981 ++ RK3288_CPUCLK_RATE( 216000000, 1, 3, 1, 3, 3),
982 ++ RK3288_CPUCLK_RATE( 126000000, 1, 3, 1, 3, 3),
983 + };
984 +
985 + static const struct rockchip_cpuclk_reg_data rk3288_cpuclk_data = {
986 +diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
987 +index 13eae14c2cc2..b50469faf70c 100644
988 +--- a/drivers/clk/samsung/clk-exynos-audss.c
989 ++++ b/drivers/clk/samsung/clk-exynos-audss.c
990 +@@ -210,6 +210,10 @@ static int exynos_audss_clk_remove(struct platform_device *pdev)
991 + {
992 + int i;
993 +
994 ++#ifdef CONFIG_PM_SLEEP
995 ++ unregister_syscore_ops(&exynos_audss_clk_syscore_ops);
996 ++#endif
997 ++
998 + of_clk_del_provider(pdev->dev.of_node);
999 +
1000 + for (i = 0; i < clk_data.clk_num; i++) {
1001 +diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
1002 +index bbfe7f508502..a7cf0c193ba8 100644
1003 +--- a/drivers/gpio/gpio-crystalcove.c
1004 ++++ b/drivers/gpio/gpio-crystalcove.c
1005 +@@ -272,7 +272,7 @@ static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data)
1006 + for (gpio = 0; gpio < CRYSTALCOVE_GPIO_NUM; gpio++) {
1007 + if (pending & BIT(gpio)) {
1008 + virq = irq_find_mapping(cg->chip.irqdomain, gpio);
1009 +- generic_handle_irq(virq);
1010 ++ handle_nested_irq(virq);
1011 + }
1012 + }
1013 +
1014 +diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
1015 +index 604dbe60bdee..08261f2b3a82 100644
1016 +--- a/drivers/gpio/gpiolib-of.c
1017 ++++ b/drivers/gpio/gpiolib-of.c
1018 +@@ -45,8 +45,14 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
1019 + return false;
1020 +
1021 + ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
1022 +- if (ret < 0)
1023 +- return false;
1024 ++ if (ret < 0) {
1025 ++ /* We've found the gpio chip, but the translation failed.
1026 ++ * Return true to stop looking and return the translation
1027 ++ * error via out_gpio
1028 ++ */
1029 ++ gg_data->out_gpio = ERR_PTR(ret);
1030 ++ return true;
1031 ++ }
1032 +
1033 + gg_data->out_gpio = gpiochip_get_desc(gc, ret);
1034 + return true;
1035 +diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
1036 +index 5f2150b619a7..0d21396f961d 100644
1037 +--- a/drivers/gpio/gpiolib-sysfs.c
1038 ++++ b/drivers/gpio/gpiolib-sysfs.c
1039 +@@ -128,7 +128,7 @@ static ssize_t gpio_value_store(struct device *dev,
1040 + return status;
1041 + }
1042 +
1043 +-static const DEVICE_ATTR(value, 0644,
1044 ++static DEVICE_ATTR(value, 0644,
1045 + gpio_value_show, gpio_value_store);
1046 +
1047 + static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
1048 +@@ -353,18 +353,15 @@ static ssize_t gpio_active_low_store(struct device *dev,
1049 + return status ? : size;
1050 + }
1051 +
1052 +-static const DEVICE_ATTR(active_low, 0644,
1053 ++static DEVICE_ATTR(active_low, 0644,
1054 + gpio_active_low_show, gpio_active_low_store);
1055 +
1056 +-static const struct attribute *gpio_attrs[] = {
1057 ++static struct attribute *gpio_attrs[] = {
1058 + &dev_attr_value.attr,
1059 + &dev_attr_active_low.attr,
1060 + NULL,
1061 + };
1062 +-
1063 +-static const struct attribute_group gpio_attr_group = {
1064 +- .attrs = (struct attribute **) gpio_attrs,
1065 +-};
1066 ++ATTRIBUTE_GROUPS(gpio);
1067 +
1068 + /*
1069 + * /sys/class/gpio/gpiochipN/
1070 +@@ -400,16 +397,13 @@ static ssize_t chip_ngpio_show(struct device *dev,
1071 + }
1072 + static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL);
1073 +
1074 +-static const struct attribute *gpiochip_attrs[] = {
1075 ++static struct attribute *gpiochip_attrs[] = {
1076 + &dev_attr_base.attr,
1077 + &dev_attr_label.attr,
1078 + &dev_attr_ngpio.attr,
1079 + NULL,
1080 + };
1081 +-
1082 +-static const struct attribute_group gpiochip_attr_group = {
1083 +- .attrs = (struct attribute **) gpiochip_attrs,
1084 +-};
1085 ++ATTRIBUTE_GROUPS(gpiochip);
1086 +
1087 + /*
1088 + * /sys/class/gpio/export ... write-only
1089 +@@ -564,18 +558,15 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
1090 + if (desc->chip->names && desc->chip->names[offset])
1091 + ioname = desc->chip->names[offset];
1092 +
1093 +- dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
1094 +- desc, ioname ? ioname : "gpio%u",
1095 +- desc_to_gpio(desc));
1096 ++ dev = device_create_with_groups(&gpio_class, desc->chip->dev,
1097 ++ MKDEV(0, 0), desc, gpio_groups,
1098 ++ ioname ? ioname : "gpio%u",
1099 ++ desc_to_gpio(desc));
1100 + if (IS_ERR(dev)) {
1101 + status = PTR_ERR(dev);
1102 + goto fail_unlock;
1103 + }
1104 +
1105 +- status = sysfs_create_group(&dev->kobj, &gpio_attr_group);
1106 +- if (status)
1107 +- goto fail_unregister_device;
1108 +-
1109 + if (direction_may_change) {
1110 + status = device_create_file(dev, &dev_attr_direction);
1111 + if (status)
1112 +@@ -586,13 +577,15 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
1113 + !test_bit(FLAG_IS_OUT, &desc->flags))) {
1114 + status = device_create_file(dev, &dev_attr_edge);
1115 + if (status)
1116 +- goto fail_unregister_device;
1117 ++ goto fail_remove_attr_direction;
1118 + }
1119 +
1120 + set_bit(FLAG_EXPORT, &desc->flags);
1121 + mutex_unlock(&sysfs_lock);
1122 + return 0;
1123 +
1124 ++fail_remove_attr_direction:
1125 ++ device_remove_file(dev, &dev_attr_direction);
1126 + fail_unregister_device:
1127 + device_unregister(dev);
1128 + fail_unlock:
1129 +@@ -726,6 +719,8 @@ void gpiod_unexport(struct gpio_desc *desc)
1130 + mutex_unlock(&sysfs_lock);
1131 +
1132 + if (dev) {
1133 ++ device_remove_file(dev, &dev_attr_edge);
1134 ++ device_remove_file(dev, &dev_attr_direction);
1135 + device_unregister(dev);
1136 + put_device(dev);
1137 + }
1138 +@@ -750,13 +745,13 @@ int gpiochip_export(struct gpio_chip *chip)
1139 +
1140 + /* use chip->base for the ID; it's already known to be unique */
1141 + mutex_lock(&sysfs_lock);
1142 +- dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip,
1143 +- "gpiochip%d", chip->base);
1144 +- if (!IS_ERR(dev)) {
1145 +- status = sysfs_create_group(&dev->kobj,
1146 +- &gpiochip_attr_group);
1147 +- } else
1148 ++ dev = device_create_with_groups(&gpio_class, chip->dev, MKDEV(0, 0),
1149 ++ chip, gpiochip_groups,
1150 ++ "gpiochip%d", chip->base);
1151 ++ if (IS_ERR(dev))
1152 + status = PTR_ERR(dev);
1153 ++ else
1154 ++ status = 0;
1155 + chip->exported = (status == 0);
1156 + mutex_unlock(&sysfs_lock);
1157 +
1158 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
1159 +index e8e98ca25ec7..c81bda0ec2cf 100644
1160 +--- a/drivers/gpio/gpiolib.c
1161 ++++ b/drivers/gpio/gpiolib.c
1162 +@@ -268,6 +268,9 @@ int gpiochip_add(struct gpio_chip *chip)
1163 +
1164 + spin_unlock_irqrestore(&gpio_lock, flags);
1165 +
1166 ++ if (status)
1167 ++ goto fail;
1168 ++
1169 + #ifdef CONFIG_PINCTRL
1170 + INIT_LIST_HEAD(&chip->pin_ranges);
1171 + #endif
1172 +@@ -275,12 +278,12 @@ int gpiochip_add(struct gpio_chip *chip)
1173 + of_gpiochip_add(chip);
1174 + acpi_gpiochip_add(chip);
1175 +
1176 +- if (status)
1177 +- goto fail;
1178 +-
1179 + status = gpiochip_export(chip);
1180 +- if (status)
1181 ++ if (status) {
1182 ++ acpi_gpiochip_remove(chip);
1183 ++ of_gpiochip_remove(chip);
1184 + goto fail;
1185 ++ }
1186 +
1187 + pr_debug("%s: registered GPIOs %d to %d on device: %s\n", __func__,
1188 + chip->base, chip->base + chip->ngpio - 1,
1189 +@@ -313,14 +316,13 @@ void gpiochip_remove(struct gpio_chip *chip)
1190 + unsigned long flags;
1191 + unsigned id;
1192 +
1193 +- acpi_gpiochip_remove(chip);
1194 +-
1195 +- spin_lock_irqsave(&gpio_lock, flags);
1196 +-
1197 + gpiochip_irqchip_remove(chip);
1198 ++
1199 ++ acpi_gpiochip_remove(chip);
1200 + gpiochip_remove_pin_ranges(chip);
1201 + of_gpiochip_remove(chip);
1202 +
1203 ++ spin_lock_irqsave(&gpio_lock, flags);
1204 + for (id = 0; id < chip->ngpio; id++) {
1205 + if (test_bit(FLAG_REQUESTED, &chip->desc[id].flags))
1206 + dev_crit(chip->dev, "REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n");
1207 +diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
1208 +index 08e33b8b13a4..9f59c9027ebe 100644
1209 +--- a/drivers/gpu/drm/drm_dp_helper.c
1210 ++++ b/drivers/gpu/drm/drm_dp_helper.c
1211 +@@ -378,10 +378,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
1212 +
1213 + /*
1214 + * The specification doesn't give any recommendation on how often to
1215 +- * retry native transactions, so retry 7 times like for I2C-over-AUX
1216 +- * transactions.
1217 ++ * retry native transactions. We used to retry 7 times like for
1218 ++ * aux i2c transactions but real world devices this wasn't
1219 ++ * sufficient, bump to 32 which makes Dell 4k monitors happier.
1220 + */
1221 +- for (retry = 0; retry < 7; retry++) {
1222 ++ for (retry = 0; retry < 32; retry++) {
1223 +
1224 + mutex_lock(&aux->hw_mutex);
1225 + err = aux->transfer(aux, &msg);
1226 +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
1227 +index 070f913d2dba..f50d884b81cf 100644
1228 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c
1229 ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
1230 +@@ -839,6 +839,8 @@ static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
1231 +
1232 + static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
1233 + {
1234 ++ struct drm_dp_mst_branch *mstb;
1235 ++
1236 + switch (old_pdt) {
1237 + case DP_PEER_DEVICE_DP_LEGACY_CONV:
1238 + case DP_PEER_DEVICE_SST_SINK:
1239 +@@ -846,8 +848,9 @@ static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
1240 + drm_dp_mst_unregister_i2c_bus(&port->aux);
1241 + break;
1242 + case DP_PEER_DEVICE_MST_BRANCHING:
1243 +- drm_dp_put_mst_branch_device(port->mstb);
1244 ++ mstb = port->mstb;
1245 + port->mstb = NULL;
1246 ++ drm_dp_put_mst_branch_device(mstb);
1247 + break;
1248 + }
1249 + }
1250 +diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
1251 +index 0c0c39bac23d..ef757f712a3d 100644
1252 +--- a/drivers/gpu/drm/drm_fb_helper.c
1253 ++++ b/drivers/gpu/drm/drm_fb_helper.c
1254 +@@ -347,9 +347,18 @@ bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
1255 + {
1256 + struct drm_device *dev = fb_helper->dev;
1257 + bool ret;
1258 ++ bool do_delayed = false;
1259 ++
1260 + drm_modeset_lock_all(dev);
1261 + ret = restore_fbdev_mode(fb_helper);
1262 ++
1263 ++ do_delayed = fb_helper->delayed_hotplug;
1264 ++ if (do_delayed)
1265 ++ fb_helper->delayed_hotplug = false;
1266 + drm_modeset_unlock_all(dev);
1267 ++
1268 ++ if (do_delayed)
1269 ++ drm_fb_helper_hotplug_event(fb_helper);
1270 + return ret;
1271 + }
1272 + EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
1273 +@@ -888,10 +897,6 @@ int drm_fb_helper_set_par(struct fb_info *info)
1274 +
1275 + drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
1276 +
1277 +- if (fb_helper->delayed_hotplug) {
1278 +- fb_helper->delayed_hotplug = false;
1279 +- drm_fb_helper_hotplug_event(fb_helper);
1280 +- }
1281 + return 0;
1282 + }
1283 + EXPORT_SYMBOL(drm_fb_helper_set_par);
1284 +diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
1285 +index 5ef03c216a27..c4edea907f8f 100644
1286 +--- a/drivers/gpu/drm/drm_irq.c
1287 ++++ b/drivers/gpu/drm/drm_irq.c
1288 +@@ -1029,7 +1029,8 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
1289 + {
1290 + struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
1291 +
1292 +- BUG_ON(atomic_read(&vblank->refcount) == 0);
1293 ++ if (WARN_ON(atomic_read(&vblank->refcount) == 0))
1294 ++ return;
1295 +
1296 + if (WARN_ON(crtc >= dev->num_crtcs))
1297 + return;
1298 +diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
1299 +index 2318b4c7a8f8..925697320949 100644
1300 +--- a/drivers/gpu/drm/i915/i915_drv.c
1301 ++++ b/drivers/gpu/drm/i915/i915_drv.c
1302 +@@ -692,11 +692,12 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
1303 + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1304 + }
1305 +
1306 +- intel_dp_mst_resume(dev);
1307 + drm_modeset_lock_all(dev);
1308 + intel_modeset_setup_hw_state(dev, true);
1309 + drm_modeset_unlock_all(dev);
1310 +
1311 ++ intel_dp_mst_resume(dev);
1312 ++
1313 + /*
1314 + * ... but also need to make sure that hotplug processing
1315 + * doesn't cause havoc. Like in the driver load code we don't
1316 +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
1317 +index 16a6f6d187a1..346aee828dc3 100644
1318 +--- a/drivers/gpu/drm/i915/i915_drv.h
1319 ++++ b/drivers/gpu/drm/i915/i915_drv.h
1320 +@@ -877,6 +877,7 @@ struct i915_suspend_saved_registers {
1321 + u32 savePIPEB_LINK_N1;
1322 + u32 saveMCHBAR_RENDER_STANDBY;
1323 + u32 savePCH_PORT_HOTPLUG;
1324 ++ u16 saveGCDGMBUS;
1325 + };
1326 +
1327 + struct vlv_s0ix_state {
1328 +@@ -1665,8 +1666,6 @@ struct drm_i915_private {
1329 + */
1330 + struct workqueue_struct *dp_wq;
1331 +
1332 +- uint32_t bios_vgacntr;
1333 +-
1334 + /* Old dri1 support infrastructure, beware the dragons ya fools entering
1335 + * here! */
1336 + struct i915_dri1_state dri1;
1337 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
1338 +index 28f91df2604d..2de5f5f4ba45 100644
1339 +--- a/drivers/gpu/drm/i915/i915_gem.c
1340 ++++ b/drivers/gpu/drm/i915/i915_gem.c
1341 +@@ -4193,7 +4193,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
1342 + struct drm_i915_gem_object *obj;
1343 + int ret;
1344 +
1345 +- if (INTEL_INFO(dev)->gen >= 6)
1346 ++ if (drm_core_check_feature(dev, DRIVER_MODESET))
1347 + return -ENODEV;
1348 +
1349 + ret = i915_mutex_lock_interruptible(dev);
1350 +@@ -4249,6 +4249,9 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
1351 + struct drm_i915_gem_object *obj;
1352 + int ret;
1353 +
1354 ++ if (drm_core_check_feature(dev, DRIVER_MODESET))
1355 ++ return -ENODEV;
1356 ++
1357 + ret = i915_mutex_lock_interruptible(dev);
1358 + if (ret)
1359 + return ret;
1360 +diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
1361 +index a5221d8f1580..c12f087d7a14 100644
1362 +--- a/drivers/gpu/drm/i915/i915_gem_context.c
1363 ++++ b/drivers/gpu/drm/i915/i915_gem_context.c
1364 +@@ -468,7 +468,12 @@ mi_set_context(struct intel_engine_cs *ring,
1365 + u32 hw_flags)
1366 + {
1367 + u32 flags = hw_flags | MI_MM_SPACE_GTT;
1368 +- int ret;
1369 ++ const int num_rings =
1370 ++ /* Use an extended w/a on ivb+ if signalling from other rings */
1371 ++ i915_semaphore_is_enabled(ring->dev) ?
1372 ++ hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
1373 ++ 0;
1374 ++ int len, i, ret;
1375 +
1376 + /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
1377 + * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
1378 +@@ -485,15 +490,31 @@ mi_set_context(struct intel_engine_cs *ring,
1379 + if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
1380 + flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
1381 +
1382 +- ret = intel_ring_begin(ring, 6);
1383 ++
1384 ++ len = 4;
1385 ++ if (INTEL_INFO(ring->dev)->gen >= 7)
1386 ++ len += 2 + (num_rings ? 4*num_rings + 2 : 0);
1387 ++
1388 ++ ret = intel_ring_begin(ring, len);
1389 + if (ret)
1390 + return ret;
1391 +
1392 + /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
1393 +- if (INTEL_INFO(ring->dev)->gen >= 7)
1394 ++ if (INTEL_INFO(ring->dev)->gen >= 7) {
1395 + intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1396 +- else
1397 +- intel_ring_emit(ring, MI_NOOP);
1398 ++ if (num_rings) {
1399 ++ struct intel_engine_cs *signaller;
1400 ++
1401 ++ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
1402 ++ for_each_ring(signaller, to_i915(ring->dev), i) {
1403 ++ if (signaller == ring)
1404 ++ continue;
1405 ++
1406 ++ intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
1407 ++ intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
1408 ++ }
1409 ++ }
1410 ++ }
1411 +
1412 + intel_ring_emit(ring, MI_NOOP);
1413 + intel_ring_emit(ring, MI_SET_CONTEXT);
1414 +@@ -505,10 +526,21 @@ mi_set_context(struct intel_engine_cs *ring,
1415 + */
1416 + intel_ring_emit(ring, MI_NOOP);
1417 +
1418 +- if (INTEL_INFO(ring->dev)->gen >= 7)
1419 ++ if (INTEL_INFO(ring->dev)->gen >= 7) {
1420 ++ if (num_rings) {
1421 ++ struct intel_engine_cs *signaller;
1422 ++
1423 ++ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
1424 ++ for_each_ring(signaller, to_i915(ring->dev), i) {
1425 ++ if (signaller == ring)
1426 ++ continue;
1427 ++
1428 ++ intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
1429 ++ intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
1430 ++ }
1431 ++ }
1432 + intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1433 +- else
1434 +- intel_ring_emit(ring, MI_NOOP);
1435 ++ }
1436 +
1437 + intel_ring_advance(ring);
1438 +
1439 +diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
1440 +index 85fda6b803e4..0ee76b25204c 100644
1441 +--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
1442 ++++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
1443 +@@ -137,7 +137,11 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
1444 + r = devm_request_mem_region(dev->dev, base + 1,
1445 + dev_priv->gtt.stolen_size - 1,
1446 + "Graphics Stolen Memory");
1447 +- if (r == NULL) {
1448 ++ /*
1449 ++ * GEN3 firmware likes to smash pci bridges into the stolen
1450 ++ * range. Apparently this works.
1451 ++ */
1452 ++ if (r == NULL && !IS_GEN3(dev)) {
1453 + DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
1454 + base, base + (uint32_t)dev_priv->gtt.stolen_size);
1455 + base = 0;
1456 +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
1457 +index f66392b6e287..9ba1177200b2 100644
1458 +--- a/drivers/gpu/drm/i915/i915_irq.c
1459 ++++ b/drivers/gpu/drm/i915/i915_irq.c
1460 +@@ -4022,8 +4022,6 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
1461 + if ((iir & flip_pending) == 0)
1462 + goto check_page_flip;
1463 +
1464 +- intel_prepare_page_flip(dev, plane);
1465 +-
1466 + /* We detect FlipDone by looking for the change in PendingFlip from '1'
1467 + * to '0' on the following vblank, i.e. IIR has the Pendingflip
1468 + * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
1469 +@@ -4033,6 +4031,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
1470 + if (I915_READ16(ISR) & flip_pending)
1471 + goto check_page_flip;
1472 +
1473 ++ intel_prepare_page_flip(dev, plane);
1474 + intel_finish_page_flip(dev, pipe);
1475 + return true;
1476 +
1477 +@@ -4210,8 +4209,6 @@ static bool i915_handle_vblank(struct drm_device *dev,
1478 + if ((iir & flip_pending) == 0)
1479 + goto check_page_flip;
1480 +
1481 +- intel_prepare_page_flip(dev, plane);
1482 +-
1483 + /* We detect FlipDone by looking for the change in PendingFlip from '1'
1484 + * to '0' on the following vblank, i.e. IIR has the Pendingflip
1485 + * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
1486 +@@ -4221,6 +4218,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
1487 + if (I915_READ(ISR) & flip_pending)
1488 + goto check_page_flip;
1489 +
1490 ++ intel_prepare_page_flip(dev, plane);
1491 + intel_finish_page_flip(dev, pipe);
1492 + return true;
1493 +
1494 +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
1495 +index c01e5f31430e..3f1e54bfcddb 100644
1496 +--- a/drivers/gpu/drm/i915/i915_reg.h
1497 ++++ b/drivers/gpu/drm/i915/i915_reg.h
1498 +@@ -74,6 +74,7 @@
1499 + #define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
1500 + #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
1501 + #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
1502 ++#define GCDGMBUS 0xcc
1503 + #define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
1504 +
1505 +
1506 +@@ -370,6 +371,7 @@
1507 + #define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
1508 + #define PIPE_CONTROL_CS_STALL (1<<20)
1509 + #define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
1510 ++#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
1511 + #define PIPE_CONTROL_QW_WRITE (1<<14)
1512 + #define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
1513 + #define PIPE_CONTROL_DEPTH_STALL (1<<13)
1514 +@@ -1071,6 +1073,7 @@ enum punit_power_well {
1515 + #define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
1516 + #define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
1517 + #define GEN6_NOSYNC 0
1518 ++#define RING_PSMI_CTL(base) ((base)+0x50)
1519 + #define RING_MAX_IDLE(base) ((base)+0x54)
1520 + #define RING_HWS_PGA(base) ((base)+0x80)
1521 + #define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
1522 +@@ -1401,6 +1404,7 @@ enum punit_power_well {
1523 + #define GEN6_BLITTER_FBC_NOTIFY (1<<3)
1524 +
1525 + #define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
1526 ++#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
1527 + #define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
1528 + #define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
1529 +
1530 +diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
1531 +index 043123c77a1f..e22b0e825de2 100644
1532 +--- a/drivers/gpu/drm/i915/i915_suspend.c
1533 ++++ b/drivers/gpu/drm/i915/i915_suspend.c
1534 +@@ -328,6 +328,10 @@ int i915_save_state(struct drm_device *dev)
1535 + }
1536 + }
1537 +
1538 ++ if (IS_GEN4(dev))
1539 ++ pci_read_config_word(dev->pdev, GCDGMBUS,
1540 ++ &dev_priv->regfile.saveGCDGMBUS);
1541 ++
1542 + /* Cache mode state */
1543 + if (INTEL_INFO(dev)->gen < 7)
1544 + dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
1545 +@@ -356,6 +360,10 @@ int i915_restore_state(struct drm_device *dev)
1546 + mutex_lock(&dev->struct_mutex);
1547 +
1548 + i915_gem_restore_fences(dev);
1549 ++
1550 ++ if (IS_GEN4(dev))
1551 ++ pci_write_config_word(dev->pdev, GCDGMBUS,
1552 ++ dev_priv->regfile.saveGCDGMBUS);
1553 + i915_restore_display(dev);
1554 +
1555 + if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
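GCDGMBUS lives at offset 0xcc in PCI configuration space rather than in the MMIO register file, so the existing register save/restore does not cover it and it is lost across suspend on Gen4 parts; the hunks above save it in i915_save_state() and write it back before the display state is restored. The generic save/restore pattern, with a hypothetical driver-private field:

    struct my_priv {
            u16 saved_gcdgmbus;     /* config-space word at offset GCDGMBUS */
    };

    static void my_save_state(struct pci_dev *pdev, struct my_priv *priv)
    {
            /* MMIO saves do not cover config space; read it explicitly */
            pci_read_config_word(pdev, GCDGMBUS, &priv->saved_gcdgmbus);
    }

    static void my_restore_state(struct pci_dev *pdev, struct my_priv *priv)
    {
            pci_write_config_word(pdev, GCDGMBUS, priv->saved_gcdgmbus);
    }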
1556 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1557 +index 9cb5c95d5898..cadc3bcf1de2 100644
1558 +--- a/drivers/gpu/drm/i915/intel_display.c
1559 ++++ b/drivers/gpu/drm/i915/intel_display.c
1560 +@@ -12933,11 +12933,7 @@ static void i915_disable_vga(struct drm_device *dev)
1561 + vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
1562 + udelay(300);
1563 +
1564 +- /*
1565 +- * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
1566 +- * from S3 without preserving (some of?) the other bits.
1567 +- */
1568 +- I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
1569 ++ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
1570 + POSTING_READ(vga_reg);
1571 + }
1572 +
1573 +@@ -13026,8 +13022,6 @@ void intel_modeset_init(struct drm_device *dev)
1574 +
1575 + intel_shared_dpll_init(dev);
1576 +
1577 +- /* save the BIOS value before clobbering it */
1578 +- dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
1579 + /* Just disable it once at startup */
1580 + i915_disable_vga(dev);
1581 + intel_setup_outputs(dev);
1582 +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
1583 +index ad2fd605f76b..83c7ecf2608a 100644
1584 +--- a/drivers/gpu/drm/i915/intel_pm.c
1585 ++++ b/drivers/gpu/drm/i915/intel_pm.c
1586 +@@ -6520,29 +6520,6 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1587 + chv_set_pipe_power_well(dev_priv, power_well, false);
1588 + }
1589 +
1590 +-static void check_power_well_state(struct drm_i915_private *dev_priv,
1591 +- struct i915_power_well *power_well)
1592 +-{
1593 +- bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
1594 +-
1595 +- if (power_well->always_on || !i915.disable_power_well) {
1596 +- if (!enabled)
1597 +- goto mismatch;
1598 +-
1599 +- return;
1600 +- }
1601 +-
1602 +- if (enabled != (power_well->count > 0))
1603 +- goto mismatch;
1604 +-
1605 +- return;
1606 +-
1607 +-mismatch:
1608 +- WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
1609 +- power_well->name, power_well->always_on, enabled,
1610 +- power_well->count, i915.disable_power_well);
1611 +-}
1612 +-
1613 + void intel_display_power_get(struct drm_i915_private *dev_priv,
1614 + enum intel_display_power_domain domain)
1615 + {
1616 +@@ -6562,8 +6539,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
1617 + power_well->ops->enable(dev_priv, power_well);
1618 + power_well->hw_enabled = true;
1619 + }
1620 +-
1621 +- check_power_well_state(dev_priv, power_well);
1622 + }
1623 +
1624 + power_domains->domain_use_count[domain]++;
1625 +@@ -6593,8 +6568,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
1626 + power_well->hw_enabled = false;
1627 + power_well->ops->disable(dev_priv, power_well);
1628 + }
1629 +-
1630 +- check_power_well_state(dev_priv, power_well);
1631 + }
1632 +
1633 + mutex_unlock(&power_domains->lock);
1634 +diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
1635 +index 0a80e419b589..ae17e77dc08d 100644
1636 +--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
1637 ++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
1638 +@@ -362,12 +362,15 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
1639 + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
1640 + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
1641 + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
1642 ++ flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
1643 + /*
1644 + * TLB invalidate requires a post-sync write.
1645 + */
1646 + flags |= PIPE_CONTROL_QW_WRITE;
1647 + flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
1648 +
1649 ++ flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
1650 ++
1651 + /* Workaround: we must issue a pipe_control with CS-stall bit
1652 + * set before a pipe_control command that has the state cache
1653 + * invalidate bit set. */
1654 +diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
1655 +index 918b76163965..b29091b21a76 100644
1656 +--- a/drivers/gpu/drm/i915/intel_uncore.c
1657 ++++ b/drivers/gpu/drm/i915/intel_uncore.c
1658 +@@ -43,8 +43,8 @@
1659 + static void
1660 + assert_device_not_suspended(struct drm_i915_private *dev_priv)
1661 + {
1662 +- WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
1663 +- "Device suspended\n");
1664 ++ WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
1665 ++ "Device suspended\n");
1666 + }
1667 +
1668 + static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
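The assertion above trips on every forcewake register access while the device is runtime-suspended, so a plain WARN() floods dmesg with identical backtraces; WARN_ONCE() keeps the first one, which is all that is needed to find the offending access:

    /* before: one backtrace per register access while suspended */
    WARN(cond, "Device suspended\n");

    /* after: one backtrace per boot, later hits are silent */
    WARN_ONCE(cond, "Device suspended\n");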
1669 +diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
1670 +index a75c35ccf25c..165401c4045c 100644
1671 +--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
1672 ++++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
1673 +@@ -24,13 +24,6 @@
1674 +
1675 + #include "nv04.h"
1676 +
1677 +-static void
1678 +-nv4c_mc_msi_rearm(struct nouveau_mc *pmc)
1679 +-{
1680 +- struct nv04_mc_priv *priv = (void *)pmc;
1681 +- nv_wr08(priv, 0x088050, 0xff);
1682 +-}
1683 +-
1684 + struct nouveau_oclass *
1685 + nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
1686 + .base.handle = NV_SUBDEV(MC, 0x4c),
1687 +@@ -41,5 +34,4 @@ nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
1688 + .fini = _nouveau_mc_fini,
1689 + },
1690 + .intr = nv04_mc_intr,
1691 +- .msi_rearm = nv4c_mc_msi_rearm,
1692 + }.base;
1693 +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
1694 +index 30d242b25078..dce0d3918fa7 100644
1695 +--- a/drivers/gpu/drm/radeon/atombios_crtc.c
1696 ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
1697 +@@ -1851,10 +1851,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1698 + return pll;
1699 + }
1700 + /* otherwise, pick one of the plls */
1701 +- if ((rdev->family == CHIP_KAVERI) ||
1702 +- (rdev->family == CHIP_KABINI) ||
1703 ++ if ((rdev->family == CHIP_KABINI) ||
1704 + (rdev->family == CHIP_MULLINS)) {
1705 +- /* KB/KV/ML has PPLL1 and PPLL2 */
1706 ++ /* KB/ML has PPLL1 and PPLL2 */
1707 + pll_in_use = radeon_get_pll_use_mask(crtc);
1708 + if (!(pll_in_use & (1 << ATOM_PPLL2)))
1709 + return ATOM_PPLL2;
1710 +@@ -1863,7 +1862,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1711 + DRM_ERROR("unable to allocate a PPLL\n");
1712 + return ATOM_PPLL_INVALID;
1713 + } else {
1714 +- /* CI has PPLL0, PPLL1, and PPLL2 */
1715 ++ /* CI/KV has PPLL0, PPLL1, and PPLL2 */
1716 + pll_in_use = radeon_get_pll_use_mask(crtc);
1717 + if (!(pll_in_use & (1 << ATOM_PPLL2)))
1718 + return ATOM_PPLL2;
1719 +@@ -2154,6 +2153,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
1720 + case ATOM_PPLL0:
1721 + /* disable the ppll */
1722 + if ((rdev->family == CHIP_ARUBA) ||
1723 ++ (rdev->family == CHIP_KAVERI) ||
1724 + (rdev->family == CHIP_BONAIRE) ||
1725 + (rdev->family == CHIP_HAWAII))
1726 + atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
1727 +diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
1728 +index 11ba9d21b89b..db42a670f995 100644
1729 +--- a/drivers/gpu/drm/radeon/atombios_dp.c
1730 ++++ b/drivers/gpu/drm/radeon/atombios_dp.c
1731 +@@ -492,6 +492,10 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
1732 + struct radeon_connector_atom_dig *dig_connector;
1733 + int dp_clock;
1734 +
1735 ++ if ((mode->clock > 340000) &&
1736 ++ (!radeon_connector_is_dp12_capable(connector)))
1737 ++ return MODE_CLOCK_HIGH;
1738 ++
1739 + if (!radeon_connector->con_priv)
1740 + return MODE_CLOCK_HIGH;
1741 + dig_connector = radeon_connector->con_priv;
1742 +diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
1743 +index 11a55e9dad7f..c5699b593665 100644
1744 +--- a/drivers/gpu/drm/radeon/ci_dpm.c
1745 ++++ b/drivers/gpu/drm/radeon/ci_dpm.c
1746 +@@ -4729,7 +4729,7 @@ void ci_dpm_disable(struct radeon_device *rdev)
1747 + ci_enable_spread_spectrum(rdev, false);
1748 + ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
1749 + ci_stop_dpm(rdev);
1750 +- ci_enable_ds_master_switch(rdev, true);
1751 ++ ci_enable_ds_master_switch(rdev, false);
1752 + ci_enable_ulv(rdev, false);
1753 + ci_clear_vc(rdev);
1754 + ci_reset_to_default(rdev);
1755 +diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
1756 +index 89c01fa6dd8e..9328fb3dcfce 100644
1757 +--- a/drivers/gpu/drm/radeon/cik.c
1758 ++++ b/drivers/gpu/drm/radeon/cik.c
1759 +@@ -6314,6 +6314,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
1760 + }
1761 +
1762 + orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
1763 ++ data |= 0x00000001;
1764 + data &= 0xfffffffd;
1765 + if (orig != data)
1766 + WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
1767 +@@ -6345,7 +6346,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
1768 + }
1769 + } else {
1770 + orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
1771 +- data |= 0x00000002;
1772 ++ data |= 0x00000003;
1773 + if (orig != data)
1774 + WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
1775 +
1776 +diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
1777 +index 2fe8cfc966d9..bafdf92a5732 100644
1778 +--- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
1779 ++++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
1780 +@@ -103,7 +103,7 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
1781 + }
1782 +
1783 + sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
1784 +- if (sad_count < 0) {
1785 ++ if (sad_count <= 0) {
1786 + DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1787 + return;
1788 + }
1789 +diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
1790 +index 9b42001295ba..e3e9c10cfba9 100644
1791 +--- a/drivers/gpu/drm/radeon/kv_dpm.c
1792 ++++ b/drivers/gpu/drm/radeon/kv_dpm.c
1793 +@@ -2745,13 +2745,11 @@ int kv_dpm_init(struct radeon_device *rdev)
1794 + pi->enable_auto_thermal_throttling = true;
1795 + pi->disable_nb_ps3_in_battery = false;
1796 + if (radeon_bapm == -1) {
1797 +- /* There are stability issues reported on with
1798 +- * bapm enabled on an asrock system.
1799 +- */
1800 +- if (rdev->pdev->subsystem_vendor == 0x1849)
1801 +- pi->bapm_enable = false;
1802 +- else
1803 ++ /* only enable bapm on KB, ML by default */
1804 ++ if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
1805 + pi->bapm_enable = true;
1806 ++ else
1807 ++ pi->bapm_enable = false;
1808 + } else if (radeon_bapm == 0) {
1809 + pi->bapm_enable = false;
1810 + } else {
1811 +diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
1812 +index 8624979afb65..d2510cfd3fea 100644
1813 +--- a/drivers/gpu/drm/radeon/radeon_ttm.c
1814 ++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
1815 +@@ -196,7 +196,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
1816 + rbo = container_of(bo, struct radeon_bo, tbo);
1817 + switch (bo->mem.mem_type) {
1818 + case TTM_PL_VRAM:
1819 +- if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
1820 ++ if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
1821 + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
1822 + else
1823 + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
1824 +diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
1825 +index 09874d695188..025c429050c0 100644
1826 +--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
1827 ++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
1828 +@@ -297,11 +297,12 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
1829 + *
1830 + * @pool: to free the pages from
1831 + * @free_all: If set to true will free all pages in pool
1832 +- * @gfp: GFP flags.
1833 ++ * @use_static: Safe to use static buffer
1834 + **/
1835 + static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
1836 +- gfp_t gfp)
1837 ++ bool use_static)
1838 + {
1839 ++ static struct page *static_buf[NUM_PAGES_TO_ALLOC];
1840 + unsigned long irq_flags;
1841 + struct page *p;
1842 + struct page **pages_to_free;
1843 +@@ -311,7 +312,11 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
1844 + if (NUM_PAGES_TO_ALLOC < nr_free)
1845 + npages_to_free = NUM_PAGES_TO_ALLOC;
1846 +
1847 +- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
1848 ++ if (use_static)
1849 ++ pages_to_free = static_buf;
1850 ++ else
1851 ++ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
1852 ++ GFP_KERNEL);
1853 + if (!pages_to_free) {
1854 + pr_err("Failed to allocate memory for pool free operation\n");
1855 + return 0;
1856 +@@ -374,7 +379,8 @@ restart:
1857 + if (freed_pages)
1858 + ttm_pages_put(pages_to_free, freed_pages);
1859 + out:
1860 +- kfree(pages_to_free);
1861 ++ if (pages_to_free != static_buf)
1862 ++ kfree(pages_to_free);
1863 + return nr_free;
1864 + }
1865 +
1866 +@@ -383,8 +389,6 @@ out:
1867 + *
1868 + * XXX: (dchinner) Deadlock warning!
1869 + *
1870 +- * We need to pass sc->gfp_mask to ttm_page_pool_free().
1871 +- *
1872 + * This code is crying out for a shrinker per pool....
1873 + */
1874 + static unsigned long
1875 +@@ -407,8 +411,8 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1876 + if (shrink_pages == 0)
1877 + break;
1878 + pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
1879 +- shrink_pages = ttm_page_pool_free(pool, nr_free,
1880 +- sc->gfp_mask);
1881 ++ /* OK to use static buffer since global mutex is held. */
1882 ++ shrink_pages = ttm_page_pool_free(pool, nr_free, true);
1883 + freed += nr_free - shrink_pages;
1884 + }
1885 + mutex_unlock(&lock);
1886 +@@ -710,7 +714,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
1887 + }
1888 + spin_unlock_irqrestore(&pool->lock, irq_flags);
1889 + if (npages)
1890 +- ttm_page_pool_free(pool, npages, GFP_KERNEL);
1891 ++ ttm_page_pool_free(pool, npages, false);
1892 + }
1893 +
1894 + /*
1895 +@@ -849,9 +853,9 @@ void ttm_page_alloc_fini(void)
1896 + pr_info("Finalizing pool allocator\n");
1897 + ttm_pool_mm_shrink_fini(_manager);
1898 +
1899 ++ /* OK to use static buffer since global mutex is no longer used. */
1900 + for (i = 0; i < NUM_POOLS; ++i)
1901 +- ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
1902 +- GFP_KERNEL);
1903 ++ ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
1904 +
1905 + kobject_put(&_manager->kobj);
1906 + _manager = NULL;
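The underlying problem is the XXX deadlock note above: ttm_page_pool_free() used to kmalloc() its scratch array with the shrinker's sc->gfp_mask, i.e. allocate memory on the very path the kernel uses to reclaim it. The fix trades the gfp_t parameter for a bool: callers already serialized by the relevant global mutex reuse one static scratch array and never allocate during reclaim. A distilled sketch of the pattern (the buffer size is illustrative):

    #define NUM_PAGES_TO_ALLOC 512

    static int pool_free(struct ttm_page_pool *pool, unsigned nr_free,
                         bool use_static)
    {
            static struct page *static_buf[NUM_PAGES_TO_ALLOC];
            struct page **pages_to_free;

            if (use_static)         /* caller holds the global pool mutex */
                    pages_to_free = static_buf;
            else                    /* normal context: allocating is fine */
                    pages_to_free = kmalloc(NUM_PAGES_TO_ALLOC *
                                            sizeof(struct page *),
                                            GFP_KERNEL);
            if (!pages_to_free)
                    return 0;

            /* ... gather and release up to nr_free pages ... */

            if (pages_to_free != static_buf)
                    kfree(pages_to_free);
            return nr_free;
    }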
1907 +diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
1908 +index c96db433f8af..01e1d27eb078 100644
1909 +--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
1910 ++++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
1911 +@@ -411,11 +411,12 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
1912 + *
1913 + * @pool: to free the pages from
1914 + * @nr_free: If set to true will free all pages in pool
1915 +- * @gfp: GFP flags.
1916 ++ * @use_static: Safe to use static buffer
1917 + **/
1918 + static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
1919 +- gfp_t gfp)
1920 ++ bool use_static)
1921 + {
1922 ++ static struct page *static_buf[NUM_PAGES_TO_ALLOC];
1923 + unsigned long irq_flags;
1924 + struct dma_page *dma_p, *tmp;
1925 + struct page **pages_to_free;
1926 +@@ -432,7 +433,11 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
1927 + npages_to_free, nr_free);
1928 + }
1929 + #endif
1930 +- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
1931 ++ if (use_static)
1932 ++ pages_to_free = static_buf;
1933 ++ else
1934 ++ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
1935 ++ GFP_KERNEL);
1936 +
1937 + if (!pages_to_free) {
1938 + pr_err("%s: Failed to allocate memory for pool free operation\n",
1939 +@@ -502,7 +507,8 @@ restart:
1940 + if (freed_pages)
1941 + ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
1942 + out:
1943 +- kfree(pages_to_free);
1944 ++ if (pages_to_free != static_buf)
1945 ++ kfree(pages_to_free);
1946 + return nr_free;
1947 + }
1948 +
1949 +@@ -531,7 +537,8 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
1950 + if (pool->type != type)
1951 + continue;
1952 + /* Takes a spinlock.. */
1953 +- ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
1954 ++ /* OK to use static buffer since global mutex is held. */
1955 ++ ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
1956 + WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
1957 + /* This code path is called after _all_ references to the
1958 + * struct device has been dropped - so nobody should be
1959 +@@ -986,7 +993,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
1960 +
1961 + /* shrink pool if necessary (only on !is_cached pools)*/
1962 + if (npages)
1963 +- ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
1964 ++ ttm_dma_page_pool_free(pool, npages, false);
1965 + ttm->state = tt_unpopulated;
1966 + }
1967 + EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
1968 +@@ -996,8 +1003,6 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
1969 + *
1970 + * XXX: (dchinner) Deadlock warning!
1971 + *
1972 +- * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
1973 +- *
1974 + * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
1975 + * shrinkers
1976 + */
1977 +@@ -1030,8 +1035,8 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1978 + if (++idx < pool_offset)
1979 + continue;
1980 + nr_free = shrink_pages;
1981 +- shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
1982 +- sc->gfp_mask);
1983 ++ /* OK to use static buffer since global mutex is held. */
1984 ++ shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
1985 + freed += nr_free - shrink_pages;
1986 +
1987 + pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
1988 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
1989 +index 25f3c250fd98..daeca571b42f 100644
1990 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
1991 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
1992 +@@ -1063,8 +1063,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
1993 +
1994 + vmaster = vmw_master_check(dev, file_priv, flags);
1995 + if (unlikely(IS_ERR(vmaster))) {
1996 +- DRM_INFO("IOCTL ERROR %d\n", nr);
1997 +- return PTR_ERR(vmaster);
1998 ++ ret = PTR_ERR(vmaster);
1999 ++
2000 ++ if (ret != -ERESTARTSYS)
2001 ++ DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
2002 ++ nr, ret);
2003 ++ return ret;
2004 + }
2005 +
2006 + ret = ioctl_func(filp, cmd, arg);
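-ERESTARTSYS is not a real failure: it means a signal arrived while the ioctl was waiting for the master lock and the syscall layer will transparently restart the call, so the old unconditional DRM_INFO only filled the log during ordinary signal delivery. The usual shape of the check (wait_for_lock_interruptible is a hypothetical helper):

    static long my_ioctl(unsigned int nr)
    {
            long ret = wait_for_lock_interruptible();

            if (ret < 0) {
                    if (ret != -ERESTARTSYS)  /* only log real failures */
                            pr_err("ioctl %u failed: %ld\n", nr, ret);
                    return ret;  /* -ERESTARTSYS: syscall is restarted */
            }
            /* ... proceed with the ioctl ... */
            return 0;
    }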
2007 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
2008 +index 197164fd7803..b7594cb758af 100644
2009 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
2010 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
2011 +@@ -545,35 +545,19 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
2012 +
2013 + static void vmw_fence_destroy(struct vmw_fence_obj *fence)
2014 + {
2015 +- struct vmw_fence_manager *fman = fman_from_fence(fence);
2016 +-
2017 + fence_free(&fence->base);
2018 +-
2019 +- /*
2020 +- * Free kernel space accounting.
2021 +- */
2022 +- ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
2023 +- fman->fence_size);
2024 + }
2025 +
2026 + int vmw_fence_create(struct vmw_fence_manager *fman,
2027 + uint32_t seqno,
2028 + struct vmw_fence_obj **p_fence)
2029 + {
2030 +- struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
2031 + struct vmw_fence_obj *fence;
2032 + int ret;
2033 +
2034 +- ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
2035 +- false, false);
2036 +- if (unlikely(ret != 0))
2037 +- return ret;
2038 +-
2039 + fence = kzalloc(sizeof(*fence), GFP_KERNEL);
2040 +- if (unlikely(fence == NULL)) {
2041 +- ret = -ENOMEM;
2042 +- goto out_no_object;
2043 +- }
2044 ++ if (unlikely(fence == NULL))
2045 ++ return -ENOMEM;
2046 +
2047 + ret = vmw_fence_obj_init(fman, fence, seqno,
2048 + vmw_fence_destroy);
2049 +@@ -585,8 +569,6 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
2050 +
2051 + out_err_init:
2052 + kfree(fence);
2053 +-out_no_object:
2054 +- ttm_mem_global_free(mem_glob, fman->fence_size);
2055 + return ret;
2056 + }
2057 +
2058 +@@ -1105,6 +1087,8 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
2059 + if (ret != 0)
2060 + goto out_no_queue;
2061 +
2062 ++ return 0;
2063 ++
2064 + out_no_queue:
2065 + event->base.destroy(&event->base);
2066 + out_no_event:
2067 +@@ -1180,17 +1164,10 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
2068 +
2069 + BUG_ON(fence == NULL);
2070 +
2071 +- if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
2072 +- ret = vmw_event_fence_action_create(file_priv, fence,
2073 +- arg->flags,
2074 +- arg->user_data,
2075 +- true);
2076 +- else
2077 +- ret = vmw_event_fence_action_create(file_priv, fence,
2078 +- arg->flags,
2079 +- arg->user_data,
2080 +- true);
2081 +-
2082 ++ ret = vmw_event_fence_action_create(file_priv, fence,
2083 ++ arg->flags,
2084 ++ arg->user_data,
2085 ++ true);
2086 + if (unlikely(ret != 0)) {
2087 + if (ret != -ERESTARTSYS)
2088 + DRM_ERROR("Failed to attach event to fence.\n");
2089 +diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
2090 +index e37412da15f5..b99de00e57b8 100644
2091 +--- a/drivers/iio/adc/ad799x.c
2092 ++++ b/drivers/iio/adc/ad799x.c
2093 +@@ -143,9 +143,15 @@ static int ad799x_write_config(struct ad799x_state *st, u16 val)
2094 + case ad7998:
2095 + return i2c_smbus_write_word_swapped(st->client, AD7998_CONF_REG,
2096 + val);
2097 +- default:
2098 ++ case ad7992:
2099 ++ case ad7993:
2100 ++ case ad7994:
2101 + return i2c_smbus_write_byte_data(st->client, AD7998_CONF_REG,
2102 + val);
2103 ++ default:
2104 ++ /* Will be written when doing a conversion */
2105 ++ st->config = val;
2106 ++ return 0;
2107 + }
2108 + }
2109 +
2110 +@@ -155,8 +161,13 @@ static int ad799x_read_config(struct ad799x_state *st)
2111 + case ad7997:
2112 + case ad7998:
2113 + return i2c_smbus_read_word_swapped(st->client, AD7998_CONF_REG);
2114 +- default:
2115 ++ case ad7992:
2116 ++ case ad7993:
2117 ++ case ad7994:
2118 + return i2c_smbus_read_byte_data(st->client, AD7998_CONF_REG);
2119 ++ default:
2120 ++ /* No readback support */
2121 ++ return st->config;
2122 + }
2123 + }
2124 +
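Previously every chip other than AD7997/AD7998 took the byte-wide I2C path, including parts that have no configuration register to read or write at all. Enumerating the chips that really have a byte-wide config register and caching the value for the rest keeps the state machine honest; a condensed sketch of the dispatch (the switch key abbreviates the driver's chip-id field):

    static int ad799x_write_config(struct ad799x_state *st, u16 val)
    {
            switch (st->id) {
            case ad7997:
            case ad7998:            /* 16-bit config register */
                    return i2c_smbus_write_word_swapped(st->client,
                                                        AD7998_CONF_REG, val);
            case ad7992:
            case ad7993:
            case ad7994:            /* 8-bit config register */
                    return i2c_smbus_write_byte_data(st->client,
                                                     AD7998_CONF_REG, val);
            default:
                    /* no config register: cache, applied on conversion */
                    st->config = val;
                    return 0;
            }
    }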
2125 +diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
2126 +index cd4174ca9a76..f14c3849e568 100644
2127 +--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
2128 ++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
2129 +@@ -432,6 +432,7 @@ struct fast_reg_descriptor {
2130 + * @cma_id: rdma_cm connection manager handle
2131 + * @qp: Connection Queue-pair
2132 + * @post_recv_buf_count: post receive counter
2133 ++ * @sig_count: send work request signal count
2134 + * @rx_wr: receive work request for batch posts
2135 + * @device: reference to iser device
2136 + * @comp: iser completion context
2137 +@@ -452,6 +453,7 @@ struct ib_conn {
2138 + struct rdma_cm_id *cma_id;
2139 + struct ib_qp *qp;
2140 + int post_recv_buf_count;
2141 ++ u8 sig_count;
2142 + struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
2143 + struct iser_device *device;
2144 + struct iser_comp *comp;
2145 +diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
2146 +index 5a489ea63732..3821633f1065 100644
2147 +--- a/drivers/infiniband/ulp/iser/iser_initiator.c
2148 ++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
2149 +@@ -369,7 +369,7 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
2150 + return 0;
2151 + }
2152 +
2153 +-static inline bool iser_signal_comp(int sig_count)
2154 ++static inline bool iser_signal_comp(u8 sig_count)
2155 + {
2156 + return ((sig_count % ISER_SIGNAL_CMD_COUNT) == 0);
2157 + }
2158 +@@ -388,7 +388,7 @@ int iser_send_command(struct iscsi_conn *conn,
2159 + struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
2160 + struct scsi_cmnd *sc = task->sc;
2161 + struct iser_tx_desc *tx_desc = &iser_task->desc;
2162 +- static unsigned sig_count;
2163 ++ u8 sig_count = ++iser_conn->ib_conn.sig_count;
2164 +
2165 + edtl = ntohl(hdr->data_length);
2166 +
2167 +@@ -435,7 +435,7 @@ int iser_send_command(struct iscsi_conn *conn,
2168 + iser_task->status = ISER_TASK_STATUS_STARTED;
2169 +
2170 + err = iser_post_send(&iser_conn->ib_conn, tx_desc,
2171 +- iser_signal_comp(++sig_count));
2172 ++ iser_signal_comp(sig_count));
2173 + if (!err)
2174 + return 0;
2175 +
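The removed `static unsigned sig_count` was a single counter shared by every iSER connection in the system and incremented without locking, so the every-Nth-send signalling cadence could skew under load. Moving the counter into struct ib_conn makes it per-QP, and a u8 is wide enough because only the value modulo ISER_SIGNAL_CMD_COUNT matters and that count divides 256. In sketch form:

    /* per-connection counter instead of a function-static global */
    u8 sig_count = ++ib_conn->sig_count;

    /* request a signalled completion every ISER_SIGNAL_CMD_COUNT sends;
     * u8 wrap-around is harmless since the modulus divides 256 */
    bool signal = (sig_count % ISER_SIGNAL_CMD_COUNT) == 0;
    err = iser_post_send(ib_conn, tx_desc, signal);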
2176 +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
2177 +index 10641b7816f4..a6daabc70425 100644
2178 +--- a/drivers/infiniband/ulp/isert/ib_isert.c
2179 ++++ b/drivers/infiniband/ulp/isert/ib_isert.c
2180 +@@ -41,6 +41,7 @@ static DEFINE_MUTEX(device_list_mutex);
2181 + static LIST_HEAD(device_list);
2182 + static struct workqueue_struct *isert_rx_wq;
2183 + static struct workqueue_struct *isert_comp_wq;
2184 ++static struct workqueue_struct *isert_release_wq;
2185 +
2186 + static void
2187 + isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
2188 +@@ -54,6 +55,19 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2189 + struct isert_rdma_wr *wr);
2190 + static int
2191 + isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
2192 ++static int
2193 ++isert_rdma_post_recvl(struct isert_conn *isert_conn);
2194 ++static int
2195 ++isert_rdma_accept(struct isert_conn *isert_conn);
2196 ++struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
2197 ++
2198 ++static inline bool
2199 ++isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
2200 ++{
2201 ++ return (conn->pi_support &&
2202 ++ cmd->prot_op != TARGET_PROT_NORMAL);
2203 ++}
2204 ++
2205 +
2206 + static void
2207 + isert_qp_event_callback(struct ib_event *e, void *context)
2208 +@@ -90,8 +104,7 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
2209 + }
2210 +
2211 + static int
2212 +-isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
2213 +- u8 protection)
2214 ++isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
2215 + {
2216 + struct isert_device *device = isert_conn->conn_device;
2217 + struct ib_qp_init_attr attr;
2218 +@@ -126,7 +139,7 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
2219 + attr.cap.max_recv_sge = 1;
2220 + attr.sq_sig_type = IB_SIGNAL_REQ_WR;
2221 + attr.qp_type = IB_QPT_RC;
2222 +- if (protection)
2223 ++ if (device->pi_capable)
2224 + attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
2225 +
2226 + pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
2227 +@@ -137,12 +150,18 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
2228 + ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
2229 + if (ret) {
2230 + pr_err("rdma_create_qp failed for cma_id %d\n", ret);
2231 +- return ret;
2232 ++ goto err;
2233 + }
2234 + isert_conn->conn_qp = cma_id->qp;
2235 + pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
2236 +
2237 + return 0;
2238 ++err:
2239 ++ mutex_lock(&device_list_mutex);
2240 ++ device->cq_active_qps[min_index]--;
2241 ++ mutex_unlock(&device_list_mutex);
2242 ++
2243 ++ return ret;
2244 + }
2245 +
2246 + static void
2247 +@@ -430,8 +449,68 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
2248 + }
2249 +
2250 + static int
2251 ++isert_create_pi_ctx(struct fast_reg_descriptor *desc,
2252 ++ struct ib_device *device,
2253 ++ struct ib_pd *pd)
2254 ++{
2255 ++ struct ib_mr_init_attr mr_init_attr;
2256 ++ struct pi_context *pi_ctx;
2257 ++ int ret;
2258 ++
2259 ++ pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
2260 ++ if (!pi_ctx) {
2261 ++ pr_err("Failed to allocate pi context\n");
2262 ++ return -ENOMEM;
2263 ++ }
2264 ++
2265 ++ pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
2266 ++ ISCSI_ISER_SG_TABLESIZE);
2267 ++ if (IS_ERR(pi_ctx->prot_frpl)) {
2268 ++ pr_err("Failed to allocate prot frpl err=%ld\n",
2269 ++ PTR_ERR(pi_ctx->prot_frpl));
2270 ++ ret = PTR_ERR(pi_ctx->prot_frpl);
2271 ++ goto err_pi_ctx;
2272 ++ }
2273 ++
2274 ++ pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
2275 ++ if (IS_ERR(pi_ctx->prot_mr)) {
2276 ++ pr_err("Failed to allocate prot frmr err=%ld\n",
2277 ++ PTR_ERR(pi_ctx->prot_mr));
2278 ++ ret = PTR_ERR(pi_ctx->prot_mr);
2279 ++ goto err_prot_frpl;
2280 ++ }
2281 ++ desc->ind |= ISERT_PROT_KEY_VALID;
2282 ++
2283 ++ memset(&mr_init_attr, 0, sizeof(mr_init_attr));
2284 ++ mr_init_attr.max_reg_descriptors = 2;
2285 ++ mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
2286 ++ pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
2287 ++ if (IS_ERR(pi_ctx->sig_mr)) {
2288 ++ pr_err("Failed to allocate signature enabled mr err=%ld\n",
2289 ++ PTR_ERR(pi_ctx->sig_mr));
2290 ++ ret = PTR_ERR(pi_ctx->sig_mr);
2291 ++ goto err_prot_mr;
2292 ++ }
2293 ++
2294 ++ desc->pi_ctx = pi_ctx;
2295 ++ desc->ind |= ISERT_SIG_KEY_VALID;
2296 ++ desc->ind &= ~ISERT_PROTECTED;
2297 ++
2298 ++ return 0;
2299 ++
2300 ++err_prot_mr:
2301 ++ ib_dereg_mr(desc->pi_ctx->prot_mr);
2302 ++err_prot_frpl:
2303 ++ ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
2304 ++err_pi_ctx:
2305 ++ kfree(desc->pi_ctx);
2306 ++
2307 ++ return ret;
2308 ++}
2309 ++
2310 ++static int
2311 + isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
2312 +- struct fast_reg_descriptor *fr_desc, u8 protection)
2313 ++ struct fast_reg_descriptor *fr_desc)
2314 + {
2315 + int ret;
2316 +
2317 +@@ -450,62 +529,12 @@ isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
2318 + ret = PTR_ERR(fr_desc->data_mr);
2319 + goto err_data_frpl;
2320 + }
2321 +- pr_debug("Create fr_desc %p page_list %p\n",
2322 +- fr_desc, fr_desc->data_frpl->page_list);
2323 + fr_desc->ind |= ISERT_DATA_KEY_VALID;
2324 +
2325 +- if (protection) {
2326 +- struct ib_mr_init_attr mr_init_attr = {0};
2327 +- struct pi_context *pi_ctx;
2328 +-
2329 +- fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL);
2330 +- if (!fr_desc->pi_ctx) {
2331 +- pr_err("Failed to allocate pi context\n");
2332 +- ret = -ENOMEM;
2333 +- goto err_data_mr;
2334 +- }
2335 +- pi_ctx = fr_desc->pi_ctx;
2336 +-
2337 +- pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
2338 +- ISCSI_ISER_SG_TABLESIZE);
2339 +- if (IS_ERR(pi_ctx->prot_frpl)) {
2340 +- pr_err("Failed to allocate prot frpl err=%ld\n",
2341 +- PTR_ERR(pi_ctx->prot_frpl));
2342 +- ret = PTR_ERR(pi_ctx->prot_frpl);
2343 +- goto err_pi_ctx;
2344 +- }
2345 +-
2346 +- pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
2347 +- if (IS_ERR(pi_ctx->prot_mr)) {
2348 +- pr_err("Failed to allocate prot frmr err=%ld\n",
2349 +- PTR_ERR(pi_ctx->prot_mr));
2350 +- ret = PTR_ERR(pi_ctx->prot_mr);
2351 +- goto err_prot_frpl;
2352 +- }
2353 +- fr_desc->ind |= ISERT_PROT_KEY_VALID;
2354 +-
2355 +- mr_init_attr.max_reg_descriptors = 2;
2356 +- mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
2357 +- pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
2358 +- if (IS_ERR(pi_ctx->sig_mr)) {
2359 +- pr_err("Failed to allocate signature enabled mr err=%ld\n",
2360 +- PTR_ERR(pi_ctx->sig_mr));
2361 +- ret = PTR_ERR(pi_ctx->sig_mr);
2362 +- goto err_prot_mr;
2363 +- }
2364 +- fr_desc->ind |= ISERT_SIG_KEY_VALID;
2365 +- }
2366 +- fr_desc->ind &= ~ISERT_PROTECTED;
2367 ++ pr_debug("Created fr_desc %p\n", fr_desc);
2368 +
2369 + return 0;
2370 +-err_prot_mr:
2371 +- ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
2372 +-err_prot_frpl:
2373 +- ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
2374 +-err_pi_ctx:
2375 +- kfree(fr_desc->pi_ctx);
2376 +-err_data_mr:
2377 +- ib_dereg_mr(fr_desc->data_mr);
2378 ++
2379 + err_data_frpl:
2380 + ib_free_fast_reg_page_list(fr_desc->data_frpl);
2381 +
2382 +@@ -513,7 +542,7 @@ err_data_frpl:
2383 + }
2384 +
2385 + static int
2386 +-isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
2387 ++isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
2388 + {
2389 + struct fast_reg_descriptor *fr_desc;
2390 + struct isert_device *device = isert_conn->conn_device;
2391 +@@ -537,8 +566,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
2392 + }
2393 +
2394 + ret = isert_create_fr_desc(device->ib_device,
2395 +- isert_conn->conn_pd, fr_desc,
2396 +- pi_support);
2397 ++ isert_conn->conn_pd, fr_desc);
2398 + if (ret) {
2399 + pr_err("Failed to create fastreg descriptor err=%d\n",
2400 + ret);
2401 +@@ -563,13 +591,12 @@ err:
2402 + static int
2403 + isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
2404 + {
2405 +- struct iscsi_np *np = cma_id->context;
2406 +- struct isert_np *isert_np = np->np_context;
2407 ++ struct isert_np *isert_np = cma_id->context;
2408 ++ struct iscsi_np *np = isert_np->np;
2409 + struct isert_conn *isert_conn;
2410 + struct isert_device *device;
2411 + struct ib_device *ib_dev = cma_id->device;
2412 + int ret = 0;
2413 +- u8 pi_support;
2414 +
2415 + spin_lock_bh(&np->np_thread_lock);
2416 + if (!np->enabled) {
2417 +@@ -590,6 +617,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
2418 + isert_conn->state = ISER_CONN_INIT;
2419 + INIT_LIST_HEAD(&isert_conn->conn_accept_node);
2420 + init_completion(&isert_conn->conn_login_comp);
2421 ++ init_completion(&isert_conn->login_req_comp);
2422 + init_completion(&isert_conn->conn_wait);
2423 + init_completion(&isert_conn->conn_wait_comp_err);
2424 + kref_init(&isert_conn->conn_kref);
2425 +@@ -597,7 +625,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
2426 + spin_lock_init(&isert_conn->conn_lock);
2427 + INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
2428 +
2429 +- cma_id->context = isert_conn;
2430 + isert_conn->conn_cm_id = cma_id;
2431 +
2432 + isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
2433 +@@ -669,15 +696,15 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
2434 + goto out_mr;
2435 + }
2436 +
2437 +- pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
2438 +- if (pi_support && !device->pi_capable) {
2439 +- pr_err("Protection information requested but not supported, "
2440 +- "rejecting connect request\n");
2441 +- ret = rdma_reject(cma_id, NULL, 0);
2442 +- goto out_mr;
2443 +- }
2444 ++ ret = isert_conn_setup_qp(isert_conn, cma_id);
2445 ++ if (ret)
2446 ++ goto out_conn_dev;
2447 +
2448 +- ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support);
2449 ++ ret = isert_rdma_post_recvl(isert_conn);
2450 ++ if (ret)
2451 ++ goto out_conn_dev;
2452 ++
2453 ++ ret = isert_rdma_accept(isert_conn);
2454 + if (ret)
2455 + goto out_conn_dev;
2456 +
2457 +@@ -705,6 +732,7 @@ out_login_buf:
2458 + kfree(isert_conn->login_buf);
2459 + out:
2460 + kfree(isert_conn);
2461 ++ rdma_reject(cma_id, NULL, 0);
2462 + return ret;
2463 + }
2464 +
2465 +@@ -720,18 +748,20 @@ isert_connect_release(struct isert_conn *isert_conn)
2466 + if (device && device->use_fastreg)
2467 + isert_conn_free_fastreg_pool(isert_conn);
2468 +
2469 ++ isert_free_rx_descriptors(isert_conn);
2470 ++ rdma_destroy_id(isert_conn->conn_cm_id);
2471 ++
2472 + if (isert_conn->conn_qp) {
2473 + cq_index = ((struct isert_cq_desc *)
2474 + isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
2475 + pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
2476 ++ mutex_lock(&device_list_mutex);
2477 + isert_conn->conn_device->cq_active_qps[cq_index]--;
2478 ++ mutex_unlock(&device_list_mutex);
2479 +
2480 +- rdma_destroy_qp(isert_conn->conn_cm_id);
2481 ++ ib_destroy_qp(isert_conn->conn_qp);
2482 + }
2483 +
2484 +- isert_free_rx_descriptors(isert_conn);
2485 +- rdma_destroy_id(isert_conn->conn_cm_id);
2486 +-
2487 + ib_dereg_mr(isert_conn->conn_mr);
2488 + ib_dealloc_pd(isert_conn->conn_pd);
2489 +
2490 +@@ -754,9 +784,19 @@ isert_connect_release(struct isert_conn *isert_conn)
2491 + static void
2492 + isert_connected_handler(struct rdma_cm_id *cma_id)
2493 + {
2494 +- struct isert_conn *isert_conn = cma_id->context;
2495 ++ struct isert_conn *isert_conn = cma_id->qp->qp_context;
2496 ++
2497 ++ pr_info("conn %p\n", isert_conn);
2498 +
2499 +- kref_get(&isert_conn->conn_kref);
2500 ++ if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
2501 ++ pr_warn("conn %p connect_release is running\n", isert_conn);
2502 ++ return;
2503 ++ }
2504 ++
2505 ++ mutex_lock(&isert_conn->conn_mutex);
2506 ++ if (isert_conn->state != ISER_CONN_FULL_FEATURE)
2507 ++ isert_conn->state = ISER_CONN_UP;
2508 ++ mutex_unlock(&isert_conn->conn_mutex);
2509 + }
2510 +
2511 + static void
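The plain kref_get() the handler used before could race against a connection whose refcount had already dropped to zero, resurrecting an object that isert_connect_release() is concurrently freeing. kref_get_unless_zero() takes the reference only if one still exists and lets the caller back off otherwise; it is the standard guard whenever an object stays reachable (here via cma_id->qp->qp_context) independently of its refcount:

    if (!kref_get_unless_zero(&conn->kref)) {
            /*
             * Refcount already hit zero: release is in flight and the
             * object may be freed at any moment. Do not touch it.
             */
            return;
    }
    /* we now hold a reference; conn stays valid until our kref_put() */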
2512 +@@ -777,65 +817,108 @@ isert_put_conn(struct isert_conn *isert_conn)
2513 + kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
2514 + }
2515 +
2516 ++/**
2517 ++ * isert_conn_terminate() - Initiate connection termination
2518 ++ * @isert_conn: isert connection struct
2519 ++ *
2520 ++ * Notes:
2521 ++ * In case the connection state is FULL_FEATURE, move state
2522 ++ * to TERMINATING and start teardown sequence (rdma_disconnect).
2523 ++ * In case the connection state is UP, complete flush as well.
2524 ++ *
2525 ++ * This routine must be called with conn_mutex held. Thus it is
2526 ++ * safe to call multiple times.
2527 ++ */
2528 + static void
2529 +-isert_disconnect_work(struct work_struct *work)
2530 ++isert_conn_terminate(struct isert_conn *isert_conn)
2531 + {
2532 +- struct isert_conn *isert_conn = container_of(work,
2533 +- struct isert_conn, conn_logout_work);
2534 ++ int err;
2535 +
2536 +- pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
2537 +- mutex_lock(&isert_conn->conn_mutex);
2538 +- if (isert_conn->state == ISER_CONN_UP)
2539 ++ switch (isert_conn->state) {
2540 ++ case ISER_CONN_TERMINATING:
2541 ++ break;
2542 ++ case ISER_CONN_UP:
2543 ++ /*
2544 ++ * No flush completions will occur as we didn't
2545 ++ * get to ISER_CONN_FULL_FEATURE yet, complete
2546 ++ * to allow teardown progress.
2547 ++ */
2548 ++ complete(&isert_conn->conn_wait_comp_err);
2549 ++ case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
2550 ++ pr_info("Terminating conn %p state %d\n",
2551 ++ isert_conn, isert_conn->state);
2552 + isert_conn->state = ISER_CONN_TERMINATING;
2553 +-
2554 +- if (isert_conn->post_recv_buf_count == 0 &&
2555 +- atomic_read(&isert_conn->post_send_buf_count) == 0) {
2556 +- mutex_unlock(&isert_conn->conn_mutex);
2557 +- goto wake_up;
2558 +- }
2559 +- if (!isert_conn->conn_cm_id) {
2560 +- mutex_unlock(&isert_conn->conn_mutex);
2561 +- isert_put_conn(isert_conn);
2562 +- return;
2563 ++ err = rdma_disconnect(isert_conn->conn_cm_id);
2564 ++ if (err)
2565 ++ pr_warn("Failed rdma_disconnect isert_conn %p\n",
2566 ++ isert_conn);
2567 ++ break;
2568 ++ default:
2569 ++ pr_warn("conn %p teminating in state %d\n",
2570 ++ isert_conn, isert_conn->state);
2571 + }
2572 ++}
2573 +
2574 +- if (isert_conn->disconnect) {
2575 +- /* Send DREQ/DREP towards our initiator */
2576 +- rdma_disconnect(isert_conn->conn_cm_id);
2577 +- }
2578 ++static int
2579 ++isert_np_cma_handler(struct isert_np *isert_np,
2580 ++ enum rdma_cm_event_type event)
2581 ++{
2582 ++ pr_debug("isert np %p, handling event %d\n", isert_np, event);
2583 +
2584 +- mutex_unlock(&isert_conn->conn_mutex);
2585 ++ switch (event) {
2586 ++ case RDMA_CM_EVENT_DEVICE_REMOVAL:
2587 ++ isert_np->np_cm_id = NULL;
2588 ++ break;
2589 ++ case RDMA_CM_EVENT_ADDR_CHANGE:
2590 ++ isert_np->np_cm_id = isert_setup_id(isert_np);
2591 ++ if (IS_ERR(isert_np->np_cm_id)) {
2592 ++ pr_err("isert np %p setup id failed: %ld\n",
2593 ++ isert_np, PTR_ERR(isert_np->np_cm_id));
2594 ++ isert_np->np_cm_id = NULL;
2595 ++ }
2596 ++ break;
2597 ++ default:
2598 ++ pr_err("isert np %p Unexpected event %d\n",
2599 ++ isert_np, event);
2600 ++ }
2601 +
2602 +-wake_up:
2603 +- complete(&isert_conn->conn_wait);
2604 ++ return -1;
2605 + }
2606 +
2607 + static int
2608 +-isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
2609 ++isert_disconnected_handler(struct rdma_cm_id *cma_id,
2610 ++ enum rdma_cm_event_type event)
2611 + {
2612 ++ struct isert_np *isert_np = cma_id->context;
2613 + struct isert_conn *isert_conn;
2614 +
2615 +- if (!cma_id->qp) {
2616 +- struct isert_np *isert_np = cma_id->context;
2617 ++ if (isert_np->np_cm_id == cma_id)
2618 ++ return isert_np_cma_handler(cma_id->context, event);
2619 +
2620 +- isert_np->np_cm_id = NULL;
2621 +- return -1;
2622 +- }
2623 ++ isert_conn = cma_id->qp->qp_context;
2624 +
2625 +- isert_conn = (struct isert_conn *)cma_id->context;
2626 ++ mutex_lock(&isert_conn->conn_mutex);
2627 ++ isert_conn_terminate(isert_conn);
2628 ++ mutex_unlock(&isert_conn->conn_mutex);
2629 +
2630 +- isert_conn->disconnect = disconnect;
2631 +- INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
2632 +- schedule_work(&isert_conn->conn_logout_work);
2633 ++ pr_info("conn %p completing conn_wait\n", isert_conn);
2634 ++ complete(&isert_conn->conn_wait);
2635 +
2636 + return 0;
2637 + }
2638 +
2639 ++static void
2640 ++isert_connect_error(struct rdma_cm_id *cma_id)
2641 ++{
2642 ++ struct isert_conn *isert_conn = cma_id->qp->qp_context;
2643 ++
2644 ++ isert_put_conn(isert_conn);
2645 ++}
2646 ++
2647 + static int
2648 + isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
2649 + {
2650 + int ret = 0;
2651 +- bool disconnect = false;
2652 +
2653 + pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
2654 + event->event, event->status, cma_id->context, cma_id);
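isert_conn_terminate() replaces the old workqueue-based disconnect with a state machine that callers invoke under conn_mutex, which makes it safe to call repeatedly: TERMINATING is a no-op; UP first completes conn_wait_comp_err (no flush completion will ever arrive for a connection that never reached FULL_FEATURE) and then deliberately falls through, as the FALLTHRU comment marks, to the FULL_FEATURE handling, which flips the state and issues rdma_disconnect(). Reduced to its shape (simplified state and field names):

    switch (conn->state) {
    case CONN_TERMINATING:
            break;                          /* second call: nothing to do */
    case CONN_UP:
            /* never reached full feature: unblock the error waiter */
            complete(&conn->wait_comp_err);
            /* fall through */
    case CONN_FULL_FEATURE:
            conn->state = CONN_TERMINATING;
            rdma_disconnect(conn->cm_id);   /* kicks DISCONNECTED event */
            break;
    default:
            pr_warn("terminate in unexpected state %d\n", conn->state);
    }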
2655 +@@ -853,11 +936,14 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
2656 + case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
2657 + case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
2658 + case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
2659 +- disconnect = true;
2660 + case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
2661 +- ret = isert_disconnected_handler(cma_id, disconnect);
2662 ++ ret = isert_disconnected_handler(cma_id, event->event);
2663 + break;
2664 ++ case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
2665 ++ case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
2666 + case RDMA_CM_EVENT_CONNECT_ERROR:
2667 ++ isert_connect_error(cma_id);
2668 ++ break;
2669 + default:
2670 + pr_err("Unhandled RDMA CMA event: %d\n", event->event);
2671 + break;
2672 +@@ -991,7 +1077,7 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
2673 + * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
2674 + */
2675 + mutex_lock(&isert_conn->conn_mutex);
2676 +- if (coalesce && isert_conn->state == ISER_CONN_UP &&
2677 ++ if (coalesce && isert_conn->state == ISER_CONN_FULL_FEATURE &&
2678 + ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
2679 + tx_desc->llnode_active = true;
2680 + llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
2681 +@@ -1072,11 +1158,7 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
2682 + if (login->login_complete) {
2683 + if (!conn->sess->sess_ops->SessionType &&
2684 + isert_conn->conn_device->use_fastreg) {
2685 +- /* Normal Session and fastreg is used */
2686 +- u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
2687 +-
2688 +- ret = isert_conn_create_fastreg_pool(isert_conn,
2689 +- pi_support);
2690 ++ ret = isert_conn_create_fastreg_pool(isert_conn);
2691 + if (ret) {
2692 + pr_err("Conn: %p failed to create"
2693 + " fastreg pool\n", isert_conn);
2694 +@@ -1092,7 +1174,10 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
2695 + if (ret)
2696 + return ret;
2697 +
2698 +- isert_conn->state = ISER_CONN_UP;
2699 ++ /* Now we are in FULL_FEATURE phase */
2700 ++ mutex_lock(&isert_conn->conn_mutex);
2701 ++ isert_conn->state = ISER_CONN_FULL_FEATURE;
2702 ++ mutex_unlock(&isert_conn->conn_mutex);
2703 + goto post_send;
2704 + }
2705 +
2706 +@@ -1109,18 +1194,17 @@ post_send:
2707 + }
2708 +
2709 + static void
2710 +-isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
2711 +- struct isert_conn *isert_conn)
2712 ++isert_rx_login_req(struct isert_conn *isert_conn)
2713 + {
2714 ++ struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
2715 ++ int rx_buflen = isert_conn->login_req_len;
2716 + struct iscsi_conn *conn = isert_conn->conn;
2717 + struct iscsi_login *login = conn->conn_login;
2718 + int size;
2719 +
2720 +- if (!login) {
2721 +- pr_err("conn->conn_login is NULL\n");
2722 +- dump_stack();
2723 +- return;
2724 +- }
2725 ++ pr_info("conn %p\n", isert_conn);
2726 ++
2727 ++ WARN_ON_ONCE(!login);
2728 +
2729 + if (login->first_request) {
2730 + struct iscsi_login_req *login_req =
2731 +@@ -1483,11 +1567,20 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
2732 + hdr->opcode, hdr->itt, hdr->flags,
2733 + (int)(xfer_len - ISER_HEADERS_LEN));
2734 +
2735 +- if ((char *)desc == isert_conn->login_req_buf)
2736 +- isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
2737 +- isert_conn);
2738 +- else
2739 ++ if ((char *)desc == isert_conn->login_req_buf) {
2740 ++ isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
2741 ++ if (isert_conn->conn) {
2742 ++ struct iscsi_login *login = isert_conn->conn->conn_login;
2743 ++
2744 ++ if (login && !login->first_request)
2745 ++ isert_rx_login_req(isert_conn);
2746 ++ }
2747 ++ mutex_lock(&isert_conn->conn_mutex);
2748 ++ complete(&isert_conn->login_req_comp);
2749 ++ mutex_unlock(&isert_conn->conn_mutex);
2750 ++ } else {
2751 + isert_rx_do_work(desc, isert_conn);
2752 ++ }
2753 +
2754 + ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
2755 + DMA_FROM_DEVICE);
2756 +@@ -2046,7 +2139,7 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn)
2757 + msleep(3000);
2758 +
2759 + mutex_lock(&isert_conn->conn_mutex);
2760 +- isert_conn->state = ISER_CONN_DOWN;
2761 ++ isert_conn_terminate(isert_conn);
2762 + mutex_unlock(&isert_conn->conn_mutex);
2763 +
2764 + iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
2765 +@@ -2231,8 +2324,16 @@ isert_get_sup_prot_ops(struct iscsi_conn *conn)
2766 + struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2767 + struct isert_device *device = isert_conn->conn_device;
2768 +
2769 +- if (device->pi_capable)
2770 +- return TARGET_PROT_ALL;
2771 ++ if (conn->tpg->tpg_attrib.t10_pi) {
2772 ++ if (device->pi_capable) {
2773 ++ pr_info("conn %p PI offload enabled\n", isert_conn);
2774 ++ isert_conn->pi_support = true;
2775 ++ return TARGET_PROT_ALL;
2776 ++ }
2777 ++ }
2778 ++
2779 ++ pr_info("conn %p PI offload disabled\n", isert_conn);
2780 ++ isert_conn->pi_support = false;
2781 +
2782 + return TARGET_PROT_NORMAL;
2783 + }
2784 +@@ -2681,10 +2782,10 @@ isert_set_prot_checks(u8 prot_checks)
2785 + }
2786 +
2787 + static int
2788 +-isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
2789 +- struct fast_reg_descriptor *fr_desc,
2790 +- struct ib_sge *data_sge, struct ib_sge *prot_sge,
2791 +- struct ib_sge *sig_sge)
2792 ++isert_reg_sig_mr(struct isert_conn *isert_conn,
2793 ++ struct se_cmd *se_cmd,
2794 ++ struct isert_rdma_wr *rdma_wr,
2795 ++ struct fast_reg_descriptor *fr_desc)
2796 + {
2797 + struct ib_send_wr sig_wr, inv_wr;
2798 + struct ib_send_wr *bad_wr, *wr = NULL;
2799 +@@ -2714,13 +2815,13 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
2800 + memset(&sig_wr, 0, sizeof(sig_wr));
2801 + sig_wr.opcode = IB_WR_REG_SIG_MR;
2802 + sig_wr.wr_id = ISER_FASTREG_LI_WRID;
2803 +- sig_wr.sg_list = data_sge;
2804 ++ sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
2805 + sig_wr.num_sge = 1;
2806 + sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
2807 + sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
2808 + sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
2809 + if (se_cmd->t_prot_sg)
2810 +- sig_wr.wr.sig_handover.prot = prot_sge;
2811 ++ sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];
2812 +
2813 + if (!wr)
2814 + wr = &sig_wr;
2815 +@@ -2734,34 +2835,93 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
2816 + }
2817 + fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
2818 +
2819 +- sig_sge->lkey = pi_ctx->sig_mr->lkey;
2820 +- sig_sge->addr = 0;
2821 +- sig_sge->length = se_cmd->data_length;
2822 ++ rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
2823 ++ rdma_wr->ib_sg[SIG].addr = 0;
2824 ++ rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
2825 + if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
2826 + se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
2827 + /*
2828 + * We have protection guards on the wire
2829 + * so we need to set a larget transfer
2830 + */
2831 +- sig_sge->length += se_cmd->prot_length;
2832 ++ rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;
2833 +
2834 + pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
2835 +- sig_sge->addr, sig_sge->length,
2836 +- sig_sge->lkey);
2837 ++ rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
2838 ++ rdma_wr->ib_sg[SIG].lkey);
2839 + err:
2840 + return ret;
2841 + }
2842 +
2843 + static int
2844 ++isert_handle_prot_cmd(struct isert_conn *isert_conn,
2845 ++ struct isert_cmd *isert_cmd,
2846 ++ struct isert_rdma_wr *wr)
2847 ++{
2848 ++ struct isert_device *device = isert_conn->conn_device;
2849 ++ struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
2850 ++ int ret;
2851 ++
2852 ++ if (!wr->fr_desc->pi_ctx) {
2853 ++ ret = isert_create_pi_ctx(wr->fr_desc,
2854 ++ device->ib_device,
2855 ++ isert_conn->conn_pd);
2856 ++ if (ret) {
2857 ++ pr_err("conn %p failed to allocate pi_ctx\n",
2858 ++ isert_conn);
2859 ++ return ret;
2860 ++ }
2861 ++ }
2862 ++
2863 ++ if (se_cmd->t_prot_sg) {
2864 ++ ret = isert_map_data_buf(isert_conn, isert_cmd,
2865 ++ se_cmd->t_prot_sg,
2866 ++ se_cmd->t_prot_nents,
2867 ++ se_cmd->prot_length,
2868 ++ 0, wr->iser_ib_op, &wr->prot);
2869 ++ if (ret) {
2870 ++ pr_err("conn %p failed to map protection buffer\n",
2871 ++ isert_conn);
2872 ++ return ret;
2873 ++ }
2874 ++
2875 ++ memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
2876 ++ ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
2877 ++ ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
2878 ++ if (ret) {
2879 ++ pr_err("conn %p failed to fast reg mr\n",
2880 ++ isert_conn);
2881 ++ goto unmap_prot_cmd;
2882 ++ }
2883 ++ }
2884 ++
2885 ++ ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
2886 ++ if (ret) {
2887 ++ pr_err("conn %p failed to fast reg mr\n",
2888 ++ isert_conn);
2889 ++ goto unmap_prot_cmd;
2890 ++ }
2891 ++ wr->fr_desc->ind |= ISERT_PROTECTED;
2892 ++
2893 ++ return 0;
2894 ++
2895 ++unmap_prot_cmd:
2896 ++ if (se_cmd->t_prot_sg)
2897 ++ isert_unmap_data_buf(isert_conn, &wr->prot);
2898 ++
2899 ++ return ret;
2900 ++}
2901 ++
2902 ++static int
2903 + isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2904 + struct isert_rdma_wr *wr)
2905 + {
2906 + struct se_cmd *se_cmd = &cmd->se_cmd;
2907 + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2908 + struct isert_conn *isert_conn = conn->context;
2909 +- struct ib_sge data_sge;
2910 +- struct ib_send_wr *send_wr;
2911 + struct fast_reg_descriptor *fr_desc = NULL;
2912 ++ struct ib_send_wr *send_wr;
2913 ++ struct ib_sge *ib_sg;
2914 + u32 offset;
2915 + int ret = 0;
2916 + unsigned long flags;
2917 +@@ -2775,8 +2935,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2918 + if (ret)
2919 + return ret;
2920 +
2921 +- if (wr->data.dma_nents != 1 ||
2922 +- se_cmd->prot_op != TARGET_PROT_NORMAL) {
2923 ++ if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
2924 + spin_lock_irqsave(&isert_conn->conn_lock, flags);
2925 + fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
2926 + struct fast_reg_descriptor, list);
2927 +@@ -2786,38 +2945,21 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2928 + }
2929 +
2930 + ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
2931 +- ISERT_DATA_KEY_VALID, &data_sge);
2932 ++ ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
2933 + if (ret)
2934 + goto unmap_cmd;
2935 +
2936 +- if (se_cmd->prot_op != TARGET_PROT_NORMAL) {
2937 +- struct ib_sge prot_sge, sig_sge;
2938 +-
2939 +- if (se_cmd->t_prot_sg) {
2940 +- ret = isert_map_data_buf(isert_conn, isert_cmd,
2941 +- se_cmd->t_prot_sg,
2942 +- se_cmd->t_prot_nents,
2943 +- se_cmd->prot_length,
2944 +- 0, wr->iser_ib_op, &wr->prot);
2945 +- if (ret)
2946 +- goto unmap_cmd;
2947 +-
2948 +- ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot,
2949 +- ISERT_PROT_KEY_VALID, &prot_sge);
2950 +- if (ret)
2951 +- goto unmap_prot_cmd;
2952 +- }
2953 +-
2954 +- ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc,
2955 +- &data_sge, &prot_sge, &sig_sge);
2956 ++ if (isert_prot_cmd(isert_conn, se_cmd)) {
2957 ++ ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
2958 + if (ret)
2959 +- goto unmap_prot_cmd;
2960 ++ goto unmap_cmd;
2961 +
2962 +- fr_desc->ind |= ISERT_PROTECTED;
2963 +- memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge));
2964 +- } else
2965 +- memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge));
2966 ++ ib_sg = &wr->ib_sg[SIG];
2967 ++ } else {
2968 ++ ib_sg = &wr->ib_sg[DATA];
2969 ++ }
2970 +
2971 ++ memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
2972 + wr->ib_sge = &wr->s_ib_sge;
2973 + wr->send_wr_num = 1;
2974 + memset(&wr->s_send_wr, 0, sizeof(*send_wr));
2975 +@@ -2832,7 +2974,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2976 + send_wr->opcode = IB_WR_RDMA_WRITE;
2977 + send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
2978 + send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2979 +- send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ?
2980 ++ send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
2981 + 0 : IB_SEND_SIGNALED;
2982 + } else {
2983 + send_wr->opcode = IB_WR_RDMA_READ;
2984 +@@ -2842,9 +2984,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2985 + }
2986 +
2987 + return 0;
2988 +-unmap_prot_cmd:
2989 +- if (se_cmd->t_prot_sg)
2990 +- isert_unmap_data_buf(isert_conn, &wr->prot);
2991 ++
2992 + unmap_cmd:
2993 + if (fr_desc) {
2994 + spin_lock_irqsave(&isert_conn->conn_lock, flags);
2995 +@@ -2876,7 +3016,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2996 + return rc;
2997 + }
2998 +
2999 +- if (se_cmd->prot_op == TARGET_PROT_NORMAL) {
3000 ++ if (!isert_prot_cmd(isert_conn, se_cmd)) {
3001 + /*
3002 + * Build isert_conn->tx_desc for iSCSI response PDU and attach
3003 + */
3004 +@@ -2899,7 +3039,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
3005 + atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
3006 + }
3007 +
3008 +- if (se_cmd->prot_op == TARGET_PROT_NORMAL)
3009 ++ if (!isert_prot_cmd(isert_conn, se_cmd))
3010 + pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
3011 + "READ\n", isert_cmd);
3012 + else
3013 +@@ -3001,13 +3141,51 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
3014 + return ret;
3015 + }
3016 +
3017 ++struct rdma_cm_id *
3018 ++isert_setup_id(struct isert_np *isert_np)
3019 ++{
3020 ++ struct iscsi_np *np = isert_np->np;
3021 ++ struct rdma_cm_id *id;
3022 ++ struct sockaddr *sa;
3023 ++ int ret;
3024 ++
3025 ++ sa = (struct sockaddr *)&np->np_sockaddr;
3026 ++ pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
3027 ++
3028 ++ id = rdma_create_id(isert_cma_handler, isert_np,
3029 ++ RDMA_PS_TCP, IB_QPT_RC);
3030 ++ if (IS_ERR(id)) {
3031 ++ pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
3032 ++ ret = PTR_ERR(id);
3033 ++ goto out;
3034 ++ }
3035 ++ pr_debug("id %p context %p\n", id, id->context);
3036 ++
3037 ++ ret = rdma_bind_addr(id, sa);
3038 ++ if (ret) {
3039 ++ pr_err("rdma_bind_addr() failed: %d\n", ret);
3040 ++ goto out_id;
3041 ++ }
3042 ++
3043 ++ ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
3044 ++ if (ret) {
3045 ++ pr_err("rdma_listen() failed: %d\n", ret);
3046 ++ goto out_id;
3047 ++ }
3048 ++
3049 ++ return id;
3050 ++out_id:
3051 ++ rdma_destroy_id(id);
3052 ++out:
3053 ++ return ERR_PTR(ret);
3054 ++}
3055 ++
3056 + static int
3057 + isert_setup_np(struct iscsi_np *np,
3058 + struct __kernel_sockaddr_storage *ksockaddr)
3059 + {
3060 + struct isert_np *isert_np;
3061 + struct rdma_cm_id *isert_lid;
3062 +- struct sockaddr *sa;
3063 + int ret;
3064 +
3065 + isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
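Factoring the create/bind/listen sequence into isert_setup_id() lets isert_setup_np() and the RDMA_CM_EVENT_ADDR_CHANGE re-arm path in isert_np_cma_handler() share one implementation; note the listener id's context is now the isert_np itself rather than the iscsi_np, which is what lets isert_disconnected_handler() tell listener events from connection events. The error unwinding follows the usual single-resource shape (my_cma_handler and LISTEN_BACKLOG are stand-ins):

    struct rdma_cm_id *setup_listener(void *ctx, struct sockaddr *sa)
    {
            struct rdma_cm_id *id;
            int ret;

            id = rdma_create_id(my_cma_handler, ctx, RDMA_PS_TCP, IB_QPT_RC);
            if (IS_ERR(id))
                    return id;              /* nothing acquired yet */

            ret = rdma_bind_addr(id, sa);
            if (ret)
                    goto out_id;

            ret = rdma_listen(id, LISTEN_BACKLOG);
            if (ret)
                    goto out_id;

            return id;

    out_id:
            rdma_destroy_id(id);    /* the only resource held on failure */
            return ERR_PTR(ret);
    }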
3066 +@@ -3019,9 +3197,8 @@ isert_setup_np(struct iscsi_np *np,
3067 + mutex_init(&isert_np->np_accept_mutex);
3068 + INIT_LIST_HEAD(&isert_np->np_accept_list);
3069 + init_completion(&isert_np->np_login_comp);
3070 ++ isert_np->np = np;
3071 +
3072 +- sa = (struct sockaddr *)ksockaddr;
3073 +- pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
3074 + /*
3075 + * Setup the np->np_sockaddr from the passed sockaddr setup
3076 + * in iscsi_target_configfs.c code..
3077 +@@ -3029,37 +3206,20 @@ isert_setup_np(struct iscsi_np *np,
3078 + memcpy(&np->np_sockaddr, ksockaddr,
3079 + sizeof(struct __kernel_sockaddr_storage));
3080 +
3081 +- isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
3082 +- IB_QPT_RC);
3083 ++ isert_lid = isert_setup_id(isert_np);
3084 + if (IS_ERR(isert_lid)) {
3085 +- pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
3086 +- PTR_ERR(isert_lid));
3087 + ret = PTR_ERR(isert_lid);
3088 + goto out;
3089 + }
3090 +
3091 +- ret = rdma_bind_addr(isert_lid, sa);
3092 +- if (ret) {
3093 +- pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
3094 +- goto out_lid;
3095 +- }
3096 +-
3097 +- ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
3098 +- if (ret) {
3099 +- pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
3100 +- goto out_lid;
3101 +- }
3102 +-
3103 + isert_np->np_cm_id = isert_lid;
3104 + np->np_context = isert_np;
3105 +- pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
3106 +
3107 + return 0;
3108 +
3109 +-out_lid:
3110 +- rdma_destroy_id(isert_lid);
3111 + out:
3112 + kfree(isert_np);
3113 ++
3114 + return ret;
3115 + }
3116 +
3117 +@@ -3094,7 +3254,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
3118 + struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
3119 + int ret;
3120 +
3121 +- pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
3122 ++ pr_info("before login_req comp conn: %p\n", isert_conn);
3123 ++ ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
3124 ++ if (ret) {
3125 ++ pr_err("isert_conn %p interrupted before got login req\n",
3126 ++ isert_conn);
3127 ++ return ret;
3128 ++ }
3129 ++ reinit_completion(&isert_conn->login_req_comp);
3130 ++
3131 + /*
3132 + * For login requests after the first PDU, isert_rx_login_req() will
3133 + * kick schedule_delayed_work(&conn->login_work) as the packet is
3134 +@@ -3104,11 +3272,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
3135 + if (!login->first_request)
3136 + return 0;
3137 +
3138 ++ isert_rx_login_req(isert_conn);
3139 ++
3140 ++ pr_info("before conn_login_comp conn: %p\n", conn);
3141 + ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
3142 + if (ret)
3143 + return ret;
3144 +
3145 +- pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
3146 ++ pr_info("processing login->req: %p\n", login->req);
3147 ++
3148 + return 0;
3149 + }
3150 +
3151 +@@ -3186,17 +3358,10 @@ accept_wait:
3152 + isert_conn->conn = conn;
3153 + max_accept = 0;
3154 +
3155 +- ret = isert_rdma_post_recvl(isert_conn);
3156 +- if (ret)
3157 +- return ret;
3158 +-
3159 +- ret = isert_rdma_accept(isert_conn);
3160 +- if (ret)
3161 +- return ret;
3162 +-
3163 + isert_set_conn_info(np, conn, isert_conn);
3164 +
3165 +- pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
3166 ++ pr_debug("Processing isert_conn: %p\n", isert_conn);
3167 ++
3168 + return 0;
3169 + }
3170 +
3171 +@@ -3212,6 +3377,24 @@ isert_free_np(struct iscsi_np *np)
3172 + kfree(isert_np);
3173 + }
3174 +
3175 ++static void isert_release_work(struct work_struct *work)
3176 ++{
3177 ++ struct isert_conn *isert_conn = container_of(work,
3178 ++ struct isert_conn,
3179 ++ release_work);
3180 ++
3181 ++ pr_info("Starting release conn %p\n", isert_conn);
3182 ++
3183 ++ wait_for_completion(&isert_conn->conn_wait);
3184 ++
3185 ++ mutex_lock(&isert_conn->conn_mutex);
3186 ++ isert_conn->state = ISER_CONN_DOWN;
3187 ++ mutex_unlock(&isert_conn->conn_mutex);
3188 ++
3189 ++ pr_info("Destroying conn %p\n", isert_conn);
3190 ++ isert_put_conn(isert_conn);
3191 ++}
3192 ++
3193 + static void isert_wait_conn(struct iscsi_conn *conn)
3194 + {
3195 + struct isert_conn *isert_conn = conn->context;
3196 +@@ -3219,10 +3402,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
3197 + pr_debug("isert_wait_conn: Starting \n");
3198 +
3199 + mutex_lock(&isert_conn->conn_mutex);
3200 +- if (isert_conn->conn_cm_id && !isert_conn->disconnect) {
3201 +- pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
3202 +- rdma_disconnect(isert_conn->conn_cm_id);
3203 +- }
3204 + /*
3205 + * Only wait for conn_wait_comp_err if the isert_conn made it
3206 + * into full feature phase..
3207 +@@ -3231,14 +3410,13 @@ static void isert_wait_conn(struct iscsi_conn *conn)
3208 + mutex_unlock(&isert_conn->conn_mutex);
3209 + return;
3210 + }
3211 +- if (isert_conn->state == ISER_CONN_UP)
3212 +- isert_conn->state = ISER_CONN_TERMINATING;
3213 ++ isert_conn_terminate(isert_conn);
3214 + mutex_unlock(&isert_conn->conn_mutex);
3215 +
3216 + wait_for_completion(&isert_conn->conn_wait_comp_err);
3217 +
3218 +- wait_for_completion(&isert_conn->conn_wait);
3219 +- isert_put_conn(isert_conn);
3220 ++ INIT_WORK(&isert_conn->release_work, isert_release_work);
3221 ++ queue_work(isert_release_wq, &isert_conn->release_work);
3222 + }
3223 +
3224 + static void isert_free_conn(struct iscsi_conn *conn)
3225 +@@ -3286,10 +3464,21 @@ static int __init isert_init(void)
3226 + goto destroy_rx_wq;
3227 + }
3228 +
3229 ++ isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
3230 ++ WQ_UNBOUND_MAX_ACTIVE);
3231 ++ if (!isert_release_wq) {
3232 ++ pr_err("Unable to allocate isert_release_wq\n");
3233 ++ ret = -ENOMEM;
3234 ++ goto destroy_comp_wq;
3235 ++ }
3236 ++
3237 + iscsit_register_transport(&iser_target_transport);
3238 +- pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
3239 ++ pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
3240 ++
3241 + return 0;
3242 +
3243 ++destroy_comp_wq:
3244 ++ destroy_workqueue(isert_comp_wq);
3245 + destroy_rx_wq:
3246 + destroy_workqueue(isert_rx_wq);
3247 + return ret;
3248 +@@ -3298,6 +3487,7 @@ destroy_rx_wq:
3249 + static void __exit isert_exit(void)
3250 + {
3251 + flush_scheduled_work();
3252 ++ destroy_workqueue(isert_release_wq);
3253 + destroy_workqueue(isert_comp_wq);
3254 + destroy_workqueue(isert_rx_wq);
3255 + iscsit_unregister_transport(&iser_target_transport);
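
The isert changes above move the potentially long wait in connection teardown off the caller's context and onto a dedicated unbound workqueue. A minimal sketch of that deferred-release pattern, using hypothetical conn/conn_release_work/conn_schedule_release names and a workqueue allocated elsewhere:

#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>

/* Hypothetical connection object mirroring the release_work pattern. */
struct conn {
	struct completion done;
	struct work_struct release_work;
};

static void conn_release_work(struct work_struct *work)
{
	struct conn *c = container_of(work, struct conn, release_work);

	wait_for_completion(&c->done);	/* may block for a long time */
	kfree(c);			/* final teardown */
}

/* Called from a context that must not block indefinitely. */
static void conn_schedule_release(struct conn *c, struct workqueue_struct *wq)
{
	INIT_WORK(&c->release_work, conn_release_work);
	queue_work(wq, &c->release_work);
}

An unbound queue (alloc_workqueue(..., WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE), as in the hunk above) is used precisely so one stalled release cannot delay unrelated work items.
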
3256 +diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
3257 +index 04f51f7bf614..141905f446dd 100644
3258 +--- a/drivers/infiniband/ulp/isert/ib_isert.h
3259 ++++ b/drivers/infiniband/ulp/isert/ib_isert.h
3260 +@@ -23,6 +23,7 @@ enum iser_ib_op_code {
3261 + enum iser_conn_state {
3262 + ISER_CONN_INIT,
3263 + ISER_CONN_UP,
3264 ++ ISER_CONN_FULL_FEATURE,
3265 + ISER_CONN_TERMINATING,
3266 + ISER_CONN_DOWN,
3267 + };
3268 +@@ -81,6 +82,12 @@ struct isert_data_buf {
3269 + enum dma_data_direction dma_dir;
3270 + };
3271 +
3272 ++enum {
3273 ++ DATA = 0,
3274 ++ PROT = 1,
3275 ++ SIG = 2,
3276 ++};
3277 ++
3278 + struct isert_rdma_wr {
3279 + struct list_head wr_list;
3280 + struct isert_cmd *isert_cmd;
3281 +@@ -90,6 +97,7 @@ struct isert_rdma_wr {
3282 + int send_wr_num;
3283 + struct ib_send_wr *send_wr;
3284 + struct ib_send_wr s_send_wr;
3285 ++ struct ib_sge ib_sg[3];
3286 + struct isert_data_buf data;
3287 + struct isert_data_buf prot;
3288 + struct fast_reg_descriptor *fr_desc;
3289 +@@ -120,11 +128,13 @@ struct isert_conn {
3290 + atomic_t post_send_buf_count;
3291 + u32 responder_resources;
3292 + u32 initiator_depth;
3293 ++ bool pi_support;
3294 + u32 max_sge;
3295 + char *login_buf;
3296 + char *login_req_buf;
3297 + char *login_rsp_buf;
3298 + u64 login_req_dma;
3299 ++ int login_req_len;
3300 + u64 login_rsp_dma;
3301 + unsigned int conn_rx_desc_head;
3302 + struct iser_rx_desc *conn_rx_descs;
3303 +@@ -132,13 +142,13 @@ struct isert_conn {
3304 + struct iscsi_conn *conn;
3305 + struct list_head conn_accept_node;
3306 + struct completion conn_login_comp;
3307 ++ struct completion login_req_comp;
3308 + struct iser_tx_desc conn_login_tx_desc;
3309 + struct rdma_cm_id *conn_cm_id;
3310 + struct ib_pd *conn_pd;
3311 + struct ib_mr *conn_mr;
3312 + struct ib_qp *conn_qp;
3313 + struct isert_device *conn_device;
3314 +- struct work_struct conn_logout_work;
3315 + struct mutex conn_mutex;
3316 + struct completion conn_wait;
3317 + struct completion conn_wait_comp_err;
3318 +@@ -147,10 +157,10 @@ struct isert_conn {
3319 + int conn_fr_pool_size;
3320 + /* lock to protect fastreg pool */
3321 + spinlock_t conn_lock;
3322 ++ struct work_struct release_work;
3323 + #define ISERT_COMP_BATCH_COUNT 8
3324 + int conn_comp_batch;
3325 + struct llist_head conn_comp_llist;
3326 +- bool disconnect;
3327 + };
3328 +
3329 + #define ISERT_MAX_CQ 64
3330 +@@ -182,6 +192,7 @@ struct isert_device {
3331 + };
3332 +
3333 + struct isert_np {
3334 ++ struct iscsi_np *np;
3335 + struct semaphore np_sem;
3336 + struct rdma_cm_id *np_cm_id;
3337 + struct mutex np_accept_mutex;
3338 +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
3339 +index f2b978026407..77ecf6d32237 100644
3340 +--- a/drivers/input/mouse/elantech.c
3341 ++++ b/drivers/input/mouse/elantech.c
3342 +@@ -1520,6 +1520,8 @@ static int elantech_set_properties(struct elantech_data *etd)
3343 + case 7:
3344 + case 8:
3345 + case 9:
3346 ++ case 10:
3347 ++ case 13:
3348 + etd->hw_version = 4;
3349 + break;
3350 + default:
3351 +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
3352 +index faeeb1372462..1a858c86a72b 100644
3353 +--- a/drivers/input/serio/i8042-x86ia64io.h
3354 ++++ b/drivers/input/serio/i8042-x86ia64io.h
3355 +@@ -415,6 +415,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
3356 + },
3357 + },
3358 + {
3359 ++ /* Acer Aspire 7738 */
3360 ++ .matches = {
3361 ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
3362 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
3363 ++ },
3364 ++ },
3365 ++ {
3366 + /* Gericom Bellagio */
3367 + .matches = {
3368 + DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
3369 +@@ -735,6 +742,35 @@ static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
3370 + { }
3371 + };
3372 +
3373 ++/*
3374 ++ * Some laptops need a keyboard reset before probing for the trackpad to
3375 ++ * get it detected, initialised and finally working.
3376 ++ */
3377 ++static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
3378 ++ {
3379 ++ /* Gigabyte P35 v2 - Elantech touchpad */
3380 ++ .matches = {
3381 ++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
3382 ++ DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
3383 ++ },
3384 ++ },
3385 ++ {
3386 ++ /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
3387 ++ .matches = {
3388 ++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
3389 ++ DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
3390 ++ },
3391 ++ },
3392 ++ {
3393 ++ /* Gigabyte P34 - Elantech touchpad */
3394 ++ .matches = {
3395 ++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
3396 ++ DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
3397 ++ },
3398 ++ },
3399 ++ { }
3400 ++};
3401 ++
3402 + #endif /* CONFIG_X86 */
3403 +
3404 + #ifdef CONFIG_PNP
3405 +@@ -1030,6 +1066,9 @@ static int __init i8042_platform_init(void)
3406 + if (dmi_check_system(i8042_dmi_dritek_table))
3407 + i8042_dritek = true;
3408 +
3409 ++ if (dmi_check_system(i8042_dmi_kbdreset_table))
3410 ++ i8042_kbdreset = true;
3411 ++
3412 + /*
3413 + * A20 was already enabled during early kernel init. But some buggy
3414 + * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
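
The kbdreset entries above follow the usual DMI quirk-table idiom: a __initconst array of matches scanned once at platform init. A stripped-down sketch of the idiom, with hypothetical my_* names and vendor strings:

#include <linux/dmi.h>

static bool my_quirk;

static const struct dmi_system_id __initconst my_quirk_table[] = {
	{
		/* Hypothetical machine needing the quirk */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ExampleModel"),
		},
	},
	{ }	/* terminating empty entry */
};

static int __init my_platform_init(void)
{
	if (dmi_check_system(my_quirk_table))
		my_quirk = true;
	return 0;
}
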
3415 +diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
3416 +index f5a98af3b325..804d2e02010a 100644
3417 +--- a/drivers/input/serio/i8042.c
3418 ++++ b/drivers/input/serio/i8042.c
3419 +@@ -67,6 +67,10 @@ static bool i8042_notimeout;
3420 + module_param_named(notimeout, i8042_notimeout, bool, 0);
3421 + MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
3422 +
3423 ++static bool i8042_kbdreset;
3424 ++module_param_named(kbdreset, i8042_kbdreset, bool, 0);
3425 ++MODULE_PARM_DESC(kbdreset, "Reset device connected to KBD port");
3426 ++
3427 + #ifdef CONFIG_X86
3428 + static bool i8042_dritek;
3429 + module_param_named(dritek, i8042_dritek, bool, 0);
3430 +@@ -790,6 +794,16 @@ static int __init i8042_check_aux(void)
3431 + return -1;
3432 +
3433 + /*
3434 ++ * Reset keyboard (needed on some laptops to successfully detect
3435 ++ * touchpad, e.g., some Gigabyte laptop models with Elantech
3436 ++ * touchpads).
3437 ++ */
3438 ++ if (i8042_kbdreset) {
3439 ++ pr_warn("Attempting to reset device connected to KBD port\n");
3440 ++ i8042_kbd_write(NULL, (unsigned char) 0xff);
3441 ++ }
3442 ++
3443 ++/*
3444 + * Test AUX IRQ delivery to make sure BIOS did not grab the IRQ and
3445 + * used it for a PCI card or something else.
3446 + */
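
The new flag is wired up with the standard module_param_named() triple, so it can also be forced from the command line (i8042.kbdreset=1) on machines without a DMI entry yet. The shape of that idiom, under hypothetical mydrv_* names:

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool mydrv_kbdreset;
module_param_named(kbdreset, mydrv_kbdreset, bool, 0);
MODULE_PARM_DESC(kbdreset, "Reset device connected to KBD port");

static void mydrv_maybe_reset(void)
{
	if (mydrv_kbdreset)
		pr_warn("mydrv: attempting KBD-port device reset\n");
}
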
3447 +diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
3448 +index 64fde485dcaa..4c794f15a57f 100644
3449 +--- a/drivers/leds/leds-netxbig.c
3450 ++++ b/drivers/leds/leds-netxbig.c
3451 +@@ -330,18 +330,18 @@ create_netxbig_led(struct platform_device *pdev,
3452 + led_dat->sata = 0;
3453 + led_dat->cdev.brightness = LED_OFF;
3454 + led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
3455 +- /*
3456 +- * If available, expose the SATA activity blink capability through
3457 +- * a "sata" sysfs attribute.
3458 +- */
3459 +- if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
3460 +- led_dat->cdev.groups = netxbig_led_groups;
3461 + led_dat->mode_addr = template->mode_addr;
3462 + led_dat->mode_val = template->mode_val;
3463 + led_dat->bright_addr = template->bright_addr;
3464 + led_dat->bright_max = (1 << pdata->gpio_ext->num_data) - 1;
3465 + led_dat->timer = pdata->timer;
3466 + led_dat->num_timer = pdata->num_timer;
3467 ++ /*
3468 ++ * If available, expose the SATA activity blink capability through
3469 ++ * a "sata" sysfs attribute.
3470 ++ */
3471 ++ if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
3472 ++ led_dat->cdev.groups = netxbig_led_groups;
3473 +
3474 + return led_classdev_register(&pdev->dev, &led_dat->cdev);
3475 + }
3476 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
3477 +index 58f3927fd7cc..62c51364cf9e 100644
3478 +--- a/drivers/md/dm.c
3479 ++++ b/drivers/md/dm.c
3480 +@@ -899,7 +899,7 @@ static void disable_write_same(struct mapped_device *md)
3481 +
3482 + static void clone_endio(struct bio *bio, int error)
3483 + {
3484 +- int r = 0;
3485 ++ int r = error;
3486 + struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
3487 + struct dm_io *io = tio->io;
3488 + struct mapped_device *md = tio->io->md;
3489 +diff --git a/drivers/media/i2c/smiapp-pll.c b/drivers/media/i2c/smiapp-pll.c
3490 +index 2335529b195c..ab5d9a3adebf 100644
3491 +--- a/drivers/media/i2c/smiapp-pll.c
3492 ++++ b/drivers/media/i2c/smiapp-pll.c
3493 +@@ -67,7 +67,7 @@ static void print_pll(struct device *dev, struct smiapp_pll *pll)
3494 + {
3495 + dev_dbg(dev, "pre_pll_clk_div\t%d\n", pll->pre_pll_clk_div);
3496 + dev_dbg(dev, "pll_multiplier \t%d\n", pll->pll_multiplier);
3497 +- if (pll->flags != SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
3498 ++ if (!(pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)) {
3499 + dev_dbg(dev, "op_sys_clk_div \t%d\n", pll->op_sys_clk_div);
3500 + dev_dbg(dev, "op_pix_clk_div \t%d\n", pll->op_pix_clk_div);
3501 + }
3502 +@@ -77,7 +77,7 @@ static void print_pll(struct device *dev, struct smiapp_pll *pll)
3503 + dev_dbg(dev, "ext_clk_freq_hz \t%d\n", pll->ext_clk_freq_hz);
3504 + dev_dbg(dev, "pll_ip_clk_freq_hz \t%d\n", pll->pll_ip_clk_freq_hz);
3505 + dev_dbg(dev, "pll_op_clk_freq_hz \t%d\n", pll->pll_op_clk_freq_hz);
3506 +- if (pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
3507 ++ if (!(pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)) {
3508 + dev_dbg(dev, "op_sys_clk_freq_hz \t%d\n",
3509 + pll->op_sys_clk_freq_hz);
3510 + dev_dbg(dev, "op_pix_clk_freq_hz \t%d\n",
3511 +diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
3512 +index b10aaeda2bb4..b49254e4ea0a 100644
3513 +--- a/drivers/media/i2c/smiapp/smiapp-core.c
3514 ++++ b/drivers/media/i2c/smiapp/smiapp-core.c
3515 +@@ -2677,7 +2677,9 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
3516 + pll->flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
3517 + pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
3518 +
3519 ++ mutex_lock(&sensor->mutex);
3520 + rval = smiapp_update_mode(sensor);
3521 ++ mutex_unlock(&sensor->mutex);
3522 + if (rval) {
3523 + dev_err(&client->dev, "update mode failed\n");
3524 + goto out_nvm_release;
3525 +diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
3526 +index 69c2dbd2d165..501d5ef682e5 100644
3527 +--- a/drivers/media/platform/vivid/vivid-vid-out.c
3528 ++++ b/drivers/media/platform/vivid/vivid-vid-out.c
3529 +@@ -612,7 +612,7 @@ int vivid_vid_out_g_selection(struct file *file, void *priv,
3530 + sel->r = dev->fmt_out_rect;
3531 + break;
3532 + case V4L2_SEL_TGT_CROP_BOUNDS:
3533 +- if (!dev->has_compose_out)
3534 ++ if (!dev->has_crop_out)
3535 + return -EINVAL;
3536 + sel->r = vivid_max_rect;
3537 + break;
3538 +diff --git a/drivers/media/rc/img-ir/img-ir-hw.c b/drivers/media/rc/img-ir/img-ir-hw.c
3539 +index ec49f94425fc..2fd47c9bf5d8 100644
3540 +--- a/drivers/media/rc/img-ir/img-ir-hw.c
3541 ++++ b/drivers/media/rc/img-ir/img-ir-hw.c
3542 +@@ -530,6 +530,22 @@ static void img_ir_set_decoder(struct img_ir_priv *priv,
3543 + u32 ir_status, irq_en;
3544 + spin_lock_irq(&priv->lock);
3545 +
3546 ++ /*
3547 ++ * First record that the protocol is being stopped so that the end timer
3548 ++ * isn't restarted while we're trying to stop it.
3549 ++ */
3550 ++ hw->stopping = true;
3551 ++
3552 ++ /*
3553 ++ * Release the lock to stop the end timer, since the end timer handler
3554 ++ * acquires the lock and we don't want to deadlock waiting for it.
3555 ++ */
3556 ++ spin_unlock_irq(&priv->lock);
3557 ++ del_timer_sync(&hw->end_timer);
3558 ++ spin_lock_irq(&priv->lock);
3559 ++
3560 ++ hw->stopping = false;
3561 ++
3562 + /* switch off and disable interrupts */
3563 + img_ir_write(priv, IMG_IR_CONTROL, 0);
3564 + irq_en = img_ir_read(priv, IMG_IR_IRQ_ENABLE);
3565 +@@ -541,12 +557,13 @@ static void img_ir_set_decoder(struct img_ir_priv *priv,
3566 + if (ir_status & (IMG_IR_RXDVAL | IMG_IR_RXDVALD2)) {
3567 + ir_status &= ~(IMG_IR_RXDVAL | IMG_IR_RXDVALD2);
3568 + img_ir_write(priv, IMG_IR_STATUS, ir_status);
3569 +- img_ir_read(priv, IMG_IR_DATA_LW);
3570 +- img_ir_read(priv, IMG_IR_DATA_UP);
3571 + }
3572 +
3573 +- /* stop the end timer and switch back to normal mode */
3574 +- del_timer_sync(&hw->end_timer);
3575 ++ /* always read data to clear buffer if IR wakes the device */
3576 ++ img_ir_read(priv, IMG_IR_DATA_LW);
3577 ++ img_ir_read(priv, IMG_IR_DATA_UP);
3578 ++
3579 ++ /* switch back to normal mode */
3580 + hw->mode = IMG_IR_M_NORMAL;
3581 +
3582 + /* clear the wakeup scancode filter */
3583 +@@ -817,7 +834,8 @@ static void img_ir_handle_data(struct img_ir_priv *priv, u32 len, u64 raw)
3584 + }
3585 +
3586 +
3587 +- if (dec->repeat) {
3588 ++ /* we mustn't update the end timer while trying to stop it */
3589 ++ if (dec->repeat && !hw->stopping) {
3590 + unsigned long interval;
3591 +
3592 + img_ir_begin_repeat(priv);
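
The img-ir change above is a classic lock/timer ordering fix: del_timer_sync() must not be called under a lock the timer handler takes, so a stopping flag is raised first (to keep the handler from re-arming), the lock is dropped for the synchronous delete, then retaken. A condensed sketch of that dance, assuming a hypothetical mydev structure whose lock and timer are initialised elsewhere:

#include <linux/spinlock.h>
#include <linux/timer.h>

struct mydev {
	spinlock_t lock;
	struct timer_list end_timer;
	bool stopping;
};

static void mydev_stop_decoder(struct mydev *p)
{
	spin_lock_irq(&p->lock);
	p->stopping = true;		/* handler must not re-arm now */
	spin_unlock_irq(&p->lock);	/* handler takes lock: drop it */

	del_timer_sync(&p->end_timer);	/* may wait for a running handler */

	spin_lock_irq(&p->lock);
	p->stopping = false;
	/* ... finish teardown while holding the lock ... */
	spin_unlock_irq(&p->lock);
}
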
3593 +diff --git a/drivers/media/rc/img-ir/img-ir-hw.h b/drivers/media/rc/img-ir/img-ir-hw.h
3594 +index 8fcc16c32c5b..307ddcd1a99e 100644
3595 +--- a/drivers/media/rc/img-ir/img-ir-hw.h
3596 ++++ b/drivers/media/rc/img-ir/img-ir-hw.h
3597 +@@ -214,6 +214,8 @@ enum img_ir_mode {
3598 + * @flags: IMG_IR_F_*.
3599 + * @filters: HW filters (derived from scancode filters).
3600 + * @mode: Current decode mode.
3601 ++ * @stopping: Indicates that the decoder is being taken down and timers
3602 ++ * should not be restarted.
3603 + * @suspend_irqen: Saved IRQ enable mask over suspend.
3604 + */
3605 + struct img_ir_priv_hw {
3606 +@@ -229,6 +231,7 @@ struct img_ir_priv_hw {
3607 + struct img_ir_filter filters[RC_FILTER_MAX];
3608 +
3609 + enum img_ir_mode mode;
3610 ++ bool stopping;
3611 + u32 suspend_irqen;
3612 + };
3613 +
3614 +diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
3615 +index 9eb77ac2153b..da87f1cc31a9 100644
3616 +--- a/drivers/media/usb/au0828/au0828-cards.c
3617 ++++ b/drivers/media/usb/au0828/au0828-cards.c
3618 +@@ -36,6 +36,11 @@ static void hvr950q_cs5340_audio(void *priv, int enable)
3619 + au0828_clear(dev, REG_000, 0x10);
3620 + }
3621 +
3622 ++/*
3623 ++ * WARNING: There's a quirks table at sound/usb/quirks-table.h
3624 ++ * that should also be updated every time a new device with V4L2 support
3625 ++ * is added here.
3626 ++ */
3627 + struct au0828_board au0828_boards[] = {
3628 + [AU0828_BOARD_UNKNOWN] = {
3629 + .name = "Unknown board",
3630 +diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
3631 +index 3f4361e48a32..efa782ed6e2d 100644
3632 +--- a/drivers/media/usb/dvb-usb/af9005.c
3633 ++++ b/drivers/media/usb/dvb-usb/af9005.c
3634 +@@ -1081,9 +1081,12 @@ static int __init af9005_usb_module_init(void)
3635 + err("usb_register failed. (%d)", result);
3636 + return result;
3637 + }
3638 ++#if IS_MODULE(CONFIG_DVB_USB_AF9005) || defined(CONFIG_DVB_USB_AF9005_REMOTE)
3639 ++	/* FIXME: convert to today's kernel IR infrastructure */
3640 + rc_decode = symbol_request(af9005_rc_decode);
3641 + rc_keys = symbol_request(rc_map_af9005_table);
3642 + rc_keys_size = symbol_request(rc_map_af9005_table_size);
3643 ++#endif
3644 + if (rc_decode == NULL || rc_keys == NULL || rc_keys_size == NULL) {
3645 + err("af9005_rc_decode function not found, disabling remote");
3646 + af9005_properties.rc.legacy.rc_query = NULL;
3647 +diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
3648 +index 7c8322d4fc63..3c07af96b30f 100644
3649 +--- a/drivers/media/usb/uvc/uvc_driver.c
3650 ++++ b/drivers/media/usb/uvc/uvc_driver.c
3651 +@@ -1623,12 +1623,12 @@ static void uvc_delete(struct uvc_device *dev)
3652 + {
3653 + struct list_head *p, *n;
3654 +
3655 +- usb_put_intf(dev->intf);
3656 +- usb_put_dev(dev->udev);
3657 +-
3658 + uvc_status_cleanup(dev);
3659 + uvc_ctrl_cleanup_device(dev);
3660 +
3661 ++ usb_put_intf(dev->intf);
3662 ++ usb_put_dev(dev->udev);
3663 ++
3664 + if (dev->vdev.dev)
3665 + v4l2_device_unregister(&dev->vdev);
3666 + #ifdef CONFIG_MEDIA_CONTROLLER
3667 +diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
3668 +index cca472109135..51fd6b524371 100644
3669 +--- a/drivers/misc/cxl/context.c
3670 ++++ b/drivers/misc/cxl/context.c
3671 +@@ -34,7 +34,8 @@ struct cxl_context *cxl_context_alloc(void)
3672 + /*
3673 + * Initialises a CXL context.
3674 + */
3675 +-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
3676 ++int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
3677 ++ struct address_space *mapping)
3678 + {
3679 + int i;
3680 +
3681 +@@ -42,6 +43,8 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
3682 + ctx->afu = afu;
3683 + ctx->master = master;
3684 + ctx->pid = NULL; /* Set in start work ioctl */
3685 ++ mutex_init(&ctx->mapping_lock);
3686 ++ ctx->mapping = mapping;
3687 +
3688 + /*
3689 + * Allocate the segment table before we put it in the IDR so that we
3690 +@@ -82,12 +85,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
3691 + * Allocating IDR! We better make sure everything's setup that
3692 + * dereferences from it.
3693 + */
3694 ++ mutex_lock(&afu->contexts_lock);
3695 + idr_preload(GFP_KERNEL);
3696 +- spin_lock(&afu->contexts_lock);
3697 + i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
3698 + ctx->afu->num_procs, GFP_NOWAIT);
3699 +- spin_unlock(&afu->contexts_lock);
3700 + idr_preload_end();
3701 ++ mutex_unlock(&afu->contexts_lock);
3702 + if (i < 0)
3703 + return i;
3704 +
3705 +@@ -147,6 +150,12 @@ static void __detach_context(struct cxl_context *ctx)
3706 + afu_release_irqs(ctx);
3707 + flush_work(&ctx->fault_work); /* Only needed for dedicated process */
3708 + wake_up_all(&ctx->wq);
3709 ++
3710 ++ /* Release Problem State Area mapping */
3711 ++ mutex_lock(&ctx->mapping_lock);
3712 ++ if (ctx->mapping)
3713 ++ unmap_mapping_range(ctx->mapping, 0, 0, 1);
3714 ++ mutex_unlock(&ctx->mapping_lock);
3715 + }
3716 +
3717 + /*
3718 +@@ -168,21 +177,22 @@ void cxl_context_detach_all(struct cxl_afu *afu)
3719 + struct cxl_context *ctx;
3720 + int tmp;
3721 +
3722 +- rcu_read_lock();
3723 +- idr_for_each_entry(&afu->contexts_idr, ctx, tmp)
3724 ++ mutex_lock(&afu->contexts_lock);
3725 ++ idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
3726 + /*
3727 + * Anything done in here needs to be setup before the IDR is
3728 + * created and torn down after the IDR removed
3729 + */
3730 + __detach_context(ctx);
3731 +- rcu_read_unlock();
3732 ++ }
3733 ++ mutex_unlock(&afu->contexts_lock);
3734 + }
3735 +
3736 + void cxl_context_free(struct cxl_context *ctx)
3737 + {
3738 +- spin_lock(&ctx->afu->contexts_lock);
3739 ++ mutex_lock(&ctx->afu->contexts_lock);
3740 + idr_remove(&ctx->afu->contexts_idr, ctx->pe);
3741 +- spin_unlock(&ctx->afu->contexts_lock);
3742 ++ mutex_unlock(&ctx->afu->contexts_lock);
3743 + synchronize_rcu();
3744 +
3745 + free_page((u64)ctx->sstp);
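
The cxl conversion from spinlock to mutex above keeps the idr_preload()/idr_alloc() pairing intact: the preload guarantees the later GFP_NOWAIT allocation can succeed, and taking the mutex before preloading matches the hunk's ordering. A minimal sketch of that allocation pattern, with hypothetical ctx_* names:

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/gfp.h>

static DEFINE_MUTEX(ctx_lock);
static DEFINE_IDR(ctx_idr);

static int ctx_get_id(void *ctx, int max_ids)
{
	int id;

	mutex_lock(&ctx_lock);
	idr_preload(GFP_KERNEL);
	id = idr_alloc(&ctx_idr, ctx, 0, max_ids, GFP_NOWAIT);
	idr_preload_end();
	mutex_unlock(&ctx_lock);

	return id;	/* negative errno on failure */
}
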
3746 +diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
3747 +index 3d2b8677ec8a..6ee785da574e 100644
3748 +--- a/drivers/misc/cxl/cxl.h
3749 ++++ b/drivers/misc/cxl/cxl.h
3750 +@@ -349,7 +349,7 @@ struct cxl_afu {
3751 + struct device *chardev_s, *chardev_m, *chardev_d;
3752 + struct idr contexts_idr;
3753 + struct dentry *debugfs;
3754 +- spinlock_t contexts_lock;
3755 ++ struct mutex contexts_lock;
3756 + struct mutex spa_mutex;
3757 + spinlock_t afu_cntl_lock;
3758 +
3759 +@@ -390,6 +390,10 @@ struct cxl_context {
3760 + phys_addr_t psn_phys;
3761 + u64 psn_size;
3762 +
3763 ++ /* Used to unmap any mmaps when force detaching */
3764 ++ struct address_space *mapping;
3765 ++ struct mutex mapping_lock;
3766 ++
3767 + spinlock_t sste_lock; /* Protects segment table entries */
3768 + struct cxl_sste *sstp;
3769 + u64 sstp0, sstp1;
3770 +@@ -592,7 +596,8 @@ int cxl_alloc_sst(struct cxl_context *ctx);
3771 + void init_cxl_native(void);
3772 +
3773 + struct cxl_context *cxl_context_alloc(void);
3774 +-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master);
3775 ++int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
3776 ++ struct address_space *mapping);
3777 + void cxl_context_free(struct cxl_context *ctx);
3778 + int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma);
3779 +
3780 +diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
3781 +index 378b099e7c0b..e9f2f10dbb37 100644
3782 +--- a/drivers/misc/cxl/file.c
3783 ++++ b/drivers/misc/cxl/file.c
3784 +@@ -77,7 +77,7 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
3785 + goto err_put_afu;
3786 + }
3787 +
3788 +- if ((rc = cxl_context_init(ctx, afu, master)))
3789 ++ if ((rc = cxl_context_init(ctx, afu, master, inode->i_mapping)))
3790 + goto err_put_afu;
3791 +
3792 + pr_devel("afu_open pe: %i\n", ctx->pe);
3793 +@@ -113,6 +113,10 @@ static int afu_release(struct inode *inode, struct file *file)
3794 + __func__, ctx->pe);
3795 + cxl_context_detach(ctx);
3796 +
3797 ++ mutex_lock(&ctx->mapping_lock);
3798 ++ ctx->mapping = NULL;
3799 ++ mutex_unlock(&ctx->mapping_lock);
3800 ++
3801 + put_device(&ctx->afu->dev);
3802 +
3803 + /*
3804 +diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
3805 +index d47532e8f4f1..1d9717b4d67a 100644
3806 +--- a/drivers/misc/cxl/native.c
3807 ++++ b/drivers/misc/cxl/native.c
3808 +@@ -277,6 +277,7 @@ static int do_process_element_cmd(struct cxl_context *ctx,
3809 + u64 cmd, u64 pe_state)
3810 + {
3811 + u64 state;
3812 ++ unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
3813 +
3814 + WARN_ON(!ctx->afu->enabled);
3815 +
3816 +@@ -286,6 +287,10 @@ static int do_process_element_cmd(struct cxl_context *ctx,
3817 + smp_mb();
3818 + cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
3819 + while (1) {
3820 ++ if (time_after_eq(jiffies, timeout)) {
3821 ++ dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
3822 ++ return -EBUSY;
3823 ++ }
3824 + state = be64_to_cpup(ctx->afu->sw_command_status);
3825 + if (state == ~0ULL) {
3826 + pr_err("cxl: Error adding process element to AFU\n");
3827 +@@ -610,13 +615,6 @@ static inline int detach_process_native_dedicated(struct cxl_context *ctx)
3828 + return 0;
3829 + }
3830 +
3831 +-/*
3832 +- * TODO: handle case when this is called inside a rcu_read_lock() which may
3833 +- * happen when we unbind the driver (ie. cxl_context_detach_all()) . Terminate
3834 +- * & remove use a mutex lock and schedule which will not good with lock held.
3835 +- * May need to write do_process_element_cmd() that handles outstanding page
3836 +- * faults synchronously.
3837 +- */
3838 + static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
3839 + {
3840 + if (!ctx->pe_inserted)
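
do_process_element_cmd() previously spun forever if the hardware never updated the status word; the fix bounds the loop with a jiffies deadline checked via time_after_eq(). The general shape of such a bounded polling loop, with a hypothetical read_status callback and timeout budget:

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/sched.h>

#define CMD_TIMEOUT_SECS	5	/* hypothetical budget */

static int wait_for_cmd_done(u64 (*read_status)(void))
{
	unsigned long timeout = jiffies + (HZ * CMD_TIMEOUT_SECS);

	while (read_status() != 0) {
		if (time_after_eq(jiffies, timeout))
			return -EBUSY;	/* give up instead of hanging */
		cond_resched();		/* let other tasks run */
	}
	return 0;
}
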
3841 +diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
3842 +index 10c98ab7f46e..0f2cc9f8b4db 100644
3843 +--- a/drivers/misc/cxl/pci.c
3844 ++++ b/drivers/misc/cxl/pci.c
3845 +@@ -502,7 +502,7 @@ static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
3846 + afu->dev.release = cxl_release_afu;
3847 + afu->slice = slice;
3848 + idr_init(&afu->contexts_idr);
3849 +- spin_lock_init(&afu->contexts_lock);
3850 ++ mutex_init(&afu->contexts_lock);
3851 + spin_lock_init(&afu->afu_cntl_lock);
3852 + mutex_init(&afu->spa_mutex);
3853 +
3854 +diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
3855 +index ce7ec06d87d1..461bdbd5d483 100644
3856 +--- a/drivers/misc/cxl/sysfs.c
3857 ++++ b/drivers/misc/cxl/sysfs.c
3858 +@@ -121,7 +121,7 @@ static ssize_t reset_store_afu(struct device *device,
3859 + int rc;
3860 +
3861 + /* Not safe to reset if it is currently in use */
3862 +- spin_lock(&afu->contexts_lock);
3863 ++ mutex_lock(&afu->contexts_lock);
3864 + if (!idr_is_empty(&afu->contexts_idr)) {
3865 + rc = -EBUSY;
3866 + goto err;
3867 +@@ -132,7 +132,7 @@ static ssize_t reset_store_afu(struct device *device,
3868 +
3869 + rc = count;
3870 + err:
3871 +- spin_unlock(&afu->contexts_lock);
3872 ++ mutex_unlock(&afu->contexts_lock);
3873 + return rc;
3874 + }
3875 +
3876 +@@ -247,7 +247,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
3877 + int rc = -EBUSY;
3878 +
3879 + /* can't change this if we have a user */
3880 +- spin_lock(&afu->contexts_lock);
3881 ++ mutex_lock(&afu->contexts_lock);
3882 + if (!idr_is_empty(&afu->contexts_idr))
3883 + goto err;
3884 +
3885 +@@ -271,7 +271,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
3886 + afu->current_mode = 0;
3887 + afu->num_procs = 0;
3888 +
3889 +- spin_unlock(&afu->contexts_lock);
3890 ++ mutex_unlock(&afu->contexts_lock);
3891 +
3892 + if ((rc = _cxl_afu_deactivate_mode(afu, old_mode)))
3893 + return rc;
3894 +@@ -280,7 +280,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
3895 +
3896 + return count;
3897 + err:
3898 +- spin_unlock(&afu->contexts_lock);
3899 ++ mutex_unlock(&afu->contexts_lock);
3900 + return rc;
3901 + }
3902 +
3903 +diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
3904 +index 4f2fd6fc1e23..432aec8dd3ce 100644
3905 +--- a/drivers/misc/mei/hw-me.c
3906 ++++ b/drivers/misc/mei/hw-me.c
3907 +@@ -234,6 +234,18 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
3908 + struct mei_me_hw *hw = to_me_hw(dev);
3909 + u32 hcsr = mei_hcsr_read(hw);
3910 +
3911 ++ /* H_RST may be found lit before reset is started,
3912 ++ * for example if the preceding reset flow hasn't completed.
3913 ++ * In that case asserting H_RST will be ignored, therefore
3914 ++ * we need to clear the H_RST bit to start a successful reset sequence.
3915 ++ */
3916 ++ if ((hcsr & H_RST) == H_RST) {
3917 ++ dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
3918 ++ hcsr &= ~H_RST;
3919 ++ mei_me_reg_write(hw, H_CSR, hcsr);
3920 ++ hcsr = mei_hcsr_read(hw);
3921 ++ }
3922 ++
3923 + hcsr |= H_RST | H_IG | H_IS;
3924 +
3925 + if (intr_enable)
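
The mei fix reads the control register back and clears a still-set H_RST before asserting it again, since setting an already-set reset bit is ignored by the hardware. A generic read-modify-write sketch of that sequence, with a hypothetical register offset and bit:

#include <linux/io.h>
#include <linux/types.h>

#define MY_CSR	0x04		/* hypothetical control/status register */
#define MY_RST	(1 << 4)	/* hypothetical reset bit */

static void my_hw_reset(void __iomem *regs)
{
	u32 csr = readl(regs + MY_CSR);

	if (csr & MY_RST) {	/* left over from an incomplete reset */
		csr &= ~MY_RST;
		writel(csr, regs + MY_CSR);
		csr = readl(regs + MY_CSR);
	}

	writel(csr | MY_RST, regs + MY_CSR);	/* start a clean reset */
}
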
3926 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
3927 +index 7625bd791fca..023c2010cd75 100644
3928 +--- a/drivers/mmc/host/sdhci.c
3929 ++++ b/drivers/mmc/host/sdhci.c
3930 +@@ -1239,6 +1239,12 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
3931 + spin_unlock_irq(&host->lock);
3932 + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
3933 + spin_lock_irq(&host->lock);
3934 ++
3935 ++ if (mode != MMC_POWER_OFF)
3936 ++ sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
3937 ++ else
3938 ++ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
3939 ++
3940 + return;
3941 + }
3942 +
3943 +diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
3944 +index 541fb7a05625..cc7bfc0c0a71 100644
3945 +--- a/drivers/net/can/usb/kvaser_usb.c
3946 ++++ b/drivers/net/can/usb/kvaser_usb.c
3947 +@@ -1246,6 +1246,9 @@ static int kvaser_usb_close(struct net_device *netdev)
3948 + if (err)
3949 + netdev_warn(netdev, "Cannot stop device, error %d\n", err);
3950 +
3951 ++ /* reset tx contexts */
3952 ++ kvaser_usb_unlink_tx_urbs(priv);
3953 ++
3954 + priv->can.state = CAN_STATE_STOPPED;
3955 + close_candev(priv->netdev);
3956 +
3957 +@@ -1294,12 +1297,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
3958 + if (!urb) {
3959 + netdev_err(netdev, "No memory left for URBs\n");
3960 + stats->tx_dropped++;
3961 +- goto nourbmem;
3962 ++ dev_kfree_skb(skb);
3963 ++ return NETDEV_TX_OK;
3964 + }
3965 +
3966 + buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
3967 + if (!buf) {
3968 + stats->tx_dropped++;
3969 ++ dev_kfree_skb(skb);
3970 + goto nobufmem;
3971 + }
3972 +
3973 +@@ -1334,6 +1339,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
3974 + }
3975 + }
3976 +
3977 ++ /* This should never happen; it implies a flow control bug */
3978 + if (!context) {
3979 + netdev_warn(netdev, "cannot find free context\n");
3980 + ret = NETDEV_TX_BUSY;
3981 +@@ -1364,9 +1370,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
3982 + if (unlikely(err)) {
3983 + can_free_echo_skb(netdev, context->echo_index);
3984 +
3985 +- skb = NULL; /* set to NULL to avoid double free in
3986 +- * dev_kfree_skb(skb) */
3987 +-
3988 + atomic_dec(&priv->active_tx_urbs);
3989 + usb_unanchor_urb(urb);
3990 +
3991 +@@ -1388,8 +1391,6 @@ releasebuf:
3992 + kfree(buf);
3993 + nobufmem:
3994 + usb_free_urb(urb);
3995 +-nourbmem:
3996 +- dev_kfree_skb(skb);
3997 + return ret;
3998 + }
3999 +
4000 +@@ -1502,6 +1503,10 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
4001 + struct kvaser_usb_net_priv *priv;
4002 + int i, err;
4003 +
4004 ++ err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
4005 ++ if (err)
4006 ++ return err;
4007 ++
4008 + netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
4009 + if (!netdev) {
4010 + dev_err(&intf->dev, "Cannot alloc candev\n");
4011 +@@ -1606,9 +1611,6 @@ static int kvaser_usb_probe(struct usb_interface *intf,
4012 +
4013 + usb_set_intfdata(intf, dev);
4014 +
4015 +- for (i = 0; i < MAX_NET_DEVICES; i++)
4016 +- kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, i);
4017 +-
4018 + err = kvaser_usb_get_software_info(dev);
4019 + if (err) {
4020 + dev_err(&intf->dev,
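
The kvaser_usb rework above enforces the ndo_start_xmit ownership rule: once the driver returns NETDEV_TX_OK it has consumed the skb and must free it itself on error paths, while NETDEV_TX_BUSY hands the skb back to the stack untouched, so freeing it in that case would be a double free. A sketch under hypothetical per-packet state:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	void *buf = kmalloc(64, GFP_ATOMIC);	/* hypothetical per-packet state */

	if (!buf) {
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);	/* we consumed it: free it here */
		return NETDEV_TX_OK;	/* not TX_BUSY - nothing to retry */
	}

	/* ... hand skb to the hardware; skb and buf are then released
	 * on tx completion ...
	 */
	return NETDEV_TX_OK;
}
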
4021 +diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
4022 +index e398eda07298..c8af3ce3ea38 100644
4023 +--- a/drivers/net/ethernet/atheros/alx/main.c
4024 ++++ b/drivers/net/ethernet/atheros/alx/main.c
4025 +@@ -184,15 +184,16 @@ static void alx_schedule_reset(struct alx_priv *alx)
4026 + schedule_work(&alx->reset_wk);
4027 + }
4028 +
4029 +-static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
4030 ++static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
4031 + {
4032 + struct alx_rx_queue *rxq = &alx->rxq;
4033 + struct alx_rrd *rrd;
4034 + struct alx_buffer *rxb;
4035 + struct sk_buff *skb;
4036 + u16 length, rfd_cleaned = 0;
4037 ++ int work = 0;
4038 +
4039 +- while (budget > 0) {
4040 ++ while (work < budget) {
4041 + rrd = &rxq->rrd[rxq->rrd_read_idx];
4042 + if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
4043 + break;
4044 +@@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
4045 + ALX_GET_FIELD(le32_to_cpu(rrd->word0),
4046 + RRD_NOR) != 1) {
4047 + alx_schedule_reset(alx);
4048 +- return 0;
4049 ++ return work;
4050 + }
4051 +
4052 + rxb = &rxq->bufs[rxq->read_idx];
4053 +@@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
4054 + }
4055 +
4056 + napi_gro_receive(&alx->napi, skb);
4057 +- budget--;
4058 ++ work++;
4059 +
4060 + next_pkt:
4061 + if (++rxq->read_idx == alx->rx_ringsz)
4062 +@@ -258,21 +259,22 @@ next_pkt:
4063 + if (rfd_cleaned)
4064 + alx_refill_rx_ring(alx, GFP_ATOMIC);
4065 +
4066 +- return budget > 0;
4067 ++ return work;
4068 + }
4069 +
4070 + static int alx_poll(struct napi_struct *napi, int budget)
4071 + {
4072 + struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
4073 + struct alx_hw *hw = &alx->hw;
4074 +- bool complete = true;
4075 + unsigned long flags;
4076 ++ bool tx_complete;
4077 ++ int work;
4078 +
4079 +- complete = alx_clean_tx_irq(alx) &&
4080 +- alx_clean_rx_irq(alx, budget);
4081 ++ tx_complete = alx_clean_tx_irq(alx);
4082 ++ work = alx_clean_rx_irq(alx, budget);
4083 +
4084 +- if (!complete)
4085 +- return 1;
4086 ++ if (!tx_complete || work == budget)
4087 ++ return budget;
4088 +
4089 + napi_complete(&alx->napi);
4090 +
4091 +@@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
4092 +
4093 + alx_post_write(hw);
4094 +
4095 +- return 0;
4096 ++ return work;
4097 + }
4098 +
4099 + static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
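
The alx_poll() change restores the NAPI contract: the poll callback returns how many rx packets it processed, calls napi_complete() and re-enables interrupts only when it stayed under budget, and returns the full budget while work remains. A sketch of that contract, where rx_pending(), process_one_rx() and enable_rx_irq() are hypothetical stand-ins for the driver's ring helpers:

#include <linux/netdevice.h>

static bool rx_pending(void) { return false; }	/* hypothetical ring test */
static int process_one_rx(void) { return 1; }	/* hypothetical rx handler */
static void enable_rx_irq(void) { }		/* hypothetical irq unmask */

static int my_poll(struct napi_struct *napi, int budget)
{
	int work = 0;

	while (work < budget && rx_pending())
		work += process_one_rx();

	if (work < budget) {		/* ring drained: back to irq mode */
		napi_complete(napi);
		enable_rx_irq();
	}
	return work;			/* never more than budget */
}
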
4100 +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
4101 +index 77f8f836cbbe..5748542f6717 100644
4102 +--- a/drivers/net/ethernet/broadcom/tg3.c
4103 ++++ b/drivers/net/ethernet/broadcom/tg3.c
4104 +@@ -17789,23 +17789,6 @@ static int tg3_init_one(struct pci_dev *pdev,
4105 + goto err_out_apeunmap;
4106 + }
4107 +
4108 +- /*
4109 +- * Reset chip in case UNDI or EFI driver did not shutdown
4110 +- * DMA self test will enable WDMAC and we'll see (spurious)
4111 +- * pending DMA on the PCI bus at that point.
4112 +- */
4113 +- if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
4114 +- (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
4115 +- tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4116 +- tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4117 +- }
4118 +-
4119 +- err = tg3_test_dma(tp);
4120 +- if (err) {
4121 +- dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
4122 +- goto err_out_apeunmap;
4123 +- }
4124 +-
4125 + intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
4126 + rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
4127 + sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
4128 +@@ -17850,6 +17833,23 @@ static int tg3_init_one(struct pci_dev *pdev,
4129 + sndmbx += 0xc;
4130 + }
4131 +
4132 ++ /*
4133 ++ * Reset chip in case UNDI or EFI driver did not shutdown
4134 ++ * DMA self test will enable WDMAC and we'll see (spurious)
4135 ++ * pending DMA on the PCI bus at that point.
4136 ++ */
4137 ++ if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
4138 ++ (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
4139 ++ tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4140 ++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4141 ++ }
4142 ++
4143 ++ err = tg3_test_dma(tp);
4144 ++ if (err) {
4145 ++ dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
4146 ++ goto err_out_apeunmap;
4147 ++ }
4148 ++
4149 + tg3_init_coal(tp);
4150 +
4151 + pci_set_drvdata(pdev, dev);
4152 +diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
4153 +index 73cf1653a4a3..167cd8ede397 100644
4154 +--- a/drivers/net/ethernet/cisco/enic/enic_main.c
4155 ++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
4156 +@@ -1059,10 +1059,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
4157 + PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
4158 + }
4159 +
4160 +- if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
4161 +- skb->csum = htons(checksum);
4162 +- skb->ip_summed = CHECKSUM_COMPLETE;
4163 +- }
4164 ++ /* Hardware does not provide whole packet checksum. It only
4165 ++	 * provides a pseudo checksum. Since hw validates the packet
4166 ++	 * checksum but does not provide us the checksum value, use
4167 ++	 * CHECKSUM_UNNECESSARY.
4168 ++ */
4169 ++ if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
4170 ++ ipv4_csum_ok)
4171 ++ skb->ip_summed = CHECKSUM_UNNECESSARY;
4172 +
4173 + if (vlan_stripped)
4174 + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
4175 +diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
4176 +index 597c463e384d..d2975fa7e549 100644
4177 +--- a/drivers/net/ethernet/emulex/benet/be_main.c
4178 ++++ b/drivers/net/ethernet/emulex/benet/be_main.c
4179 +@@ -4427,9 +4427,11 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4180 + be16_to_cpu(port));
4181 + }
4182 +
4183 +-static bool be_gso_check(struct sk_buff *skb, struct net_device *dev)
4184 ++static netdev_features_t be_features_check(struct sk_buff *skb,
4185 ++ struct net_device *dev,
4186 ++ netdev_features_t features)
4187 + {
4188 +- return vxlan_gso_check(skb);
4189 ++ return vxlan_features_check(skb, features);
4190 + }
4191 + #endif
4192 +
4193 +@@ -4460,7 +4462,7 @@ static const struct net_device_ops be_netdev_ops = {
4194 + #ifdef CONFIG_BE2NET_VXLAN
4195 + .ndo_add_vxlan_port = be_add_vxlan_port,
4196 + .ndo_del_vxlan_port = be_del_vxlan_port,
4197 +- .ndo_gso_check = be_gso_check,
4198 ++ .ndo_features_check = be_features_check,
4199 + #endif
4200 + };
4201 +
4202 +diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
4203 +index 5b8300a32bf5..4d61ef50b465 100644
4204 +--- a/drivers/net/ethernet/intel/Kconfig
4205 ++++ b/drivers/net/ethernet/intel/Kconfig
4206 +@@ -281,6 +281,17 @@ config I40E_DCB
4207 +
4208 + If unsure, say N.
4209 +
4210 ++config I40E_FCOE
4211 ++ bool "Fibre Channel over Ethernet (FCoE)"
4212 ++ default n
4213 ++ depends on I40E && DCB && FCOE
4214 ++ ---help---
4215 ++ Say Y here if you want to use Fibre Channel over Ethernet (FCoE)
4216 ++	  in the driver. This will create a new netdev for exclusive FCoE
4217 ++ use with XL710 FCoE offloads enabled.
4218 ++
4219 ++ If unsure, say N.
4220 ++
4221 + config I40EVF
4222 + tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
4223 + depends on PCI_MSI
4224 +diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
4225 +index 4b94ddb29c24..c40581999121 100644
4226 +--- a/drivers/net/ethernet/intel/i40e/Makefile
4227 ++++ b/drivers/net/ethernet/intel/i40e/Makefile
4228 +@@ -44,4 +44,4 @@ i40e-objs := i40e_main.o \
4229 + i40e_virtchnl_pf.o
4230 +
4231 + i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
4232 +-i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o
4233 ++i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o
4234 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
4235 +index 045b5c4b98b3..ad802dd0f67a 100644
4236 +--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
4237 ++++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
4238 +@@ -78,7 +78,7 @@ do { \
4239 + } while (0)
4240 +
4241 + typedef enum i40e_status_code i40e_status;
4242 +-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
4243 ++#ifdef CONFIG_I40E_FCOE
4244 + #define I40E_FCOE
4245 +-#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
4246 ++#endif
4247 + #endif /* _I40E_OSDEP_H_ */
4248 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
4249 +index 4d69e382b4e5..6bdaa313e7ea 100644
4250 +--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
4251 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
4252 +@@ -1569,8 +1569,15 @@ int mlx4_en_start_port(struct net_device *dev)
4253 + mlx4_en_free_affinity_hint(priv, i);
4254 + goto cq_err;
4255 + }
4256 +- for (j = 0; j < cq->size; j++)
4257 +- cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
4258 ++
4259 ++ for (j = 0; j < cq->size; j++) {
4260 ++ struct mlx4_cqe *cqe = NULL;
4261 ++
4262 ++ cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
4263 ++ priv->cqe_factor;
4264 ++ cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
4265 ++ }
4266 ++
4267 + err = mlx4_en_set_cq_moder(priv, cq);
4268 + if (err) {
4269 + en_err(priv, "Failed setting cq moderation parameters\n");
4270 +@@ -2356,9 +2363,11 @@ static void mlx4_en_del_vxlan_port(struct net_device *dev,
4271 + queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
4272 + }
4273 +
4274 +-static bool mlx4_en_gso_check(struct sk_buff *skb, struct net_device *dev)
4275 ++static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
4276 ++ struct net_device *dev,
4277 ++ netdev_features_t features)
4278 + {
4279 +- return vxlan_gso_check(skb);
4280 ++ return vxlan_features_check(skb, features);
4281 + }
4282 + #endif
4283 +
4284 +@@ -2391,7 +2400,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
4285 + #ifdef CONFIG_MLX4_EN_VXLAN
4286 + .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
4287 + .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
4288 +- .ndo_gso_check = mlx4_en_gso_check,
4289 ++ .ndo_features_check = mlx4_en_features_check,
4290 + #endif
4291 + };
4292 +
4293 +@@ -2425,7 +2434,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
4294 + #ifdef CONFIG_MLX4_EN_VXLAN
4295 + .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
4296 + .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
4297 +- .ndo_gso_check = mlx4_en_gso_check,
4298 ++ .ndo_features_check = mlx4_en_features_check,
4299 + #endif
4300 + };
4301 +
4302 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
4303 +index 454d9fea640e..11ff28b5fca3 100644
4304 +--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
4305 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
4306 +@@ -954,7 +954,17 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
4307 + tx_desc->ctrl.owner_opcode = op_own;
4308 + if (send_doorbell) {
4309 + wmb();
4310 +- iowrite32(ring->doorbell_qpn,
4311 ++ /* Since there is no iowrite*_native() that writes the
4312 ++ * value as is, without byteswapping - using the one
4313 ++		 * that doesn't do byteswapping in the relevant arch
4314 ++ * endianness.
4315 ++ */
4316 ++#if defined(__LITTLE_ENDIAN)
4317 ++ iowrite32(
4318 ++#else
4319 ++ iowrite32be(
4320 ++#endif
4321 ++ ring->doorbell_qpn,
4322 + ring->bf.uar->map + MLX4_SEND_DOORBELL);
4323 + } else {
4324 + ring->xmit_more++;
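
The doorbell hunk above picks the MMIO accessor that performs no byte swap for the host's endianness, because the doorbell value is already stored in the device's expected layout. Condensed into a standalone helper with a hypothetical ring_doorbell name:

#include <linux/io.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static void ring_doorbell(u32 qpn_raw, void __iomem *db)
{
#if defined(__LITTLE_ENDIAN)
	iowrite32(qpn_raw, db);		/* iowrite32() does not swap on LE */
#else
	iowrite32be(qpn_raw, db);	/* iowrite32be() does not swap on BE */
#endif
}
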
4325 +diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
4326 +index 2e88a235e26b..5f1228794328 100644
4327 +--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
4328 ++++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
4329 +@@ -1647,8 +1647,8 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
4330 + /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */
4331 + MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET);
4332 + if (byte_field) {
4333 +- param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
4334 +- param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
4335 ++ param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED;
4336 ++ param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED;
4337 + param->cqe_size = 1 << ((byte_field &
4338 + MLX4_CQE_SIZE_MASK_STRIDE) + 5);
4339 + param->eqe_size = 1 << (((byte_field &
4340 +diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
4341 +index 193a6adb5d04..b7cdef0aebd6 100644
4342 +--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
4343 ++++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
4344 +@@ -590,6 +590,7 @@ EXPORT_SYMBOL_GPL(mlx4_mr_free);
4345 + void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
4346 + {
4347 + mlx4_mtt_cleanup(dev, &mr->mtt);
4348 ++ mr->mtt.order = -1;
4349 + }
4350 + EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);
4351 +
4352 +@@ -599,14 +600,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
4353 + {
4354 + int err;
4355 +
4356 +- mpt_entry->start = cpu_to_be64(iova);
4357 +- mpt_entry->length = cpu_to_be64(size);
4358 +- mpt_entry->entity_size = cpu_to_be32(page_shift);
4359 +-
4360 + err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
4361 + if (err)
4362 + return err;
4363 +
4364 ++ mpt_entry->start = cpu_to_be64(mr->iova);
4365 ++ mpt_entry->length = cpu_to_be64(mr->size);
4366 ++ mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
4367 ++
4368 + mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
4369 + MLX4_MPT_PD_FLAG_EN_INV);
4370 + mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
4371 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
4372 +index a913b3ad2f89..477a5d33d79c 100644
4373 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
4374 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
4375 +@@ -504,9 +504,11 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev,
4376 + adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
4377 + }
4378 +
4379 +-static bool qlcnic_gso_check(struct sk_buff *skb, struct net_device *dev)
4380 ++static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
4381 ++ struct net_device *dev,
4382 ++ netdev_features_t features)
4383 + {
4384 +- return vxlan_gso_check(skb);
4385 ++ return vxlan_features_check(skb, features);
4386 + }
4387 + #endif
4388 +
4389 +@@ -531,7 +533,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
4390 + #ifdef CONFIG_QLCNIC_VXLAN
4391 + .ndo_add_vxlan_port = qlcnic_add_vxlan_port,
4392 + .ndo_del_vxlan_port = qlcnic_del_vxlan_port,
4393 +- .ndo_gso_check = qlcnic_gso_check,
4394 ++ .ndo_features_check = qlcnic_features_check,
4395 + #endif
4396 + #ifdef CONFIG_NET_POLL_CONTROLLER
4397 + .ndo_poll_controller = qlcnic_poll_controller,
4398 +diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
4399 +index c560f9aeb55d..64d1cef4cda1 100644
4400 +--- a/drivers/net/ethernet/ti/cpsw.c
4401 ++++ b/drivers/net/ethernet/ti/cpsw.c
4402 +@@ -610,7 +610,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
4403 +
4404 + /* Clear all mcast from ALE */
4405 + cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
4406 +- priv->host_port);
4407 ++ priv->host_port, -1);
4408 +
4409 + /* Flood All Unicast Packets to Host port */
4410 + cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
4411 +@@ -634,6 +634,12 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
4412 + static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
4413 + {
4414 + struct cpsw_priv *priv = netdev_priv(ndev);
4415 ++ int vid;
4416 ++
4417 ++ if (priv->data.dual_emac)
4418 ++ vid = priv->slaves[priv->emac_port].port_vlan;
4419 ++ else
4420 ++ vid = priv->data.default_vlan;
4421 +
4422 + if (ndev->flags & IFF_PROMISC) {
4423 + /* Enable promiscuous mode */
4424 +@@ -649,7 +655,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
4425 + cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
4426 +
4427 + /* Clear all mcast from ALE */
4428 +- cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
4429 ++ cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port,
4430 ++ vid);
4431 +
4432 + if (!netdev_mc_empty(ndev)) {
4433 + struct netdev_hw_addr *ha;
4434 +@@ -757,6 +764,14 @@ requeue:
4435 + static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
4436 + {
4437 + struct cpsw_priv *priv = dev_id;
4438 ++ int value = irq - priv->irqs_table[0];
4439 ++
4440 ++ /* NOTICE: Ending IRQ here. The trick with the 'value' variable above
4441 ++ * is to make sure we will always write the correct value to the EOI
4442 ++ * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2
4443 ++ * for TX Interrupt and 3 for MISC Interrupt.
4444 ++ */
4445 ++ cpdma_ctlr_eoi(priv->dma, value);
4446 +
4447 + cpsw_intr_disable(priv);
4448 + if (priv->irq_enabled == true) {
4449 +@@ -786,8 +801,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
4450 + int num_tx, num_rx;
4451 +
4452 + num_tx = cpdma_chan_process(priv->txch, 128);
4453 +- if (num_tx)
4454 +- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
4455 +
4456 + num_rx = cpdma_chan_process(priv->rxch, budget);
4457 + if (num_rx < budget) {
4458 +@@ -795,7 +808,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
4459 +
4460 + napi_complete(napi);
4461 + cpsw_intr_enable(priv);
4462 +- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
4463 + prim_cpsw = cpsw_get_slave_priv(priv, 0);
4464 + if (prim_cpsw->irq_enabled == false) {
4465 + prim_cpsw->irq_enabled = true;
4466 +@@ -1310,8 +1322,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
4467 + napi_enable(&priv->napi);
4468 + cpdma_ctlr_start(priv->dma);
4469 + cpsw_intr_enable(priv);
4470 +- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
4471 +- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
4472 +
4473 + prim_cpsw = cpsw_get_slave_priv(priv, 0);
4474 + if (prim_cpsw->irq_enabled == false) {
4475 +@@ -1578,9 +1588,6 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
4476 + cpdma_chan_start(priv->txch);
4477 + cpdma_ctlr_int_ctrl(priv->dma, true);
4478 + cpsw_intr_enable(priv);
4479 +- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
4480 +- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
4481 +-
4482 + }
4483 +
4484 + static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
4485 +@@ -1620,9 +1627,6 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
4486 + cpsw_interrupt(ndev->irq, priv);
4487 + cpdma_ctlr_int_ctrl(priv->dma, true);
4488 + cpsw_intr_enable(priv);
4489 +- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
4490 +- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
4491 +-
4492 + }
4493 + #endif
4494 +
4495 +diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
4496 +index 097ebe7077ac..5246b3a18ff8 100644
4497 +--- a/drivers/net/ethernet/ti/cpsw_ale.c
4498 ++++ b/drivers/net/ethernet/ti/cpsw_ale.c
4499 +@@ -234,7 +234,7 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
4500 + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
4501 + }
4502 +
4503 +-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
4504 ++int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
4505 + {
4506 + u32 ale_entry[ALE_ENTRY_WORDS];
4507 + int ret, idx;
4508 +@@ -245,6 +245,14 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
4509 + if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
4510 + continue;
4511 +
4512 ++		/* If the vid passed is -1, remove all multicast entries from
4513 ++		 * the table irrespective of vlan id; if a valid vlan id is
4514 ++		 * passed, remove only multicast entries added to that vlan id.
4515 ++		 * If the vlan id doesn't match, move on to the next entry.
4516 ++ */
4517 ++ if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid)
4518 ++ continue;
4519 ++
4520 + if (cpsw_ale_get_mcast(ale_entry)) {
4521 + u8 addr[6];
4522 +
4523 +diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
4524 +index c0d4127aa549..af1e7ecd87c6 100644
4525 +--- a/drivers/net/ethernet/ti/cpsw_ale.h
4526 ++++ b/drivers/net/ethernet/ti/cpsw_ale.h
4527 +@@ -92,7 +92,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
4528 +
4529 + int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
4530 + int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
4531 +-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
4532 ++int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
4533 + int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
4534 + int flags, u16 vid);
4535 + int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
4536 +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
4537 +index 2368395d8ae5..9c505c4dbe04 100644
4538 +--- a/drivers/net/team/team.c
4539 ++++ b/drivers/net/team/team.c
4540 +@@ -629,6 +629,7 @@ static int team_change_mode(struct team *team, const char *kind)
4541 + static void team_notify_peers_work(struct work_struct *work)
4542 + {
4543 + struct team *team;
4544 ++ int val;
4545 +
4546 + team = container_of(work, struct team, notify_peers.dw.work);
4547 +
4548 +@@ -636,9 +637,14 @@ static void team_notify_peers_work(struct work_struct *work)
4549 + schedule_delayed_work(&team->notify_peers.dw, 0);
4550 + return;
4551 + }
4552 ++ val = atomic_dec_if_positive(&team->notify_peers.count_pending);
4553 ++ if (val < 0) {
4554 ++ rtnl_unlock();
4555 ++ return;
4556 ++ }
4557 + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
4558 + rtnl_unlock();
4559 +- if (!atomic_dec_and_test(&team->notify_peers.count_pending))
4560 ++ if (val)
4561 + schedule_delayed_work(&team->notify_peers.dw,
4562 + msecs_to_jiffies(team->notify_peers.interval));
4563 + }
4564 +@@ -669,6 +675,7 @@ static void team_notify_peers_fini(struct team *team)
4565 + static void team_mcast_rejoin_work(struct work_struct *work)
4566 + {
4567 + struct team *team;
4568 ++ int val;
4569 +
4570 + team = container_of(work, struct team, mcast_rejoin.dw.work);
4571 +
4572 +@@ -676,9 +683,14 @@ static void team_mcast_rejoin_work(struct work_struct *work)
4573 + schedule_delayed_work(&team->mcast_rejoin.dw, 0);
4574 + return;
4575 + }
4576 ++ val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
4577 ++ if (val < 0) {
4578 ++ rtnl_unlock();
4579 ++ return;
4580 ++ }
4581 + call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
4582 + rtnl_unlock();
4583 +- if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
4584 ++ if (val)
4585 + schedule_delayed_work(&team->mcast_rejoin.dw,
4586 + msecs_to_jiffies(team->mcast_rejoin.interval));
4587 + }
4588 +diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
4589 +index 8021f6eec27f..41ae16435300 100644
4590 +--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
4591 ++++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
4592 +@@ -665,7 +665,7 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
4593 + if (num_of_ant(mvm->fw->valid_rx_ant) == 1)
4594 + return false;
4595 +
4596 +- if (!mvm->cfg->rx_with_siso_diversity)
4597 ++ if (mvm->cfg->rx_with_siso_diversity)
4598 + return false;
4599 +
4600 + ieee80211_iterate_active_interfaces_atomic(
4601 +diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
4602 +index 846a2e6e34d8..c70efb9a6e78 100644
4603 +--- a/drivers/net/wireless/rtlwifi/pci.c
4604 ++++ b/drivers/net/wireless/rtlwifi/pci.c
4605 +@@ -666,7 +666,8 @@ tx_status_ok:
4606 + }
4607 +
4608 + static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
4609 +- u8 *entry, int rxring_idx, int desc_idx)
4610 ++ struct sk_buff *new_skb, u8 *entry,
4611 ++ int rxring_idx, int desc_idx)
4612 + {
4613 + struct rtl_priv *rtlpriv = rtl_priv(hw);
4614 + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
4615 +@@ -674,11 +675,15 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
4616 + u8 tmp_one = 1;
4617 + struct sk_buff *skb;
4618 +
4619 ++ if (likely(new_skb)) {
4620 ++ skb = new_skb;
4621 ++ goto remap;
4622 ++ }
4623 + skb = dev_alloc_skb(rtlpci->rxbuffersize);
4624 + if (!skb)
4625 + return 0;
4626 +- rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
4627 +
4628 ++remap:
4629 + /* just set skb->cb to mapping addr for pci_unmap_single use */
4630 + *((dma_addr_t *)skb->cb) =
4631 + pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
4632 +@@ -686,6 +691,7 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
4633 + bufferaddress = *((dma_addr_t *)skb->cb);
4634 + if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
4635 + return 0;
4636 ++ rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
4637 + if (rtlpriv->use_new_trx_flow) {
4638 + rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
4639 + HW_DESC_RX_PREPARE,
4640 +@@ -781,6 +787,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
4641 + /*rx pkt */
4642 + struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
4643 + rtlpci->rx_ring[rxring_idx].idx];
4644 ++ struct sk_buff *new_skb;
4645 +
4646 + if (rtlpriv->use_new_trx_flow) {
4647 + rx_remained_cnt =
4648 +@@ -807,6 +814,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
4649 + pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
4650 + rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
4651 +
4652 ++		/* get a new skb - if allocation fails, the old one will be reused */
4653 ++ new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
4654 ++ if (unlikely(!new_skb)) {
4655 ++ pr_err("Allocation of new skb failed in %s\n",
4656 ++ __func__);
4657 ++ goto no_new;
4658 ++ }
4659 + if (rtlpriv->use_new_trx_flow) {
4660 + buffer_desc =
4661 + &rtlpci->rx_ring[rxring_idx].buffer_desc
4662 +@@ -911,14 +925,16 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
4663 + schedule_work(&rtlpriv->works.lps_change_work);
4664 + }
4665 + end:
4666 ++ skb = new_skb;
4667 ++no_new:
4668 + if (rtlpriv->use_new_trx_flow) {
4669 +- _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc,
4670 ++ _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc,
4671 + rxring_idx,
4672 +- rtlpci->rx_ring[rxring_idx].idx);
4673 ++ rtlpci->rx_ring[rxring_idx].idx);
4674 + } else {
4675 +- _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx,
4676 ++ _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc,
4677 ++ rxring_idx,
4678 + rtlpci->rx_ring[rxring_idx].idx);
4679 +-
4680 + if (rtlpci->rx_ring[rxring_idx].idx ==
4681 + rtlpci->rxringcount - 1)
4682 + rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc,
4683 +@@ -1307,7 +1323,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
4684 + rtlpci->rx_ring[rxring_idx].idx = 0;
4685 + for (i = 0; i < rtlpci->rxringcount; i++) {
4686 + entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i];
4687 +- if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
4688 ++ if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
4689 + rxring_idx, i))
4690 + return -ENOMEM;
4691 + }
4692 +@@ -1332,7 +1348,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
4693 +
4694 + for (i = 0; i < rtlpci->rxringcount; i++) {
4695 + entry = &rtlpci->rx_ring[rxring_idx].desc[i];
4696 +- if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
4697 ++ if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
4698 + rxring_idx, i))
4699 + return -ENOMEM;
4700 + }
4701 +diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
4702 +index d2ec5160bbf0..5c646d5f7bb8 100644
4703 +--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
4704 ++++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
4705 +@@ -955,6 +955,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
4706 + local_save_flags(flags);
4707 + local_irq_enable();
4708 +
4709 ++ rtlhal->fw_ready = false;
4710 + rtlpriv->intf_ops->disable_aspm(hw);
4711 + rtstatus = _rtl92ce_init_mac(hw);
4712 + if (!rtstatus) {
4713 +@@ -971,6 +972,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
4714 + goto exit;
4715 + }
4716 +
4717 ++ rtlhal->fw_ready = true;
4718 + rtlhal->last_hmeboxnum = 0;
4719 + rtl92c_phy_mac_config(hw);
4720 + /* because last function modify RCR, so we update
4721 +diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
4722 +index 083ecc93fe5e..5f1fda44882b 100644
4723 +--- a/drivers/net/xen-netback/common.h
4724 ++++ b/drivers/net/xen-netback/common.h
4725 +@@ -230,6 +230,8 @@ struct xenvif {
4726 + */
4727 + bool disabled;
4728 + unsigned long status;
4729 ++ unsigned long drain_timeout;
4730 ++ unsigned long stall_timeout;
4731 +
4732 + /* Queues */
4733 + struct xenvif_queue *queues;
4734 +@@ -328,7 +330,7 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id);
4735 + extern bool separate_tx_rx_irq;
4736 +
4737 + extern unsigned int rx_drain_timeout_msecs;
4738 +-extern unsigned int rx_drain_timeout_jiffies;
4739 ++extern unsigned int rx_stall_timeout_msecs;
4740 + extern unsigned int xenvif_max_queues;
4741 +
4742 + #ifdef CONFIG_DEBUG_FS
4743 +diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
4744 +index 895fe84011e7..d752d1c5c8bd 100644
4745 +--- a/drivers/net/xen-netback/interface.c
4746 ++++ b/drivers/net/xen-netback/interface.c
4747 +@@ -166,7 +166,7 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
4748 + goto drop;
4749 +
4750 + cb = XENVIF_RX_CB(skb);
4751 +- cb->expires = jiffies + rx_drain_timeout_jiffies;
4752 ++ cb->expires = jiffies + vif->drain_timeout;
4753 +
4754 + xenvif_rx_queue_tail(queue, skb);
4755 + xenvif_kick_thread(queue);
4756 +@@ -414,6 +414,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
4757 + vif->ip_csum = 1;
4758 + vif->dev = dev;
4759 + vif->disabled = false;
4760 ++ vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
4761 ++ vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);
4762 +
4763 + /* Start out with no queues. */
4764 + vif->queues = NULL;
4765 +diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
4766 +index 6563f0713fc0..c39aace4f642 100644
4767 +--- a/drivers/net/xen-netback/netback.c
4768 ++++ b/drivers/net/xen-netback/netback.c
4769 +@@ -60,14 +60,12 @@ module_param(separate_tx_rx_irq, bool, 0644);
4770 + */
4771 + unsigned int rx_drain_timeout_msecs = 10000;
4772 + module_param(rx_drain_timeout_msecs, uint, 0444);
4773 +-unsigned int rx_drain_timeout_jiffies;
4774 +
4775 + /* The length of time before the frontend is considered unresponsive
4776 + * because it isn't providing Rx slots.
4777 + */
4778 +-static unsigned int rx_stall_timeout_msecs = 60000;
4779 ++unsigned int rx_stall_timeout_msecs = 60000;
4780 + module_param(rx_stall_timeout_msecs, uint, 0444);
4781 +-static unsigned int rx_stall_timeout_jiffies;
4782 +
4783 + unsigned int xenvif_max_queues;
4784 + module_param_named(max_queues, xenvif_max_queues, uint, 0644);
4785 +@@ -2022,7 +2020,7 @@ static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
4786 + return !queue->stalled
4787 + && prod - cons < XEN_NETBK_RX_SLOTS_MAX
4788 + && time_after(jiffies,
4789 +- queue->last_rx_time + rx_stall_timeout_jiffies);
4790 ++ queue->last_rx_time + queue->vif->stall_timeout);
4791 + }
4792 +
4793 + static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
4794 +@@ -2040,8 +2038,9 @@ static bool xenvif_have_rx_work(struct xenvif_queue *queue)
4795 + {
4796 + return (!skb_queue_empty(&queue->rx_queue)
4797 + && xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
4798 +- || xenvif_rx_queue_stalled(queue)
4799 +- || xenvif_rx_queue_ready(queue)
4800 ++ || (queue->vif->stall_timeout &&
4801 ++ (xenvif_rx_queue_stalled(queue)
4802 ++ || xenvif_rx_queue_ready(queue)))
4803 + || kthread_should_stop()
4804 + || queue->vif->disabled;
4805 + }
4806 +@@ -2094,6 +2093,9 @@ int xenvif_kthread_guest_rx(void *data)
4807 + struct xenvif_queue *queue = data;
4808 + struct xenvif *vif = queue->vif;
4809 +
4810 ++ if (!vif->stall_timeout)
4811 ++ xenvif_queue_carrier_on(queue);
4812 ++
4813 + for (;;) {
4814 + xenvif_wait_for_rx_work(queue);
4815 +
4816 +@@ -2120,10 +2122,12 @@ int xenvif_kthread_guest_rx(void *data)
4817 + * while it's probably not responsive, drop the
4818 + * carrier so packets are dropped earlier.
4819 + */
4820 +- if (xenvif_rx_queue_stalled(queue))
4821 +- xenvif_queue_carrier_off(queue);
4822 +- else if (xenvif_rx_queue_ready(queue))
4823 +- xenvif_queue_carrier_on(queue);
4824 ++ if (vif->stall_timeout) {
4825 ++ if (xenvif_rx_queue_stalled(queue))
4826 ++ xenvif_queue_carrier_off(queue);
4827 ++ else if (xenvif_rx_queue_ready(queue))
4828 ++ xenvif_queue_carrier_on(queue);
4829 ++ }
4830 +
4831 + /* Queued packets may have foreign pages from other
4832 + * domains. These cannot be queued indefinitely as
4833 +@@ -2194,9 +2198,6 @@ static int __init netback_init(void)
4834 + if (rc)
4835 + goto failed_init;
4836 +
4837 +- rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
4838 +- rx_stall_timeout_jiffies = msecs_to_jiffies(rx_stall_timeout_msecs);
4839 +-
4840 + #ifdef CONFIG_DEBUG_FS
4841 + xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
4842 + if (IS_ERR_OR_NULL(xen_netback_dbg_root))
4843 +diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
4844 +index fab0d4b42f58..c047282c4ee0 100644
4845 +--- a/drivers/net/xen-netback/xenbus.c
4846 ++++ b/drivers/net/xen-netback/xenbus.c
4847 +@@ -736,6 +736,7 @@ static void connect(struct backend_info *be)
4848 + }
4849 +
4850 + queue->remaining_credit = credit_bytes;
4851 ++ queue->credit_usec = credit_usec;
4852 +
4853 + err = connect_rings(be, queue);
4854 + if (err) {
4855 +@@ -886,9 +887,15 @@ static int read_xenbus_vif_flags(struct backend_info *be)
4856 + return -EOPNOTSUPP;
4857 +
4858 + if (xenbus_scanf(XBT_NIL, dev->otherend,
4859 +- "feature-rx-notify", "%d", &val) < 0 || val == 0) {
4860 +- xenbus_dev_fatal(dev, -EINVAL, "feature-rx-notify is mandatory");
4861 +- return -EINVAL;
4862 ++ "feature-rx-notify", "%d", &val) < 0)
4863 ++ val = 0;
4864 ++ if (!val) {
4865 ++ /* - Reduce drain timeout to poll more frequently for
4866 ++ * Rx requests.
4867 ++ * - Disable Rx stall detection.
4868 ++ */
4869 ++ be->vif->drain_timeout = msecs_to_jiffies(30);
4870 ++ be->vif->stall_timeout = 0;
4871 + }
4872 +
4873 + if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
4874 +diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
4875 +index 37040ab42890..fdff39ff5021 100644
4876 +--- a/drivers/pinctrl/pinctrl-xway.c
4877 ++++ b/drivers/pinctrl/pinctrl-xway.c
4878 +@@ -798,10 +798,8 @@ static int pinmux_xway_probe(struct platform_device *pdev)
4879 +
4880 + /* load the gpio chip */
4881 + xway_chip.dev = &pdev->dev;
4882 +- of_gpiochip_add(&xway_chip);
4883 + ret = gpiochip_add(&xway_chip);
4884 + if (ret) {
4885 +- of_gpiochip_remove(&xway_chip);
4886 + dev_err(&pdev->dev, "Failed to register gpio chip\n");
4887 + return ret;
4888 + }
4889 +diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
4890 +index c1a6cd66af42..abdaed34c728 100644
4891 +--- a/drivers/platform/x86/asus-nb-wmi.c
4892 ++++ b/drivers/platform/x86/asus-nb-wmi.c
4893 +@@ -191,6 +191,15 @@ static const struct dmi_system_id asus_quirks[] = {
4894 + },
4895 + {
4896 + .callback = dmi_matched,
4897 ++ .ident = "ASUSTeK COMPUTER INC. X551CA",
4898 ++ .matches = {
4899 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
4900 ++ DMI_MATCH(DMI_PRODUCT_NAME, "X551CA"),
4901 ++ },
4902 ++ .driver_data = &quirk_asus_wapf4,
4903 ++ },
4904 ++ {
4905 ++ .callback = dmi_matched,
4906 + .ident = "ASUSTeK COMPUTER INC. X55A",
4907 + .matches = {
4908 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
4909 +diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
4910 +index 6bec745b6b92..10ce6cba4455 100644
4911 +--- a/drivers/platform/x86/hp_accel.c
4912 ++++ b/drivers/platform/x86/hp_accel.c
4913 +@@ -246,6 +246,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
4914 + AXIS_DMI_MATCH("HPB64xx", "HP ProBook 64", xy_swap),
4915 + AXIS_DMI_MATCH("HPB64xx", "HP EliteBook 84", xy_swap),
4916 + AXIS_DMI_MATCH("HPB65xx", "HP ProBook 65", x_inverted),
4917 ++ AXIS_DMI_MATCH("HPZBook15", "HP ZBook 15", x_inverted),
4918 + { NULL, }
4919 + /* Laptop models without axis info (yet):
4920 + * "NC6910" "HP Compaq 6910"
4921 +diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
4922 +index a94e7a7820b4..51272b5d7552 100644
4923 +--- a/drivers/reset/reset-sunxi.c
4924 ++++ b/drivers/reset/reset-sunxi.c
4925 +@@ -102,6 +102,8 @@ static int sunxi_reset_init(struct device_node *np)
4926 + goto err_alloc;
4927 + }
4928 +
4929 ++ spin_lock_init(&data->lock);
4930 ++
4931 + data->rcdev.owner = THIS_MODULE;
4932 + data->rcdev.nr_resets = size * 32;
4933 + data->rcdev.ops = &sunxi_reset_ops;
4934 +@@ -157,6 +159,8 @@ static int sunxi_reset_probe(struct platform_device *pdev)
4935 + if (IS_ERR(data->membase))
4936 + return PTR_ERR(data->membase);
4937 +
4938 ++ spin_lock_init(&data->lock);
4939 ++
4940 + data->rcdev.owner = THIS_MODULE;
4941 + data->rcdev.nr_resets = resource_size(res) * 32;
4942 + data->rcdev.ops = &sunxi_reset_ops;
4943 +diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
4944 +index 0d1d06488a28..e689bf20a3ea 100644
4945 +--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
4946 ++++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
4947 +@@ -1006,12 +1006,9 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
4948 + &mpt2sas_phy->remote_identify);
4949 + _transport_add_phy_to_an_existing_port(ioc, sas_node,
4950 + mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address);
4951 +- } else {
4952 ++ } else
4953 + memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct
4954 + sas_identify));
4955 +- _transport_del_phy_from_an_existing_port(ioc, sas_node,
4956 +- mpt2sas_phy);
4957 +- }
4958 +
4959 + if (mpt2sas_phy->phy)
4960 + mpt2sas_phy->phy->negotiated_linkrate =
4961 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
4962 +index d4bafaaebea9..3637ae6c0171 100644
4963 +--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
4964 ++++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
4965 +@@ -1003,12 +1003,9 @@ mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
4966 + &mpt3sas_phy->remote_identify);
4967 + _transport_add_phy_to_an_existing_port(ioc, sas_node,
4968 + mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address);
4969 +- } else {
4970 ++ } else
4971 + memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct
4972 + sas_identify));
4973 +- _transport_del_phy_from_an_existing_port(ioc, sas_node,
4974 +- mpt3sas_phy);
4975 +- }
4976 +
4977 + if (mpt3sas_phy->phy)
4978 + mpt3sas_phy->phy->negotiated_linkrate =
4979 +diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
4980 +index c1d04d4d3c6c..262ab837a704 100644
4981 +--- a/drivers/scsi/scsi_devinfo.c
4982 ++++ b/drivers/scsi/scsi_devinfo.c
4983 +@@ -211,6 +211,7 @@ static struct {
4984 + {"Medion", "Flash XL MMC/SD", "2.6D", BLIST_FORCELUN},
4985 + {"MegaRAID", "LD", NULL, BLIST_FORCELUN},
4986 + {"MICROP", "4110", NULL, BLIST_NOTQ},
4987 ++ {"MSFT", "Virtual HD", NULL, BLIST_NO_RSOC},
4988 + {"MYLEX", "DACARMRB", "*", BLIST_REPORTLUN2},
4989 + {"nCipher", "Fastness Crypto", NULL, BLIST_FORCELUN},
4990 + {"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
4991 +diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
4992 +index bc5ff6ff9c79..01a79473350a 100644
4993 +--- a/drivers/scsi/scsi_error.c
4994 ++++ b/drivers/scsi/scsi_error.c
4995 +@@ -1027,7 +1027,7 @@ retry:
4996 + }
4997 + /* signal not to enter either branch of the if () below */
4998 + timeleft = 0;
4999 +- rtn = NEEDS_RETRY;
5000 ++ rtn = FAILED;
5001 + } else {
5002 + timeleft = wait_for_completion_timeout(&done, timeout);
5003 + rtn = SUCCESS;
5004 +@@ -1067,7 +1067,7 @@ retry:
5005 + rtn = FAILED;
5006 + break;
5007 + }
5008 +- } else if (!rtn) {
5009 ++ } else if (rtn != FAILED) {
5010 + scsi_abort_eh_cmnd(scmd);
5011 + rtn = FAILED;
5012 + }
5013 +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
5014 +index 50a6e1ac8d9c..17fb0518c9c1 100644
5015 +--- a/drivers/scsi/scsi_lib.c
5016 ++++ b/drivers/scsi/scsi_lib.c
5017 +@@ -1829,7 +1829,9 @@ static int scsi_mq_prep_fn(struct request *req)
5018 +
5019 + if (scsi_host_get_prot(shost)) {
5020 + cmd->prot_sdb = (void *)sg +
5021 +- shost->sg_tablesize * sizeof(struct scatterlist);
5022 ++ min_t(unsigned int,
5023 ++ shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) *
5024 ++ sizeof(struct scatterlist);
5025 + memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
5026 +
5027 + cmd->prot_sdb->table.sgl =
5028 +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
5029 +index 733e5f759518..90af465359d6 100644
5030 +--- a/drivers/scsi/storvsc_drv.c
5031 ++++ b/drivers/scsi/storvsc_drv.c
5032 +@@ -1688,13 +1688,12 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
5033 + if (ret == -EAGAIN) {
5034 + /* no more space */
5035 +
5036 +- if (cmd_request->bounce_sgl_count) {
5037 ++ if (cmd_request->bounce_sgl_count)
5038 + destroy_bounce_buffer(cmd_request->bounce_sgl,
5039 + cmd_request->bounce_sgl_count);
5040 +
5041 +- ret = SCSI_MLQUEUE_DEVICE_BUSY;
5042 +- goto queue_error;
5043 +- }
5044 ++ ret = SCSI_MLQUEUE_DEVICE_BUSY;
5045 ++ goto queue_error;
5046 + }
5047 +
5048 + return 0;
5049 +diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
5050 +index 480f2e0ecc11..18e2601527df 100644
5051 +--- a/drivers/target/iscsi/iscsi_target_login.c
5052 ++++ b/drivers/target/iscsi/iscsi_target_login.c
5053 +@@ -281,7 +281,6 @@ static int iscsi_login_zero_tsih_s1(
5054 + {
5055 + struct iscsi_session *sess = NULL;
5056 + struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
5057 +- enum target_prot_op sup_pro_ops;
5058 + int ret;
5059 +
5060 + sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
5061 +@@ -343,9 +342,8 @@ static int iscsi_login_zero_tsih_s1(
5062 + kfree(sess);
5063 + return -ENOMEM;
5064 + }
5065 +- sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn);
5066 +
5067 +- sess->se_sess = transport_init_session(sup_pro_ops);
5068 ++ sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
5069 + if (IS_ERR(sess->se_sess)) {
5070 + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
5071 + ISCSI_LOGIN_STATUS_NO_RESOURCES);
5072 +@@ -1204,6 +1202,9 @@ old_sess_out:
5073 + conn->sock = NULL;
5074 + }
5075 +
5076 ++ if (conn->conn_transport->iscsit_wait_conn)
5077 ++ conn->conn_transport->iscsit_wait_conn(conn);
5078 ++
5079 + if (conn->conn_transport->iscsit_free_conn)
5080 + conn->conn_transport->iscsit_free_conn(conn);
5081 +
5082 +@@ -1364,6 +1365,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
5083 + }
5084 + login->zero_tsih = zero_tsih;
5085 +
5086 ++ conn->sess->se_sess->sup_prot_ops =
5087 ++ conn->conn_transport->iscsit_get_sup_prot_ops(conn);
5088 ++
5089 + tpg = conn->tpg;
5090 + if (!tpg) {
5091 + pr_err("Unable to locate struct iscsi_conn->tpg\n");
5092 +diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
5093 +index ce87ce9bdb9c..0b68c2ebce95 100644
5094 +--- a/drivers/target/iscsi/iscsi_target_util.c
5095 ++++ b/drivers/target/iscsi/iscsi_target_util.c
5096 +@@ -1358,15 +1358,15 @@ static int iscsit_do_tx_data(
5097 + struct iscsi_conn *conn,
5098 + struct iscsi_data_count *count)
5099 + {
5100 +- int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
5101 ++ int ret, iov_len;
5102 + struct kvec *iov_p;
5103 + struct msghdr msg;
5104 +
5105 + if (!conn || !conn->sock || !conn->conn_ops)
5106 + return -1;
5107 +
5108 +- if (data <= 0) {
5109 +- pr_err("Data length is: %d\n", data);
5110 ++ if (count->data_length <= 0) {
5111 ++ pr_err("Data length is: %d\n", count->data_length);
5112 + return -1;
5113 + }
5114 +
5115 +@@ -1375,20 +1375,16 @@ static int iscsit_do_tx_data(
5116 + iov_p = count->iov;
5117 + iov_len = count->iov_count;
5118 +
5119 +- while (total_tx < data) {
5120 +- tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
5121 +- (data - total_tx));
5122 +- if (tx_loop <= 0) {
5123 +- pr_debug("tx_loop: %d total_tx %d\n",
5124 +- tx_loop, total_tx);
5125 +- return tx_loop;
5126 +- }
5127 +- total_tx += tx_loop;
5128 +- pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
5129 +- tx_loop, total_tx, data);
5130 ++ ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
5131 ++ count->data_length);
5132 ++ if (ret != count->data_length) {
5133 ++		pr_err("Unexpected ret: %d, send data %d\n",
5134 ++ ret, count->data_length);
5135 ++ return -EPIPE;
5136 + }
5137 ++ pr_debug("ret: %d, sent data: %d\n", ret, count->data_length);
5138 +
5139 +- return total_tx;
5140 ++ return ret;
5141 + }
5142 +
5143 + int rx_data(
5144 +diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
5145 +index ab3ab27d49b7..0be83e788df2 100644
5146 +--- a/drivers/target/loopback/tcm_loop.c
5147 ++++ b/drivers/target/loopback/tcm_loop.c
5148 +@@ -190,7 +190,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
5149 + set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
5150 + goto out_done;
5151 + }
5152 +- tl_nexus = tl_hba->tl_nexus;
5153 ++ tl_nexus = tl_tpg->tl_nexus;
5154 + if (!tl_nexus) {
5155 + scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
5156 + " does not exist\n");
5157 +@@ -270,16 +270,26 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
5158 + * to struct scsi_device
5159 + */
5160 + static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
5161 +- struct tcm_loop_nexus *tl_nexus,
5162 + int lun, int task, enum tcm_tmreq_table tmr)
5163 + {
5164 + struct se_cmd *se_cmd = NULL;
5165 + struct se_session *se_sess;
5166 + struct se_portal_group *se_tpg;
5167 ++ struct tcm_loop_nexus *tl_nexus;
5168 + struct tcm_loop_cmd *tl_cmd = NULL;
5169 + struct tcm_loop_tmr *tl_tmr = NULL;
5170 + int ret = TMR_FUNCTION_FAILED, rc;
5171 +
5172 ++ /*
5173 ++ * Locate the tl_nexus and se_sess pointers
5174 ++ */
5175 ++ tl_nexus = tl_tpg->tl_nexus;
5176 ++ if (!tl_nexus) {
5177 ++ pr_err("Unable to perform device reset without"
5178 ++ " active I_T Nexus\n");
5179 ++ return ret;
5180 ++ }
5181 ++
5182 + tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
5183 + if (!tl_cmd) {
5184 + pr_err("Unable to allocate memory for tl_cmd\n");
5185 +@@ -295,7 +305,7 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
5186 +
5187 + se_cmd = &tl_cmd->tl_se_cmd;
5188 + se_tpg = &tl_tpg->tl_se_tpg;
5189 +- se_sess = tl_nexus->se_sess;
5190 ++ se_sess = tl_tpg->tl_nexus->se_sess;
5191 + /*
5192 + * Initialize struct se_cmd descriptor from target_core_mod infrastructure
5193 + */
5194 +@@ -340,7 +350,6 @@ release:
5195 + static int tcm_loop_abort_task(struct scsi_cmnd *sc)
5196 + {
5197 + struct tcm_loop_hba *tl_hba;
5198 +- struct tcm_loop_nexus *tl_nexus;
5199 + struct tcm_loop_tpg *tl_tpg;
5200 + int ret = FAILED;
5201 +
5202 +@@ -348,21 +357,8 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
5203 + * Locate the tcm_loop_hba_t pointer
5204 + */
5205 + tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
5206 +- /*
5207 +- * Locate the tl_nexus and se_sess pointers
5208 +- */
5209 +- tl_nexus = tl_hba->tl_nexus;
5210 +- if (!tl_nexus) {
5211 +- pr_err("Unable to perform device reset without"
5212 +- " active I_T Nexus\n");
5213 +- return FAILED;
5214 +- }
5215 +-
5216 +- /*
5217 +- * Locate the tl_tpg pointer from TargetID in sc->device->id
5218 +- */
5219 + tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
5220 +- ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
5221 ++ ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
5222 + sc->request->tag, TMR_ABORT_TASK);
5223 + return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
5224 + }
5225 +@@ -374,7 +370,6 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
5226 + static int tcm_loop_device_reset(struct scsi_cmnd *sc)
5227 + {
5228 + struct tcm_loop_hba *tl_hba;
5229 +- struct tcm_loop_nexus *tl_nexus;
5230 + struct tcm_loop_tpg *tl_tpg;
5231 + int ret = FAILED;
5232 +
5233 +@@ -382,20 +377,9 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
5234 + * Locate the tcm_loop_hba_t pointer
5235 + */
5236 + tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
5237 +- /*
5238 +- * Locate the tl_nexus and se_sess pointers
5239 +- */
5240 +- tl_nexus = tl_hba->tl_nexus;
5241 +- if (!tl_nexus) {
5242 +- pr_err("Unable to perform device reset without"
5243 +- " active I_T Nexus\n");
5244 +- return FAILED;
5245 +- }
5246 +- /*
5247 +- * Locate the tl_tpg pointer from TargetID in sc->device->id
5248 +- */
5249 + tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
5250 +- ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
5251 ++
5252 ++ ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
5253 + 0, TMR_LUN_RESET);
5254 + return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
5255 + }
5256 +@@ -1005,8 +989,8 @@ static int tcm_loop_make_nexus(
5257 + struct tcm_loop_nexus *tl_nexus;
5258 + int ret = -ENOMEM;
5259 +
5260 +- if (tl_tpg->tl_hba->tl_nexus) {
5261 +- pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
5262 ++ if (tl_tpg->tl_nexus) {
5263 ++ pr_debug("tl_tpg->tl_nexus already exists\n");
5264 + return -EEXIST;
5265 + }
5266 + se_tpg = &tl_tpg->tl_se_tpg;
5267 +@@ -1041,7 +1025,7 @@ static int tcm_loop_make_nexus(
5268 + */
5269 + __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
5270 + tl_nexus->se_sess, tl_nexus);
5271 +- tl_tpg->tl_hba->tl_nexus = tl_nexus;
5272 ++ tl_tpg->tl_nexus = tl_nexus;
5273 + pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
5274 + " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
5275 + name);
5276 +@@ -1057,12 +1041,8 @@ static int tcm_loop_drop_nexus(
5277 + {
5278 + struct se_session *se_sess;
5279 + struct tcm_loop_nexus *tl_nexus;
5280 +- struct tcm_loop_hba *tl_hba = tpg->tl_hba;
5281 +
5282 +- if (!tl_hba)
5283 +- return -ENODEV;
5284 +-
5285 +- tl_nexus = tl_hba->tl_nexus;
5286 ++ tl_nexus = tpg->tl_nexus;
5287 + if (!tl_nexus)
5288 + return -ENODEV;
5289 +
5290 +@@ -1078,13 +1058,13 @@ static int tcm_loop_drop_nexus(
5291 + }
5292 +
5293 + pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
5294 +- " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
5295 ++ " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
5296 + tl_nexus->se_sess->se_node_acl->initiatorname);
5297 + /*
5298 + * Release the SCSI I_T Nexus to the emulated SAS Target Port
5299 + */
5300 + transport_deregister_session(tl_nexus->se_sess);
5301 +- tpg->tl_hba->tl_nexus = NULL;
5302 ++ tpg->tl_nexus = NULL;
5303 + kfree(tl_nexus);
5304 + return 0;
5305 + }
5306 +@@ -1100,7 +1080,7 @@ static ssize_t tcm_loop_tpg_show_nexus(
5307 + struct tcm_loop_nexus *tl_nexus;
5308 + ssize_t ret;
5309 +
5310 +- tl_nexus = tl_tpg->tl_hba->tl_nexus;
5311 ++ tl_nexus = tl_tpg->tl_nexus;
5312 + if (!tl_nexus)
5313 + return -ENODEV;
5314 +
5315 +diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
5316 +index 54c59d0b6608..6ae49f272ba6 100644
5317 +--- a/drivers/target/loopback/tcm_loop.h
5318 ++++ b/drivers/target/loopback/tcm_loop.h
5319 +@@ -27,11 +27,6 @@ struct tcm_loop_tmr {
5320 + };
5321 +
5322 + struct tcm_loop_nexus {
5323 +- int it_nexus_active;
5324 +- /*
5325 +- * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h
5326 +- */
5327 +- struct scsi_host *sh;
5328 + /*
5329 + * Pointer to TCM session for I_T Nexus
5330 + */
5331 +@@ -51,6 +46,7 @@ struct tcm_loop_tpg {
5332 + atomic_t tl_tpg_port_count;
5333 + struct se_portal_group tl_se_tpg;
5334 + struct tcm_loop_hba *tl_hba;
5335 ++ struct tcm_loop_nexus *tl_nexus;
5336 + };
5337 +
5338 + struct tcm_loop_hba {
5339 +@@ -59,7 +55,6 @@ struct tcm_loop_hba {
5340 + struct se_hba_s *se_hba;
5341 + struct se_lun *tl_hba_lun;
5342 + struct se_port *tl_hba_lun_sep;
5343 +- struct tcm_loop_nexus *tl_nexus;
5344 + struct device dev;
5345 + struct Scsi_Host *sh;
5346 + struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
5347 +diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
5348 +index c45f9e907e44..24fa5d1999af 100644
5349 +--- a/drivers/target/target_core_device.c
5350 ++++ b/drivers/target/target_core_device.c
5351 +@@ -1169,10 +1169,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
5352 + " changed for TCM/pSCSI\n", dev);
5353 + return -EINVAL;
5354 + }
5355 +- if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
5356 ++ if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
5357 + pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
5358 +- " greater than fabric_max_sectors: %u\n", dev,
5359 +- optimal_sectors, dev->dev_attrib.fabric_max_sectors);
5360 ++ " greater than hw_max_sectors: %u\n", dev,
5361 ++ optimal_sectors, dev->dev_attrib.hw_max_sectors);
5362 + return -EINVAL;
5363 + }
5364 +
5365 +@@ -1572,7 +1572,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
5366 + DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
5367 + dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
5368 + dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
5369 +- dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
5370 +
5371 + xcopy_lun = &dev->xcopy_lun;
5372 + xcopy_lun->lun_se_dev = dev;
5373 +@@ -1613,6 +1612,7 @@ int target_configure_device(struct se_device *dev)
5374 + dev->dev_attrib.hw_max_sectors =
5375 + se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
5376 + dev->dev_attrib.hw_block_size);
5377 ++ dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
5378 +
5379 + dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
5380 + dev->creation_time = get_jiffies_64();
5381 +diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
5382 +index 72c83d98662b..f018b6a3ffbf 100644
5383 +--- a/drivers/target/target_core_file.c
5384 ++++ b/drivers/target/target_core_file.c
5385 +@@ -620,7 +620,16 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
5386 + struct fd_prot fd_prot;
5387 + sense_reason_t rc;
5388 + int ret = 0;
5389 +-
5390 ++ /*
5391 ++ * We are currently limited by the number of iovecs (2048) per
5392 ++ * single vfs_[writev,readv] call.
5393 ++ */
5394 ++ if (cmd->data_length > FD_MAX_BYTES) {
5395 ++		pr_err("FILEIO: Not able to process I/O of %u bytes due to"
5396 ++			" FD_MAX_BYTES: %u iovec count limitation\n",
5397 ++ cmd->data_length, FD_MAX_BYTES);
5398 ++ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
5399 ++ }
5400 + /*
5401 + * Call vectorized fileio functions to map struct scatterlist
5402 + * physical memory addresses to struct iovec virtual memory.
5403 +diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
5404 +index 7e6b857c6b3f..be27773ab829 100644
5405 +--- a/drivers/target/target_core_iblock.c
5406 ++++ b/drivers/target/target_core_iblock.c
5407 +@@ -123,7 +123,7 @@ static int iblock_configure_device(struct se_device *dev)
5408 + q = bdev_get_queue(bd);
5409 +
5410 + dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
5411 +- dev->dev_attrib.hw_max_sectors = UINT_MAX;
5412 ++ dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
5413 + dev->dev_attrib.hw_queue_depth = q->nr_requests;
5414 +
5415 + /*
5416 +diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
5417 +index ebe62afb957d..7a88af0e32d6 100644
5418 +--- a/drivers/target/target_core_sbc.c
5419 ++++ b/drivers/target/target_core_sbc.c
5420 +@@ -953,21 +953,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
5421 +
5422 + if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
5423 + unsigned long long end_lba;
5424 +-
5425 +- if (sectors > dev->dev_attrib.fabric_max_sectors) {
5426 +- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
5427 +- " big sectors %u exceeds fabric_max_sectors:"
5428 +- " %u\n", cdb[0], sectors,
5429 +- dev->dev_attrib.fabric_max_sectors);
5430 +- return TCM_INVALID_CDB_FIELD;
5431 +- }
5432 +- if (sectors > dev->dev_attrib.hw_max_sectors) {
5433 +- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
5434 +- " big sectors %u exceeds backend hw_max_sectors:"
5435 +- " %u\n", cdb[0], sectors,
5436 +- dev->dev_attrib.hw_max_sectors);
5437 +- return TCM_INVALID_CDB_FIELD;
5438 +- }
5439 + check_lba:
5440 + end_lba = dev->transport->get_blocks(dev) + 1;
5441 + if (cmd->t_task_lba + sectors > end_lba) {
5442 +diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
5443 +index bc286a67af7c..614005b6b08b 100644
5444 +--- a/drivers/target/target_core_spc.c
5445 ++++ b/drivers/target/target_core_spc.c
5446 +@@ -505,7 +505,6 @@ static sense_reason_t
5447 + spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
5448 + {
5449 + struct se_device *dev = cmd->se_dev;
5450 +- u32 max_sectors;
5451 + int have_tp = 0;
5452 + int opt, min;
5453 +
5454 +@@ -539,9 +538,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
5455 + /*
5456 + * Set MAXIMUM TRANSFER LENGTH
5457 + */
5458 +- max_sectors = min(dev->dev_attrib.fabric_max_sectors,
5459 +- dev->dev_attrib.hw_max_sectors);
5460 +- put_unaligned_be32(max_sectors, &buf[8]);
5461 ++ put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
5462 +
5463 + /*
5464 + * Set OPTIMAL TRANSFER LENGTH
5465 +diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
5466 +index 95cb7fc20e17..6cb78497076a 100644
5467 +--- a/drivers/thermal/intel_powerclamp.c
5468 ++++ b/drivers/thermal/intel_powerclamp.c
5469 +@@ -435,7 +435,6 @@ static int clamp_thread(void *arg)
5470 + * allowed. thus jiffies are updated properly.
5471 + */
5472 + preempt_disable();
5473 +- tick_nohz_idle_enter();
5474 + /* mwait until target jiffies is reached */
5475 + while (time_before(jiffies, target_jiffies)) {
5476 + unsigned long ecx = 1;
5477 +@@ -451,7 +450,6 @@ static int clamp_thread(void *arg)
5478 + start_critical_timings();
5479 + atomic_inc(&idle_wakeup_counter);
5480 + }
5481 +- tick_nohz_idle_exit();
5482 + preempt_enable();
5483 + }
5484 + del_timer_sync(&wakeup_timer);
5485 +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
5486 +index eaeb9a02c7fe..a28dee9d5017 100644
5487 +--- a/drivers/tty/serial/serial_core.c
5488 ++++ b/drivers/tty/serial/serial_core.c
5489 +@@ -2102,7 +2102,9 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port)
5490 + break;
5491 + }
5492 +
5493 +- dev_info(port->dev, "%s%d at %s (irq = %d, base_baud = %d) is a %s\n",
5494 ++ printk(KERN_INFO "%s%s%s%d at %s (irq = %d, base_baud = %d) is a %s\n",
5495 ++ port->dev ? dev_name(port->dev) : "",
5496 ++ port->dev ? ": " : "",
5497 + drv->dev_name,
5498 + drv->tty_driver->name_base + port->line,
5499 + address, port->irq, port->uartclk / 16, uart_type(port));
5500 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
5501 +index 546ea5431b8c..272e0928736e 100644
5502 +--- a/drivers/usb/dwc3/gadget.c
5503 ++++ b/drivers/usb/dwc3/gadget.c
5504 +@@ -882,8 +882,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
5505 +
5506 + if (i == (request->num_mapped_sgs - 1) ||
5507 + sg_is_last(s)) {
5508 +- if (list_is_last(&req->list,
5509 +- &dep->request_list))
5510 ++ if (list_empty(&dep->request_list))
5511 + last_one = true;
5512 + chain = false;
5513 + }
5514 +@@ -901,6 +900,9 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
5515 + if (last_one)
5516 + break;
5517 + }
5518 ++
5519 ++ if (last_one)
5520 ++ break;
5521 + } else {
5522 + dma = req->request.dma;
5523 + length = req->request.length;
5524 +diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
5525 +index c744e4975d74..08048613eed6 100644
5526 +--- a/drivers/usb/gadget/legacy/inode.c
5527 ++++ b/drivers/usb/gadget/legacy/inode.c
5528 +@@ -449,6 +449,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
5529 + data->name, len, (int) value);
5530 + free1:
5531 + mutex_unlock(&data->lock);
5532 ++ kfree (kbuf);
5533 + return value;
5534 + }
5535 +
5536 +diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
5537 +index 1529926e20a0..840856ca3e66 100644
5538 +--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
5539 ++++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
5540 +@@ -716,10 +716,10 @@ static int queue_dma(struct usba_udc *udc, struct usba_ep *ep,
5541 + req->using_dma = 1;
5542 + req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length)
5543 + | USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE
5544 +- | USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
5545 ++ | USBA_DMA_END_BUF_EN;
5546 +
5547 +- if (ep->is_in)
5548 +- req->ctrl |= USBA_DMA_END_BUF_EN;
5549 ++ if (!ep->is_in)
5550 ++ req->ctrl |= USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
5551 +
5552 + /*
5553 + * Add this request to the queue and submit for DMA if
5554 +@@ -828,7 +828,7 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
5555 + {
5556 + struct usba_ep *ep = to_usba_ep(_ep);
5557 + struct usba_udc *udc = ep->udc;
5558 +- struct usba_request *req = to_usba_req(_req);
5559 ++ struct usba_request *req;
5560 + unsigned long flags;
5561 + u32 status;
5562 +
5563 +@@ -837,6 +837,16 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
5564 +
5565 + spin_lock_irqsave(&udc->lock, flags);
5566 +
5567 ++ list_for_each_entry(req, &ep->queue, queue) {
5568 ++ if (&req->req == _req)
5569 ++ break;
5570 ++ }
5571 ++
5572 ++ if (&req->req != _req) {
5573 ++ spin_unlock_irqrestore(&udc->lock, flags);
5574 ++ return -EINVAL;
5575 ++ }
5576 ++
5577 + if (req->using_dma) {
5578 + /*
5579 + * If this request is currently being transferred,
5580 +@@ -1572,7 +1582,6 @@ static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
5581 + if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
5582 + DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
5583 + receive_data(ep);
5584 +- usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
5585 + }
5586 + }
5587 +
5588 +diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
5589 +index e113fd73aeae..c399606f154e 100644
5590 +--- a/drivers/usb/host/ehci-sched.c
5591 ++++ b/drivers/usb/host/ehci-sched.c
5592 +@@ -1581,6 +1581,10 @@ iso_stream_schedule (
5593 + else
5594 + next = (now + 2 + 7) & ~0x07; /* full frame cache */
5595 +
5596 ++ /* If needed, initialize last_iso_frame so that this URB will be seen */
5597 ++ if (ehci->isoc_count == 0)
5598 ++ ehci->last_iso_frame = now >> 3;
5599 ++
5600 + /*
5601 + * Use ehci->last_iso_frame as the base. There can't be any
5602 + * TDs scheduled for earlier than that.
5603 +@@ -1671,10 +1675,6 @@ iso_stream_schedule (
5604 + urb->start_frame = start & (mod - 1);
5605 + if (!stream->highspeed)
5606 + urb->start_frame >>= 3;
5607 +-
5608 +- /* Make sure scan_isoc() sees these */
5609 +- if (ehci->isoc_count == 0)
5610 +- ehci->last_iso_frame = now >> 3;
5611 + return status;
5612 +
5613 + fail:
5614 +diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
5615 +index 2f3acebb577a..f4e6b945136c 100644
5616 +--- a/drivers/usb/host/pci-quirks.c
5617 ++++ b/drivers/usb/host/pci-quirks.c
5618 +@@ -571,7 +571,8 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
5619 + {
5620 + void __iomem *base;
5621 + u32 control;
5622 +- u32 fminterval;
5623 ++ u32 fminterval = 0;
5624 ++ bool no_fminterval = false;
5625 + int cnt;
5626 +
5627 + if (!mmio_resource_enabled(pdev, 0))
5628 +@@ -581,6 +582,13 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
5629 + if (base == NULL)
5630 + return;
5631 +
5632 ++ /*
5633 ++ * ULi M5237 OHCI controller locks the whole system when accessing
5634 ++ * the OHCI_FMINTERVAL offset.
5635 ++ */
5636 ++ if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
5637 ++ no_fminterval = true;
5638 ++
5639 + control = readl(base + OHCI_CONTROL);
5640 +
5641 + /* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
5642 +@@ -619,7 +627,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
5643 + }
5644 +
5645 + /* software reset of the controller, preserving HcFmInterval */
5646 +- fminterval = readl(base + OHCI_FMINTERVAL);
5647 ++ if (!no_fminterval)
5648 ++ fminterval = readl(base + OHCI_FMINTERVAL);
5649 ++
5650 + writel(OHCI_HCR, base + OHCI_CMDSTATUS);
5651 +
5652 + /* reset requires max 10 us delay */
5653 +@@ -628,7 +638,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
5654 + break;
5655 + udelay(1);
5656 + }
5657 +- writel(fminterval, base + OHCI_FMINTERVAL);
5658 ++
5659 ++ if (!no_fminterval)
5660 ++ writel(fminterval, base + OHCI_FMINTERVAL);
5661 +
5662 + /* Now the controller is safely in SUSPEND and nothing can wake it up */
5663 + iounmap(base);
5664 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
5665 +index 142b601f9563..7f76c8a12f89 100644
5666 +--- a/drivers/usb/host/xhci-pci.c
5667 ++++ b/drivers/usb/host/xhci-pci.c
5668 +@@ -82,6 +82,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
5669 + "must be suspended extra slowly",
5670 + pdev->revision);
5671 + }
5672 ++ if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK)
5673 ++ xhci->quirks |= XHCI_BROKEN_STREAMS;
5674 + /* Fresco Logic confirms: all revisions of this chip do not
5675 + * support MSI, even though some of them claim to in their PCI
5676 + * capabilities.
5677 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
5678 +index 033b46c470bd..3bceabe109f7 100644
5679 +--- a/drivers/usb/host/xhci.c
5680 ++++ b/drivers/usb/host/xhci.c
5681 +@@ -3803,6 +3803,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
5682 + return -EINVAL;
5683 + }
5684 +
5685 ++ if (setup == SETUP_CONTEXT_ONLY) {
5686 ++ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
5687 ++ if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
5688 ++ SLOT_STATE_DEFAULT) {
5689 ++ xhci_dbg(xhci, "Slot already in default state\n");
5690 ++ return 0;
5691 ++ }
5692 ++ }
5693 ++
5694 + command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
5695 + if (!command)
5696 + return -ENOMEM;
5697 +diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
5698 +index 855793d701bb..4500610356f2 100644
5699 +--- a/drivers/usb/musb/musb_host.c
5700 ++++ b/drivers/usb/musb/musb_host.c
5701 +@@ -2663,7 +2663,6 @@ void musb_host_cleanup(struct musb *musb)
5702 + if (musb->port_mode == MUSB_PORT_MODE_GADGET)
5703 + return;
5704 + usb_remove_hcd(musb->hcd);
5705 +- musb->hcd = NULL;
5706 + }
5707 +
5708 + void musb_host_free(struct musb *musb)
5709 +diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
5710 +index 8d7fc48b1f30..29fa1c3d0089 100644
5711 +--- a/drivers/usb/serial/console.c
5712 ++++ b/drivers/usb/serial/console.c
5713 +@@ -46,6 +46,8 @@ static struct console usbcons;
5714 + * ------------------------------------------------------------
5715 + */
5716 +
5717 ++static const struct tty_operations usb_console_fake_tty_ops = {
5718 ++};
5719 +
5720 + /*
5721 + * The parsing of the command line works exactly like the
5722 +@@ -137,13 +139,17 @@ static int usb_console_setup(struct console *co, char *options)
5723 + goto reset_open_count;
5724 + }
5725 + kref_init(&tty->kref);
5726 +- tty_port_tty_set(&port->port, tty);
5727 + tty->driver = usb_serial_tty_driver;
5728 + tty->index = co->index;
5729 ++ init_ldsem(&tty->ldisc_sem);
5730 ++ INIT_LIST_HEAD(&tty->tty_files);
5731 ++ kref_get(&tty->driver->kref);
5732 ++ tty->ops = &usb_console_fake_tty_ops;
5733 + if (tty_init_termios(tty)) {
5734 + retval = -ENOMEM;
5735 +- goto free_tty;
5736 ++ goto put_tty;
5737 + }
5738 ++ tty_port_tty_set(&port->port, tty);
5739 + }
5740 +
5741 + /* only call the device specific open if this
5742 +@@ -161,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options)
5743 + serial->type->set_termios(tty, port, &dummy);
5744 +
5745 + tty_port_tty_set(&port->port, NULL);
5746 +- kfree(tty);
5747 ++ tty_kref_put(tty);
5748 + }
5749 + set_bit(ASYNCB_INITIALIZED, &port->port.flags);
5750 + }
5751 +@@ -177,8 +183,8 @@ static int usb_console_setup(struct console *co, char *options)
5752 +
5753 + fail:
5754 + tty_port_tty_set(&port->port, NULL);
5755 +- free_tty:
5756 +- kfree(tty);
5757 ++ put_tty:
5758 ++ tty_kref_put(tty);
5759 + reset_open_count:
5760 + port->port.count = 0;
5761 + usb_autopm_put_interface(serial->interface);
5762 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
5763 +index 6c4eb3cf5efd..f4c56fc1a9f6 100644
5764 +--- a/drivers/usb/serial/cp210x.c
5765 ++++ b/drivers/usb/serial/cp210x.c
5766 +@@ -120,10 +120,12 @@ static const struct usb_device_id id_table[] = {
5767 + { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
5768 + { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
5769 + { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
5770 +- { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */
5771 ++ { USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */
5772 ++ { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
5773 + { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
5774 + { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
5775 + { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
5776 ++ { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
5777 + { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
5778 + { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
5779 + { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
5780 +diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
5781 +index 077c714f1285..e07b15ed5814 100644
5782 +--- a/drivers/usb/serial/keyspan.c
5783 ++++ b/drivers/usb/serial/keyspan.c
5784 +@@ -410,6 +410,8 @@ static void usa26_instat_callback(struct urb *urb)
5785 + }
5786 + port = serial->port[msg->port];
5787 + p_priv = usb_get_serial_port_data(port);
5788 ++ if (!p_priv)
5789 ++ goto resubmit;
5790 +
5791 + /* Update handshaking pin state information */
5792 + old_dcd_state = p_priv->dcd_state;
5793 +@@ -420,7 +422,7 @@ static void usa26_instat_callback(struct urb *urb)
5794 +
5795 + if (old_dcd_state != p_priv->dcd_state)
5796 + tty_port_tty_hangup(&port->port, true);
5797 +-
5798 ++resubmit:
5799 + /* Resubmit urb so we continue receiving */
5800 + err = usb_submit_urb(urb, GFP_ATOMIC);
5801 + if (err != 0)
5802 +@@ -527,6 +529,8 @@ static void usa28_instat_callback(struct urb *urb)
5803 + }
5804 + port = serial->port[msg->port];
5805 + p_priv = usb_get_serial_port_data(port);
5806 ++ if (!p_priv)
5807 ++ goto resubmit;
5808 +
5809 + /* Update handshaking pin state information */
5810 + old_dcd_state = p_priv->dcd_state;
5811 +@@ -537,7 +541,7 @@ static void usa28_instat_callback(struct urb *urb)
5812 +
5813 + if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
5814 + tty_port_tty_hangup(&port->port, true);
5815 +-
5816 ++resubmit:
5817 + /* Resubmit urb so we continue receiving */
5818 + err = usb_submit_urb(urb, GFP_ATOMIC);
5819 + if (err != 0)
5820 +@@ -607,6 +611,8 @@ static void usa49_instat_callback(struct urb *urb)
5821 + }
5822 + port = serial->port[msg->portNumber];
5823 + p_priv = usb_get_serial_port_data(port);
5824 ++ if (!p_priv)
5825 ++ goto resubmit;
5826 +
5827 + /* Update handshaking pin state information */
5828 + old_dcd_state = p_priv->dcd_state;
5829 +@@ -617,7 +623,7 @@ static void usa49_instat_callback(struct urb *urb)
5830 +
5831 + if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
5832 + tty_port_tty_hangup(&port->port, true);
5833 +-
5834 ++resubmit:
5835 + /* Resubmit urb so we continue receiving */
5836 + err = usb_submit_urb(urb, GFP_ATOMIC);
5837 + if (err != 0)
5838 +@@ -855,6 +861,8 @@ static void usa90_instat_callback(struct urb *urb)
5839 +
5840 + port = serial->port[0];
5841 + p_priv = usb_get_serial_port_data(port);
5842 ++ if (!p_priv)
5843 ++ goto resubmit;
5844 +
5845 + /* Update handshaking pin state information */
5846 + old_dcd_state = p_priv->dcd_state;
5847 +@@ -865,7 +873,7 @@ static void usa90_instat_callback(struct urb *urb)
5848 +
5849 + if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
5850 + tty_port_tty_hangup(&port->port, true);
5851 +-
5852 ++resubmit:
5853 + /* Resubmit urb so we continue receiving */
5854 + err = usb_submit_urb(urb, GFP_ATOMIC);
5855 + if (err != 0)
5856 +@@ -926,6 +934,8 @@ static void usa67_instat_callback(struct urb *urb)
5857 +
5858 + port = serial->port[msg->port];
5859 + p_priv = usb_get_serial_port_data(port);
5860 ++ if (!p_priv)
5861 ++ goto resubmit;
5862 +
5863 + /* Update handshaking pin state information */
5864 + old_dcd_state = p_priv->dcd_state;
5865 +@@ -934,7 +944,7 @@ static void usa67_instat_callback(struct urb *urb)
5866 +
5867 + if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
5868 + tty_port_tty_hangup(&port->port, true);
5869 +-
5870 ++resubmit:
5871 + /* Resubmit urb so we continue receiving */
5872 + err = usb_submit_urb(urb, GFP_ATOMIC);
5873 + if (err != 0)
5874 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
5875 +index 7a4c21b4f676..efdcee15b520 100644
5876 +--- a/drivers/usb/serial/option.c
5877 ++++ b/drivers/usb/serial/option.c
5878 +@@ -234,6 +234,8 @@ static void option_instat_callback(struct urb *urb);
5879 +
5880 + #define QUALCOMM_VENDOR_ID 0x05C6
5881 +
5882 ++#define SIERRA_VENDOR_ID 0x1199
5883 ++
5884 + #define CMOTECH_VENDOR_ID 0x16d8
5885 + #define CMOTECH_PRODUCT_6001 0x6001
5886 + #define CMOTECH_PRODUCT_CMU_300 0x6002
5887 +@@ -512,7 +514,7 @@ enum option_blacklist_reason {
5888 + OPTION_BLACKLIST_RESERVED_IF = 2
5889 + };
5890 +
5891 +-#define MAX_BL_NUM 8
5892 ++#define MAX_BL_NUM 11
5893 + struct option_blacklist_info {
5894 + /* bitfield of interface numbers for OPTION_BLACKLIST_SENDSETUP */
5895 + const unsigned long sendsetup;
5896 +@@ -601,6 +603,11 @@ static const struct option_blacklist_info telit_le920_blacklist = {
5897 + .reserved = BIT(1) | BIT(5),
5898 + };
5899 +
5900 ++static const struct option_blacklist_info sierra_mc73xx_blacklist = {
5901 ++ .sendsetup = BIT(0) | BIT(2),
5902 ++ .reserved = BIT(8) | BIT(10) | BIT(11),
5903 ++};
5904 ++
5905 + static const struct usb_device_id option_ids[] = {
5906 + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
5907 + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
5908 +@@ -1098,6 +1105,8 @@ static const struct usb_device_id option_ids[] = {
5909 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
5910 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
5911 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
5912 ++ { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
5913 ++ .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
5914 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
5915 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
5916 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
5917 +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
5918 +index cb3e14780a7e..9c63897b3a56 100644
5919 +--- a/drivers/usb/serial/qcserial.c
5920 ++++ b/drivers/usb/serial/qcserial.c
5921 +@@ -142,7 +142,6 @@ static const struct usb_device_id id_table[] = {
5922 + {DEVICE_SWI(0x0f3d, 0x68a2)}, /* Sierra Wireless MC7700 */
5923 + {DEVICE_SWI(0x114f, 0x68a2)}, /* Sierra Wireless MC7750 */
5924 + {DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */
5925 +- {DEVICE_SWI(0x1199, 0x68c0)}, /* Sierra Wireless MC73xx */
5926 + {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */
5927 + {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */
5928 + {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */
5929 +diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
5930 +index 18a283d6de1c..1f430bb02ca1 100644
5931 +--- a/drivers/usb/storage/unusual_uas.h
5932 ++++ b/drivers/usb/storage/unusual_uas.h
5933 +@@ -68,6 +68,20 @@ UNUSUAL_DEV(0x0bc2, 0xa003, 0x0000, 0x9999,
5934 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
5935 + US_FL_NO_ATA_1X),
5936 +
5937 ++/* Reported-by: Marcin Zajączkowski <mszpak@××.pl> */
5938 ++UNUSUAL_DEV(0x0bc2, 0xa013, 0x0000, 0x9999,
5939 ++ "Seagate",
5940 ++ "Backup Plus",
5941 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
5942 ++ US_FL_NO_ATA_1X),
5943 ++
5944 ++/* Reported-by: Hans de Goede <hdegoede@××××××.com> */
5945 ++UNUSUAL_DEV(0x0bc2, 0xa0a4, 0x0000, 0x9999,
5946 ++ "Seagate",
5947 ++ "Backup Plus Desk",
5948 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
5949 ++ US_FL_NO_ATA_1X),
5950 ++
5951 + /* https://bbs.archlinux.org/viewtopic.php?id=183190 */
5952 + UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999,
5953 + "Seagate",
5954 +@@ -82,6 +96,13 @@ UNUSUAL_DEV(0x0bc2, 0xab21, 0x0000, 0x9999,
5955 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
5956 + US_FL_NO_ATA_1X),
5957 +
5958 ++/* Reported-by: G. Richard Bellamy <rbellamy@×××××××××.com> */
5959 ++UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
5960 ++ "Seagate",
5961 ++ "BUP Fast HDD",
5962 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
5963 ++ US_FL_NO_ATA_1X),
5964 ++
5965 + /* Reported-by: Claudio Bizzarri <claudio.bizzarri@×××××.com> */
5966 + UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
5967 + "JMicron",
5968 +@@ -104,6 +125,13 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
5969 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
5970 + US_FL_NO_ATA_1X),
5971 +
5972 ++/* Reported-by: Takeo Nakayama <javhera@×××.com> */
5973 ++UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
5974 ++ "JMicron",
5975 ++ "JMS566",
5976 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
5977 ++ US_FL_NO_REPORT_OPCODES),
5978 ++
5979 + /* Reported-by: Hans de Goede <hdegoede@××××××.com> */
5980 + UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
5981 + "Hitachi",
5982 +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
5983 +index 9558da3f06a0..2f8a0552d020 100644
5984 +--- a/drivers/vfio/pci/vfio_pci.c
5985 ++++ b/drivers/vfio/pci/vfio_pci.c
5986 +@@ -839,13 +839,11 @@ static const struct vfio_device_ops vfio_pci_ops = {
5987 +
5988 + static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5989 + {
5990 +- u8 type;
5991 + struct vfio_pci_device *vdev;
5992 + struct iommu_group *group;
5993 + int ret;
5994 +
5995 +- pci_read_config_byte(pdev, PCI_HEADER_TYPE, &type);
5996 +- if ((type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL)
5997 ++ if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
5998 + return -EINVAL;
5999 +
6000 + group = iommu_group_get(&pdev->dev);
6001 +diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
6002 +index a17f11850669..cb84f69f76ad 100644
6003 +--- a/drivers/vhost/scsi.c
6004 ++++ b/drivers/vhost/scsi.c
6005 +@@ -909,6 +909,23 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
6006 + return 0;
6007 + }
6008 +
6009 ++static int vhost_scsi_to_tcm_attr(int attr)
6010 ++{
6011 ++ switch (attr) {
6012 ++ case VIRTIO_SCSI_S_SIMPLE:
6013 ++ return MSG_SIMPLE_TAG;
6014 ++ case VIRTIO_SCSI_S_ORDERED:
6015 ++ return MSG_ORDERED_TAG;
6016 ++ case VIRTIO_SCSI_S_HEAD:
6017 ++ return MSG_HEAD_TAG;
6018 ++ case VIRTIO_SCSI_S_ACA:
6019 ++ return MSG_ACA_TAG;
6020 ++ default:
6021 ++ break;
6022 ++ }
6023 ++ return MSG_SIMPLE_TAG;
6024 ++}
6025 ++
6026 + static void tcm_vhost_submission_work(struct work_struct *work)
6027 + {
6028 + struct tcm_vhost_cmd *cmd =
6029 +@@ -934,9 +951,10 @@ static void tcm_vhost_submission_work(struct work_struct *work)
6030 + rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
6031 + cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
6032 + cmd->tvc_lun, cmd->tvc_exp_data_len,
6033 +- cmd->tvc_task_attr, cmd->tvc_data_direction,
6034 +- TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
6035 +- NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count);
6036 ++ vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
6037 ++ cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
6038 ++ sg_ptr, cmd->tvc_sgl_count, NULL, 0,
6039 ++ sg_prot_ptr, cmd->tvc_prot_sgl_count);
6040 + if (rc < 0) {
6041 + transport_send_check_condition_and_sense(se_cmd,
6042 + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
6043 +diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
6044 +index 900aa4ecd617..d6cab1fd9a47 100644
6045 +--- a/drivers/video/fbdev/core/fb_defio.c
6046 ++++ b/drivers/video/fbdev/core/fb_defio.c
6047 +@@ -83,9 +83,10 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
6048 + cancel_delayed_work_sync(&info->deferred_work);
6049 +
6050 + /* Run it immediately */
6051 +- err = schedule_delayed_work(&info->deferred_work, 0);
6052 ++ schedule_delayed_work(&info->deferred_work, 0);
6053 + mutex_unlock(&inode->i_mutex);
6054 +- return err;
6055 ++
6056 ++ return 0;
6057 + }
6058 + EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
6059 +
6060 +diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
6061 +index 940cd196eef5..10fbfd8ab963 100644
6062 +--- a/drivers/video/logo/logo.c
6063 ++++ b/drivers/video/logo/logo.c
6064 +@@ -21,6 +21,21 @@ static bool nologo;
6065 + module_param(nologo, bool, 0);
6066 + MODULE_PARM_DESC(nologo, "Disables startup logo");
6067 +
6068 ++/*
6069 ++ * Logos are located in initdata and will be freed in kernel_init.
6070 ++ * Use a late_initcall to mark the logos as freed and prevent any further use.
6071 ++ */
6072 ++
6073 ++static bool logos_freed;
6074 ++
6075 ++static int __init fb_logo_late_init(void)
6076 ++{
6077 ++ logos_freed = true;
6078 ++ return 0;
6079 ++}
6080 ++
6081 ++late_initcall(fb_logo_late_init);
6082 ++
6083 + /* logo's are marked __initdata. Use __init_refok to tell
6084 + * modpost that it is intended that this function uses data
6085 + * marked __initdata.
6086 +@@ -29,7 +44,7 @@ const struct linux_logo * __init_refok fb_find_logo(int depth)
6087 + {
6088 + const struct linux_logo *logo = NULL;
6089 +
6090 +- if (nologo)
6091 ++ if (nologo || logos_freed)
6092 + return NULL;
6093 +
6094 + if (depth >= 1) {
6095 +diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
6096 +index d1bb7ecfd201..61024987f97b 100644
6097 +--- a/fs/lockd/svc.c
6098 ++++ b/fs/lockd/svc.c
6099 +@@ -138,10 +138,6 @@ lockd(void *vrqstp)
6100 +
6101 + dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
6102 +
6103 +- if (!nlm_timeout)
6104 +- nlm_timeout = LOCKD_DFLT_TIMEO;
6105 +- nlmsvc_timeout = nlm_timeout * HZ;
6106 +-
6107 + /*
6108 + * The main request loop. We don't terminate until the last
6109 + * NFS mount or NFS daemon has gone away.
6110 +@@ -350,6 +346,10 @@ static struct svc_serv *lockd_create_svc(void)
6111 + printk(KERN_WARNING
6112 + "lockd_up: no pid, %d users??\n", nlmsvc_users);
6113 +
6114 ++ if (!nlm_timeout)
6115 ++ nlm_timeout = LOCKD_DFLT_TIMEO;
6116 ++ nlmsvc_timeout = nlm_timeout * HZ;
6117 ++
6118 + serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, NULL);
6119 + if (!serv) {
6120 + printk(KERN_WARNING "lockd_up: create service failed\n");
6121 +diff --git a/fs/locks.c b/fs/locks.c
6122 +index 735b8d3fa78c..59e2f905e4ff 100644
6123 +--- a/fs/locks.c
6124 ++++ b/fs/locks.c
6125 +@@ -1702,7 +1702,7 @@ static int generic_delete_lease(struct file *filp)
6126 + break;
6127 + }
6128 + trace_generic_delete_lease(inode, fl);
6129 +- if (fl)
6130 ++ if (fl && IS_LEASE(fl))
6131 + error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose);
6132 + spin_unlock(&inode->i_lock);
6133 + locks_dispose_list(&dispose);
6134 +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
6135 +index ffdb28d86cf8..9588873d4c46 100644
6136 +--- a/fs/nfs/nfs4client.c
6137 ++++ b/fs/nfs/nfs4client.c
6138 +@@ -572,20 +572,14 @@ static bool nfs4_match_clientids(struct nfs_client *a, struct nfs_client *b)
6139 + }
6140 +
6141 + /*
6142 +- * Returns true if the server owners match
6143 ++ * Returns true if the server major ids match
6144 + */
6145 + static bool
6146 +-nfs4_match_serverowners(struct nfs_client *a, struct nfs_client *b)
6147 ++nfs4_check_clientid_trunking(struct nfs_client *a, struct nfs_client *b)
6148 + {
6149 + struct nfs41_server_owner *o1 = a->cl_serverowner;
6150 + struct nfs41_server_owner *o2 = b->cl_serverowner;
6151 +
6152 +- if (o1->minor_id != o2->minor_id) {
6153 +- dprintk("NFS: --> %s server owner minor IDs do not match\n",
6154 +- __func__);
6155 +- return false;
6156 +- }
6157 +-
6158 + if (o1->major_id_sz != o2->major_id_sz)
6159 + goto out_major_mismatch;
6160 + if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0)
6161 +@@ -661,7 +655,12 @@ int nfs41_walk_client_list(struct nfs_client *new,
6162 + if (!nfs4_match_clientids(pos, new))
6163 + continue;
6164 +
6165 +- if (!nfs4_match_serverowners(pos, new))
6166 ++ /*
6167 ++ * Note that session trunking is just a special subcase of
6168 ++ * client id trunking. In either case, we want to fall back
6169 ++ * to using the existing nfs_client.
6170 ++ */
6171 ++ if (!nfs4_check_clientid_trunking(pos, new))
6172 + continue;
6173 +
6174 + atomic_inc(&pos->cl_count);
6175 +diff --git a/fs/proc/stat.c b/fs/proc/stat.c
6176 +index bf2d03f8fd3e..510413eb25b8 100644
6177 +--- a/fs/proc/stat.c
6178 ++++ b/fs/proc/stat.c
6179 +@@ -159,7 +159,7 @@ static int show_stat(struct seq_file *p, void *v)
6180 +
6181 + /* sum again ? it could be updated? */
6182 + for_each_irq_nr(j)
6183 +- seq_put_decimal_ull(p, ' ', kstat_irqs(j));
6184 ++ seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
6185 +
6186 + seq_printf(p,
6187 + "\nctxt %llu\n"
6188 +diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
6189 +index b9376cd5a187..25a822f6f000 100644
6190 +--- a/include/linux/kernel_stat.h
6191 ++++ b/include/linux/kernel_stat.h
6192 +@@ -68,6 +68,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
6193 + * Number of interrupts per specific IRQ source, since bootup
6194 + */
6195 + extern unsigned int kstat_irqs(unsigned int irq);
6196 ++extern unsigned int kstat_irqs_usr(unsigned int irq);
6197 +
6198 + /*
6199 + * Number of interrupts per cpu, since bootup
6200 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
6201 +index 74fd5d37f15a..22339b4b1c8c 100644
6202 +--- a/include/linux/netdevice.h
6203 ++++ b/include/linux/netdevice.h
6204 +@@ -998,12 +998,15 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
6205 + * Callback to use for xmit over the accelerated station. This
6206 + * is used in place of ndo_start_xmit on accelerated net
6207 + * devices.
6208 +- * bool (*ndo_gso_check) (struct sk_buff *skb,
6209 +- * struct net_device *dev);
6210 ++ * netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
6211 ++ * struct net_device *dev,
6212 ++ * netdev_features_t features);
6213 + * Called by core transmit path to determine if device is capable of
6214 +- * performing GSO on a packet. The device returns true if it is
6215 +- * able to GSO the packet, false otherwise. If the return value is
6216 +- * false the stack will do software GSO.
6217 ++ * performing offload operations on a given packet. This is to give
6218 ++ * the device an opportunity to implement any restrictions that cannot
6219 ++ * be otherwise expressed by feature flags. The check is called with
6220 ++ * the set of features that the stack has calculated and it returns
6221 ++ * those the driver believes to be appropriate.
6222 + */
6223 + struct net_device_ops {
6224 + int (*ndo_init)(struct net_device *dev);
6225 +@@ -1153,8 +1156,9 @@ struct net_device_ops {
6226 + struct net_device *dev,
6227 + void *priv);
6228 + int (*ndo_get_lock_subclass)(struct net_device *dev);
6229 +- bool (*ndo_gso_check) (struct sk_buff *skb,
6230 +- struct net_device *dev);
6231 ++ netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
6232 ++ struct net_device *dev,
6233 ++ netdev_features_t features);
6234 + };
6235 +
6236 + /**
6237 +@@ -3584,8 +3588,6 @@ static inline bool netif_needs_gso(struct net_device *dev, struct sk_buff *skb,
6238 + netdev_features_t features)
6239 + {
6240 + return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
6241 +- (dev->netdev_ops->ndo_gso_check &&
6242 +- !dev->netdev_ops->ndo_gso_check(skb, dev)) ||
6243 + unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
6244 + (skb->ip_summed != CHECKSUM_UNNECESSARY)));
6245 + }
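The ndo_features_check() hook documented above replaces the boolean ndo_gso_check(): instead of a yes/no GSO answer, the driver receives the feature set the stack computed and returns the subset it can actually honor for this packet. A minimal sketch of a driver wiring it up, assuming a hypothetical foo driver that cannot offload encapsulated traffic (illustrative only, not part of this patch):

	#include <linux/netdevice.h>

	static netdev_features_t foo_features_check(struct sk_buff *skb,
						    struct net_device *dev,
						    netdev_features_t features)
	{
		/* Drop checksum and GSO bits for encapsulated traffic; the
		 * core then falls back to software checksumming/segmentation.
		 */
		if (skb->encapsulation)
			features &= ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
		return features;
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_features_check	= foo_features_check,
	};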
6246 +diff --git a/include/net/vxlan.h b/include/net/vxlan.h
6247 +index 57cccd0052e5..903461aa5644 100644
6248 +--- a/include/net/vxlan.h
6249 ++++ b/include/net/vxlan.h
6250 +@@ -1,6 +1,9 @@
6251 + #ifndef __NET_VXLAN_H
6252 + #define __NET_VXLAN_H 1
6253 +
6254 ++#include <linux/ip.h>
6255 ++#include <linux/ipv6.h>
6256 ++#include <linux/if_vlan.h>
6257 + #include <linux/skbuff.h>
6258 + #include <linux/netdevice.h>
6259 + #include <linux/udp.h>
6260 +@@ -51,16 +54,33 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
6261 + __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
6262 + __be16 src_port, __be16 dst_port, __be32 vni, bool xnet);
6263 +
6264 +-static inline bool vxlan_gso_check(struct sk_buff *skb)
6265 ++static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
6266 ++ netdev_features_t features)
6267 + {
6268 +- if ((skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) &&
6269 ++ u8 l4_hdr = 0;
6270 ++
6271 ++ if (!skb->encapsulation)
6272 ++ return features;
6273 ++
6274 ++ switch (vlan_get_protocol(skb)) {
6275 ++ case htons(ETH_P_IP):
6276 ++ l4_hdr = ip_hdr(skb)->protocol;
6277 ++ break;
6278 ++ case htons(ETH_P_IPV6):
6279 ++ l4_hdr = ipv6_hdr(skb)->nexthdr;
6280 ++ break;
6281 ++ default:
6282 ++ return features;
6283 ++ }
6284 ++
6285 ++ if ((l4_hdr == IPPROTO_UDP) &&
6286 + (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
6287 + skb->inner_protocol != htons(ETH_P_TEB) ||
6288 + (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
6289 + sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
6290 +- return false;
6291 ++ return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
6292 +
6293 +- return true;
6294 ++ return features;
6295 + }
6296 +
6297 + /* IP header + UDP + VXLAN + Ethernet header */
6298 +diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
6299 +index 74a2a1773494..79b12b004ade 100644
6300 +--- a/include/uapi/linux/in6.h
6301 ++++ b/include/uapi/linux/in6.h
6302 +@@ -149,7 +149,7 @@ struct in6_flowlabel_req {
6303 + /*
6304 + * IPV6 socket options
6305 + */
6306 +-
6307 ++#if __UAPI_DEF_IPV6_OPTIONS
6308 + #define IPV6_ADDRFORM 1
6309 + #define IPV6_2292PKTINFO 2
6310 + #define IPV6_2292HOPOPTS 3
6311 +@@ -196,6 +196,7 @@ struct in6_flowlabel_req {
6312 +
6313 + #define IPV6_IPSEC_POLICY 34
6314 + #define IPV6_XFRM_POLICY 35
6315 ++#endif
6316 +
6317 + /*
6318 + * Multicast:
6319 +diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
6320 +index c140620dad92..e28807ad17fa 100644
6321 +--- a/include/uapi/linux/libc-compat.h
6322 ++++ b/include/uapi/linux/libc-compat.h
6323 +@@ -69,6 +69,7 @@
6324 + #define __UAPI_DEF_SOCKADDR_IN6 0
6325 + #define __UAPI_DEF_IPV6_MREQ 0
6326 + #define __UAPI_DEF_IPPROTO_V6 0
6327 ++#define __UAPI_DEF_IPV6_OPTIONS 0
6328 +
6329 + #else
6330 +
6331 +@@ -82,6 +83,7 @@
6332 + #define __UAPI_DEF_SOCKADDR_IN6 1
6333 + #define __UAPI_DEF_IPV6_MREQ 1
6334 + #define __UAPI_DEF_IPPROTO_V6 1
6335 ++#define __UAPI_DEF_IPV6_OPTIONS 1
6336 +
6337 + #endif /* _NETINET_IN_H */
6338 +
6339 +@@ -103,6 +105,7 @@
6340 + #define __UAPI_DEF_SOCKADDR_IN6 1
6341 + #define __UAPI_DEF_IPV6_MREQ 1
6342 + #define __UAPI_DEF_IPPROTO_V6 1
6343 ++#define __UAPI_DEF_IPV6_OPTIONS 1
6344 +
6345 + /* Definitions for xattr.h */
6346 + #define __UAPI_DEF_XATTR 1
6347 +diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
6348 +index 7dcfbe6771b1..b483d1909d3e 100644
6349 +--- a/include/uapi/linux/target_core_user.h
6350 ++++ b/include/uapi/linux/target_core_user.h
6351 +@@ -6,10 +6,6 @@
6352 + #include <linux/types.h>
6353 + #include <linux/uio.h>
6354 +
6355 +-#ifndef __packed
6356 +-#define __packed __attribute__((packed))
6357 +-#endif
6358 +-
6359 + #define TCMU_VERSION "1.0"
6360 +
6361 + /*
6362 +diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
6363 +index 4332d766619d..df553b0af936 100644
6364 +--- a/kernel/irq/internals.h
6365 ++++ b/kernel/irq/internals.h
6366 +@@ -78,8 +78,12 @@ extern void unmask_threaded_irq(struct irq_desc *desc);
6367 +
6368 + #ifdef CONFIG_SPARSE_IRQ
6369 + static inline void irq_mark_irq(unsigned int irq) { }
6370 ++extern void irq_lock_sparse(void);
6371 ++extern void irq_unlock_sparse(void);
6372 + #else
6373 + extern void irq_mark_irq(unsigned int irq);
6374 ++static inline void irq_lock_sparse(void) { }
6375 ++static inline void irq_unlock_sparse(void) { }
6376 + #endif
6377 +
6378 + extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
6379 +diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
6380 +index a1782f88f0af..99793b9b6d23 100644
6381 +--- a/kernel/irq/irqdesc.c
6382 ++++ b/kernel/irq/irqdesc.c
6383 +@@ -132,6 +132,16 @@ static void free_masks(struct irq_desc *desc)
6384 + static inline void free_masks(struct irq_desc *desc) { }
6385 + #endif
6386 +
6387 ++void irq_lock_sparse(void)
6388 ++{
6389 ++ mutex_lock(&sparse_irq_lock);
6390 ++}
6391 ++
6392 ++void irq_unlock_sparse(void)
6393 ++{
6394 ++ mutex_unlock(&sparse_irq_lock);
6395 ++}
6396 ++
6397 + static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
6398 + {
6399 + struct irq_desc *desc;
6400 +@@ -168,6 +178,12 @@ static void free_desc(unsigned int irq)
6401 +
6402 + unregister_irq_proc(irq, desc);
6403 +
6404 ++ /*
6405 ++ * sparse_irq_lock also protects show_interrupts() and
6406 ++ * kstat_irqs_usr(). Once we have deleted the descriptor from the
6407 ++ * sparse tree, we can free it. Accesses from proc will then fail
6408 ++ * to look up the descriptor.
6409 ++ */
6410 + mutex_lock(&sparse_irq_lock);
6411 + delete_irq_desc(irq);
6412 + mutex_unlock(&sparse_irq_lock);
6413 +@@ -574,6 +590,15 @@ void kstat_incr_irq_this_cpu(unsigned int irq)
6414 + kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
6415 + }
6416 +
6417 ++/**
6418 ++ * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
6419 ++ * @irq: The interrupt number
6420 ++ * @cpu: The cpu number
6421 ++ *
6422 ++ * Returns the sum of interrupt counts on @cpu since boot for
6423 ++ * @irq. The caller must ensure that the interrupt is not removed
6424 ++ * concurrently.
6425 ++ */
6426 + unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
6427 + {
6428 + struct irq_desc *desc = irq_to_desc(irq);
6429 +@@ -582,6 +607,14 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
6430 + *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
6431 + }
6432 +
6433 ++/**
6434 ++ * kstat_irqs - Get the statistics for an interrupt
6435 ++ * @irq: The interrupt number
6436 ++ *
6437 ++ * Returns the sum of interrupt counts on all cpus since boot for
6438 ++ * @irq. The caller must ensure that the interrupt is not removed
6439 ++ * concurrently.
6440 ++ */
6441 + unsigned int kstat_irqs(unsigned int irq)
6442 + {
6443 + struct irq_desc *desc = irq_to_desc(irq);
6444 +@@ -594,3 +627,22 @@ unsigned int kstat_irqs(unsigned int irq)
6445 + sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
6446 + return sum;
6447 + }
6448 ++
6449 ++/**
6450 ++ * kstat_irqs_usr - Get the statistics for an interrupt
6451 ++ * @irq: The interrupt number
6452 ++ *
6453 ++ * Returns the sum of interrupt counts on all cpus since boot for
6454 ++ * @irq. Contrary to kstat_irqs(), this can be called from any
6455 ++ * preemptible context. It's protected against concurrent removal of
6456 ++ * an interrupt descriptor when sparse irqs are enabled.
6457 ++ */
6458 ++unsigned int kstat_irqs_usr(unsigned int irq)
6459 ++{
6460 ++ int sum;
6461 ++
6462 ++ irq_lock_sparse();
6463 ++ sum = kstat_irqs(irq);
6464 ++ irq_unlock_sparse();
6465 ++ return sum;
6466 ++}
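As the kernel-doc above says, kstat_irqs_usr() exists so that readers outside the irq core, running in preemptible context, can sum interrupt counts without racing against free_desc(). A hedged sketch of such a caller, mirroring the /proc/stat change earlier in this patch (example_sum_all_irqs is a hypothetical helper):

	#include <linux/irqnr.h>
	#include <linux/kernel_stat.h>

	static u64 example_sum_all_irqs(void)
	{
		unsigned int irq;
		u64 sum = 0;

		/* kstat_irqs_usr() takes sparse_irq_lock around the
		 * descriptor lookup, so a concurrent free_desc() cannot
		 * pull the irq_desc out from under us.
		 */
		for_each_irq_nr(irq)
			sum += kstat_irqs_usr(irq);
		return sum;
	}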
6467 +diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
6468 +index ac1ba2f11032..9dc9bfd8a678 100644
6469 +--- a/kernel/irq/proc.c
6470 ++++ b/kernel/irq/proc.c
6471 +@@ -15,6 +15,23 @@
6472 +
6473 + #include "internals.h"
6474 +
6475 ++/*
6476 ++ * Access rules:
6477 ++ *
6478 ++ * procfs protects read/write of /proc/irq/N/ files against a
6479 ++ * concurrent free of the interrupt descriptor. remove_proc_entry()
6480 ++ * immediately prevents new reads/writes from happening and waits
6481 ++ * for already running read/write functions to complete.
6482 ++ *
6483 ++ * We remove the proc entries first and then delete the interrupt
6484 ++ * descriptor from the radix tree and free it. So it is guaranteed
6485 ++ * that irq_to_desc(N) is valid as long as the read/writes are
6486 ++ * permitted by procfs.
6487 ++ *
6488 ++ * The read from /proc/interrupts is a different problem because there
6489 ++ * is no protection. So the lookup and the access to irqdesc
6490 ++ * information must be protected by sparse_irq_lock.
6491 ++ */
6492 + static struct proc_dir_entry *root_irq_dir;
6493 +
6494 + #ifdef CONFIG_SMP
6495 +@@ -437,9 +454,10 @@ int show_interrupts(struct seq_file *p, void *v)
6496 + seq_putc(p, '\n');
6497 + }
6498 +
6499 ++ irq_lock_sparse();
6500 + desc = irq_to_desc(i);
6501 + if (!desc)
6502 +- return 0;
6503 ++ goto outsparse;
6504 +
6505 + raw_spin_lock_irqsave(&desc->lock, flags);
6506 + for_each_online_cpu(j)
6507 +@@ -479,6 +497,8 @@ int show_interrupts(struct seq_file *p, void *v)
6508 + seq_putc(p, '\n');
6509 + out:
6510 + raw_spin_unlock_irqrestore(&desc->lock, flags);
6511 ++outsparse:
6512 ++ irq_unlock_sparse();
6513 + return 0;
6514 + }
6515 + #endif
6516 +diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
6517 +index 7b5741fc4110..8c30ef7a2b70 100644
6518 +--- a/kernel/time/tick-sched.c
6519 ++++ b/kernel/time/tick-sched.c
6520 +@@ -847,7 +847,6 @@ void tick_nohz_idle_enter(void)
6521 +
6522 + local_irq_enable();
6523 + }
6524 +-EXPORT_SYMBOL_GPL(tick_nohz_idle_enter);
6525 +
6526 + /**
6527 + * tick_nohz_irq_exit - update next tick event from interrupt exit
6528 +@@ -974,7 +973,6 @@ void tick_nohz_idle_exit(void)
6529 +
6530 + local_irq_enable();
6531 + }
6532 +-EXPORT_SYMBOL_GPL(tick_nohz_idle_exit);
6533 +
6534 + static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
6535 + {
6536 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
6537 +index 31c90fec4158..124e2c702ead 100644
6538 +--- a/kernel/trace/ftrace.c
6539 ++++ b/kernel/trace/ftrace.c
6540 +@@ -2308,12 +2308,14 @@ static void ftrace_run_update_code(int command)
6541 + }
6542 +
6543 + static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
6544 +- struct ftrace_hash *old_hash)
6545 ++ struct ftrace_ops_hash *old_hash)
6546 + {
6547 + ops->flags |= FTRACE_OPS_FL_MODIFYING;
6548 +- ops->old_hash.filter_hash = old_hash;
6549 ++ ops->old_hash.filter_hash = old_hash->filter_hash;
6550 ++ ops->old_hash.notrace_hash = old_hash->notrace_hash;
6551 + ftrace_run_update_code(command);
6552 + ops->old_hash.filter_hash = NULL;
6553 ++ ops->old_hash.notrace_hash = NULL;
6554 + ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
6555 + }
6556 +
6557 +@@ -3357,7 +3359,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
6558 +
6559 + static int ftrace_probe_registered;
6560 +
6561 +-static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash)
6562 ++static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
6563 + {
6564 + int ret;
6565 + int i;
6566 +@@ -3415,6 +3417,7 @@ int
6567 + register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
6568 + void *data)
6569 + {
6570 ++ struct ftrace_ops_hash old_hash_ops;
6571 + struct ftrace_func_probe *entry;
6572 + struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
6573 + struct ftrace_hash *old_hash = *orig_hash;
6574 +@@ -3436,6 +3439,10 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
6575 +
6576 + mutex_lock(&trace_probe_ops.func_hash->regex_lock);
6577 +
6578 ++ old_hash_ops.filter_hash = old_hash;
6579 ++ /* Probes only have filters */
6580 ++ old_hash_ops.notrace_hash = NULL;
6581 ++
6582 + hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
6583 + if (!hash) {
6584 + count = -ENOMEM;
6585 +@@ -3496,7 +3503,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
6586 +
6587 + ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
6588 +
6589 +- __enable_ftrace_function_probe(old_hash);
6590 ++ __enable_ftrace_function_probe(&old_hash_ops);
6591 +
6592 + if (!ret)
6593 + free_ftrace_hash_rcu(old_hash);
6594 +@@ -3784,10 +3791,34 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
6595 + }
6596 +
6597 + static void ftrace_ops_update_code(struct ftrace_ops *ops,
6598 +- struct ftrace_hash *old_hash)
6599 ++ struct ftrace_ops_hash *old_hash)
6600 + {
6601 +- if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
6602 ++ struct ftrace_ops *op;
6603 ++
6604 ++ if (!ftrace_enabled)
6605 ++ return;
6606 ++
6607 ++ if (ops->flags & FTRACE_OPS_FL_ENABLED) {
6608 + ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
6609 ++ return;
6610 ++ }
6611 ++
6612 ++ /*
6613 ++ * If this is the shared global_ops filter, then we need to
6614 ++ * check whether another ops that shares it is enabled.
6615 ++ * If so, we still need to run the modify code.
6616 ++ */
6617 ++ if (ops->func_hash != &global_ops.local_hash)
6618 ++ return;
6619 ++
6620 ++ do_for_each_ftrace_op(op, ftrace_ops_list) {
6621 ++ if (op->func_hash == &global_ops.local_hash &&
6622 ++ op->flags & FTRACE_OPS_FL_ENABLED) {
6623 ++ ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
6624 ++ /* Only need to do this once */
6625 ++ return;
6626 ++ }
6627 ++ } while_for_each_ftrace_op(op);
6628 + }
6629 +
6630 + static int
6631 +@@ -3795,6 +3826,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
6632 + unsigned long ip, int remove, int reset, int enable)
6633 + {
6634 + struct ftrace_hash **orig_hash;
6635 ++ struct ftrace_ops_hash old_hash_ops;
6636 + struct ftrace_hash *old_hash;
6637 + struct ftrace_hash *hash;
6638 + int ret;
6639 +@@ -3831,9 +3863,11 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
6640 +
6641 + mutex_lock(&ftrace_lock);
6642 + old_hash = *orig_hash;
6643 ++ old_hash_ops.filter_hash = ops->func_hash->filter_hash;
6644 ++ old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
6645 + ret = ftrace_hash_move(ops, enable, orig_hash, hash);
6646 + if (!ret) {
6647 +- ftrace_ops_update_code(ops, old_hash);
6648 ++ ftrace_ops_update_code(ops, &old_hash_ops);
6649 + free_ftrace_hash_rcu(old_hash);
6650 + }
6651 + mutex_unlock(&ftrace_lock);
6652 +@@ -4042,6 +4076,7 @@ static void __init set_ftrace_early_filters(void)
6653 + int ftrace_regex_release(struct inode *inode, struct file *file)
6654 + {
6655 + struct seq_file *m = (struct seq_file *)file->private_data;
6656 ++ struct ftrace_ops_hash old_hash_ops;
6657 + struct ftrace_iterator *iter;
6658 + struct ftrace_hash **orig_hash;
6659 + struct ftrace_hash *old_hash;
6660 +@@ -4075,10 +4110,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
6661 +
6662 + mutex_lock(&ftrace_lock);
6663 + old_hash = *orig_hash;
6664 ++ old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
6665 ++ old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
6666 + ret = ftrace_hash_move(iter->ops, filter_hash,
6667 + orig_hash, iter->hash);
6668 + if (!ret) {
6669 +- ftrace_ops_update_code(iter->ops, old_hash);
6670 ++ ftrace_ops_update_code(iter->ops, &old_hash_ops);
6671 + free_ftrace_hash_rcu(old_hash);
6672 + }
6673 + mutex_unlock(&ftrace_lock);
6674 +diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
6675 +index 8290e0bef7ea..6dd0335ea61b 100644
6676 +--- a/lib/decompress_bunzip2.c
6677 ++++ b/lib/decompress_bunzip2.c
6678 +@@ -184,7 +184,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
6679 + if (get_bits(bd, 1))
6680 + return RETVAL_OBSOLETE_INPUT;
6681 + origPtr = get_bits(bd, 24);
6682 +- if (origPtr > dbufSize)
6683 ++ if (origPtr >= dbufSize)
6684 + return RETVAL_DATA_ERROR;
6685 + /* mapping table: if some byte values are never used (encoding things
6686 + like ascii text), the compression code removes the gaps to have fewer
6687 +diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
6688 +index fc1835c6bb40..00f9e144cc97 100644
6689 +--- a/net/batman-adv/fragmentation.c
6690 ++++ b/net/batman-adv/fragmentation.c
6691 +@@ -251,7 +251,7 @@ batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
6692 + kfree(entry);
6693 +
6694 + /* Make room for the rest of the fragments. */
6695 +- if (pskb_expand_head(skb_out, 0, size - skb->len, GFP_ATOMIC) < 0) {
6696 ++ if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
6697 + kfree_skb(skb_out);
6698 + skb_out = NULL;
6699 + goto free;
6700 +@@ -434,7 +434,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
6701 + * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
6702 + */
6703 + mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
6704 +- max_fragment_size = (mtu - header_size - ETH_HLEN);
6705 ++ max_fragment_size = mtu - header_size;
6706 + max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
6707 +
6708 + /* Don't even try to fragment, if we need more than 16 fragments */
6709 +diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
6710 +index 90cff585b37d..e0bcf9e84273 100644
6711 +--- a/net/batman-adv/gateway_client.c
6712 ++++ b/net/batman-adv/gateway_client.c
6713 +@@ -810,7 +810,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
6714 + goto out;
6715 +
6716 + gw_node = batadv_gw_node_get(bat_priv, orig_dst_node);
6717 +- if (!gw_node->bandwidth_down == 0)
6718 ++ if (!gw_node)
6719 + goto out;
6720 +
6721 + switch (atomic_read(&bat_priv->gw_mode)) {
6722 +diff --git a/net/core/dev.c b/net/core/dev.c
6723 +index 945bbd001359..84409688ff39 100644
6724 +--- a/net/core/dev.c
6725 ++++ b/net/core/dev.c
6726 +@@ -1697,6 +1697,7 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
6727 +
6728 + skb_scrub_packet(skb, true);
6729 + skb->protocol = eth_type_trans(skb, dev);
6730 ++ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
6731 +
6732 + return 0;
6733 + }
6734 +@@ -2565,7 +2566,7 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
6735 +
6736 + netdev_features_t netif_skb_features(struct sk_buff *skb)
6737 + {
6738 +- const struct net_device *dev = skb->dev;
6739 ++ struct net_device *dev = skb->dev;
6740 + netdev_features_t features = dev->features;
6741 + u16 gso_segs = skb_shinfo(skb)->gso_segs;
6742 + __be16 protocol = skb->protocol;
6743 +@@ -2573,11 +2574,21 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
6744 + if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
6745 + features &= ~NETIF_F_GSO_MASK;
6746 +
6747 +- if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
6748 +- struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
6749 +- protocol = veh->h_vlan_encapsulated_proto;
6750 +- } else if (!vlan_tx_tag_present(skb)) {
6751 +- return harmonize_features(skb, features);
6752 ++ /* If encapsulation offload request, verify we are testing
6753 ++ * hardware encapsulation features instead of standard
6754 ++ * features for the netdev
6755 ++ */
6756 ++ if (skb->encapsulation)
6757 ++ features &= dev->hw_enc_features;
6758 ++
6759 ++ if (!vlan_tx_tag_present(skb)) {
6760 ++ if (unlikely(protocol == htons(ETH_P_8021Q) ||
6761 ++ protocol == htons(ETH_P_8021AD))) {
6762 ++ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
6763 ++ protocol = veh->h_vlan_encapsulated_proto;
6764 ++ } else {
6765 ++ goto finalize;
6766 ++ }
6767 + }
6768 +
6769 + features = netdev_intersect_features(features,
6770 +@@ -2594,6 +2605,11 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
6771 + NETIF_F_HW_VLAN_CTAG_TX |
6772 + NETIF_F_HW_VLAN_STAG_TX);
6773 +
6774 ++finalize:
6775 ++ if (dev->netdev_ops->ndo_features_check)
6776 ++ features &= dev->netdev_ops->ndo_features_check(skb, dev,
6777 ++ features);
6778 ++
6779 + return harmonize_features(skb, features);
6780 + }
6781 + EXPORT_SYMBOL(netif_skb_features);
6782 +@@ -2668,19 +2684,12 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
6783 + if (unlikely(!skb))
6784 + goto out_null;
6785 +
6786 +- /* If encapsulation offload request, verify we are testing
6787 +- * hardware encapsulation features instead of standard
6788 +- * features for the netdev
6789 +- */
6790 +- if (skb->encapsulation)
6791 +- features &= dev->hw_enc_features;
6792 +-
6793 + if (netif_needs_gso(dev, skb, features)) {
6794 + struct sk_buff *segs;
6795 +
6796 + segs = skb_gso_segment(skb, features);
6797 + if (IS_ERR(segs)) {
6798 +- segs = NULL;
6799 ++ goto out_kfree_skb;
6800 + } else if (segs) {
6801 + consume_skb(skb);
6802 + skb = segs;
6803 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
6804 +index 32e31c299631..d7543d0fd744 100644
6805 +--- a/net/core/skbuff.c
6806 ++++ b/net/core/skbuff.c
6807 +@@ -4040,6 +4040,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
6808 + skb->ignore_df = 0;
6809 + skb_dst_drop(skb);
6810 + skb->mark = 0;
6811 ++ skb_init_secmark(skb);
6812 + secpath_reset(skb);
6813 + nf_reset(skb);
6814 + nf_reset_trace(skb);
6815 +diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
6816 +index dedb21e99914..2caa6ad965a6 100644
6817 +--- a/net/ipv4/geneve.c
6818 ++++ b/net/ipv4/geneve.c
6819 +@@ -165,6 +165,15 @@ static void geneve_notify_add_rx_port(struct geneve_sock *gs)
6820 + }
6821 + }
6822 +
6823 ++static void geneve_notify_del_rx_port(struct geneve_sock *gs)
6824 ++{
6825 ++ struct sock *sk = gs->sock->sk;
6826 ++ sa_family_t sa_family = sk->sk_family;
6827 ++
6828 ++ if (sa_family == AF_INET)
6829 ++ udp_del_offload(&gs->udp_offloads);
6830 ++}
6831 ++
6832 + /* Callback from net/ipv4/udp.c to receive packets */
6833 + static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
6834 + {
6835 +@@ -293,6 +302,7 @@ struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
6836 + geneve_rcv_t *rcv, void *data,
6837 + bool no_share, bool ipv6)
6838 + {
6839 ++ struct geneve_net *gn = net_generic(net, geneve_net_id);
6840 + struct geneve_sock *gs;
6841 +
6842 + gs = geneve_socket_create(net, port, rcv, data, ipv6);
6843 +@@ -302,15 +312,15 @@ struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
6844 + if (no_share) /* Return error if sharing is not allowed. */
6845 + return ERR_PTR(-EINVAL);
6846 +
6847 ++ spin_lock(&gn->sock_lock);
6848 + gs = geneve_find_sock(net, port);
6849 +- if (gs) {
6850 +- if (gs->rcv == rcv)
6851 +- atomic_inc(&gs->refcnt);
6852 +- else
6853 ++ if (gs && ((gs->rcv != rcv) ||
6854 ++ !atomic_add_unless(&gs->refcnt, 1, 0)))
6855 + gs = ERR_PTR(-EBUSY);
6856 +- } else {
6857 ++ spin_unlock(&gn->sock_lock);
6858 ++
6859 ++ if (!gs)
6860 + gs = ERR_PTR(-EINVAL);
6861 +- }
6862 +
6863 + return gs;
6864 + }
6865 +@@ -318,9 +328,17 @@ EXPORT_SYMBOL_GPL(geneve_sock_add);
6866 +
6867 + void geneve_sock_release(struct geneve_sock *gs)
6868 + {
6869 ++ struct net *net = sock_net(gs->sock->sk);
6870 ++ struct geneve_net *gn = net_generic(net, geneve_net_id);
6871 ++
6872 + if (!atomic_dec_and_test(&gs->refcnt))
6873 + return;
6874 +
6875 ++ spin_lock(&gn->sock_lock);
6876 ++ hlist_del_rcu(&gs->hlist);
6877 ++ geneve_notify_del_rx_port(gs);
6878 ++ spin_unlock(&gn->sock_lock);
6879 ++
6880 + queue_work(geneve_wq, &gs->del_work);
6881 + }
6882 + EXPORT_SYMBOL_GPL(geneve_sock_release);
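The add/release paths above now pair atomic_add_unless(&gs->refcnt, 1, 0) with the unlink under gn->sock_lock, so a socket whose refcount has already dropped to zero can never be handed out again. The underlying pattern, as an illustrative sketch reusing this file's internal types and geneve_find_sock() helper (example_get_sock is hypothetical):

	static struct geneve_sock *example_get_sock(struct geneve_net *gn,
						    struct net *net, __be16 port)
	{
		struct geneve_sock *gs;

		spin_lock(&gn->sock_lock);
		gs = geneve_find_sock(net, port);
		/* atomic_add_unless(.., 1, 0) refuses to bump a zero
		 * refcount, so an object already in geneve_sock_release()
		 * stays dead instead of being resurrected.
		 */
		if (gs && !atomic_add_unless(&gs->refcnt, 1, 0))
			gs = NULL;
		spin_unlock(&gn->sock_lock);
		return gs;
	}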
6883 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
6884 +index 12055fdbe716..69aaf0a2c424 100644
6885 +--- a/net/ipv4/ip_gre.c
6886 ++++ b/net/ipv4/ip_gre.c
6887 +@@ -252,10 +252,6 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
6888 + struct ip_tunnel *tunnel = netdev_priv(dev);
6889 + const struct iphdr *tnl_params;
6890 +
6891 +- skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
6892 +- if (IS_ERR(skb))
6893 +- goto out;
6894 +-
6895 + if (dev->header_ops) {
6896 + /* Need space for new headers */
6897 + if (skb_cow_head(skb, dev->needed_headroom -
6898 +@@ -268,6 +264,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
6899 + * to gre header.
6900 + */
6901 + skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
6902 ++ skb_reset_mac_header(skb);
6903 + } else {
6904 + if (skb_cow_head(skb, dev->needed_headroom))
6905 + goto free_skb;
6906 +@@ -275,6 +272,10 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
6907 + tnl_params = &tunnel->parms.iph;
6908 + }
6909 +
6910 ++ skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
6911 ++ if (IS_ERR(skb))
6912 ++ goto out;
6913 ++
6914 + __gre_xmit(skb, dev, tnl_params, skb->protocol);
6915 +
6916 + return NETDEV_TX_OK;
6917 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
6918 +index a3d453b94747..c2df40ba553f 100644
6919 +--- a/net/ipv4/tcp_output.c
6920 ++++ b/net/ipv4/tcp_output.c
6921 +@@ -1984,7 +1984,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
6922 + if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
6923 + break;
6924 +
6925 +- if (tso_segs == 1) {
6926 ++ if (tso_segs == 1 || !sk->sk_gso_max_segs) {
6927 + if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
6928 + (tcp_skb_is_last(sk, skb) ?
6929 + nonagle : TCP_NAGLE_PUSH))))
6930 +@@ -2020,7 +2020,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
6931 + }
6932 +
6933 + limit = mss_now;
6934 +- if (tso_segs > 1 && !tcp_urg_mode(tp))
6935 ++ if (tso_segs > 1 && sk->sk_gso_max_segs && !tcp_urg_mode(tp))
6936 + limit = tcp_mss_split_point(sk, skb, mss_now,
6937 + min_t(unsigned int,
6938 + cwnd_quota,
6939 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
6940 +index c277951d783b..c1136022d8d9 100644
6941 +--- a/net/ipv6/tcp_ipv6.c
6942 ++++ b/net/ipv6/tcp_ipv6.c
6943 +@@ -1385,6 +1385,28 @@ ipv6_pktoptions:
6944 + return 0;
6945 + }
6946 +
6947 ++static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
6948 ++ const struct tcphdr *th)
6949 ++{
6950 ++ /* This is tricky: we move IP6CB to its correct location inside
6951 ++ * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
6952 ++ * _decode_session6() uses IP6CB().
6953 ++ * barrier() makes sure the compiler won't play aliasing games.
6954 ++ */
6955 ++ memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
6956 ++ sizeof(struct inet6_skb_parm));
6957 ++ barrier();
6958 ++
6959 ++ TCP_SKB_CB(skb)->seq = ntohl(th->seq);
6960 ++ TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
6961 ++ skb->len - th->doff*4);
6962 ++ TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
6963 ++ TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
6964 ++ TCP_SKB_CB(skb)->tcp_tw_isn = 0;
6965 ++ TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
6966 ++ TCP_SKB_CB(skb)->sacked = 0;
6967 ++}
6968 ++
6969 + static int tcp_v6_rcv(struct sk_buff *skb)
6970 + {
6971 + const struct tcphdr *th;
6972 +@@ -1416,24 +1438,9 @@ static int tcp_v6_rcv(struct sk_buff *skb)
6973 +
6974 + th = tcp_hdr(skb);
6975 + hdr = ipv6_hdr(skb);
6976 +- /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
6977 +- * barrier() makes sure compiler wont play fool^Waliasing games.
6978 +- */
6979 +- memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
6980 +- sizeof(struct inet6_skb_parm));
6981 +- barrier();
6982 +-
6983 +- TCP_SKB_CB(skb)->seq = ntohl(th->seq);
6984 +- TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
6985 +- skb->len - th->doff*4);
6986 +- TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
6987 +- TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
6988 +- TCP_SKB_CB(skb)->tcp_tw_isn = 0;
6989 +- TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
6990 +- TCP_SKB_CB(skb)->sacked = 0;
6991 +
6992 + sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
6993 +- tcp_v6_iif(skb));
6994 ++ inet6_iif(skb));
6995 + if (!sk)
6996 + goto no_tcp_socket;
6997 +
6998 +@@ -1449,6 +1456,8 @@ process:
6999 + if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
7000 + goto discard_and_relse;
7001 +
7002 ++ tcp_v6_fill_cb(skb, hdr, th);
7003 ++
7004 + #ifdef CONFIG_TCP_MD5SIG
7005 + if (tcp_v6_inbound_md5_hash(sk, skb))
7006 + goto discard_and_relse;
7007 +@@ -1480,6 +1489,8 @@ no_tcp_socket:
7008 + if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
7009 + goto discard_it;
7010 +
7011 ++ tcp_v6_fill_cb(skb, hdr, th);
7012 ++
7013 + if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
7014 + csum_error:
7015 + TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
7016 +@@ -1503,6 +1514,8 @@ do_time_wait:
7017 + goto discard_it;
7018 + }
7019 +
7020 ++ tcp_v6_fill_cb(skb, hdr, th);
7021 ++
7022 + if (skb->len < (th->doff<<2)) {
7023 + inet_twsk_put(inet_twsk(sk));
7024 + goto bad_packet;
7025 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
7026 +index b6bf8e8caec7..79c965a51ab2 100644
7027 +--- a/net/netlink/af_netlink.c
7028 ++++ b/net/netlink/af_netlink.c
7029 +@@ -526,14 +526,14 @@ out:
7030 + return err;
7031 + }
7032 +
7033 +-static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
7034 ++static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
7035 + {
7036 + #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
7037 + struct page *p_start, *p_end;
7038 +
7039 + /* First page is flushed through netlink_{get,set}_status */
7040 + p_start = pgvec_to_page(hdr + PAGE_SIZE);
7041 +- p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
7042 ++ p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
7043 + while (p_start <= p_end) {
7044 + flush_dcache_page(p_start);
7045 + p_start++;
7046 +@@ -551,9 +551,9 @@ static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
7047 + static void netlink_set_status(struct nl_mmap_hdr *hdr,
7048 + enum nl_mmap_status status)
7049 + {
7050 ++ smp_mb();
7051 + hdr->nm_status = status;
7052 + flush_dcache_page(pgvec_to_page(hdr));
7053 +- smp_wmb();
7054 + }
7055 +
7056 + static struct nl_mmap_hdr *
7057 +@@ -715,24 +715,16 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
7058 + struct nl_mmap_hdr *hdr;
7059 + struct sk_buff *skb;
7060 + unsigned int maxlen;
7061 +- bool excl = true;
7062 + int err = 0, len = 0;
7063 +
7064 +- /* Netlink messages are validated by the receiver before processing.
7065 +- * In order to avoid userspace changing the contents of the message
7066 +- * after validation, the socket and the ring may only be used by a
7067 +- * single process, otherwise we fall back to copying.
7068 +- */
7069 +- if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
7070 +- atomic_read(&nlk->mapped) > 1)
7071 +- excl = false;
7072 +-
7073 + mutex_lock(&nlk->pg_vec_lock);
7074 +
7075 + ring = &nlk->tx_ring;
7076 + maxlen = ring->frame_size - NL_MMAP_HDRLEN;
7077 +
7078 + do {
7079 ++ unsigned int nm_len;
7080 ++
7081 + hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
7082 + if (hdr == NULL) {
7083 + if (!(msg->msg_flags & MSG_DONTWAIT) &&
7084 +@@ -740,35 +732,23 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
7085 + schedule();
7086 + continue;
7087 + }
7088 +- if (hdr->nm_len > maxlen) {
7089 ++
7090 ++ nm_len = ACCESS_ONCE(hdr->nm_len);
7091 ++ if (nm_len > maxlen) {
7092 + err = -EINVAL;
7093 + goto out;
7094 + }
7095 +
7096 +- netlink_frame_flush_dcache(hdr);
7097 ++ netlink_frame_flush_dcache(hdr, nm_len);
7098 +
7099 +- if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
7100 +- skb = alloc_skb_head(GFP_KERNEL);
7101 +- if (skb == NULL) {
7102 +- err = -ENOBUFS;
7103 +- goto out;
7104 +- }
7105 +- sock_hold(sk);
7106 +- netlink_ring_setup_skb(skb, sk, ring, hdr);
7107 +- NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
7108 +- __skb_put(skb, hdr->nm_len);
7109 +- netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
7110 +- atomic_inc(&ring->pending);
7111 +- } else {
7112 +- skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
7113 +- if (skb == NULL) {
7114 +- err = -ENOBUFS;
7115 +- goto out;
7116 +- }
7117 +- __skb_put(skb, hdr->nm_len);
7118 +- memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
7119 +- netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
7120 ++ skb = alloc_skb(nm_len, GFP_KERNEL);
7121 ++ if (skb == NULL) {
7122 ++ err = -ENOBUFS;
7123 ++ goto out;
7124 + }
7125 ++ __skb_put(skb, nm_len);
7126 ++ memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
7127 ++ netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
7128 +
7129 + netlink_increment_head(ring);
7130 +
7131 +@@ -814,7 +794,7 @@ static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
7132 + hdr->nm_pid = NETLINK_CB(skb).creds.pid;
7133 + hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
7134 + hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
7135 +- netlink_frame_flush_dcache(hdr);
7136 ++ netlink_frame_flush_dcache(hdr, hdr->nm_len);
7137 + netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
7138 +
7139 + NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
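The sendmsg change above is a time-of-check/time-of-use fix: hdr->nm_len lives in memory mapped writable by userspace, so it is read exactly once with ACCESS_ONCE(), validated, and every later use operates on that snapshot. The pattern in isolation, as a hedged sketch (example_copy_frame is hypothetical):

	#include <linux/compiler.h>
	#include <linux/netlink.h>
	#include <linux/string.h>

	static int example_copy_frame(void *dst, const struct nl_mmap_hdr *hdr,
				      unsigned int maxlen)
	{
		/* Single read of the user-writable length... */
		unsigned int nm_len = ACCESS_ONCE(hdr->nm_len);

		/* ...validate the snapshot... */
		if (nm_len > maxlen)
			return -EINVAL;
		/* ...and copy using only the snapshot, never hdr->nm_len. */
		memcpy(dst, (const void *)hdr + NL_MMAP_HDRLEN, nm_len);
		return nm_len;
	}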
7140 +diff --git a/net/wireless/chan.c b/net/wireless/chan.c
7141 +index 72d81e2154d5..92ae263ebbf3 100644
7142 +--- a/net/wireless/chan.c
7143 ++++ b/net/wireless/chan.c
7144 +@@ -602,7 +602,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
7145 + {
7146 + struct ieee80211_sta_ht_cap *ht_cap;
7147 + struct ieee80211_sta_vht_cap *vht_cap;
7148 +- u32 width, control_freq;
7149 ++ u32 width, control_freq, cap;
7150 +
7151 + if (WARN_ON(!cfg80211_chandef_valid(chandef)))
7152 + return false;
7153 +@@ -642,7 +642,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
7154 + return false;
7155 + break;
7156 + case NL80211_CHAN_WIDTH_80P80:
7157 +- if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))
7158 ++ cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
7159 ++ if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
7160 + return false;
7161 + case NL80211_CHAN_WIDTH_80:
7162 + if (!vht_cap->vht_supported)
7163 +@@ -653,7 +654,9 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
7164 + case NL80211_CHAN_WIDTH_160:
7165 + if (!vht_cap->vht_supported)
7166 + return false;
7167 +- if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ))
7168 ++ cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
7169 ++ if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
7170 ++ cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
7171 + return false;
7172 + prohibited_flags |= IEEE80211_CHAN_NO_160MHZ;
7173 + width = 160;
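The chan.c hunks above fix a masking bug: the SUPP_CHAN_WIDTH bits in vht_cap->cap encode a two-bit enumeration rather than independent flags, so testing one value with '&' also matches unrelated encodings. The corrected test, isolated as an illustrative sketch (example_supports_160mhz is hypothetical):

	#include <linux/ieee80211.h>
	#include <net/cfg80211.h>

	static bool example_supports_160mhz(const struct ieee80211_sta_vht_cap *vht_cap)
	{
		/* Extract the two-bit field first, then compare whole values. */
		u32 cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;

		return cap == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ ||
		       cap == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
	}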
7174 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
7175 +index 5839c85075f1..ea558e07981f 100644
7176 +--- a/net/wireless/nl80211.c
7177 ++++ b/net/wireless/nl80211.c
7178 +@@ -5799,7 +5799,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
7179 + }
7180 +
7181 + /* there was no other matchset, so the RSSI one is alone */
7182 +- if (i == 0)
7183 ++ if (i == 0 && n_match_sets)
7184 + request->match_sets[0].rssi_thold = default_match_rssi;
7185 +
7186 + request->min_rssi_thold = INT_MAX;
7187 +diff --git a/net/wireless/reg.c b/net/wireless/reg.c
7188 +index b725a31a4751..6fd53ea30193 100644
7189 +--- a/net/wireless/reg.c
7190 ++++ b/net/wireless/reg.c
7191 +@@ -1760,7 +1760,7 @@ static enum reg_request_treatment
7192 + reg_process_hint_driver(struct wiphy *wiphy,
7193 + struct regulatory_request *driver_request)
7194 + {
7195 +- const struct ieee80211_regdomain *regd;
7196 ++ const struct ieee80211_regdomain *regd, *tmp;
7197 + enum reg_request_treatment treatment;
7198 +
7199 + treatment = __reg_process_hint_driver(driver_request);
7200 +@@ -1780,7 +1780,10 @@ reg_process_hint_driver(struct wiphy *wiphy,
7201 + reg_free_request(driver_request);
7202 + return REG_REQ_IGNORE;
7203 + }
7204 ++
7205 ++ tmp = get_wiphy_regdom(wiphy);
7206 + rcu_assign_pointer(wiphy->regd, regd);
7207 ++ rcu_free_regdom(tmp);
7208 + }
7209 +
7210 +
7211 +@@ -1839,11 +1842,8 @@ __reg_process_hint_country_ie(struct wiphy *wiphy,
7212 + return REG_REQ_IGNORE;
7213 + return REG_REQ_ALREADY_SET;
7214 + }
7215 +- /*
7216 +- * Two consecutive Country IE hints on the same wiphy.
7217 +- * This should be picked up early by the driver/stack
7218 +- */
7219 +- if (WARN_ON(regdom_changes(country_ie_request->alpha2)))
7220 ++
7221 ++ if (regdom_changes(country_ie_request->alpha2))
7222 + return REG_REQ_OK;
7223 + return REG_REQ_ALREADY_SET;
7224 + }
7225 +diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
7226 +index b1c668dc6815..a609552a86dc 100644
7227 +--- a/scripts/Makefile.clean
7228 ++++ b/scripts/Makefile.clean
7229 +@@ -45,19 +45,19 @@ __clean-files := $(extra-y) $(extra-m) $(extra-) \
7230 +
7231 + __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
7232 +
7233 +-# as clean-files is given relative to the current directory, this adds
7234 +-# a $(obj) prefix, except for absolute paths
7235 ++# clean-files is given relative to the current directory, unless it
7236 ++# starts with $(objtree)/ (which means "./", so do not add "./" unless
7237 ++# you want to delete a file from the toplevel object directory).
7238 +
7239 + __clean-files := $(wildcard \
7240 +- $(addprefix $(obj)/, $(filter-out /%, $(__clean-files))) \
7241 +- $(filter /%, $(__clean-files)))
7242 ++ $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(__clean-files))) \
7243 ++ $(filter $(objtree)/%, $(__clean-files)))
7244 +
7245 +-# as clean-dirs is given relative to the current directory, this adds
7246 +-# a $(obj) prefix, except for absolute paths
7247 ++# same as clean-files
7248 +
7249 + __clean-dirs := $(wildcard \
7250 +- $(addprefix $(obj)/, $(filter-out /%, $(clean-dirs))) \
7251 +- $(filter /%, $(clean-dirs)))
7252 ++ $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(clean-dirs))) \
7253 ++ $(filter $(objtree)/%, $(clean-dirs)))
7254 +
7255 + # ==========================================================================
7256 +
7257 +diff --git a/scripts/coccinelle/misc/bugon.cocci b/scripts/coccinelle/misc/bugon.cocci
7258 +index 556456ca761c..3b7eec24fb5a 100644
7259 +--- a/scripts/coccinelle/misc/bugon.cocci
7260 ++++ b/scripts/coccinelle/misc/bugon.cocci
7261 +@@ -8,7 +8,7 @@
7262 + // Confidence: High
7263 + // Copyright: (C) 2014 Himangi Saraogi. GPLv2.
7264 + // Comments:
7265 +-// Options: --no-includes, --include-headers
7266 ++// Options: --no-includes --include-headers
7267 +
7268 + virtual patch
7269 + virtual context
7270 +diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
7271 +index c657752a420c..83bddbdb90e9 100644
7272 +--- a/sound/usb/quirks-table.h
7273 ++++ b/sound/usb/quirks-table.h
7274 +@@ -2804,133 +2804,45 @@ YAMAHA_DEVICE(0x7010, "UB99"),
7275 + }
7276 + },
7277 +
7278 +-/* Hauppauge HVR-950Q and HVR-850 */
7279 +-{
7280 +- USB_DEVICE_VENDOR_SPEC(0x2040, 0x7200),
7281 +- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
7282 +- USB_DEVICE_ID_MATCH_INT_CLASS |
7283 +- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
7284 +- .bInterfaceClass = USB_CLASS_AUDIO,
7285 +- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
7286 +- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
7287 +- .vendor_name = "Hauppauge",
7288 +- .product_name = "HVR-950Q",
7289 +- .ifnum = QUIRK_ANY_INTERFACE,
7290 +- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
7291 +- }
7292 +-},
7293 +-{
7294 +- USB_DEVICE_VENDOR_SPEC(0x2040, 0x7210),
7295 +- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
7296 +- USB_DEVICE_ID_MATCH_INT_CLASS |
7297 +- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
7298 +- .bInterfaceClass = USB_CLASS_AUDIO,
7299 +- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
7300 +- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
7301 +- .vendor_name = "Hauppauge",
7302 +- .product_name = "HVR-950Q",
7303 +- .ifnum = QUIRK_ANY_INTERFACE,
7304 +- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
7305 +- }
7306 +-},
7307 +-{
7308 +- USB_DEVICE_VENDOR_SPEC(0x2040, 0x7217),
7309 +- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
7310 +- USB_DEVICE_ID_MATCH_INT_CLASS |
7311 +- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
7312 +- .bInterfaceClass = USB_CLASS_AUDIO,
7313 +- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
7314 +- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
7315 +- .vendor_name = "Hauppauge",
7316 +- .product_name = "HVR-950Q",
7317 +- .ifnum = QUIRK_ANY_INTERFACE,
7318 +- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
7319 +- }
7320 +-},
7321 +-{
7322 +- USB_DEVICE_VENDOR_SPEC(0x2040, 0x721b),
7323 +- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
7324 +- USB_DEVICE_ID_MATCH_INT_CLASS |
7325 +- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
7326 +- .bInterfaceClass = USB_CLASS_AUDIO,
7327 +- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
7328 +- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
7329 +- .vendor_name = "Hauppauge",
7330 +- .product_name = "HVR-950Q",
7331 +- .ifnum = QUIRK_ANY_INTERFACE,
7332 +- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
7333 +- }
7334 +-},
7335 +-{
7336 +- USB_DEVICE_VENDOR_SPEC(0x2040, 0x721e),
7337 +- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
7338 +- USB_DEVICE_ID_MATCH_INT_CLASS |
7339 +- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
7340 +- .bInterfaceClass = USB_CLASS_AUDIO,
7341 +- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
7342 +- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
7343 +- .vendor_name = "Hauppauge",
7344 +- .product_name = "HVR-950Q",
7345 +- .ifnum = QUIRK_ANY_INTERFACE,
7346 +- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
7347 +- }
7348 +-},
7349 +-{
7350 +- USB_DEVICE_VENDOR_SPEC(0x2040, 0x721f),
7351 +- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
7352 +- USB_DEVICE_ID_MATCH_INT_CLASS |
7353 +- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
7354 +- .bInterfaceClass = USB_CLASS_AUDIO,
7355 +- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
7356 +- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
7357 +- .vendor_name = "Hauppauge",
7358 +- .product_name = "HVR-950Q",
7359 +- .ifnum = QUIRK_ANY_INTERFACE,
7360 +- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
7361 +- }
7362 +-},
7363 +-{
7364 +- USB_DEVICE_VENDOR_SPEC(0x2040, 0x7240),
7365 +- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
7366 +- USB_DEVICE_ID_MATCH_INT_CLASS |
7367 +- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
7368 +- .bInterfaceClass = USB_CLASS_AUDIO,
7369 +- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
7370 +- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
7371 +- .vendor_name = "Hauppauge",
7372 +- .product_name = "HVR-850",
7373 +- .ifnum = QUIRK_ANY_INTERFACE,
7374 +- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
7375 +- }
7376 +-},
7377 +-{
7378 +- USB_DEVICE_VENDOR_SPEC(0x2040, 0x7280),
7379 +- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
7380 +- USB_DEVICE_ID_MATCH_INT_CLASS |
7381 +- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
7382 +- .bInterfaceClass = USB_CLASS_AUDIO,
7383 +- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
7384 +- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
7385 +- .vendor_name = "Hauppauge",
7386 +- .product_name = "HVR-950Q",
7387 +- .ifnum = QUIRK_ANY_INTERFACE,
7388 +- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
7389 +- }
7390 +-},
7391 +-{
7392 +- USB_DEVICE_VENDOR_SPEC(0x0fd9, 0x0008),
7393 +- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
7394 +- USB_DEVICE_ID_MATCH_INT_CLASS |
7395 +- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
7396 +- .bInterfaceClass = USB_CLASS_AUDIO,
7397 +- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
7398 +- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
7399 +- .vendor_name = "Hauppauge",
7400 +- .product_name = "HVR-950Q",
7401 +- .ifnum = QUIRK_ANY_INTERFACE,
7402 +- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
7403 +- }
7404 +-},
7405 ++/*
7406 ++ * Auvitek au0828 devices with audio interface.
7407 ++ * This should be kept in sync with drivers/media/usb/au0828/au0828-cards.c
7408 ++ * Note that some boards are DVB only and don't need to be listed
7409 ++ * here. That's the case, for example, of DVICO_FUSIONHDTV7.
7410 ++ */
7411 ++
7412 ++#define AU0828_DEVICE(vid, pid, vname, pname) { \
7413 ++ USB_DEVICE_VENDOR_SPEC(vid, pid), \
7414 ++ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
7415 ++ USB_DEVICE_ID_MATCH_INT_CLASS | \
7416 ++ USB_DEVICE_ID_MATCH_INT_SUBCLASS, \
7417 ++ .bInterfaceClass = USB_CLASS_AUDIO, \
7418 ++ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, \
7419 ++ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { \
7420 ++ .vendor_name = vname, \
7421 ++ .product_name = pname, \
7422 ++ .ifnum = QUIRK_ANY_INTERFACE, \
7423 ++ .type = QUIRK_AUDIO_ALIGN_TRANSFER, \
7424 ++ } \
7425 ++}
7426 ++
7427 ++AU0828_DEVICE(0x2040, 0x7200, "Hauppauge", "HVR-950Q"),
7428 ++AU0828_DEVICE(0x2040, 0x7240, "Hauppauge", "HVR-850"),
7429 ++AU0828_DEVICE(0x2040, 0x7210, "Hauppauge", "HVR-950Q"),
7430 ++AU0828_DEVICE(0x2040, 0x7217, "Hauppauge", "HVR-950Q"),
7431 ++AU0828_DEVICE(0x2040, 0x721b, "Hauppauge", "HVR-950Q"),
7432 ++AU0828_DEVICE(0x2040, 0x721e, "Hauppauge", "HVR-950Q"),
7433 ++AU0828_DEVICE(0x2040, 0x721f, "Hauppauge", "HVR-950Q"),
7434 ++AU0828_DEVICE(0x2040, 0x7280, "Hauppauge", "HVR-950Q"),
7435 ++AU0828_DEVICE(0x0fd9, 0x0008, "Hauppauge", "HVR-950Q"),
7436 ++AU0828_DEVICE(0x2040, 0x7201, "Hauppauge", "HVR-950Q-MXL"),
7437 ++AU0828_DEVICE(0x2040, 0x7211, "Hauppauge", "HVR-950Q-MXL"),
7438 ++AU0828_DEVICE(0x2040, 0x7281, "Hauppauge", "HVR-950Q-MXL"),
7439 ++AU0828_DEVICE(0x05e1, 0x0480, "Hauppauge", "Woodbury"),
7440 ++AU0828_DEVICE(0x2040, 0x8200, "Hauppauge", "Woodbury"),
7441 ++AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"),
7442 ++AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"),
7443 ++AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
7444 +
7445 + /* Digidesign Mbox */
7446 + {