
From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Thu, 26 Jan 2017 08:51:43
Message-Id: 1485420579.94c945baf75ef5a35c3c220ddda71d0060b72aa6.alicef@gentoo
commit: 94c945baf75ef5a35c3c220ddda71d0060b72aa6
Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Jan 26 08:49:39 2017 +0000
Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Jan 26 08:49:39 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=94c945ba

Linux patch 4.9.6

0000_README | 4 +
1005_linux-4.9.6.patch | 4537 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 4541 insertions(+)

diff --git a/0000_README b/0000_README
index a0a0324..970967a 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch: 1004_linux-4.9.5.patch
From: http://www.kernel.org
Desc: Linux 4.9.5

+Patch: 1005_linux-4.9.6.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.6
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-4.9.6.patch b/1005_linux-4.9.6.patch
new file mode 100644
index 0000000..aaeaa34
--- /dev/null
+++ b/1005_linux-4.9.6.patch
@@ -0,0 +1,4537 @@
+diff --git a/Documentation/devicetree/bindings/clock/imx31-clock.txt b/Documentation/devicetree/bindings/clock/imx31-clock.txt
+index 19df842c694f..8163d565f697 100644
+--- a/Documentation/devicetree/bindings/clock/imx31-clock.txt
++++ b/Documentation/devicetree/bindings/clock/imx31-clock.txt
+@@ -77,7 +77,7 @@ Examples:
+ clks: ccm@53f80000{
+ compatible = "fsl,imx31-ccm";
+ reg = <0x53f80000 0x4000>;
+- interrupts = <0 31 0x04 0 53 0x04>;
++ interrupts = <31>, <53>;
+ #clock-cells = <1>;
+ };
+
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 37babf91f2cb..922dec8fa07e 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -3998,10 +3998,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ it if 0 is given (See Documentation/cgroup-v1/memory.txt)
+
+ swiotlb= [ARM,IA-64,PPC,MIPS,X86]
+- Format: { <int> | force }
++ Format: { <int> | force | noforce }
+ <int> -- Number of I/O TLB slabs
+ force -- force using of bounce buffers even if they
+ wouldn't be automatically used by the kernel
++ noforce -- Never use bounce buffers (for debugging)
+
+ switches= [HW,M68k]
+
+diff --git a/Makefile b/Makefile
+index 2a8af8af7b27..ef95231d1625 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
+index bd204bfa29ed..249e10190d20 100644
+--- a/arch/arc/Kconfig
++++ b/arch/arc/Kconfig
+@@ -28,7 +28,7 @@ config ARC
+ select HAVE_KPROBES
+ select HAVE_KRETPROBES
+ select HAVE_MEMBLOCK
+- select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND
++ select HAVE_MOD_ARCH_SPECIFIC
+ select HAVE_OPROFILE
+ select HAVE_PERF_EVENTS
+ select HANDLE_DOMAIN_IRQ
+diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
+index 6e91d8b339c3..567590ea8f6c 100644
+--- a/arch/arc/include/asm/module.h
++++ b/arch/arc/include/asm/module.h
+@@ -14,13 +14,13 @@
+
+ #include <asm-generic/module.h>
+
+-#ifdef CONFIG_ARC_DW2_UNWIND
+ struct mod_arch_specific {
++#ifdef CONFIG_ARC_DW2_UNWIND
+ void *unw_info;
+ int unw_sec_idx;
++#endif
+ const char *secstr;
+ };
+-#endif
+
+ #define MODULE_PROC_FAMILY "ARC700"
+
+diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
+index 42e964db2967..3d99a6091332 100644
+--- a/arch/arc/kernel/module.c
++++ b/arch/arc/kernel/module.c
+@@ -32,8 +32,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ #ifdef CONFIG_ARC_DW2_UNWIND
+ mod->arch.unw_sec_idx = 0;
+ mod->arch.unw_info = NULL;
+- mod->arch.secstr = secstr;
+ #endif
++ mod->arch.secstr = secstr;
+ return 0;
+ }
+
+@@ -113,8 +113,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
+
+ }
+
++#ifdef CONFIG_ARC_DW2_UNWIND
+ if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
+ module->arch.unw_sec_idx = tgtsec;
++#endif
+
+ return 0;
+
+diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
+index c558ba75cbcc..7037201c5e3a 100644
+--- a/arch/arm/boot/dts/Makefile
++++ b/arch/arm/boot/dts/Makefile
+@@ -485,6 +485,7 @@ dtb-$(CONFIG_ARCH_OMAP3) += \
+ am3517-evm.dtb \
+ am3517_mt_ventoux.dtb \
+ logicpd-torpedo-37xx-devkit.dtb \
++ logicpd-som-lv-37xx-devkit.dtb \
+ omap3430-sdp.dtb \
+ omap3-beagle.dtb \
+ omap3-beagle-xm.dtb \
+diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
+index 194d884c9de1..795c1467fa50 100644
+--- a/arch/arm/boot/dts/am33xx.dtsi
++++ b/arch/arm/boot/dts/am33xx.dtsi
+@@ -16,6 +16,7 @@
+ interrupt-parent = <&intc>;
+ #address-cells = <1>;
+ #size-cells = <1>;
++ chosen { };
+
+ aliases {
+ i2c0 = &i2c0;
+diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
+index a275fa956813..a20a71d9d22e 100644
+--- a/arch/arm/boot/dts/am4372.dtsi
++++ b/arch/arm/boot/dts/am4372.dtsi
+@@ -16,6 +16,7 @@
+ interrupt-parent = <&wakeupgen>;
+ #address-cells = <1>;
+ #size-cells = <1>;
++ chosen { };
+
+ memory@0 {
+ device_type = "memory";
+diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
+index 46d46d894a44..74dd21b7373c 100644
+--- a/arch/arm/boot/dts/bcm283x.dtsi
++++ b/arch/arm/boot/dts/bcm283x.dtsi
+@@ -104,7 +104,7 @@
+ reg = <0x7e104000 0x10>;
+ };
+
+- mailbox: mailbox@7e00b800 {
++ mailbox: mailbox@7e00b880 {
+ compatible = "brcm,bcm2835-mbox";
+ reg = <0x7e00b880 0x40>;
+ interrupts = <0 1>;
+diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
+index 41de15fe15a2..78492a0bbbab 100644
+--- a/arch/arm/boot/dts/da850-evm.dts
++++ b/arch/arm/boot/dts/da850-evm.dts
+@@ -99,6 +99,7 @@
+ #size-cells = <1>;
+ compatible = "m25p64";
+ spi-max-frequency = <30000000>;
++ m25p,fast-read;
+ reg = <0>;
+ partition@0 {
+ label = "U-Boot-SPL";
+diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi
+index ff90a6ce6bdc..d87efab24fa2 100644
+--- a/arch/arm/boot/dts/dm814x.dtsi
++++ b/arch/arm/boot/dts/dm814x.dtsi
+@@ -12,6 +12,7 @@
+ interrupt-parent = <&intc>;
+ #address-cells = <1>;
+ #size-cells = <1>;
++ chosen { };
+
+ aliases {
+ i2c0 = &i2c1;
+diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
+index f1e0f771ff29..cbdfbc4e4a26 100644
+--- a/arch/arm/boot/dts/dm816x.dtsi
++++ b/arch/arm/boot/dts/dm816x.dtsi
+@@ -12,6 +12,7 @@
+ interrupt-parent = <&intc>;
+ #address-cells = <1>;
+ #size-cells = <1>;
++ chosen { };
+
+ aliases {
+ i2c0 = &i2c1;
+diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
+index d4fcd68f6349..064d84f87e45 100644
+--- a/arch/arm/boot/dts/dra7.dtsi
++++ b/arch/arm/boot/dts/dra7.dtsi
+@@ -18,6 +18,7 @@
+
+ compatible = "ti,dra7xx";
+ interrupt-parent = <&crossbar_mpu>;
++ chosen { };
+
+ aliases {
+ i2c0 = &i2c1;
+@@ -1376,6 +1377,7 @@
+ phy-names = "sata-phy";
+ clocks = <&sata_ref_clk>;
+ ti,hwmods = "sata";
++ ports-implemented = <0x1>;
+ };
+
+ rtc: rtc@48838000 {
+diff --git a/arch/arm/boot/dts/imx31.dtsi b/arch/arm/boot/dts/imx31.dtsi
+index 1ce7ae94e7ad..11e9e6bd8abb 100644
+--- a/arch/arm/boot/dts/imx31.dtsi
++++ b/arch/arm/boot/dts/imx31.dtsi
+@@ -30,11 +30,11 @@
+ };
+ };
+
+- avic: avic-interrupt-controller@60000000 {
++ avic: interrupt-controller@68000000 {
+ compatible = "fsl,imx31-avic", "fsl,avic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+- reg = <0x60000000 0x100000>;
++ reg = <0x68000000 0x100000>;
+ };
+
+ soc {
+@@ -118,13 +118,6 @@
+ interrupts = <19>;
+ clocks = <&clks 25>;
+ };
+-
+- clks: ccm@53f80000{
+- compatible = "fsl,imx31-ccm";
+- reg = <0x53f80000 0x4000>;
+- interrupts = <0 31 0x04 0 53 0x04>;
+- #clock-cells = <1>;
+- };
+ };
+
+ aips@53f00000 { /* AIPS2 */
+@@ -134,6 +127,13 @@
+ reg = <0x53f00000 0x100000>;
+ ranges;
+
++ clks: ccm@53f80000{
++ compatible = "fsl,imx31-ccm";
++ reg = <0x53f80000 0x4000>;
++ interrupts = <31>, <53>;
++ #clock-cells = <1>;
++ };
++
+ gpt: timer@53f90000 {
+ compatible = "fsl,imx31-gpt";
+ reg = <0x53f90000 0x4000>;
+diff --git a/arch/arm/boot/dts/imx6q-cm-fx6.dts b/arch/arm/boot/dts/imx6q-cm-fx6.dts
+index 59bc5a4dce17..a150bca84daa 100644
+--- a/arch/arm/boot/dts/imx6q-cm-fx6.dts
++++ b/arch/arm/boot/dts/imx6q-cm-fx6.dts
+@@ -183,7 +183,6 @@
+ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
+ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
+ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
+- MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
+ >;
+ };
+
+diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
+index b0b3220a1fd9..01166ba36f27 100644
+--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
+@@ -319,8 +319,6 @@
+ compatible = "fsl,imx6q-nitrogen6_max-sgtl5000",
+ "fsl,imx-audio-sgtl5000";
+ model = "imx6q-nitrogen6_max-sgtl5000";
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_sgtl5000>;
+ ssi-controller = <&ssi1>;
+ audio-codec = <&codec>;
+ audio-routing =
+@@ -402,6 +400,8 @@
+
+ codec: sgtl5000@0a {
+ compatible = "fsl,sgtl5000";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_sgtl5000>;
+ reg = <0x0a>;
+ clocks = <&clks IMX6QDL_CLK_CKO>;
+ VDDA-supply = <&reg_2p5v>;
+diff --git a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
+index da8598402ab8..38faa90007d7 100644
+--- a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
++++ b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
+@@ -158,7 +158,7 @@
+ &mmc1 {
+ interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&mmc1_pins &mmc1_cd>;
++ pinctrl-0 = <&mmc1_pins>;
+ wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */
+ cd-gpios = <&gpio4 14 IRQ_TYPE_LEVEL_LOW>; /* gpio_110 */
+ vmmc-supply = <&vmmc1>;
+@@ -193,7 +193,8 @@
+ OMAP3_CORE1_IOPAD(0x214a, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */
+ OMAP3_CORE1_IOPAD(0x214c, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */
+ OMAP3_CORE1_IOPAD(0x214e, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */
+- OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 sdmmc1_wp*/
++ OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 */
++ OMAP3_CORE1_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_d11.gpio_110 */
+ >;
+ };
+
+@@ -242,12 +243,6 @@
+ OMAP3_WKUP_IOPAD(0x2a16, PIN_OUTPUT | PIN_OFF_OUTPUT_LOW | MUX_MODE4) /* sys_boot6.gpio_8 */
+ >;
+ };
+-
+- mmc1_cd: pinmux_mmc1_cd {
+- pinctrl-single,pins = <
+- OMAP3_WKUP_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_d11.gpio_110 */
+- >;
+- };
+ };
+
+
+diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi
+index 4f793a025a72..f1d6de8b3c19 100644
+--- a/arch/arm/boot/dts/omap2.dtsi
++++ b/arch/arm/boot/dts/omap2.dtsi
+@@ -17,6 +17,7 @@
+ interrupt-parent = <&intc>;
+ #address-cells = <1>;
+ #size-cells = <1>;
++ chosen { };
+
+ aliases {
+ serial0 = &uart1;
+diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
+index 353d818ce5a6..2008648b8c9f 100644
+--- a/arch/arm/boot/dts/omap3.dtsi
++++ b/arch/arm/boot/dts/omap3.dtsi
+@@ -17,6 +17,7 @@
+ interrupt-parent = <&intc>;
+ #address-cells = <1>;
+ #size-cells = <1>;
++ chosen { };
+
+ aliases {
+ i2c0 = &i2c1;
+diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
+index 0ced079b7ae3..9c289ddab3df 100644
+--- a/arch/arm/boot/dts/omap4.dtsi
++++ b/arch/arm/boot/dts/omap4.dtsi
+@@ -15,6 +15,7 @@
+ interrupt-parent = <&wakeupgen>;
+ #address-cells = <1>;
+ #size-cells = <1>;
++ chosen { };
+
+ aliases {
+ i2c0 = &i2c1;
+diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
+index 25262118ec3d..1d1d8e90cd80 100644
+--- a/arch/arm/boot/dts/omap5.dtsi
++++ b/arch/arm/boot/dts/omap5.dtsi
+@@ -17,6 +17,7 @@
+
+ compatible = "ti,omap5";
+ interrupt-parent = <&wakeupgen>;
++ chosen { };
+
+ aliases {
+ i2c0 = &i2c1;
+@@ -985,6 +986,7 @@
+ phy-names = "sata-phy";
+ clocks = <&sata_ref_clk>;
+ ti,hwmods = "sata";
++ ports-implemented = <0x1>;
+ };
+
+ dss: dss@58000000 {
+diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi
+index 725ecb3c5fb4..7e860d3737ff 100644
+--- a/arch/arm/boot/dts/r8a7794.dtsi
++++ b/arch/arm/boot/dts/r8a7794.dtsi
+@@ -319,7 +319,7 @@
+ "ch12";
+ clocks = <&mstp5_clks R8A7794_CLK_AUDIO_DMAC0>;
+ clock-names = "fck";
+- power-domains = <&cpg_clocks>;
++ power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
+ #dma-cells = <1>;
+ dma-channels = <13>;
+ };
+@@ -1025,8 +1025,7 @@
+ clocks = <&extal_clk &usb_extal_clk>;
+ #clock-cells = <1>;
+ clock-output-names = "main", "pll0", "pll1", "pll3",
+- "lb", "qspi", "sdh", "sd0", "z",
+- "rcan";
++ "lb", "qspi", "sdh", "sd0", "rcan";
+ #power-domain-cells = <0>;
+ };
+ /* Variable factor clocks */
+@@ -1483,7 +1482,7 @@
+ "mix.0", "mix.1",
+ "dvc.0", "dvc.1",
+ "clk_a", "clk_b", "clk_c", "clk_i";
+- power-domains = <&cpg_clocks>;
++ power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
+
+ status = "disabled";
+
+diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
+index 522b5feb4eaa..b62eaeb147aa 100644
+--- a/arch/arm/include/asm/cputype.h
++++ b/arch/arm/include/asm/cputype.h
+@@ -94,6 +94,9 @@
+ #define ARM_CPU_XSCALE_ARCH_V2 0x4000
+ #define ARM_CPU_XSCALE_ARCH_V3 0x6000
+
++/* Qualcomm implemented cores */
++#define ARM_CPU_PART_SCORPION 0x510002d0
++
+ extern unsigned int processor_id;
+
+ #ifdef CONFIG_CPU_CP15
+diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
+index b8df45883cf7..25538a935874 100644
+--- a/arch/arm/kernel/hw_breakpoint.c
++++ b/arch/arm/kernel/hw_breakpoint.c
+@@ -1066,6 +1066,22 @@ static int __init arch_hw_breakpoint_init(void)
+ return 0;
+ }
+
++ /*
++ * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
++ * whenever a WFI is issued, even if the core is not powered down, in
++ * violation of the architecture. When DBGPRSR.SPD is set, accesses to
++ * breakpoint and watchpoint registers are treated as undefined, so
++ * this results in boot time and runtime failures when these are
++ * accessed and we unexpectedly take a trap.
++ *
++ * It's not clear if/how this can be worked around, so we blacklist
++ * Scorpion CPUs to avoid these issues.
++ */
++ if (read_cpuid_part() == ARM_CPU_PART_SCORPION) {
++ pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
++ return 0;
++ }
++
+ has_ossr = core_has_os_save_restore();
+
+ /* Determine how many BRPs/WRPs are available. */
+diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
+index 22313cb53362..9af0701f7094 100644
+--- a/arch/arm/kernel/smp_tlb.c
++++ b/arch/arm/kernel/smp_tlb.c
+@@ -9,6 +9,7 @@
+ */
+ #include <linux/preempt.h>
+ #include <linux/smp.h>
++#include <linux/uaccess.h>
+
+ #include <asm/smp_plat.h>
+ #include <asm/tlbflush.h>
+@@ -40,8 +41,11 @@ static inline void ipi_flush_tlb_mm(void *arg)
+ static inline void ipi_flush_tlb_page(void *arg)
+ {
+ struct tlb_args *ta = (struct tlb_args *)arg;
++ unsigned int __ua_flags = uaccess_save_and_enable();
+
+ local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+
++ uaccess_restore(__ua_flags);
+ }
+
+ static inline void ipi_flush_tlb_kernel_page(void *arg)
+@@ -54,8 +58,11 @@ static inline void ipi_flush_tlb_kernel_page(void *arg)
+ static inline void ipi_flush_tlb_range(void *arg)
+ {
+ struct tlb_args *ta = (struct tlb_args *)arg;
++ unsigned int __ua_flags = uaccess_save_and_enable();
+
+ local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+
++ uaccess_restore(__ua_flags);
+ }
+
+ static inline void ipi_flush_tlb_kernel_range(void *arg)
+diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
+index 8538910db202..a970e7fcba9e 100644
+--- a/arch/arm/mach-ux500/pm.c
++++ b/arch/arm/mach-ux500/pm.c
+@@ -134,8 +134,8 @@ bool prcmu_pending_irq(void)
+ */
+ bool prcmu_is_cpu_in_wfi(int cpu)
+ {
+- return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 :
+- PRCM_ARM_WFI_STANDBY_WFI0;
++ return readl(PRCM_ARM_WFI_STANDBY) &
++ (cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0);
+ }
+
+ /*
+diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
+index b71086d25195..53211a0acf0f 100644
+--- a/arch/arm64/include/asm/memory.h
++++ b/arch/arm64/include/asm/memory.h
+@@ -217,7 +217,7 @@ static inline void *phys_to_virt(phys_addr_t x)
+ #define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+ #else
+ #define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
+-#define __page_to_voff(page) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
++#define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
+
+ #define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
+ #define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
+diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
+index b5c3933ed441..d1ff83dfe5de 100644
+--- a/arch/arm64/include/uapi/asm/ptrace.h
++++ b/arch/arm64/include/uapi/asm/ptrace.h
+@@ -77,6 +77,7 @@ struct user_fpsimd_state {
+ __uint128_t vregs[32];
+ __u32 fpsr;
+ __u32 fpcr;
++ __u32 __reserved[2];
+ };
+
+ struct user_hwdebug_state {
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 223d54a4d66b..79b0fe24d5b7 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -624,7 +624,7 @@ el0_inv:
+ mov x0, sp
+ mov x1, #BAD_SYNC
+ mov x2, x25
+- bl bad_mode
++ bl bad_el0_sync
+ b ret_to_user
+ ENDPROC(el0_sync)
+
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index e0c81da60f76..8eedeef375d6 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -550,6 +550,8 @@ static int hw_break_set(struct task_struct *target,
+ /* (address, ctrl) registers */
+ limit = regset->n * regset->size;
+ while (count && offset < limit) {
++ if (count < PTRACE_HBP_ADDR_SZ)
++ return -EINVAL;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
+ offset, offset + PTRACE_HBP_ADDR_SZ);
+ if (ret)
+@@ -559,6 +561,8 @@
+ return ret;
+ offset += PTRACE_HBP_ADDR_SZ;
+
++ if (!count)
++ break;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
+ offset, offset + PTRACE_HBP_CTRL_SZ);
+ if (ret)
+@@ -595,7 +599,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
+ const void *kbuf, const void __user *ubuf)
+ {
+ int ret;
+- struct user_pt_regs newregs;
++ struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
+ if (ret)
+@@ -625,7 +629,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
+ const void *kbuf, const void __user *ubuf)
+ {
+ int ret;
+- struct user_fpsimd_state newstate;
++ struct user_fpsimd_state newstate =
++ target->thread.fpsimd_state.user_fpsimd;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
+ if (ret)
+@@ -649,7 +654,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
+ const void *kbuf, const void __user *ubuf)
+ {
+ int ret;
+- unsigned long tls;
++ unsigned long tls = target->thread.tp_value;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+ if (ret)
+@@ -675,7 +680,8 @@ static int system_call_set(struct task_struct *target,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+ {
+- int syscallno, ret;
++ int syscallno = task_pt_regs(target)->syscallno;
++ int ret;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
+ if (ret)
+@@ -947,7 +953,7 @@ static int compat_tls_set(struct task_struct *target,
+ const void __user *ubuf)
+ {
+ int ret;
+- compat_ulong_t tls;
++ compat_ulong_t tls = target->thread.tp_value;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+ if (ret)
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index c9986b3e0a96..11e5eae088ab 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -596,17 +596,34 @@ const char *esr_get_class_string(u32 esr)
+ }
+
+ /*
+- * bad_mode handles the impossible case in the exception vector.
++ * bad_mode handles the impossible case in the exception vector. This is always
++ * fatal.
+ */
+ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
+ {
+- siginfo_t info;
+- void __user *pc = (void __user *)instruction_pointer(regs);
+ console_verbose();
+
+ pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
+ handler[reason], smp_processor_id(), esr,
+ esr_get_class_string(esr));
++
++ die("Oops - bad mode", regs, 0);
++ local_irq_disable();
++ panic("bad mode");
++}
++
++/*
++ * bad_el0_sync handles unexpected, but potentially recoverable synchronous
++ * exceptions taken from EL0. Unlike bad_mode, this returns.
++ */
++asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
++{
++ siginfo_t info;
++ void __user *pc = (void __user *)instruction_pointer(regs);
++ console_verbose();
++
++ pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
++ smp_processor_id(), esr, esr_get_class_string(esr));
+ __show_regs(regs);
+
+ info.si_signo = SIGILL;
+@@ -614,7 +631,10 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
+ info.si_code = ILL_ILLOPC;
+ info.si_addr = pc;
+
+- arm64_notify_die("Oops - bad mode", regs, &info, 0);
++ current->thread.fault_address = 0;
++ current->thread.fault_code = 0;
++
++ force_sig_info(info.si_signo, &info, current);
+ }
+
+ void __pte_error(const char *file, int line, unsigned long val)
+diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
+index 3f74d0d98de6..02265a589ef5 100644
+--- a/arch/arm64/mm/dma-mapping.c
++++ b/arch/arm64/mm/dma-mapping.c
+@@ -524,7 +524,8 @@ EXPORT_SYMBOL(dummy_dma_ops);
+
+ static int __init arm64_dma_init(void)
+ {
+- if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
++ if (swiotlb_force == SWIOTLB_FORCE ||
++ max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
+ swiotlb = 1;
+
+ return atomic_pool_init();
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index 212c4d1e2f26..380ebe705093 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -401,8 +401,11 @@ static void __init free_unused_memmap(void)
+ */
+ void __init mem_init(void)
+ {
+- if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
++ if (swiotlb_force == SWIOTLB_FORCE ||
++ max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
+ swiotlb_init(1);
++ else
++ swiotlb_force = SWIOTLB_NO_FORCE;
+
+ set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
+
+diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
+index c56ea8c84abb..c4ced1d01d57 100644
+--- a/arch/powerpc/include/asm/ppc-opcode.h
++++ b/arch/powerpc/include/asm/ppc-opcode.h
+@@ -157,7 +157,7 @@
+ #define PPC_INST_MCRXR 0x7c000400
+ #define PPC_INST_MCRXR_MASK 0xfc0007fe
+ #define PPC_INST_MFSPR_PVR 0x7c1f42a6
+-#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff
++#define PPC_INST_MFSPR_PVR_MASK 0xfc1ffffe
+ #define PPC_INST_MFTMR 0x7c0002dc
+ #define PPC_INST_MSGSND 0x7c00019c
+ #define PPC_INST_MSGCLR 0x7c0001dc
+@@ -174,13 +174,13 @@
+ #define PPC_INST_RFDI 0x4c00004e
+ #define PPC_INST_RFMCI 0x4c00004c
+ #define PPC_INST_MFSPR_DSCR 0x7c1102a6
+-#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
++#define PPC_INST_MFSPR_DSCR_MASK 0xfc1ffffe
+ #define PPC_INST_MTSPR_DSCR 0x7c1103a6
+-#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
++#define PPC_INST_MTSPR_DSCR_MASK 0xfc1ffffe
+ #define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6
+-#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff
++#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1ffffe
+ #define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
+-#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff
++#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1ffffe
+ #define PPC_INST_MFVSRD 0x7c000066
+ #define PPC_INST_MTVSRD 0x7c000166
+ #define PPC_INST_SLBFEE 0x7c0007a7
+diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
+index b1ec62f2cc31..5c8f12fe9721 100644
+--- a/arch/powerpc/kernel/ptrace.c
++++ b/arch/powerpc/kernel/ptrace.c
+@@ -463,6 +463,10 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
+
+ flush_fp_to_thread(target);
+
++ for (i = 0; i < 32 ; i++)
++ buf[i] = target->thread.TS_FPR(i);
++ buf[32] = target->thread.fp_state.fpscr;
++
+ /* copy to local buffer then write that out */
+ i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+ if (i)
+@@ -672,6 +676,9 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
+ flush_altivec_to_thread(target);
+ flush_vsx_to_thread(target);
+
++ for (i = 0; i < 32 ; i++)
++ buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
++
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ buf, 0, 32 * sizeof(double));
+ if (!ret)
+@@ -1019,6 +1026,10 @@ static int tm_cfpr_set(struct task_struct *target,
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+
++ for (i = 0; i < 32; i++)
++ buf[i] = target->thread.TS_CKFPR(i);
++ buf[32] = target->thread.ckfp_state.fpscr;
++
+ /* copy to local buffer then write that out */
+ i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+ if (i)
+@@ -1283,6 +1294,9 @@ static int tm_cvsx_set(struct task_struct *target,
+ flush_altivec_to_thread(target);
+ flush_vsx_to_thread(target);
+
++ for (i = 0; i < 32 ; i++)
++ buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
++
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ buf, 0, 32 * sizeof(double));
+ if (!ret)
+diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h
+index 6447dc1c3d89..929b56d47ad9 100644
+--- a/arch/powerpc/perf/power9-events-list.h
++++ b/arch/powerpc/perf/power9-events-list.h
+@@ -16,7 +16,7 @@ EVENT(PM_CYC, 0x0001e)
+ EVENT(PM_ICT_NOSLOT_CYC, 0x100f8)
+ EVENT(PM_CMPLU_STALL, 0x1e054)
+ EVENT(PM_INST_CMPL, 0x00002)
+-EVENT(PM_BRU_CMPL, 0x40060)
++EVENT(PM_BRU_CMPL, 0x10012)
+ EVENT(PM_BR_MPRED_CMPL, 0x400f6)
+
+ /* All L1 D cache load references counted at finish, gated by reject */
+diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
+index d38e86fd5720..60c57657c772 100644
+--- a/arch/powerpc/sysdev/xics/icp-opal.c
++++ b/arch/powerpc/sysdev/xics/icp-opal.c
+@@ -20,6 +20,7 @@
+ #include <asm/xics.h>
+ #include <asm/io.h>
+ #include <asm/opal.h>
++#include <asm/kvm_ppc.h>
+
+ static void icp_opal_teardown_cpu(void)
+ {
+@@ -39,7 +40,26 @@ static void icp_opal_flush_ipi(void)
+ * Should we be flagging idle loop instead?
+ * Or creating some task to be scheduled?
+ */
+- opal_int_eoi((0x00 << 24) | XICS_IPI);
++ if (opal_int_eoi((0x00 << 24) | XICS_IPI) > 0)
++ force_external_irq_replay();
++}
++
++static unsigned int icp_opal_get_xirr(void)
++{
++ unsigned int kvm_xirr;
++ __be32 hw_xirr;
++ int64_t rc;
++
++ /* Handle an interrupt latched by KVM first */
++ kvm_xirr = kvmppc_get_xics_latch();
++ if (kvm_xirr)
++ return kvm_xirr;
++
++ /* Then ask OPAL */
++ rc = opal_int_get_xirr(&hw_xirr, false);
++ if (rc < 0)
++ return 0;
++ return be32_to_cpu(hw_xirr);
+ }
+
+ static unsigned int icp_opal_get_irq(void)
+@@ -47,12 +67,8 @@ static unsigned int icp_opal_get_irq(void)
+ unsigned int xirr;
+ unsigned int vec;
+ unsigned int irq;
+- int64_t rc;
+
+- rc = opal_int_get_xirr(&xirr, false);
+- if (rc < 0)
+- return 0;
+- xirr = be32_to_cpu(xirr);
++ xirr = icp_opal_get_xirr();
+ vec = xirr & 0x00ffffff;
+ if (vec == XICS_IRQ_SPURIOUS)
+ return 0;
+@@ -67,7 +83,8 @@ static unsigned int icp_opal_get_irq(void)
+ xics_mask_unknown_vec(vec);
+
+ /* We might learn about it later, so EOI it */
+- opal_int_eoi(xirr);
++ if (opal_int_eoi(xirr) > 0)
++ force_external_irq_replay();
+
+ return 0;
+ }
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 9c7a1ecfe6bd..47a1de77b18d 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -916,7 +916,7 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
+ memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
+ S390_ARCH_FAC_LIST_SIZE_BYTE);
+ memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
+- S390_ARCH_FAC_LIST_SIZE_BYTE);
++ sizeof(S390_lowcore.stfle_fac_list));
+ if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
+ ret = -EFAULT;
+ kfree(mach);
+@@ -1437,7 +1437,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+
+ /* Populate the facility mask initially. */
+ memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
+- S390_ARCH_FAC_LIST_SIZE_BYTE);
++ sizeof(S390_lowcore.stfle_fac_list));
+ for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
+ if (i < kvm_s390_fac_list_mask_size())
+ kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 48e6d84f173e..3d8ff40ecc6f 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1876,6 +1876,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
+ .irq_ack = irq_chip_ack_parent,
+ .irq_eoi = ioapic_ack_level,
+ .irq_set_affinity = ioapic_set_affinity,
++ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+ };
+
+@@ -1887,6 +1888,7 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
+ .irq_ack = irq_chip_ack_parent,
+ .irq_eoi = ioapic_ir_ack_level,
+ .irq_set_affinity = ioapic_set_affinity,
++ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+ };
+
+diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
+index b47edb8f5256..8da13d4e77cc 100644
+--- a/arch/x86/kernel/pci-swiotlb.c
++++ b/arch/x86/kernel/pci-swiotlb.c
+@@ -70,7 +70,7 @@ int __init pci_swiotlb_detect_override(void)
+ {
+ int use_swiotlb = swiotlb | swiotlb_force;
+
+- if (swiotlb_force)
++ if (swiotlb_force == SWIOTLB_FORCE)
+ swiotlb = 1;
+
+ return use_swiotlb;
+diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
+index 3cd69832d7f4..3961103e9176 100644
+--- a/arch/x86/pci/acpi.c
++++ b/arch/x86/pci/acpi.c
+@@ -114,6 +114,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
+ DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
+ },
+ },
++ /* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */
++ {
++ .callback = set_nouse_crs,
++ .ident = "Supermicro X8DTH",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"),
++ DMI_MATCH(DMI_BIOS_VERSION, "2.0a"),
++ },
++ },
+
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */
+ {
+diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
+index 0e98e5d241d0..5f8b4b0302b6 100644
+--- a/arch/x86/xen/pci-swiotlb-xen.c
++++ b/arch/x86/xen/pci-swiotlb-xen.c
+@@ -49,7 +49,7 @@ int __init pci_xen_swiotlb_detect(void)
+ * activate this IOMMU. If running as PV privileged, activate it
+ * irregardless.
+ */
+- if ((xen_initial_domain() || swiotlb || swiotlb_force))
++ if (xen_initial_domain() || swiotlb || swiotlb_force == SWIOTLB_FORCE)
+ xen_swiotlb = 1;
+
+ /* If we are running under Xen, we MUST disable the native SWIOTLB.
+diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
+index 8f3488b80896..7f6fed9f0703 100644
+--- a/drivers/clocksource/exynos_mct.c
++++ b/drivers/clocksource/exynos_mct.c
+@@ -495,6 +495,7 @@ static int exynos4_mct_dying_cpu(unsigned int cpu)
+ if (mct_int_type == MCT_INT_SPI) {
+ if (evt->irq != -1)
+ disable_irq_nosync(evt->irq);
++ exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
+ } else {
+ disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
+ }
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index bf3ea7603a58..712592cef1a2 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -593,11 +593,16 @@ struct devfreq *devfreq_add_device(struct device *dev,
+ list_add(&devfreq->node, &devfreq_list);
+
+ governor = find_devfreq_governor(devfreq->governor_name);
+- if (!IS_ERR(governor))
+- devfreq->governor = governor;
+- if (devfreq->governor)
+- err = devfreq->governor->event_handler(devfreq,
+- DEVFREQ_GOV_START, NULL);
++ if (IS_ERR(governor)) {
++ dev_err(dev, "%s: Unable to find governor for the device\n",
++ __func__);
++ err = PTR_ERR(governor);
++ goto err_init;
++ }
++
++ devfreq->governor = governor;
++ err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
++ NULL);
+ if (err) {
+ dev_err(dev, "%s: Unable to start governor for the device\n",
+ __func__);
+diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
+index 29866f7e6d7e..1b21bb60e797 100644
+--- a/drivers/devfreq/exynos-bus.c
++++ b/drivers/devfreq/exynos-bus.c
+@@ -498,7 +498,7 @@ static int exynos_bus_probe(struct platform_device *pdev)
+ if (IS_ERR(bus->devfreq)) {
+ dev_err(dev,
+ "failed to add devfreq dev with passive governor\n");
+- ret = -EPROBE_DEFER;
++ ret = PTR_ERR(bus->devfreq);
+ goto err;
+ }
+
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index 030fe05ed43b..9f3dbc8c63d2 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -448,6 +448,9 @@ struct dma_pl330_chan {
+
+ /* for cyclic capability */
+ bool cyclic;
++
++ /* for runtime pm tracking */
++ bool active;
+ };
+
+ struct pl330_dmac {
+@@ -2031,6 +2034,7 @@ static void pl330_tasklet(unsigned long data)
+ _stop(pch->thread);
+ spin_unlock(&pch->thread->dmac->lock);
+ power_down = true;
++ pch->active = false;
+ } else {
+ /* Make sure the PL330 Channel thread is active */
+ spin_lock(&pch->thread->dmac->lock);
+@@ -2050,6 +2054,7 @@ static void pl330_tasklet(unsigned long data)
+ desc->status = PREP;
+ list_move_tail(&desc->node, &pch->work_list);
+ if (power_down) {
++ pch->active = true;
+ spin_lock(&pch->thread->dmac->lock);
+ _start(pch->thread);
+ spin_unlock(&pch->thread->dmac->lock);
+@@ -2164,6 +2169,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
+ unsigned long flags;
+ struct pl330_dmac *pl330 = pch->dmac;
+ LIST_HEAD(list);
++ bool power_down = false;
+
+ pm_runtime_get_sync(pl330->ddma.dev);
+ spin_lock_irqsave(&pch->lock, flags);
+@@ -2174,6 +2180,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
+ pch->thread->req[0].desc = NULL;
+ pch->thread->req[1].desc = NULL;
+ pch->thread->req_running = -1;
++ power_down = pch->active;
++ pch->active = false;
+
+ /* Mark all desc done */
+ list_for_each_entry(desc, &pch->submitted_list, node) {
+@@ -2191,6 +2199,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
+ list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
+ spin_unlock_irqrestore(&pch->lock, flags);
+ pm_runtime_mark_last_busy(pl330->ddma.dev);
++ if (power_down)
++ pm_runtime_put_autosuspend(pl330->ddma.dev);
+ pm_runtime_put_autosuspend(pl330->ddma.dev);
+
+ return 0;
+@@ -2350,6 +2360,7 @@ static void pl330_issue_pending(struct dma_chan *chan)
+ * updated on work_list emptiness status.
+ */
+ WARN_ON(list_empty(&pch->submitted_list));
++ pch->active = true;
+ pm_runtime_get_sync(pch->dmac->ddma.dev);
+ }
+ list_splice_tail_init(&pch->submitted_list, &pch->work_list);
+diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
+index 2e441d0ccd79..4c357d475465 100644
+--- a/drivers/dma/sh/rcar-dmac.c
++++ b/drivers/dma/sh/rcar-dmac.c
+@@ -986,6 +986,7 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
+ {
+ struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+ struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
++ struct rcar_dmac_chan_map *map = &rchan->map;
+ struct rcar_dmac_desc_page *page, *_page;
+ struct rcar_dmac_desc *desc;
+ LIST_HEAD(list);
+@@ -1019,6 +1020,13 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
+ free_page((unsigned long)page);
+ }
+
++ /* Remove slave mapping if present. */
++ if (map->slave.xfer_size) {
++ dma_unmap_resource(chan->device->dev, map->addr,
++ map->slave.xfer_size, map->dir, 0);
++ map->slave.xfer_size = 0;
++ }
++
+ pm_runtime_put(chan->device->dev);
+ }
+
+diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
+index 717704e9ae07..c0303f61c26a 100644
+--- a/drivers/hid/hid-corsair.c
++++ b/drivers/hid/hid-corsair.c
+@@ -148,26 +148,36 @@ static enum led_brightness k90_backlight_get(struct led_classdev *led_cdev)
+ struct usb_interface *usbif = to_usb_interface(dev->parent);
+ struct usb_device *usbdev = interface_to_usbdev(usbif);
+ int brightness;
+- char data[8];
++ char *data;
++
++ data = kmalloc(8, GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
+
+ ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
+ K90_REQUEST_STATUS,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, 0, 0, data, 8,
+ USB_CTRL_SET_TIMEOUT);
+- if (ret < 0) {
++ if (ret < 5) {
+ dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
+ ret);
+- return -EIO;
++ ret = -EIO;
++ goto out;
+ }
+ brightness = data[4];
+ if (brightness < 0 || brightness > 3) {
+ dev_warn(dev,
+ "Read invalid backlight brightness: %02hhx.\n",
+ data[4]);
+- return -EIO;
++ ret = -EIO;
++ goto out;
+ }
+- return brightness;
++ ret = brightness;
++out:
++ kfree(data);
++
++ return ret;
+ }
+
+ static enum led_brightness k90_record_led_get(struct led_classdev *led_cdev)
+@@ -253,17 +263,22 @@ static ssize_t k90_show_macro_mode(struct device *dev,
+ struct usb_interface *usbif = to_usb_interface(dev->parent);
+ struct usb_device *usbdev = interface_to_usbdev(usbif);
+ const char *macro_mode;
+- char data[8];
++ char *data;
++
++ data = kmalloc(2, GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
+
+ ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
+ K90_REQUEST_GET_MODE,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, 0, 0, data, 2,
+ USB_CTRL_SET_TIMEOUT);
+- if (ret < 0) {
++ if (ret < 1) {
+ dev_warn(dev, "Failed to get K90 initial mode (error %d).\n",
+ ret);
+- return -EIO;
++ ret = -EIO;
++ goto out;
+ }
+
+ switch (data[0]) {
+@@ -277,10 +292,15 @@
+ default:
+ dev_warn(dev, "K90 in unknown mode: %02hhx.\n",
+ data[0]);
+- return -EIO;
++ ret = -EIO;
++ goto out;
+ }
+
+- return snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
++ ret = snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
++out:
++ kfree(data);
++
++ return ret;
+ }
+
+ static ssize_t k90_store_macro_mode(struct device *dev,
+@@ -320,26 +340,36 @@ static ssize_t k90_show_current_profile(struct device *dev,
+ struct usb_interface *usbif = to_usb_interface(dev->parent);
+ struct usb_device *usbdev = interface_to_usbdev(usbif);
+ int current_profile;
+- char data[8];
++ char *data;
++
++ data = kmalloc(8, GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
+
+ ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
+ K90_REQUEST_STATUS,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, 0, 0, data, 8,
+ USB_CTRL_SET_TIMEOUT);
+- if (ret < 0) {
++ if (ret < 8) {
+ dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
+ ret);
+- return -EIO;
++ ret = -EIO;
++ goto out;
+ }
+ current_profile = data[7];
+ if (current_profile < 1 || current_profile > 3) {
+ dev_warn(dev, "Read invalid current profile: %02hhx.\n",
+ data[7]);
+- return -EIO;
++ ret = -EIO;
++ goto out;
+ }
+
+- return snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
++ ret = snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
++out:
++ kfree(data);
++
++ return ret;
+ }
+
+ static ssize_t k90_store_current_profile(struct device *dev,
+diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
+index 1a2984c28b95..ae04826e82fc 100644
+--- a/drivers/infiniband/core/cache.c
++++ b/drivers/infiniband/core/cache.c
+@@ -770,12 +770,8 @@ static int _gid_table_setup_one(struct ib_device *ib_dev)
+ int err = 0;
+
+ table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);
+-
+- if (!table) {
+- pr_warn("failed to allocate ib gid cache for %s\n",
+- ib_dev->name);
++ if (!table)
+ return -ENOMEM;
+- }
+
+ for (port = 0; port < ib_dev->phys_port_cnt; port++) {
+ u8 rdma_port = port + rdma_start_port(ib_dev);
+@@ -1170,14 +1166,13 @@ int ib_cache_setup_one(struct ib_device *device)
+ GFP_KERNEL);
+ if (!device->cache.pkey_cache ||
+ !device->cache.lmc_cache) {
+- pr_warn("Couldn't allocate cache for %s\n", device->name);
+- return -ENOMEM;
++ err = -ENOMEM;
++ goto free;
+ }
+
+ err = gid_table_setup_one(device);
+ if (err)
+- /* Allocated memory will be cleaned in the release function */
+- return err;
++ goto free;
+
+ for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
+ ib_cache_update(device, p + rdma_start_port(device));
+@@ -1192,6 +1187,9 @@ int ib_cache_setup_one(struct ib_device *device)
+
+ err:
+ gid_table_cleanup_one(device);
++free:
++ kfree(device->cache.pkey_cache);
++ kfree(device->cache.lmc_cache);
+ return err;
+ }
+
+diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
+index b9bf0759f10a..8dfc76f8cbb4 100644
+--- a/drivers/infiniband/hw/mlx4/ah.c
++++ b/drivers/infiniband/hw/mlx4/ah.c
+@@ -114,7 +114,9 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
+ !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
+ --ah->av.eth.stat_rate;
+ }
+-
++ ah->av.eth.sl_tclass_flowlabel |=
++ cpu_to_be32((ah_attr->grh.traffic_class << 20) |
++ ah_attr->grh.flow_label);
+ /*
+ * HW requires multicast LID so we just choose one.
+ */
+@@ -122,7 +124,7 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
+ ah->av.ib.dlid = cpu_to_be16(0xc000);
+
+ memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
+- ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);
++ ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(ah_attr->sl << 29);
+
+ return &ah->ibah;
+ }
+diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
+index 1672907ff219..18d309e40f1b 100644
+--- a/drivers/infiniband/hw/mlx4/mad.c
++++ b/drivers/infiniband/hw/mlx4/mad.c
+@@ -702,10 +702,18 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
+
+ /* If a grh is present, we demux according to it */
+ if (wc->wc_flags & IB_WC_GRH) {
+- slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
+- if (slave < 0) {
+- mlx4_ib_warn(ibdev, "failed matching grh\n");
+- return -ENOENT;
++ if (grh->dgid.global.interface_id ==
++ cpu_to_be64(IB_SA_WELL_KNOWN_GUID) &&
++ grh->dgid.global.subnet_prefix == cpu_to_be64(
++ atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix))) {
++ slave = 0;
++ } else {
++ slave = mlx4_ib_find_real_gid(ibdev, port,
++ grh->dgid.global.interface_id);
++ if (slave < 0) {
++ mlx4_ib_warn(ibdev, "failed matching grh\n");
++ return -ENOENT;
++ }
+ }
+ }
+ /* Class-specific handling */
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index b597e8227591..46ad99595fd2 100644
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -697,9 +697,11 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
+ if (err)
+ goto out;
+
+- props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
+- IB_WIDTH_4X : IB_WIDTH_1X;
+- props->active_speed = IB_SPEED_QDR;
++ props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ||
++ (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
++ IB_WIDTH_4X : IB_WIDTH_1X;
++ props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
++ IB_SPEED_FDR : IB_SPEED_QDR;
+ props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
+ props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
+ props->max_msg_sz = mdev->dev->caps.max_msg_sz;
+@@ -2820,14 +2822,19 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
+ goto err_steer_qp_release;
+ }
+
+- bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
+-
+- err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
+- dev, ibdev->steer_qpn_base,
+- ibdev->steer_qpn_base +
+- ibdev->steer_qpn_count - 1);
+- if (err)
+- goto err_steer_free_bitmap;
++ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
++ bitmap_zero(ibdev->ib_uc_qpns_bitmap,
++ ibdev->steer_qpn_count);
++ err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
++ dev, ibdev->steer_qpn_base,
++ ibdev->steer_qpn_base +
++ ibdev->steer_qpn_count - 1);
++ if (err)
++ goto err_steer_free_bitmap;
++ } else {
++ bitmap_fill(ibdev->ib_uc_qpns_bitmap,
++ ibdev->steer_qpn_count);
++ }
+ }
+
+ for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
+diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
+index 570bc866b1d6..c22454383976 100644
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -1280,7 +1280,8 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
+ if (is_qp0(dev, mqp))
+ mlx4_CLOSE_PORT(dev->dev, mqp->port);
+
+- if (dev->qp1_proxy[mqp->port - 1] == mqp) {
++ if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI &&
++ dev->qp1_proxy[mqp->port - 1] == mqp) {
+ mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
+ dev->qp1_proxy[mqp->port - 1] = NULL;
+ mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
+@@ -1764,14 +1765,14 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
+ u8 port_num = mlx4_is_bonded(to_mdev(ibqp->device)->dev) ? 1 :
+ attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+ union ib_gid gid;
+- struct ib_gid_attr gid_attr;
++ struct ib_gid_attr gid_attr = {.gid_type = IB_GID_TYPE_IB};
+ u16 vlan = 0xffff;
+ u8 smac[ETH_ALEN];
+ int status = 0;
+ int is_eth = rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
+ attr->ah_attr.ah_flags & IB_AH_GRH;
+
+- if (is_eth) {
++ if (is_eth && attr->ah_attr.ah_flags & IB_AH_GRH) {
+ int index = attr->ah_attr.grh.sgid_index;
+
+ status = ib_get_cached_gid(ibqp->device, port_num,
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 32b09f059c84..4cab29ea394c 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -496,6 +496,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ int err = -ENOMEM;
++ int max_sq_desc;
+ int max_rq_sg;
+ int max_sq_sg;
+ u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
+@@ -618,9 +619,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+ max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
+ sizeof(struct mlx5_wqe_data_seg);
+- max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
+- sizeof(struct mlx5_wqe_ctrl_seg)) /
+- sizeof(struct mlx5_wqe_data_seg);
++ max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
++ max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
++ sizeof(struct mlx5_wqe_raddr_seg)) /
++ sizeof(struct mlx5_wqe_data_seg);
+ props->max_sge = min(max_rq_sg, max_sq_sg);
+ props->max_sge_rd = MLX5_MAX_SGE_RD;
+ props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 4e9012463c37..be2d02b6a6aa 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -628,7 +628,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
+ ent->order = i + 2;
+ ent->dev = dev;
+
+- if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
++ if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
++ (mlx5_core_is_pf(dev->mdev)))
+ limit = dev->mdev->profile->mr_cache[i].limit;
+ else
+ limit = 0;
+@@ -646,6 +647,33 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
+ return 0;
+ }
+
++static void wait_for_async_commands(struct mlx5_ib_dev *dev)
++{
++ struct mlx5_mr_cache *cache = &dev->cache;
++ struct mlx5_cache_ent *ent;
++ int total = 0;
++ int i;
++ int j;
++
++ for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
++ ent = &cache->ent[i];
++ for (j = 0 ; j < 1000; j++) {
++ if (!ent->pending)
++ break;
++ msleep(50);
++ }
++ }
++ for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
++ ent = &cache->ent[i];
++ total += ent->pending;
++ }
++
++ if (total)
++ mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
++ else
++ mlx5_ib_warn(dev, "done with all pending requests\n");
++}
++
+ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
+ {
+ int i;
+@@ -659,6 +687,7 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
+ clean_keys(dev, i);
+
+ destroy_workqueue(dev->cache.wq);
++ wait_for_async_commands(dev);
+ del_timer_sync(&dev->delay_timer);
+
+ return 0;
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index d1e921816bfe..aee3942ec68d 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -351,6 +351,29 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
+ return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
+ }
+
++static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
++{
++ int max_sge;
++
++ if (attr->qp_type == IB_QPT_RC)
++ max_sge = (min_t(int, wqe_size, 512) -
++ sizeof(struct mlx5_wqe_ctrl_seg) -
++ sizeof(struct mlx5_wqe_raddr_seg)) /
++ sizeof(struct mlx5_wqe_data_seg);
++ else if (attr->qp_type == IB_QPT_XRC_INI)
++ max_sge = (min_t(int, wqe_size, 512) -
++ sizeof(struct mlx5_wqe_ctrl_seg) -
++ sizeof(struct mlx5_wqe_xrc_seg) -
++ sizeof(struct mlx5_wqe_raddr_seg)) /
++ sizeof(struct mlx5_wqe_data_seg);
++ else
++ max_sge = (wqe_size - sq_overhead(attr)) /
++ sizeof(struct mlx5_wqe_data_seg);
++
++ return min_t(int, max_sge, wqe_size - sq_overhead(attr) /
++ sizeof(struct mlx5_wqe_data_seg));
++}
++
+ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
+ struct mlx5_ib_qp *qp)
+ {
+@@ -387,7 +410,11 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
+ return -ENOMEM;
+ }
+ qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
+- qp->sq.max_gs = attr->cap.max_send_sge;
++ qp->sq.max_gs = get_send_sge(attr, wqe_size);
++ if (qp->sq.max_gs < attr->cap.max_send_sge)
++ return -ENOMEM;
++
++ attr->cap.max_send_sge = qp->sq.max_gs;
+ qp->sq.max_post = wq_size / wqe_size;
+ attr->cap.max_send_wr = qp->sq.max_post;
+
+diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
+index 3857dbd9c956..729b0696626e 100644
+--- a/drivers/infiniband/hw/mlx5/srq.c
++++ b/drivers/infiniband/hw/mlx5/srq.c
+@@ -282,6 +282,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
+ mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
+ desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
+ srq->msrq.max_avail_gather);
++ in.type = init_attr->srq_type;
+
+ if (pd->uobject)
+ err = create_srq_user(pd, srq, &in, udata, buf_size);
+@@ -294,7 +295,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
+ goto err_srq;
+ }
+
+- in.type = init_attr->srq_type;
+ in.log_size = ilog2(srq->msrq.max);
+ in.wqe_shift = srq->msrq.wqe_shift - 4;
+ if (srq->wq_sig)
+diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
+index f459c43a77c8..13ed2cc6eaa2 100644
+--- a/drivers/infiniband/sw/rxe/rxe_param.h
++++ b/drivers/infiniband/sw/rxe/rxe_param.h
+@@ -82,7 +82,7 @@ enum rxe_device_param {
+ RXE_MAX_SGE = 32,
+ RXE_MAX_SGE_RD = 32,
+ RXE_MAX_CQ = 16384,
+- RXE_MAX_LOG_CQE = 13,
++ RXE_MAX_LOG_CQE = 15,
+ RXE_MAX_MR = 2 * 1024,
+ RXE_MAX_PD = 0x7ffc,
+ RXE_MAX_QP_RD_ATOM = 128,
+diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
+index 22bd9630dcd9..9f46be52335e 100644
+--- a/drivers/infiniband/sw/rxe/rxe_req.c
++++ b/drivers/infiniband/sw/rxe/rxe_req.c
+@@ -548,23 +548,23 @@ static void update_wqe_psn(struct rxe_qp *qp,
+ static void save_state(struct rxe_send_wqe *wqe,
+ struct rxe_qp *qp,
+ struct rxe_send_wqe *rollback_wqe,
+- struct rxe_qp *rollback_qp)
++ u32 *rollback_psn)
+ {
+ rollback_wqe->state = wqe->state;
+ rollback_wqe->first_psn = wqe->first_psn;
+ rollback_wqe->last_psn = wqe->last_psn;
+- rollback_qp->req.psn = qp->req.psn;
++ *rollback_psn = qp->req.psn;
+ }
+
+ static void rollback_state(struct rxe_send_wqe *wqe,
+ struct rxe_qp *qp,
+ struct rxe_send_wqe *rollback_wqe,
+- struct rxe_qp *rollback_qp)
++ u32 rollback_psn)
+ {
+ wqe->state = rollback_wqe->state;
+ wqe->first_psn = rollback_wqe->first_psn;
+ wqe->last_psn = rollback_wqe->last_psn;
+- qp->req.psn = rollback_qp->req.psn;
++ qp->req.psn = rollback_psn;
+ }
+
+ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+@@ -593,8 +593,8 @@ int rxe_requester(void *arg)
+ int mtu;
+ int opcode;
+ int ret;
+- struct rxe_qp rollback_qp;
+ struct rxe_send_wqe rollback_wqe;
++ u32 rollback_psn;
+
+ next_wqe:
+ if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
+@@ -719,7 +719,7 @@ int rxe_requester(void *arg)
+ * rxe_xmit_packet().
+ * Otherwise, completer might initiate an unjustified retry flow.
+ */
+- save_state(wqe, qp, &rollback_wqe, &rollback_psn);
1630 + update_wqe_state(qp, wqe, &pkt);
1631 + update_wqe_psn(qp, wqe, &pkt, payload);
1632 + ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
1633 +@@ -727,7 +727,7 @@ int rxe_requester(void *arg)
1634 + qp->need_req_skb = 1;
1635 + kfree_skb(skb);
1636 +
1637 +- rollback_state(wqe, qp, &rollback_wqe, &rollback_qp);
1638 ++ rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
1639 +
1640 + if (ret == -EAGAIN) {
1641 + rxe_run_task(&qp->req.task, 1);
1642 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1643 +index 339a1eecdfe3..81a8080c18b3 100644
1644 +--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1645 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1646 +@@ -1054,8 +1054,6 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
1647 +
1648 + tx_qp = ib_create_qp(priv->pd, &attr);
1649 + if (PTR_ERR(tx_qp) == -EINVAL) {
1650 +- ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
1651 +- priv->ca->name);
1652 + attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
1653 + tx_qp = ib_create_qp(priv->pd, &attr);
1654 + }
1655 +diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
1656 +index 6d7de9bfed9a..b93fe83a0b63 100644
1657 +--- a/drivers/input/mouse/alps.c
1658 ++++ b/drivers/input/mouse/alps.c
1659 +@@ -1346,6 +1346,18 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse)
1660 +
1661 + priv->multi_packet = 0;
1662 +
1663 ++ /* Report trackstick */
1664 ++ if (alps_get_pkt_id_ss4_v2(packet) == SS4_PACKET_ID_STICK) {
1665 ++ if (priv->flags & ALPS_DUALPOINT) {
1666 ++ input_report_key(dev2, BTN_LEFT, f->ts_left);
1667 ++ input_report_key(dev2, BTN_RIGHT, f->ts_right);
1668 ++ input_report_key(dev2, BTN_MIDDLE, f->ts_middle);
1669 ++ input_sync(dev2);
1670 ++ }
1671 ++ return;
1672 ++ }
1673 ++
1674 ++ /* Report touchpad */
1675 + alps_report_mt_data(psmouse, (f->fingers <= 4) ? f->fingers : 4);
1676 +
1677 + input_mt_report_finger_count(dev, f->fingers);
1678 +@@ -1356,13 +1368,6 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse)
1679 +
1680 + input_report_abs(dev, ABS_PRESSURE, f->pressure);
1681 + input_sync(dev);
1682 +-
1683 +- if (priv->flags & ALPS_DUALPOINT) {
1684 +- input_report_key(dev2, BTN_LEFT, f->ts_left);
1685 +- input_report_key(dev2, BTN_RIGHT, f->ts_right);
1686 +- input_report_key(dev2, BTN_MIDDLE, f->ts_middle);
1687 +- input_sync(dev2);
1688 +- }
1689 + }
1690 +
1691 + static bool alps_is_valid_package_ss4_v2(struct psmouse *psmouse)
1692 +diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
1693 +index ce4a96fccc43..5ff803efdc03 100644
1694 +--- a/drivers/media/platform/Kconfig
1695 ++++ b/drivers/media/platform/Kconfig
1696 +@@ -93,7 +93,7 @@ config VIDEO_OMAP3_DEBUG
1697 +
1698 + config VIDEO_PXA27x
1699 + tristate "PXA27x Quick Capture Interface driver"
1700 +- depends on VIDEO_DEV && HAS_DMA
1701 ++ depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
1702 + depends on PXA27x || COMPILE_TEST
1703 + select VIDEOBUF2_DMA_SG
1704 + select SG_SPLIT
1705 +diff --git a/drivers/media/platform/blackfin/ppi.c b/drivers/media/platform/blackfin/ppi.c
1706 +index cff63e511e6d..b8f3d9fa66e9 100644
1707 +--- a/drivers/media/platform/blackfin/ppi.c
1708 ++++ b/drivers/media/platform/blackfin/ppi.c
1709 +@@ -214,6 +214,8 @@ static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params)
1710 + if (params->dlen > 24 || params->dlen <= 0)
1711 + return -EINVAL;
1712 + pctrl = devm_pinctrl_get(ppi->dev);
1713 ++ if (IS_ERR(pctrl))
1714 ++ return PTR_ERR(pctrl);
1715 + pstate = pinctrl_lookup_state(pctrl,
1716 + pin_state[(params->dlen + 7) / 8 - 1]);
1717 + if (pinctrl_select_state(pctrl, pstate))
1718 +diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
1719 +index 3436eda58855..27e7cf65c2a7 100644
1720 +--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
1721 ++++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
1722 +@@ -926,10 +926,11 @@ static int s5p_mfc_release(struct file *file)
1723 + mfc_debug_enter();
1724 + if (dev)
1725 + mutex_lock(&dev->mfc_mutex);
1726 +- s5p_mfc_clock_on();
1727 + vb2_queue_release(&ctx->vq_src);
1728 + vb2_queue_release(&ctx->vq_dst);
1729 + if (dev) {
1730 ++ s5p_mfc_clock_on();
1731 ++
1732 + /* Mark context as idle */
1733 + clear_work_bit_irqsave(ctx);
1734 + /*
1735 +@@ -951,9 +952,9 @@ static int s5p_mfc_release(struct file *file)
1736 + if (s5p_mfc_power_off() < 0)
1737 + mfc_err("Power off failed\n");
1738 + }
1739 ++ mfc_debug(2, "Shutting down clock\n");
1740 ++ s5p_mfc_clock_off();
1741 + }
1742 +- mfc_debug(2, "Shutting down clock\n");
1743 +- s5p_mfc_clock_off();
1744 + if (dev)
1745 + dev->ctx[ctx->num] = NULL;
1746 + s5p_mfc_dec_ctrls_delete(ctx);
1747 +diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
1748 +index d341d4994528..cf2a8d884536 100644
1749 +--- a/drivers/media/platform/sti/hva/hva-hw.c
1750 ++++ b/drivers/media/platform/sti/hva/hva-hw.c
1751 +@@ -305,16 +305,16 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
1752 + /* get memory for registers */
1753 + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1754 + hva->regs = devm_ioremap_resource(dev, regs);
1755 +- if (IS_ERR_OR_NULL(hva->regs)) {
1756 ++ if (IS_ERR(hva->regs)) {
1757 + dev_err(dev, "%s failed to get regs\n", HVA_PREFIX);
1758 + return PTR_ERR(hva->regs);
1759 + }
1760 +
1761 + /* get memory for esram */
1762 + esram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1763 +- if (IS_ERR_OR_NULL(esram)) {
1764 ++ if (!esram) {
1765 + dev_err(dev, "%s failed to get esram\n", HVA_PREFIX);
1766 +- return PTR_ERR(esram);
1767 ++ return -ENODEV;
1768 + }
1769 + hva->esram_addr = esram->start;
1770 + hva->esram_size = resource_size(esram);
1771 +diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
1772 +index 0f301903aa6f..63165d324fff 100644
1773 +--- a/drivers/media/rc/ite-cir.c
1774 ++++ b/drivers/media/rc/ite-cir.c
1775 +@@ -263,6 +263,8 @@ static void ite_set_carrier_params(struct ite_dev *dev)
1776 +
1777 + if (allowance > ITE_RXDCR_MAX)
1778 + allowance = ITE_RXDCR_MAX;
1779 ++
1780 ++ use_demodulator = true;
1781 + }
1782 + }
1783 +
1784 +diff --git a/drivers/media/spi/gs1662.c b/drivers/media/spi/gs1662.c
1785 +index d76f36233f43..5143a90219c0 100644
1786 +--- a/drivers/media/spi/gs1662.c
1787 ++++ b/drivers/media/spi/gs1662.c
1788 +@@ -453,10 +453,9 @@ static int gs_probe(struct spi_device *spi)
1789 + static int gs_remove(struct spi_device *spi)
1790 + {
1791 + struct v4l2_subdev *sd = spi_get_drvdata(spi);
1792 +- struct gs *gs = to_gs(sd);
1793 +
1794 + v4l2_device_unregister_subdev(sd);
1795 +- kfree(gs);
1796 ++
1797 + return 0;
1798 + }
1799 +
1800 +diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
1801 +index 44ecebd1ea8c..c8b8ac66ff7e 100644
1802 +--- a/drivers/mmc/host/mxs-mmc.c
1803 ++++ b/drivers/mmc/host/mxs-mmc.c
1804 +@@ -309,6 +309,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
1805 + cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
1806 + cmd1 = cmd->arg;
1807 +
1808 ++ if (cmd->opcode == MMC_STOP_TRANSMISSION)
1809 ++ cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
1810 ++
1811 + if (host->sdio_irq_en) {
1812 + ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
1813 + cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
1814 +@@ -417,8 +420,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
1815 + ssp->base + HW_SSP_BLOCK_SIZE);
1816 + }
1817 +
1818 +- if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
1819 +- (cmd->opcode == SD_IO_RW_EXTENDED))
1820 ++ if (cmd->opcode == SD_IO_RW_EXTENDED)
1821 + cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
1822 +
1823 + cmd1 = cmd->arg;
1824 +diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
1825 +index 81d4dc034793..fddd0be196f4 100644
1826 +--- a/drivers/mmc/host/sdhci-acpi.c
1827 ++++ b/drivers/mmc/host/sdhci-acpi.c
1828 +@@ -394,7 +394,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
1829 + /* Power on the SDHCI controller and its children */
1830 + acpi_device_fix_up_power(device);
1831 + list_for_each_entry(child, &device->children, node)
1832 +- acpi_device_fix_up_power(child);
1833 ++ if (child->status.present && child->status.enabled)
1834 ++ acpi_device_fix_up_power(child);
1835 +
1836 + if (acpi_bus_get_status(device) || !device->status.present)
1837 + return -ENODEV;
1838 +diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
1839 +index 7b7a887b4709..b254090b8a1b 100644
1840 +--- a/drivers/mtd/nand/Kconfig
1841 ++++ b/drivers/mtd/nand/Kconfig
1842 +@@ -537,7 +537,7 @@ config MTD_NAND_FSMC
1843 + Flexible Static Memory Controller (FSMC)
1844 +
1845 + config MTD_NAND_XWAY
1846 +- tristate "Support for NAND on Lantiq XWAY SoC"
1847 ++ bool "Support for NAND on Lantiq XWAY SoC"
1848 + depends on LANTIQ && SOC_TYPE_XWAY
1849 + help
1850 + Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
1851 +diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
1852 +index 852388171f20..bc6e49af063a 100644
1853 +--- a/drivers/mtd/nand/lpc32xx_mlc.c
1854 ++++ b/drivers/mtd/nand/lpc32xx_mlc.c
1855 +@@ -776,7 +776,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
1856 + init_completion(&host->comp_controller);
1857 +
1858 + host->irq = platform_get_irq(pdev, 0);
1859 +- if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
1860 ++ if (host->irq < 0) {
1861 + dev_err(&pdev->dev, "failed to get platform irq\n");
1862 + res = -EINVAL;
1863 + goto err_exit3;
1864 +diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c
1865 +index 1f2948c0c458..895101a5e686 100644
1866 +--- a/drivers/mtd/nand/xway_nand.c
1867 ++++ b/drivers/mtd/nand/xway_nand.c
1868 +@@ -232,7 +232,6 @@ static const struct of_device_id xway_nand_match[] = {
1869 + { .compatible = "lantiq,nand-xway" },
1870 + {},
1871 + };
1872 +-MODULE_DEVICE_TABLE(of, xway_nand_match);
1873 +
1874 + static struct platform_driver xway_nand_driver = {
1875 + .probe = xway_nand_probe,
1876 +@@ -243,6 +242,4 @@ static struct platform_driver xway_nand_driver = {
1877 + },
1878 + };
1879 +
1880 +-module_platform_driver(xway_nand_driver);
1881 +-
1882 +-MODULE_LICENSE("GPL");
1883 ++builtin_platform_driver(xway_nand_driver);
1884 +diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
1885 +index d403ba7b8f43..d489fbd07c12 100644
1886 +--- a/drivers/mtd/spi-nor/cadence-quadspi.c
1887 ++++ b/drivers/mtd/spi-nor/cadence-quadspi.c
1888 +@@ -1077,12 +1077,14 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
1889 +
1890 + /* Get flash device data */
1891 + for_each_available_child_of_node(dev->of_node, np) {
1892 +- if (of_property_read_u32(np, "reg", &cs)) {
1893 ++ ret = of_property_read_u32(np, "reg", &cs);
1894 ++ if (ret) {
1895 + dev_err(dev, "Couldn't determine chip select.\n");
1896 + goto err;
1897 + }
1898 +
1899 +- if (cs > CQSPI_MAX_CHIPSELECT) {
1900 ++ if (cs >= CQSPI_MAX_CHIPSELECT) {
1901 ++ ret = -EINVAL;
1902 + dev_err(dev, "Chip select %d out of range.\n", cs);
1903 + goto err;
1904 + }
1905 +diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
1906 +index 1056ed142411..f186e0460cde 100644
1907 +--- a/drivers/net/ieee802154/atusb.c
1908 ++++ b/drivers/net/ieee802154/atusb.c
1909 +@@ -112,13 +112,26 @@ static int atusb_read_reg(struct atusb *atusb, uint8_t reg)
1910 + {
1911 + struct usb_device *usb_dev = atusb->usb_dev;
1912 + int ret;
1913 ++ uint8_t *buffer;
1914 + uint8_t value;
1915 +
1916 ++ buffer = kmalloc(1, GFP_KERNEL);
1917 ++ if (!buffer)
1918 ++ return -ENOMEM;
1919 ++
1920 + dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg);
1921 + ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
1922 + ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
1923 +- 0, reg, &value, 1, 1000);
1924 +- return ret >= 0 ? value : ret;
1925 ++ 0, reg, buffer, 1, 1000);
1926 ++
1927 ++ if (ret >= 0) {
1928 ++ value = buffer[0];
1929 ++ kfree(buffer);
1930 ++ return value;
1931 ++ } else {
1932 ++ kfree(buffer);
1933 ++ return ret;
1934 ++ }
1935 + }
1936 +
1937 + static int atusb_write_subreg(struct atusb *atusb, uint8_t reg, uint8_t mask,
1938 +@@ -587,9 +600,13 @@ static struct ieee802154_ops atusb_ops = {
1939 + static int atusb_get_and_show_revision(struct atusb *atusb)
1940 + {
1941 + struct usb_device *usb_dev = atusb->usb_dev;
1942 +- unsigned char buffer[3];
1943 ++ unsigned char *buffer;
1944 + int ret;
1945 +
1946 ++ buffer = kmalloc(3, GFP_KERNEL);
1947 ++ if (!buffer)
1948 ++ return -ENOMEM;
1949 ++
1950 + /* Get a couple of the ATMega Firmware values */
1951 + ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
1952 + ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
1953 +@@ -605,15 +622,20 @@ static int atusb_get_and_show_revision(struct atusb *atusb)
1954 + dev_info(&usb_dev->dev, "Please update to version 0.2 or newer");
1955 + }
1956 +
1957 ++ kfree(buffer);
1958 + return ret;
1959 + }
1960 +
1961 + static int atusb_get_and_show_build(struct atusb *atusb)
1962 + {
1963 + struct usb_device *usb_dev = atusb->usb_dev;
1964 +- char build[ATUSB_BUILD_SIZE + 1];
1965 ++ char *build;
1966 + int ret;
1967 +
1968 ++ build = kmalloc(ATUSB_BUILD_SIZE + 1, GFP_KERNEL);
1969 ++ if (!build)
1970 ++ return -ENOMEM;
1971 ++
1972 + ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
1973 + ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
1974 + build, ATUSB_BUILD_SIZE, 1000);
1975 +@@ -622,6 +644,7 @@ static int atusb_get_and_show_build(struct atusb *atusb)
1976 + dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
1977 + }
1978 +
1979 ++ kfree(build);
1980 + return ret;
1981 + }
1982 +
1983 +diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
1984 +index abe5c6bc756c..1480734c2d6e 100644
1985 +--- a/drivers/nvdimm/namespace_devs.c
1986 ++++ b/drivers/nvdimm/namespace_devs.c
1987 +@@ -957,6 +957,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
1988 + {
1989 + resource_size_t allocated = 0, available = 0;
1990 + struct nd_region *nd_region = to_nd_region(dev->parent);
1991 ++ struct nd_namespace_common *ndns = to_ndns(dev);
1992 + struct nd_mapping *nd_mapping;
1993 + struct nvdimm_drvdata *ndd;
1994 + struct nd_label_id label_id;
1995 +@@ -964,7 +965,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
1996 + u8 *uuid = NULL;
1997 + int rc, i;
1998 +
1999 +- if (dev->driver || to_ndns(dev)->claim)
2000 ++ if (dev->driver || ndns->claim)
2001 + return -EBUSY;
2002 +
2003 + if (is_namespace_pmem(dev)) {
2004 +@@ -1034,20 +1035,16 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
2005 +
2006 + nd_namespace_pmem_set_resource(nd_region, nspm,
2007 + val * nd_region->ndr_mappings);
2008 +- } else if (is_namespace_blk(dev)) {
2009 +- struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
2010 +-
2011 +- /*
2012 +- * Try to delete the namespace if we deleted all of its
2013 +- * allocation, this is not the seed device for the
2014 +- * region, and it is not actively claimed by a btt
2015 +- * instance.
2016 +- */
2017 +- if (val == 0 && nd_region->ns_seed != dev
2018 +- && !nsblk->common.claim)
2019 +- nd_device_unregister(dev, ND_ASYNC);
2020 + }
2021 +
2022 ++ /*
2023 ++ * Try to delete the namespace if we deleted all of its
2024 ++ * allocation, this is not the seed device for the region, and
2025 ++ * it is not actively claimed by a btt instance.
2026 ++ */
2027 ++ if (val == 0 && nd_region->ns_seed != dev && !ndns->claim)
2028 ++ nd_device_unregister(dev, ND_ASYNC);
2029 ++
2030 + return rc;
2031 + }
2032 +
2033 +diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
2034 +index bed19994c1e9..af8f6e92e885 100644
2035 +--- a/drivers/pci/host/pcie-designware.c
2036 ++++ b/drivers/pci/host/pcie-designware.c
2037 +@@ -807,11 +807,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
2038 + {
2039 + u32 val;
2040 +
2041 +- /* get iATU unroll support */
2042 +- pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
2043 +- dev_dbg(pp->dev, "iATU unroll: %s\n",
2044 +- pp->iatu_unroll_enabled ? "enabled" : "disabled");
2045 +-
2046 + /* set the number of lanes */
2047 + val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
2048 + val &= ~PORT_LINK_MODE_MASK;
2049 +@@ -882,6 +877,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
2050 + * we should not program the ATU here.
2051 + */
2052 + if (!pp->ops->rd_other_conf) {
2053 ++ /* get iATU unroll support */
2054 ++ pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
2055 ++ dev_dbg(pp->dev, "iATU unroll: %s\n",
2056 ++ pp->iatu_unroll_enabled ? "enabled" : "disabled");
2057 ++
2058 + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
2059 + PCIE_ATU_TYPE_MEM, pp->mem_base,
2060 + pp->mem_bus_addr, pp->mem_size);
2061 +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
2062 +index 104c46d53121..300770cdc084 100644
2063 +--- a/drivers/pci/probe.c
2064 ++++ b/drivers/pci/probe.c
2065 +@@ -1050,6 +1050,7 @@ void set_pcie_port_type(struct pci_dev *pdev)
2066 + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
2067 + if (!pos)
2068 + return;
2069 ++
2070 + pdev->pcie_cap = pos;
2071 + pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
2072 + pdev->pcie_flags_reg = reg16;
2073 +@@ -1057,13 +1058,14 @@ void set_pcie_port_type(struct pci_dev *pdev)
2074 + pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
2075 +
2076 + /*
2077 +- * A Root Port is always the upstream end of a Link. No PCIe
2078 +- * component has two Links. Two Links are connected by a Switch
2079 +- * that has a Port on each Link and internal logic to connect the
2080 +- * two Ports.
2081 ++ * A Root Port or a PCI-to-PCIe bridge is always the upstream end
2082 ++ * of a Link. No PCIe component has two Links. Two Links are
2083 ++ * connected by a Switch that has a Port on each Link and internal
2084 ++ * logic to connect the two Ports.
2085 + */
2086 + type = pci_pcie_type(pdev);
2087 +- if (type == PCI_EXP_TYPE_ROOT_PORT)
2088 ++ if (type == PCI_EXP_TYPE_ROOT_PORT ||
2089 ++ type == PCI_EXP_TYPE_PCIE_BRIDGE)
2090 + pdev->has_secondary_link = 1;
2091 + else if (type == PCI_EXP_TYPE_UPSTREAM ||
2092 + type == PCI_EXP_TYPE_DOWNSTREAM) {
2093 +diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
2094 +index b6ea9ffa7381..e0a629eaceab 100644
2095 +--- a/drivers/rpmsg/rpmsg_core.c
2096 ++++ b/drivers/rpmsg/rpmsg_core.c
2097 +@@ -411,8 +411,8 @@ int rpmsg_register_device(struct rpmsg_device *rpdev)
2098 + struct device *dev = &rpdev->dev;
2099 + int ret;
2100 +
2101 +- dev_set_name(&rpdev->dev, "%s:%s",
2102 +- dev_name(dev->parent), rpdev->id.name);
2103 ++ dev_set_name(&rpdev->dev, "%s.%s.%d.%d", dev_name(dev->parent),
2104 ++ rpdev->id.name, rpdev->src, rpdev->dst);
2105 +
2106 + rpdev->dev.bus = &rpmsg_bus;
2107 + rpdev->dev.release = rpmsg_release_device;
2108 +diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
2109 +index 608140f16d98..e3b911c895b4 100644
2110 +--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
2111 ++++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
2112 +@@ -45,6 +45,7 @@
2113 +
2114 + #define INITIAL_SRP_LIMIT 800
2115 + #define DEFAULT_MAX_SECTORS 256
2116 ++#define MAX_TXU 1024 * 1024
2117 +
2118 + static uint max_vdma_size = MAX_H_COPY_RDMA;
2119 +
2120 +@@ -1239,7 +1240,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
2121 + }
2122 +
2123 + info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
2124 +- GFP_KERNEL);
2125 ++ GFP_ATOMIC);
2126 + if (!info) {
2127 + dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
2128 + iue->target);
2129 +@@ -1291,7 +1292,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
2130 + info->mad_version = cpu_to_be32(MAD_VERSION_1);
2131 + info->os_type = cpu_to_be32(LINUX);
2132 + memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
2133 +- info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE);
2134 ++ info->port_max_txu[0] = cpu_to_be32(MAX_TXU);
2135 +
2136 + dma_wmb();
2137 + rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
2138 +@@ -1357,7 +1358,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
2139 + }
2140 +
2141 + cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
2142 +- GFP_KERNEL);
2143 ++ GFP_ATOMIC);
2144 + if (!cap) {
2145 + dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
2146 + iue->target);
2147 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
2148 +index 3e71bc1b4a80..7008061c4b5b 100644
2149 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
2150 ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
2151 +@@ -393,6 +393,7 @@ struct MPT3SAS_TARGET {
2152 + * @eedp_enable: eedp support enable bit
2153 + * @eedp_type: 0(type_1), 1(type_2), 2(type_3)
2154 + * @eedp_block_length: block size
2155 ++ * @ata_command_pending: SATL passthrough outstanding for device
2156 + */
2157 + struct MPT3SAS_DEVICE {
2158 + struct MPT3SAS_TARGET *sas_target;
2159 +@@ -402,6 +403,17 @@ struct MPT3SAS_DEVICE {
2160 + u8 block;
2161 + u8 tlr_snoop_check;
2162 + u8 ignore_delay_remove;
2163 ++ /*
2164 ++ * Bug workaround for SATL handling: the mpt2/3sas firmware
2165 ++ * doesn't return BUSY or TASK_SET_FULL for subsequent
2166 ++ * commands while a SATL pass through is in operation as the
2167 ++ * spec requires, it simply does nothing with them until the
2168 ++ * pass through completes, causing them possibly to timeout if
2169 ++ * the passthrough is a long executing command (like format or
2170 ++ * secure erase). This variable allows us to do the right
2171 ++ * thing while a SATL command is pending.
2172 ++ */
2173 ++ unsigned long ata_command_pending;
2174 + };
2175 +
2176 + #define MPT3_CMD_NOT_USED 0x8000 /* free */
2177 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2178 +index 1c4744e78173..f84a6087cebd 100644
2179 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2180 ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2181 +@@ -3885,9 +3885,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
2182 + }
2183 + }
2184 +
2185 +-static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
2186 ++static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
2187 + {
2188 +- return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
2189 ++ struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
2190 ++
2191 ++ if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
2192 ++ return 0;
2193 ++
2194 ++ if (pending)
2195 ++ return test_and_set_bit(0, &priv->ata_command_pending);
2196 ++
2197 ++ clear_bit(0, &priv->ata_command_pending);
2198 ++ return 0;
2199 + }
2200 +
2201 + /**
2202 +@@ -3911,9 +3920,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
2203 + if (!scmd)
2204 + continue;
2205 + count++;
2206 +- if (ata_12_16_cmd(scmd))
2207 +- scsi_internal_device_unblock(scmd->device,
2208 +- SDEV_RUNNING);
2209 ++ _scsih_set_satl_pending(scmd, false);
2210 + mpt3sas_base_free_smid(ioc, smid);
2211 + scsi_dma_unmap(scmd);
2212 + if (ioc->pci_error_recovery)
2213 +@@ -4044,13 +4051,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
2214 + if (ioc->logging_level & MPT_DEBUG_SCSI)
2215 + scsi_print_command(scmd);
2216 +
2217 +- /*
2218 +- * Lock the device for any subsequent command until command is
2219 +- * done.
2220 +- */
2221 +- if (ata_12_16_cmd(scmd))
2222 +- scsi_internal_device_block(scmd->device);
2223 +-
2224 + sas_device_priv_data = scmd->device->hostdata;
2225 + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
2226 + scmd->result = DID_NO_CONNECT << 16;
2227 +@@ -4064,6 +4064,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
2228 + return 0;
2229 + }
2230 +
2231 ++ /*
2232 ++ * Bug work around for firmware SATL handling. The loop
2233 ++ * is based on atomic operations and ensures consistency
2234 ++ * since we're lockless at this point
2235 ++ */
2236 ++ do {
2237 ++ if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
2238 ++ scmd->result = SAM_STAT_BUSY;
2239 ++ scmd->scsi_done(scmd);
2240 ++ return 0;
2241 ++ }
2242 ++ } while (_scsih_set_satl_pending(scmd, true));
2243 ++
2244 + sas_target_priv_data = sas_device_priv_data->sas_target;
2245 +
2246 + /* invalid device handle */
2247 +@@ -4626,8 +4639,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2248 + if (scmd == NULL)
2249 + return 1;
2250 +
2251 +- if (ata_12_16_cmd(scmd))
2252 +- scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
2253 ++ _scsih_set_satl_pending(scmd, false);
2254 +
2255 + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2256 +
2257 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
2258 +index 56d6142852a5..078d797cb492 100644
2259 +--- a/drivers/scsi/qla2xxx/qla_os.c
2260 ++++ b/drivers/scsi/qla2xxx/qla_os.c
2261 +@@ -3489,7 +3489,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2262 + sizeof(struct ct6_dsd), 0,
2263 + SLAB_HWCACHE_ALIGN, NULL);
2264 + if (!ctx_cachep)
2265 +- goto fail_free_gid_list;
2266 ++ goto fail_free_srb_mempool;
2267 + }
2268 + ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
2269 + ctx_cachep);
2270 +@@ -3642,7 +3642,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2271 + ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
2272 + GFP_KERNEL);
2273 + if (!ha->loop_id_map)
2274 +- goto fail_async_pd;
2275 ++ goto fail_loop_id_map;
2276 + else {
2277 + qla2x00_set_reserved_loop_ids(ha);
2278 + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
2279 +@@ -3651,6 +3651,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2280 +
2281 + return 0;
2282 +
2283 ++fail_loop_id_map:
2284 ++ dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
2285 + fail_async_pd:
2286 + dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
2287 + fail_ex_init_cb:
2288 +@@ -3678,6 +3680,10 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2289 + dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2290 + ha->ms_iocb = NULL;
2291 + ha->ms_iocb_dma = 0;
2292 ++
2293 ++ if (ha->sns_cmd)
2294 ++ dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
2295 ++ ha->sns_cmd, ha->sns_cmd_dma);
2296 + fail_dma_pool:
2297 + if (IS_QLA82XX(ha) || ql2xenabledif) {
2298 + dma_pool_destroy(ha->fcp_cmnd_dma_pool);
2299 +@@ -3695,10 +3701,12 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2300 + kfree(ha->nvram);
2301 + ha->nvram = NULL;
2302 + fail_free_ctx_mempool:
2303 +- mempool_destroy(ha->ctx_mempool);
2304 ++ if (ha->ctx_mempool)
2305 ++ mempool_destroy(ha->ctx_mempool);
2306 + ha->ctx_mempool = NULL;
2307 + fail_free_srb_mempool:
2308 +- mempool_destroy(ha->srb_mempool);
2309 ++ if (ha->srb_mempool)
2310 ++ mempool_destroy(ha->srb_mempool);
2311 + ha->srb_mempool = NULL;
2312 + fail_free_gid_list:
2313 + dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
2314 +diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
2315 +index 8c9a35c91705..50adabbb5808 100644
2316 +--- a/drivers/scsi/ses.c
2317 ++++ b/drivers/scsi/ses.c
2318 +@@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
2319 +
2320 + ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
2321 +
2322 +- if (scsi_is_sas_rphy(&sdev->sdev_gendev))
2323 ++ if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent))
2324 + efd.addr = sas_get_address(sdev);
2325 +
2326 + if (efd.addr) {
2327 +diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
2328 +index 8823cc81ae45..5bb376009d98 100644
2329 +--- a/drivers/soc/ti/wkup_m3_ipc.c
2330 ++++ b/drivers/soc/ti/wkup_m3_ipc.c
2331 +@@ -459,6 +459,7 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
2332 +
2333 + if (IS_ERR(task)) {
2334 + dev_err(dev, "can't create rproc_boot thread\n");
2335 ++ ret = PTR_ERR(task);
2336 + goto err_put_rproc;
2337 + }
2338 +
2339 +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
2340 +index dd7b5b47291d..d6239fa718be 100644
2341 +--- a/drivers/spi/spi-pxa2xx.c
2342 ++++ b/drivers/spi/spi-pxa2xx.c
2343 +@@ -1690,6 +1690,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
2344 + pxa2xx_spi_write(drv_data, SSCR1, tmp);
2345 + tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
2346 + pxa2xx_spi_write(drv_data, SSCR0, tmp);
2347 ++ break;
2348 + default:
2349 + tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
2350 + SSCR1_TxTresh(TX_THRESH_DFLT);
2351 +diff --git a/drivers/staging/media/s5p-cec/s5p_cec.c b/drivers/staging/media/s5p-cec/s5p_cec.c
2352 +index 1780a08b73c9..58d756231136 100644
2353 +--- a/drivers/staging/media/s5p-cec/s5p_cec.c
2354 ++++ b/drivers/staging/media/s5p-cec/s5p_cec.c
2355 +@@ -231,7 +231,7 @@ static int s5p_cec_remove(struct platform_device *pdev)
2356 + return 0;
2357 + }
2358 +
2359 +-static int s5p_cec_runtime_suspend(struct device *dev)
2360 ++static int __maybe_unused s5p_cec_runtime_suspend(struct device *dev)
2361 + {
2362 + struct s5p_cec_dev *cec = dev_get_drvdata(dev);
2363 +
2364 +@@ -239,7 +239,7 @@ static int s5p_cec_runtime_suspend(struct device *dev)
2365 + return 0;
2366 + }
2367 +
2368 +-static int s5p_cec_runtime_resume(struct device *dev)
2369 ++static int __maybe_unused s5p_cec_runtime_resume(struct device *dev)
2370 + {
2371 + struct s5p_cec_dev *cec = dev_get_drvdata(dev);
2372 + int ret;
2373 +diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
2374 +index 87e6035c9e81..8e7a3d646531 100644
2375 +--- a/drivers/xen/swiotlb-xen.c
2376 ++++ b/drivers/xen/swiotlb-xen.c
2377 +@@ -392,7 +392,7 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
2378 + if (dma_capable(dev, dev_addr, size) &&
2379 + !range_straddles_page_boundary(phys, size) &&
2380 + !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
2381 +- !swiotlb_force) {
2382 ++ (swiotlb_force != SWIOTLB_FORCE)) {
2383 + /* we are not interested in the dma_addr returned by
2384 + * xen_dma_map_page, only in the potential cache flushes executed
2385 + * by the function. */
2386 +@@ -549,7 +549,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
2387 + phys_addr_t paddr = sg_phys(sg);
2388 + dma_addr_t dev_addr = xen_phys_to_bus(paddr);
2389 +
2390 +- if (swiotlb_force ||
2391 ++ if (swiotlb_force == SWIOTLB_FORCE ||
2392 + xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
2393 + !dma_capable(hwdev, dev_addr, sg->length) ||
2394 + range_straddles_page_boundary(paddr, sg->length)) {
2395 +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
2396 +index 16e6ded0b7f2..f3f21105b860 100644
2397 +--- a/fs/ceph/caps.c
2398 ++++ b/fs/ceph/caps.c
2399 +@@ -2507,9 +2507,20 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
2400 + if (err < 0)
2401 + ret = err;
2402 + } else {
2403 +- ret = wait_event_interruptible(ci->i_cap_wq,
2404 +- try_get_cap_refs(ci, need, want, endoff,
2405 +- true, &_got, &err));
2406 ++ DEFINE_WAIT_FUNC(wait, woken_wake_function);
2407 ++ add_wait_queue(&ci->i_cap_wq, &wait);
2408 ++
2409 ++ while (!try_get_cap_refs(ci, need, want, endoff,
2410 ++ true, &_got, &err)) {
2411 ++ if (signal_pending(current)) {
2412 ++ ret = -ERESTARTSYS;
2413 ++ break;
2414 ++ }
2415 ++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
2416 ++ }
2417 ++
2418 ++ remove_wait_queue(&ci->i_cap_wq, &wait);
2419 ++
2420 + if (err == -EAGAIN)
2421 + continue;
2422 + if (err < 0)
2423 +diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
2424 +index a594c7879cc2..1afa11191000 100644
2425 +--- a/fs/ceph/dir.c
2426 ++++ b/fs/ceph/dir.c
2427 +@@ -1255,7 +1255,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
2428 + struct ceph_mds_client *mdsc =
2429 + ceph_sb_to_client(dir->i_sb)->mdsc;
2430 + struct ceph_mds_request *req;
2431 +- int op, mask, err;
2432 ++ int op, err;
2433 ++ u32 mask;
2434 +
2435 + if (flags & LOOKUP_RCU)
2436 + return -ECHILD;
2437 +@@ -1270,7 +1271,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
2438 + mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
2439 + if (ceph_security_xattr_wanted(dir))
2440 + mask |= CEPH_CAP_XATTR_SHARED;
2441 +- req->r_args.getattr.mask = mask;
2442 ++ req->r_args.getattr.mask = cpu_to_le32(mask);
2443 +
2444 + err = ceph_mdsc_do_request(mdsc, NULL, req);
2445 + switch (err) {
2446 +diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
2447 +index ef4d04647325..12f2252f6c98 100644
2448 +--- a/fs/ceph/inode.c
2449 ++++ b/fs/ceph/inode.c
2450 +@@ -305,7 +305,8 @@ static int frag_tree_split_cmp(const void *l, const void *r)
2451 + {
2452 + struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
2453 + struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
2454 +- return ceph_frag_compare(ls->frag, rs->frag);
2455 ++ return ceph_frag_compare(le32_to_cpu(ls->frag),
2456 ++ le32_to_cpu(rs->frag));
2457 + }
2458 +
2459 + static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
2460 +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
2461 +index 815acd1a56d4..6a26c7bd1286 100644
2462 +--- a/fs/ceph/mds_client.c
2463 ++++ b/fs/ceph/mds_client.c
2464 +@@ -288,12 +288,13 @@ static int parse_reply_info_extra(void **p, void *end,
2465 + struct ceph_mds_reply_info_parsed *info,
2466 + u64 features)
2467 + {
2468 +- if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
2469 ++ u32 op = le32_to_cpu(info->head->op);
2470 ++
2471 ++ if (op == CEPH_MDS_OP_GETFILELOCK)
2472 + return parse_reply_info_filelock(p, end, info, features);
2473 +- else if (info->head->op == CEPH_MDS_OP_READDIR ||
2474 +- info->head->op == CEPH_MDS_OP_LSSNAP)
2475 ++ else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
2476 + return parse_reply_info_dir(p, end, info, features);
2477 +- else if (info->head->op == CEPH_MDS_OP_CREATE)
2478 ++ else if (op == CEPH_MDS_OP_CREATE)
2479 + return parse_reply_info_create(p, end, info, features);
2480 + else
2481 + return -EIO;
2482 +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
2483 +index 70ea57c7b6bb..4e06a27ed7f8 100644
2484 +--- a/fs/fuse/dev.c
2485 ++++ b/fs/fuse/dev.c
2486 +@@ -2025,7 +2025,6 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
2487 + struct fuse_req *req;
2488 + req = list_entry(head->next, struct fuse_req, list);
2489 + req->out.h.error = -ECONNABORTED;
2490 +- clear_bit(FR_PENDING, &req->flags);
2491 + clear_bit(FR_SENT, &req->flags);
2492 + list_del_init(&req->list);
2493 + request_end(fc, req);
2494 +@@ -2103,6 +2102,8 @@ void fuse_abort_conn(struct fuse_conn *fc)
2495 + spin_lock(&fiq->waitq.lock);
2496 + fiq->connected = 0;
2497 + list_splice_init(&fiq->pending, &to_end2);
2498 ++ list_for_each_entry(req, &to_end2, list)
2499 ++ clear_bit(FR_PENDING, &req->flags);
2500 + while (forget_pending(fiq))
2501 + kfree(dequeue_forget(fiq, 1, NULL));
2502 + wake_up_all_locked(&fiq->waitq);
2503 +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
2504 +index 096f79997f75..642c57b8de7b 100644
2505 +--- a/fs/fuse/dir.c
2506 ++++ b/fs/fuse/dir.c
2507 +@@ -68,7 +68,7 @@ static u64 time_to_jiffies(u64 sec, u32 nsec)
2508 + if (sec || nsec) {
2509 + struct timespec64 ts = {
2510 + sec,
2511 +- max_t(u32, nsec, NSEC_PER_SEC - 1)
2512 ++ min_t(u32, nsec, NSEC_PER_SEC - 1)
2513 + };
2514 +
2515 + return get_jiffies_64() + timespec64_to_jiffies(&ts);
2516 +diff --git a/fs/posix_acl.c b/fs/posix_acl.c
2517 +index 595522022aca..c9d48dc78495 100644
2518 +--- a/fs/posix_acl.c
2519 ++++ b/fs/posix_acl.c
2520 +@@ -922,11 +922,10 @@ int simple_set_acl(struct inode *inode, struct posix_acl *acl, int type)
2521 + int error;
2522 +
2523 + if (type == ACL_TYPE_ACCESS) {
2524 +- error = posix_acl_equiv_mode(acl, &inode->i_mode);
2525 +- if (error < 0)
2526 +- return 0;
2527 +- if (error == 0)
2528 +- acl = NULL;
2529 ++ error = posix_acl_update_mode(inode,
2530 ++ &inode->i_mode, &acl);
2531 ++ if (error)
2532 ++ return error;
2533 + }
2534 +
2535 + inode->i_ctime = current_time(inode);
2536 +diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
2537 +index fa9a20cc60d6..fe5e8d4970ae 100644
2538 +--- a/fs/ubifs/tnc.c
2539 ++++ b/fs/ubifs/tnc.c
2540 +@@ -34,6 +34,11 @@
2541 + #include <linux/slab.h>
2542 + #include "ubifs.h"
2543 +
2544 ++static int try_read_node(const struct ubifs_info *c, void *buf, int type,
2545 ++ int len, int lnum, int offs);
2546 ++static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
2547 ++ struct ubifs_zbranch *zbr, void *node);
2548 ++
2549 + /*
2550 + * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions.
2551 + * @NAME_LESS: name corresponding to the first argument is less than second
2552 +@@ -402,7 +407,19 @@ static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr,
2553 + return 0;
2554 + }
2555 +
2556 +- err = ubifs_tnc_read_node(c, zbr, node);
2557 ++ if (c->replaying) {
2558 ++ err = fallible_read_node(c, &zbr->key, zbr, node);
2559 ++ /*
2560 ++ * When the node was not found, return -ENOENT, 0 otherwise.
2561 ++ * Negative return codes stay as-is.
2562 ++ */
2563 ++ if (err == 0)
2564 ++ err = -ENOENT;
2565 ++ else if (err == 1)
2566 ++ err = 0;
2567 ++ } else {
2568 ++ err = ubifs_tnc_read_node(c, zbr, node);
2569 ++ }
2570 + if (err)
2571 + return err;
2572 +
2573 +@@ -2766,7 +2783,11 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
2574 + if (nm->name) {
2575 + if (err) {
2576 + /* Handle collisions */
2577 +- err = resolve_collision(c, key, &znode, &n, nm);
2578 ++ if (c->replaying)
2579 ++ err = fallible_resolve_collision(c, key, &znode, &n,
2580 ++ nm, 0);
2581 ++ else
2582 ++ err = resolve_collision(c, key, &znode, &n, nm);
2583 + dbg_tnc("rc returned %d, znode %p, n %d",
2584 + err, znode, n);
2585 + if (unlikely(err < 0))
2586 +diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
2587 +index 9d02f5317c7c..88e64846cf37 100644
2588 +--- a/include/dt-bindings/clock/r8a7794-clock.h
2589 ++++ b/include/dt-bindings/clock/r8a7794-clock.h
2590 +@@ -20,8 +20,7 @@
2591 + #define R8A7794_CLK_QSPI 5
2592 + #define R8A7794_CLK_SDH 6
2593 + #define R8A7794_CLK_SD0 7
2594 +-#define R8A7794_CLK_Z 8
2595 +-#define R8A7794_CLK_RCAN 9
2596 ++#define R8A7794_CLK_RCAN 8
2597 +
2598 + /* MSTP0 */
2599 + #define R8A7794_CLK_MSIOF0 0
2600 +diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
2601 +index 321f9ed552a9..01f71e1d2e94 100644
2602 +--- a/include/linux/rcupdate.h
2603 ++++ b/include/linux/rcupdate.h
2604 +@@ -444,6 +444,10 @@ bool __rcu_is_watching(void);
2605 + #error "Unknown RCU implementation specified to kernel configuration"
2606 + #endif
2607 +
2608 ++#define RCU_SCHEDULER_INACTIVE 0
2609 ++#define RCU_SCHEDULER_INIT 1
2610 ++#define RCU_SCHEDULER_RUNNING 2
2611 ++
2612 + /*
2613 + * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
2614 + * initialization and destruction of rcu_head on the stack. rcu_head structures
2615 +diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
2616 +index e5d193440374..7440290f64ac 100644
2617 +--- a/include/linux/sunrpc/svc_xprt.h
2618 ++++ b/include/linux/sunrpc/svc_xprt.h
2619 +@@ -66,6 +66,7 @@ struct svc_xprt {
2620 + #define XPT_LISTENER 10 /* listening endpoint */
2621 + #define XPT_CACHE_AUTH 11 /* cache auth info */
2622 + #define XPT_LOCAL 12 /* connection from loopback interface */
2623 ++#define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */
2624 +
2625 + struct svc_serv *xpt_server; /* service for transport */
2626 + atomic_t xpt_reserved; /* space on outq that is rsvd */
2627 +diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
2628 +index 5f81f8a187f2..d2613536fd03 100644
2629 +--- a/include/linux/swiotlb.h
2630 ++++ b/include/linux/swiotlb.h
2631 +@@ -9,7 +9,13 @@ struct device;
2632 + struct page;
2633 + struct scatterlist;
2634 +
2635 +-extern int swiotlb_force;
2636 ++enum swiotlb_force {
2637 ++ SWIOTLB_NORMAL, /* Default - depending on HW DMA mask etc. */
2638 ++ SWIOTLB_FORCE, /* swiotlb=force */
2639 ++ SWIOTLB_NO_FORCE, /* swiotlb=noforce */
2640 ++};
2641 ++
2642 ++extern enum swiotlb_force swiotlb_force;
2643 +
2644 + /*
2645 + * Maximum allowable number of contiguous slabs to map,
2646 +diff --git a/include/trace/events/swiotlb.h b/include/trace/events/swiotlb.h
2647 +index 7ea4c5e7c448..288c0c54a2b4 100644
2648 +--- a/include/trace/events/swiotlb.h
2649 ++++ b/include/trace/events/swiotlb.h
2650 +@@ -11,16 +11,16 @@ TRACE_EVENT(swiotlb_bounced,
2651 + TP_PROTO(struct device *dev,
2652 + dma_addr_t dev_addr,
2653 + size_t size,
2654 +- int swiotlb_force),
2655 ++ enum swiotlb_force swiotlb_force),
2656 +
2657 + TP_ARGS(dev, dev_addr, size, swiotlb_force),
2658 +
2659 + TP_STRUCT__entry(
2660 +- __string( dev_name, dev_name(dev) )
2661 +- __field( u64, dma_mask )
2662 +- __field( dma_addr_t, dev_addr )
2663 +- __field( size_t, size )
2664 +- __field( int, swiotlb_force )
2665 ++ __string( dev_name, dev_name(dev) )
2666 ++ __field( u64, dma_mask )
2667 ++ __field( dma_addr_t, dev_addr )
2668 ++ __field( size_t, size )
2669 ++ __field( enum swiotlb_force, swiotlb_force )
2670 + ),
2671 +
2672 + TP_fast_assign(
2673 +@@ -37,7 +37,10 @@ TRACE_EVENT(swiotlb_bounced,
2674 + __entry->dma_mask,
2675 + (unsigned long long)__entry->dev_addr,
2676 + __entry->size,
2677 +- __entry->swiotlb_force ? "swiotlb_force" : "" )
2678 ++ __print_symbolic(__entry->swiotlb_force,
2679 ++ { SWIOTLB_NORMAL, "NORMAL" },
2680 ++ { SWIOTLB_FORCE, "FORCE" },
2681 ++ { SWIOTLB_NO_FORCE, "NO_FORCE" }))
2682 + );
2683 +
2684 + #endif /* _TRACE_SWIOTLB_H */
2685 +diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
2686 +index 80adef7d4c3d..0d6ff3e471be 100644
2687 +--- a/kernel/rcu/rcu.h
2688 ++++ b/kernel/rcu/rcu.h
2689 +@@ -136,6 +136,7 @@ int rcu_jiffies_till_stall_check(void);
2690 + #define TPS(x) tracepoint_string(x)
2691 +
2692 + void rcu_early_boot_tests(void);
2693 ++void rcu_test_sync_prims(void);
2694 +
2695 + /*
2696 + * This function really isn't for public consumption, but RCU is special in
2697 +diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
2698 +index 1898559e6b60..b23a4d076f3d 100644
2699 +--- a/kernel/rcu/tiny.c
2700 ++++ b/kernel/rcu/tiny.c
2701 +@@ -185,9 +185,6 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
2702 + * benefits of doing might_sleep() to reduce latency.)
2703 + *
2704 + * Cool, huh? (Due to Josh Triplett.)
2705 +- *
2706 +- * But we want to make this a static inline later. The cond_resched()
2707 +- * currently makes this problematic.
2708 + */
2709 + void synchronize_sched(void)
2710 + {
2711 +@@ -195,7 +192,6 @@ void synchronize_sched(void)
2712 + lock_is_held(&rcu_lock_map) ||
2713 + lock_is_held(&rcu_sched_lock_map),
2714 + "Illegal synchronize_sched() in RCU read-side critical section");
2715 +- cond_resched();
2716 + }
2717 + EXPORT_SYMBOL_GPL(synchronize_sched);
2718 +
2719 +diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
2720 +index 196f0302e2f4..c64b827ecbca 100644
2721 +--- a/kernel/rcu/tiny_plugin.h
2722 ++++ b/kernel/rcu/tiny_plugin.h
2723 +@@ -60,12 +60,17 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
2724 +
2725 + /*
2726 + * During boot, we forgive RCU lockdep issues. After this function is
2727 +- * invoked, we start taking RCU lockdep issues seriously.
2728 ++ * invoked, we start taking RCU lockdep issues seriously. Note that unlike
2729 ++ * Tree RCU, Tiny RCU transitions directly from RCU_SCHEDULER_INACTIVE
2730 ++ * to RCU_SCHEDULER_RUNNING, skipping the RCU_SCHEDULER_INIT stage.
2731 ++ * The reason for this is that Tiny RCU does not need kthreads, so does
2732 ++ * not have to care about the fact that the scheduler is half-initialized
2733 ++ * at a certain phase of the boot process.
2734 + */
2735 + void __init rcu_scheduler_starting(void)
2736 + {
2737 + WARN_ON(nr_context_switches() > 0);
2738 +- rcu_scheduler_active = 1;
2739 ++ rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
2740 + }
2741 +
2742 + #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
2743 +diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
2744 +index 69a5611a7e7c..10f62c6f48e7 100644
2745 +--- a/kernel/rcu/tree.c
2746 ++++ b/kernel/rcu/tree.c
2747 +@@ -127,13 +127,16 @@ int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
2748 + int sysctl_panic_on_rcu_stall __read_mostly;
2749 +
2750 + /*
2751 +- * The rcu_scheduler_active variable transitions from zero to one just
2752 +- * before the first task is spawned. So when this variable is zero, RCU
2753 +- * can assume that there is but one task, allowing RCU to (for example)
2754 ++ * The rcu_scheduler_active variable is initialized to the value
2755 ++ * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the
2756 ++ * first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE,
2757 ++ * RCU can assume that there is but one task, allowing RCU to (for example)
2758 + * optimize synchronize_rcu() to a simple barrier(). When this variable
2759 +- * is one, RCU must actually do all the hard work required to detect real
2760 +- * grace periods. This variable is also used to suppress boot-time false
2761 +- * positives from lockdep-RCU error checking.
2762 ++ * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
2763 ++ * to detect real grace periods. This variable is also used to suppress
2764 ++ * boot-time false positives from lockdep-RCU error checking. Finally, it
2765 ++ * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
2766 ++ * is fully initialized, including all of its kthreads having been spawned.
2767 + */
2768 + int rcu_scheduler_active __read_mostly;
2769 + EXPORT_SYMBOL_GPL(rcu_scheduler_active);
2770 +@@ -3985,18 +3988,22 @@ static int __init rcu_spawn_gp_kthread(void)
2771 + early_initcall(rcu_spawn_gp_kthread);
2772 +
2773 + /*
2774 +- * This function is invoked towards the end of the scheduler's initialization
2775 +- * process. Before this is called, the idle task might contain
2776 +- * RCU read-side critical sections (during which time, this idle
2777 +- * task is booting the system). After this function is called, the
2778 +- * idle tasks are prohibited from containing RCU read-side critical
2779 +- * sections. This function also enables RCU lockdep checking.
2780 ++ * This function is invoked towards the end of the scheduler's
2781 ++ * initialization process. Before this is called, the idle task might
2782 ++ * contain synchronous grace-period primitives (during which time, this idle
2783 ++ * task is booting the system, and such primitives are no-ops). After this
2784 ++ * function is called, any synchronous grace-period primitives are run as
2785 ++ * expedited, with the requesting task driving the grace period forward.
2786 ++ * A later core_initcall() rcu_exp_runtime_mode() will switch to full
2787 ++ * runtime RCU functionality.
2788 + */
2789 + void rcu_scheduler_starting(void)
2790 + {
2791 + WARN_ON(num_online_cpus() != 1);
2792 + WARN_ON(nr_context_switches() > 0);
2793 +- rcu_scheduler_active = 1;
2794 ++ rcu_test_sync_prims();
2795 ++ rcu_scheduler_active = RCU_SCHEDULER_INIT;
2796 ++ rcu_test_sync_prims();
2797 + }
2798 +
2799 + /*
2800 +diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
2801 +index 24343eb87b58..78eba4120d46 100644
2802 +--- a/kernel/rcu/tree_exp.h
2803 ++++ b/kernel/rcu/tree_exp.h
2804 +@@ -522,18 +522,28 @@ struct rcu_exp_work {
2805 + };
2806 +
2807 + /*
2808 ++ * Common code to drive an expedited grace period forward, used by
2809 ++ * workqueues and mid-boot-time tasks.
2810 ++ */
2811 ++static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
2812 ++ smp_call_func_t func, unsigned long s)
2813 ++{
2814 ++ /* Initialize the rcu_node tree in preparation for the wait. */
2815 ++ sync_rcu_exp_select_cpus(rsp, func);
2816 ++
2817 ++ /* Wait and clean up, including waking everyone. */
2818 ++ rcu_exp_wait_wake(rsp, s);
2819 ++}
2820 ++
2821 ++/*
2822 + * Work-queue handler to drive an expedited grace period forward.
2823 + */
2824 + static void wait_rcu_exp_gp(struct work_struct *wp)
2825 + {
2826 + struct rcu_exp_work *rewp;
2827 +
2828 +- /* Initialize the rcu_node tree in preparation for the wait. */
2829 + rewp = container_of(wp, struct rcu_exp_work, rew_work);
2830 +- sync_rcu_exp_select_cpus(rewp->rew_rsp, rewp->rew_func);
2831 +-
2832 +- /* Wait and clean up, including waking everyone. */
2833 +- rcu_exp_wait_wake(rewp->rew_rsp, rewp->rew_s);
2834 ++ rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
2835 + }
2836 +
2837 + /*
2838 +@@ -559,12 +569,18 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
2839 + if (exp_funnel_lock(rsp, s))
2840 + return; /* Someone else did our work for us. */
2841 +
2842 +- /* Marshall arguments and schedule the expedited grace period. */
2843 +- rew.rew_func = func;
2844 +- rew.rew_rsp = rsp;
2845 +- rew.rew_s = s;
2846 +- INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
2847 +- schedule_work(&rew.rew_work);
2848 ++ /* Ensure that load happens before action based on it. */
2849 ++ if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
2850 ++ /* Direct call during scheduler init and early_initcalls(). */
2851 ++ rcu_exp_sel_wait_wake(rsp, func, s);
2852 ++ } else {
2853 ++ /* Marshall arguments & schedule the expedited grace period. */
2854 ++ rew.rew_func = func;
2855 ++ rew.rew_rsp = rsp;
2856 ++ rew.rew_s = s;
2857 ++ INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
2858 ++ schedule_work(&rew.rew_work);
2859 ++ }
2860 +
2861 + /* Wait for expedited grace period to complete. */
2862 + rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
2863 +@@ -666,6 +682,8 @@ void synchronize_rcu_expedited(void)
2864 + {
2865 + struct rcu_state *rsp = rcu_state_p;
2866 +
2867 ++ if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
2868 ++ return;
2869 + _synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
2870 + }
2871 + EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
2872 +@@ -683,3 +701,15 @@ void synchronize_rcu_expedited(void)
2873 + EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
2874 +
2875 + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
2876 ++
2877 ++/*
2878 ++ * Switch to run-time mode once Tree RCU has fully initialized.
2879 ++ */
2880 ++static int __init rcu_exp_runtime_mode(void)
2881 ++{
2882 ++ rcu_test_sync_prims();
2883 ++ rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
2884 ++ rcu_test_sync_prims();
2885 ++ return 0;
2886 ++}
2887 ++core_initcall(rcu_exp_runtime_mode);
2888 +diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
2889 +index 85c5a883c6e3..56583e764ebf 100644
2890 +--- a/kernel/rcu/tree_plugin.h
2891 ++++ b/kernel/rcu/tree_plugin.h
2892 +@@ -670,7 +670,7 @@ void synchronize_rcu(void)
2893 + lock_is_held(&rcu_lock_map) ||
2894 + lock_is_held(&rcu_sched_lock_map),
2895 + "Illegal synchronize_rcu() in RCU read-side critical section");
2896 +- if (!rcu_scheduler_active)
2897 ++ if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
2898 + return;
2899 + if (rcu_gp_is_expedited())
2900 + synchronize_rcu_expedited();
2901 +diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
2902 +index f19271dce0a9..4f6db7e6a117 100644
2903 +--- a/kernel/rcu/update.c
2904 ++++ b/kernel/rcu/update.c
2905 +@@ -121,11 +121,14 @@ EXPORT_SYMBOL(rcu_read_lock_sched_held);
2906 + * Should expedited grace-period primitives always fall back to their
2907 + * non-expedited counterparts? Intended for use within RCU. Note
2908 + * that if the user specifies both rcu_expedited and rcu_normal, then
2909 +- * rcu_normal wins.
2910 ++ * rcu_normal wins. (Except during the time period during boot from
2911 ++ * when the first task is spawned until the rcu_exp_runtime_mode()
2912 ++ * core_initcall() is invoked, at which point everything is expedited.)
2913 + */
2914 + bool rcu_gp_is_normal(void)
2915 + {
2916 +- return READ_ONCE(rcu_normal);
2917 ++ return READ_ONCE(rcu_normal) &&
2918 ++ rcu_scheduler_active != RCU_SCHEDULER_INIT;
2919 + }
2920 + EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
2921 +
2922 +@@ -135,13 +138,14 @@ static atomic_t rcu_expedited_nesting =
2923 + /*
2924 + * Should normal grace-period primitives be expedited? Intended for
2925 + * use within RCU. Note that this function takes the rcu_expedited
2926 +- * sysfs/boot variable into account as well as the rcu_expedite_gp()
2927 +- * nesting. So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited()
2928 +- * returns false is a -really- bad idea.
2929 ++ * sysfs/boot variable and rcu_scheduler_active into account as well
2930 ++ * as the rcu_expedite_gp() nesting. So looping on rcu_unexpedite_gp()
2931 ++ * until rcu_gp_is_expedited() returns false is a -really- bad idea.
2932 + */
2933 + bool rcu_gp_is_expedited(void)
2934 + {
2935 +- return rcu_expedited || atomic_read(&rcu_expedited_nesting);
2936 ++ return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
2937 ++ rcu_scheduler_active == RCU_SCHEDULER_INIT;
2938 + }
2939 + EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
2940 +
2941 +@@ -257,7 +261,7 @@ EXPORT_SYMBOL_GPL(rcu_callback_map);
2942 +
2943 + int notrace debug_lockdep_rcu_enabled(void)
2944 + {
2945 +- return rcu_scheduler_active && debug_locks &&
2946 ++ return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
2947 + current->lockdep_recursion == 0;
2948 + }
2949 + EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
2950 +@@ -591,7 +595,7 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks);
2951 + void synchronize_rcu_tasks(void)
2952 + {
2953 + /* Complain if the scheduler has not started. */
2954 +- RCU_LOCKDEP_WARN(!rcu_scheduler_active,
2955 ++ RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
2956 + "synchronize_rcu_tasks called too soon");
2957 +
2958 + /* Wait for the grace period. */
2959 +@@ -813,6 +817,23 @@ static void rcu_spawn_tasks_kthread(void)
2960 +
2961 + #endif /* #ifdef CONFIG_TASKS_RCU */
2962 +
2963 ++/*
2964 ++ * Test each non-SRCU synchronous grace-period wait API. This is
2965 ++ * useful just after a change in mode for these primitives, and
2966 ++ * during early boot.
2967 ++ */
2968 ++void rcu_test_sync_prims(void)
2969 ++{
2970 ++ if (!IS_ENABLED(CONFIG_PROVE_RCU))
2971 ++ return;
2972 ++ synchronize_rcu();
2973 ++ synchronize_rcu_bh();
2974 ++ synchronize_sched();
2975 ++ synchronize_rcu_expedited();
2976 ++ synchronize_rcu_bh_expedited();
2977 ++ synchronize_sched_expedited();
2978 ++}
2979 ++
2980 + #ifdef CONFIG_PROVE_RCU
2981 +
2982 + /*
2983 +@@ -865,6 +886,7 @@ void rcu_early_boot_tests(void)
2984 + early_boot_test_call_rcu_bh();
2985 + if (rcu_self_test_sched)
2986 + early_boot_test_call_rcu_sched();
2987 ++ rcu_test_sync_prims();
2988 + }
2989 +
2990 + static int rcu_verify_early_boot_tests(void)
2991 +diff --git a/lib/swiotlb.c b/lib/swiotlb.c
2992 +index 22e13a0e19d7..ad1d2962d129 100644
2993 +--- a/lib/swiotlb.c
2994 ++++ b/lib/swiotlb.c
2995 +@@ -53,7 +53,7 @@
2996 + */
2997 + #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
2998 +
2999 +-int swiotlb_force;
3000 ++enum swiotlb_force swiotlb_force;
3001 +
3002 + /*
3003 + * Used to do a quick range check in swiotlb_tbl_unmap_single and
3004 +@@ -106,8 +106,12 @@ setup_io_tlb_npages(char *str)
3005 + }
3006 + if (*str == ',')
3007 + ++str;
3008 +- if (!strcmp(str, "force"))
3009 +- swiotlb_force = 1;
3010 ++ if (!strcmp(str, "force")) {
3011 ++ swiotlb_force = SWIOTLB_FORCE;
3012 ++ } else if (!strcmp(str, "noforce")) {
3013 ++ swiotlb_force = SWIOTLB_NO_FORCE;
3014 ++ io_tlb_nslabs = 1;
3015 ++ }
3016 +
3017 + return 0;
3018 + }
3019 +@@ -541,8 +545,15 @@ static phys_addr_t
3020 + map_single(struct device *hwdev, phys_addr_t phys, size_t size,
3021 + enum dma_data_direction dir)
3022 + {
3023 +- dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
3024 ++ dma_addr_t start_dma_addr;
3025 ++
3026 ++ if (swiotlb_force == SWIOTLB_NO_FORCE) {
3027 ++ dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
3028 ++ &phys);
3029 ++ return SWIOTLB_MAP_ERROR;
3030 ++ }
3031 +
3032 ++ start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
3033 + return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
3034 + }
3035 +
3036 +@@ -707,6 +718,9 @@ static void
3037 + swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
3038 + int do_panic)
3039 + {
3040 ++ if (swiotlb_force == SWIOTLB_NO_FORCE)
3041 ++ return;
3042 ++
3043 + /*
3044 + * Ran out of IOMMU space for this operation. This is very bad.
3045 + * Unfortunately the drivers cannot handle this operation properly.
3046 +@@ -749,7 +763,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
3047 + * we can safely return the device addr and not worry about bounce
3048 + * buffering it.
3049 + */
3050 +- if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
3051 ++ if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
3052 + return dev_addr;
3053 +
3054 + trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
3055 +@@ -888,7 +902,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
3056 + phys_addr_t paddr = sg_phys(sg);
3057 + dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
3058 +
3059 +- if (swiotlb_force ||
3060 ++ if (swiotlb_force == SWIOTLB_FORCE ||
3061 + !dma_capable(hwdev, dev_addr, sg->length)) {
3062 + phys_addr_t map = map_single(hwdev, sg_phys(sg),
3063 + sg->length, dir);
3064 +diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
3065 +index a0905f04bd13..b216131915e7 100644
3066 +--- a/net/ceph/auth_x.c
3067 ++++ b/net/ceph/auth_x.c
3068 +@@ -39,56 +39,58 @@ static int ceph_x_should_authenticate(struct ceph_auth_client *ac)
3069 + return need != 0;
3070 + }
3071 +
3072 ++static int ceph_x_encrypt_offset(void)
3073 ++{
3074 ++ return sizeof(u32) + sizeof(struct ceph_x_encrypt_header);
3075 ++}
3076 ++
3077 + static int ceph_x_encrypt_buflen(int ilen)
3078 + {
3079 +- return sizeof(struct ceph_x_encrypt_header) + ilen + 16 +
3080 +- sizeof(u32);
3081 ++ return ceph_x_encrypt_offset() + ilen + 16;
3082 + }
3083 +
3084 +-static int ceph_x_encrypt(struct ceph_crypto_key *secret,
3085 +- void *ibuf, int ilen, void *obuf, size_t olen)
3086 ++static int ceph_x_encrypt(struct ceph_crypto_key *secret, void *buf,
3087 ++ int buf_len, int plaintext_len)
3088 + {
3089 +- struct ceph_x_encrypt_header head = {
3090 +- .struct_v = 1,
3091 +- .magic = cpu_to_le64(CEPHX_ENC_MAGIC)
3092 +- };
3093 +- size_t len = olen - sizeof(u32);
3094 ++ struct ceph_x_encrypt_header *hdr = buf + sizeof(u32);
3095 ++ int ciphertext_len;
3096 + int ret;
3097 +
3098 +- ret = ceph_encrypt2(secret, obuf + sizeof(u32), &len,
3099 +- &head, sizeof(head), ibuf, ilen);
3100 ++ hdr->struct_v = 1;
3101 ++ hdr->magic = cpu_to_le64(CEPHX_ENC_MAGIC);
3102 ++
3103 ++ ret = ceph_crypt(secret, true, buf + sizeof(u32), buf_len - sizeof(u32),
3104 ++ plaintext_len + sizeof(struct ceph_x_encrypt_header),
3105 ++ &ciphertext_len);
3106 + if (ret)
3107 + return ret;
3108 +- ceph_encode_32(&obuf, len);
3109 +- return len + sizeof(u32);
3110 ++
3111 ++ ceph_encode_32(&buf, ciphertext_len);
3112 ++ return sizeof(u32) + ciphertext_len;
3113 + }
3114 +
3115 +-static int ceph_x_decrypt(struct ceph_crypto_key *secret,
3116 +- void **p, void *end, void **obuf, size_t olen)
3117 ++static int ceph_x_decrypt(struct ceph_crypto_key *secret, void **p, void *end)
3118 + {
3119 +- struct ceph_x_encrypt_header head;
3120 +- size_t head_len = sizeof(head);
3121 +- int len, ret;
3122 +-
3123 +- len = ceph_decode_32(p);
3124 +- if (*p + len > end)
3125 +- return -EINVAL;
3126 ++ struct ceph_x_encrypt_header *hdr = *p + sizeof(u32);
3127 ++ int ciphertext_len, plaintext_len;
3128 ++ int ret;
3129 +
3130 +- dout("ceph_x_decrypt len %d\n", len);
3131 +- if (*obuf == NULL) {
3132 +- *obuf = kmalloc(len, GFP_NOFS);
3133 +- if (!*obuf)
3134 +- return -ENOMEM;
3135 +- olen = len;
3136 +- }
3137 ++ ceph_decode_32_safe(p, end, ciphertext_len, e_inval);
3138 ++ ceph_decode_need(p, end, ciphertext_len, e_inval);
3139 +
3140 +- ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len);
3141 ++ ret = ceph_crypt(secret, false, *p, end - *p, ciphertext_len,
3142 ++ &plaintext_len);
3143 + if (ret)
3144 + return ret;
3145 +- if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC)
3146 ++
3147 ++ if (hdr->struct_v != 1 || le64_to_cpu(hdr->magic) != CEPHX_ENC_MAGIC)
3148 + return -EPERM;
3149 +- *p += len;
3150 +- return olen;
3151 ++
3152 ++ *p += ciphertext_len;
3153 ++ return plaintext_len - sizeof(struct ceph_x_encrypt_header);
3154 ++
3155 ++e_inval:
3156 ++ return -EINVAL;
3157 + }
3158 +
3159 + /*
3160 +@@ -143,13 +145,10 @@ static int process_one_ticket(struct ceph_auth_client *ac,
3161 + int type;
3162 + u8 tkt_struct_v, blob_struct_v;
3163 + struct ceph_x_ticket_handler *th;
3164 +- void *dbuf = NULL;
3165 + void *dp, *dend;
3166 + int dlen;
3167 + char is_enc;
3168 + struct timespec validity;
3169 +- struct ceph_crypto_key old_key;
3170 +- void *ticket_buf = NULL;
3171 + void *tp, *tpend;
3172 + void **ptp;
3173 + struct ceph_crypto_key new_session_key;
3174 +@@ -174,20 +173,17 @@ static int process_one_ticket(struct ceph_auth_client *ac,
3175 + }
3176 +
3177 + /* blob for me */
3178 +- dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0);
3179 +- if (dlen <= 0) {
3180 +- ret = dlen;
3181 ++ dp = *p + ceph_x_encrypt_offset();
3182 ++ ret = ceph_x_decrypt(secret, p, end);
3183 ++ if (ret < 0)
3184 + goto out;
3185 +- }
3186 +- dout(" decrypted %d bytes\n", dlen);
3187 +- dp = dbuf;
3188 +- dend = dp + dlen;
3189 ++ dout(" decrypted %d bytes\n", ret);
3190 ++ dend = dp + ret;
3191 +
3192 + tkt_struct_v = ceph_decode_8(&dp);
3193 + if (tkt_struct_v != 1)
3194 + goto bad;
3195 +
3196 +- memcpy(&old_key, &th->session_key, sizeof(old_key));
3197 + ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
3198 + if (ret)
3199 + goto out;
3200 +@@ -203,15 +199,13 @@ static int process_one_ticket(struct ceph_auth_client *ac,
3201 + ceph_decode_8_safe(p, end, is_enc, bad);
3202 + if (is_enc) {
3203 + /* encrypted */
3204 +- dout(" encrypted ticket\n");
3205 +- dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0);
3206 +- if (dlen < 0) {
3207 +- ret = dlen;
3208 ++ tp = *p + ceph_x_encrypt_offset();
3209 ++ ret = ceph_x_decrypt(&th->session_key, p, end);
3210 ++ if (ret < 0)
3211 + goto out;
3212 +- }
3213 +- tp = ticket_buf;
3214 ++ dout(" encrypted ticket, decrypted %d bytes\n", ret);
3215 + ptp = &tp;
3216 +- tpend = *ptp + dlen;
3217 ++ tpend = tp + ret;
3218 + } else {
3219 + /* unencrypted */
3220 + ptp = p;
3221 +@@ -242,8 +236,6 @@ static int process_one_ticket(struct ceph_auth_client *ac,
3222 + xi->have_keys |= th->service;
3223 +
3224 + out:
3225 +- kfree(ticket_buf);
3226 +- kfree(dbuf);
3227 + return ret;
3228 +
3229 + bad:
3230 +@@ -294,7 +286,7 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
3231 + {
3232 + int maxlen;
3233 + struct ceph_x_authorize_a *msg_a;
3234 +- struct ceph_x_authorize_b msg_b;
3235 ++ struct ceph_x_authorize_b *msg_b;
3236 + void *p, *end;
3237 + int ret;
3238 + int ticket_blob_len =
3239 +@@ -308,8 +300,8 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
3240 + if (ret)
3241 + goto out_au;
3242 +
3243 +- maxlen = sizeof(*msg_a) + sizeof(msg_b) +
3244 +- ceph_x_encrypt_buflen(ticket_blob_len);
3245 ++ maxlen = sizeof(*msg_a) + ticket_blob_len +
3246 ++ ceph_x_encrypt_buflen(sizeof(*msg_b));
3247 + dout(" need len %d\n", maxlen);
3248 + if (au->buf && au->buf->alloc_len < maxlen) {
3249 + ceph_buffer_put(au->buf);
3250 +@@ -343,18 +335,19 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
3251 + p += ticket_blob_len;
3252 + end = au->buf->vec.iov_base + au->buf->vec.iov_len;
3253 +
3254 ++ msg_b = p + ceph_x_encrypt_offset();
3255 ++ msg_b->struct_v = 1;
3256 + get_random_bytes(&au->nonce, sizeof(au->nonce));
3257 +- msg_b.struct_v = 1;
3258 +- msg_b.nonce = cpu_to_le64(au->nonce);
3259 +- ret = ceph_x_encrypt(&au->session_key, &msg_b, sizeof(msg_b),
3260 +- p, end - p);
3261 ++ msg_b->nonce = cpu_to_le64(au->nonce);
3262 ++ ret = ceph_x_encrypt(&au->session_key, p, end - p, sizeof(*msg_b));
3263 + if (ret < 0)
3264 + goto out_au;
3265 ++
3266 + p += ret;
3267 ++ WARN_ON(p > end);
3268 + au->buf->vec.iov_len = p - au->buf->vec.iov_base;
3269 + dout(" built authorizer nonce %llx len %d\n", au->nonce,
3270 + (int)au->buf->vec.iov_len);
3271 +- BUG_ON(au->buf->vec.iov_len > maxlen);
3272 + return 0;
3273 +
3274 + out_au:
3275 +@@ -452,8 +445,9 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
3276 + if (need & CEPH_ENTITY_TYPE_AUTH) {
3277 + struct ceph_x_authenticate *auth = (void *)(head + 1);
3278 + void *p = auth + 1;
3279 +- struct ceph_x_challenge_blob tmp;
3280 +- char tmp_enc[40];
3281 ++ void *enc_buf = xi->auth_authorizer.enc_buf;
3282 ++ struct ceph_x_challenge_blob *blob = enc_buf +
3283 ++ ceph_x_encrypt_offset();
3284 + u64 *u;
3285 +
3286 + if (p > end)
3287 +@@ -464,16 +458,16 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
3288 +
3289 + /* encrypt and hash */
3290 + get_random_bytes(&auth->client_challenge, sizeof(u64));
3291 +- tmp.client_challenge = auth->client_challenge;
3292 +- tmp.server_challenge = cpu_to_le64(xi->server_challenge);
3293 +- ret = ceph_x_encrypt(&xi->secret, &tmp, sizeof(tmp),
3294 +- tmp_enc, sizeof(tmp_enc));
3295 ++ blob->client_challenge = auth->client_challenge;
3296 ++ blob->server_challenge = cpu_to_le64(xi->server_challenge);
3297 ++ ret = ceph_x_encrypt(&xi->secret, enc_buf, CEPHX_AU_ENC_BUF_LEN,
3298 ++ sizeof(*blob));
3299 + if (ret < 0)
3300 + return ret;
3301 +
3302 + auth->struct_v = 1;
3303 + auth->key = 0;
3304 +- for (u = (u64 *)tmp_enc; u + 1 <= (u64 *)(tmp_enc + ret); u++)
3305 ++ for (u = (u64 *)enc_buf; u + 1 <= (u64 *)(enc_buf + ret); u++)
3306 + auth->key ^= *(__le64 *)u;
3307 + dout(" server_challenge %llx client_challenge %llx key %llx\n",
3308 + xi->server_challenge, le64_to_cpu(auth->client_challenge),
3309 +@@ -600,8 +594,8 @@ static int ceph_x_create_authorizer(
3310 + auth->authorizer = (struct ceph_authorizer *) au;
3311 + auth->authorizer_buf = au->buf->vec.iov_base;
3312 + auth->authorizer_buf_len = au->buf->vec.iov_len;
3313 +- auth->authorizer_reply_buf = au->reply_buf;
3314 +- auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
3315 ++ auth->authorizer_reply_buf = au->enc_buf;
3316 ++ auth->authorizer_reply_buf_len = CEPHX_AU_ENC_BUF_LEN;
3317 + auth->sign_message = ac->ops->sign_message;
3318 + auth->check_message_signature = ac->ops->check_message_signature;
3319 +
3320 +@@ -632,24 +626,22 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
3321 + struct ceph_authorizer *a, size_t len)
3322 + {
3323 + struct ceph_x_authorizer *au = (void *)a;
3324 +- int ret = 0;
3325 +- struct ceph_x_authorize_reply reply;
3326 +- void *preply = &reply;
3327 +- void *p = au->reply_buf;
3328 +- void *end = p + sizeof(au->reply_buf);
3329 ++ void *p = au->enc_buf;
3330 ++ struct ceph_x_authorize_reply *reply = p + ceph_x_encrypt_offset();
3331 ++ int ret;
3332 +
3333 +- ret = ceph_x_decrypt(&au->session_key, &p, end, &preply, sizeof(reply));
3334 ++ ret = ceph_x_decrypt(&au->session_key, &p, p + CEPHX_AU_ENC_BUF_LEN);
3335 + if (ret < 0)
3336 + return ret;
3337 +- if (ret != sizeof(reply))
3338 ++ if (ret != sizeof(*reply))
3339 + return -EPERM;
3340 +
3341 +- if (au->nonce + 1 != le64_to_cpu(reply.nonce_plus_one))
3342 ++ if (au->nonce + 1 != le64_to_cpu(reply->nonce_plus_one))
3343 + ret = -EPERM;
3344 + else
3345 + ret = 0;
3346 + dout("verify_authorizer_reply nonce %llx got %llx ret %d\n",
3347 +- au->nonce, le64_to_cpu(reply.nonce_plus_one), ret);
3348 ++ au->nonce, le64_to_cpu(reply->nonce_plus_one), ret);
3349 + return ret;
3350 + }
3351 +
3352 +@@ -704,35 +696,48 @@ static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac,
3353 + invalidate_ticket(ac, CEPH_ENTITY_TYPE_AUTH);
3354 + }
3355 +
3356 +-static int calcu_signature(struct ceph_x_authorizer *au,
3357 +- struct ceph_msg *msg, __le64 *sig)
3358 ++static int calc_signature(struct ceph_x_authorizer *au, struct ceph_msg *msg,
3359 ++ __le64 *psig)
3360 + {
3361 ++ void *enc_buf = au->enc_buf;
3362 ++ struct {
3363 ++ __le32 len;
3364 ++ __le32 header_crc;
3365 ++ __le32 front_crc;
3366 ++ __le32 middle_crc;
3367 ++ __le32 data_crc;
3368 ++ } __packed *sigblock = enc_buf + ceph_x_encrypt_offset();
3369 + int ret;
3370 +- char tmp_enc[40];
3371 +- __le32 tmp[5] = {
3372 +- cpu_to_le32(16), msg->hdr.crc, msg->footer.front_crc,
3373 +- msg->footer.middle_crc, msg->footer.data_crc,
3374 +- };
3375 +- ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp),
3376 +- tmp_enc, sizeof(tmp_enc));
3377 ++
3378 ++ sigblock->len = cpu_to_le32(4*sizeof(u32));
3379 ++ sigblock->header_crc = msg->hdr.crc;
3380 ++ sigblock->front_crc = msg->footer.front_crc;
3381 ++ sigblock->middle_crc = msg->footer.middle_crc;
3382 ++ sigblock->data_crc = msg->footer.data_crc;
3383 ++ ret = ceph_x_encrypt(&au->session_key, enc_buf, CEPHX_AU_ENC_BUF_LEN,
3384 ++ sizeof(*sigblock));
3385 + if (ret < 0)
3386 + return ret;
3387 +- *sig = *(__le64*)(tmp_enc + 4);
3388 ++
3389 ++ *psig = *(__le64 *)(enc_buf + sizeof(u32));
3390 + return 0;
3391 + }
3392 +
3393 + static int ceph_x_sign_message(struct ceph_auth_handshake *auth,
3394 + struct ceph_msg *msg)
3395 + {
3396 ++ __le64 sig;
3397 + int ret;
3398 +
3399 + if (ceph_test_opt(from_msgr(msg->con->msgr), NOMSGSIGN))
3400 + return 0;
3401 +
3402 +- ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer,
3403 +- msg, &msg->footer.sig);
3404 +- if (ret < 0)
3405 ++ ret = calc_signature((struct ceph_x_authorizer *)auth->authorizer,
3406 ++ msg, &sig);
3407 ++ if (ret)
3408 + return ret;
3409 ++
3410 ++ msg->footer.sig = sig;
3411 + msg->footer.flags |= CEPH_MSG_FOOTER_SIGNED;
3412 + return 0;
3413 + }
3414 +@@ -746,9 +751,9 @@ static int ceph_x_check_message_signature(struct ceph_auth_handshake *auth,
3415 + if (ceph_test_opt(from_msgr(msg->con->msgr), NOMSGSIGN))
3416 + return 0;
3417 +
3418 +- ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer,
3419 +- msg, &sig_check);
3420 +- if (ret < 0)
3421 ++ ret = calc_signature((struct ceph_x_authorizer *)auth->authorizer,
3422 ++ msg, &sig_check);
3423 ++ if (ret)
3424 + return ret;
3425 + if (sig_check == msg->footer.sig)
3426 + return 0;
3427 +diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
3428 +index 21a5af904bae..48e9ad41bd2a 100644
3429 +--- a/net/ceph/auth_x.h
3430 ++++ b/net/ceph/auth_x.h
3431 +@@ -24,6 +24,7 @@ struct ceph_x_ticket_handler {
3432 + unsigned long renew_after, expires;
3433 + };
3434 +
3435 ++#define CEPHX_AU_ENC_BUF_LEN 128 /* big enough for encrypted blob */
3436 +
3437 + struct ceph_x_authorizer {
3438 + struct ceph_authorizer base;
3439 +@@ -32,7 +33,7 @@ struct ceph_x_authorizer {
3440 + unsigned int service;
3441 + u64 nonce;
3442 + u64 secret_id;
3443 +- char reply_buf[128]; /* big enough for encrypted blob */
3444 ++ char enc_buf[CEPHX_AU_ENC_BUF_LEN] __aligned(8);
3445 + };
3446 +
3447 + struct ceph_x_info {
3448 +diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
3449 +index db2847ac5f12..292e33bd916e 100644
3450 +--- a/net/ceph/crypto.c
3451 ++++ b/net/ceph/crypto.c
3452 +@@ -13,14 +13,60 @@
3453 + #include <linux/ceph/decode.h>
3454 + #include "crypto.h"
3455 +
3456 ++/*
3457 ++ * Set ->key and ->tfm. The rest of the key should be filled in before
3458 ++ * this function is called.
3459 ++ */
3460 ++static int set_secret(struct ceph_crypto_key *key, void *buf)
3461 ++{
3462 ++ unsigned int noio_flag;
3463 ++ int ret;
3464 ++
3465 ++ key->key = NULL;
3466 ++ key->tfm = NULL;
3467 ++
3468 ++ switch (key->type) {
3469 ++ case CEPH_CRYPTO_NONE:
3470 ++ return 0; /* nothing to do */
3471 ++ case CEPH_CRYPTO_AES:
3472 ++ break;
3473 ++ default:
3474 ++ return -ENOTSUPP;
3475 ++ }
3476 ++
3477 ++ WARN_ON(!key->len);
3478 ++ key->key = kmemdup(buf, key->len, GFP_NOIO);
3479 ++ if (!key->key) {
3480 ++ ret = -ENOMEM;
3481 ++ goto fail;
3482 ++ }
3483 ++
3484 ++ /* crypto_alloc_skcipher() allocates with GFP_KERNEL */
3485 ++ noio_flag = memalloc_noio_save();
3486 ++ key->tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
3487 ++ memalloc_noio_restore(noio_flag);
3488 ++ if (IS_ERR(key->tfm)) {
3489 ++ ret = PTR_ERR(key->tfm);
3490 ++ key->tfm = NULL;
3491 ++ goto fail;
3492 ++ }
3493 ++
3494 ++ ret = crypto_skcipher_setkey(key->tfm, key->key, key->len);
3495 ++ if (ret)
3496 ++ goto fail;
3497 ++
3498 ++ return 0;
3499 ++
3500 ++fail:
3501 ++ ceph_crypto_key_destroy(key);
3502 ++ return ret;
3503 ++}
3504 ++
3505 + int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
3506 + const struct ceph_crypto_key *src)
3507 + {
3508 + memcpy(dst, src, sizeof(struct ceph_crypto_key));
3509 +- dst->key = kmemdup(src->key, src->len, GFP_NOFS);
3510 +- if (!dst->key)
3511 +- return -ENOMEM;
3512 +- return 0;
3513 ++ return set_secret(dst, src->key);
3514 + }
3515 +
3516 + int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
3517 +@@ -37,16 +83,16 @@ int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
3518 +
3519 + int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
3520 + {
3521 ++ int ret;
3522 ++
3523 + ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
3524 + key->type = ceph_decode_16(p);
3525 + ceph_decode_copy(p, &key->created, sizeof(key->created));
3526 + key->len = ceph_decode_16(p);
3527 + ceph_decode_need(p, end, key->len, bad);
3528 +- key->key = kmalloc(key->len, GFP_NOFS);
3529 +- if (!key->key)
3530 +- return -ENOMEM;
3531 +- ceph_decode_copy(p, key->key, key->len);
3532 +- return 0;
3533 ++ ret = set_secret(key, *p);
3534 ++ *p += key->len;
3535 ++ return ret;
3536 +
3537 + bad:
3538 + dout("failed to decode crypto key\n");
3539 +@@ -80,9 +126,14 @@ int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
3540 + return 0;
3541 + }
3542 +
3543 +-static struct crypto_skcipher *ceph_crypto_alloc_cipher(void)
3544 ++void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
3545 + {
3546 +- return crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
3547 ++ if (key) {
3548 ++ kfree(key->key);
3549 ++ key->key = NULL;
3550 ++ crypto_free_skcipher(key->tfm);
3551 ++ key->tfm = NULL;
3552 ++ }
3553 + }
3554 +
3555 + static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
3556 +@@ -157,372 +208,82 @@ static void teardown_sgtable(struct sg_table *sgt)
3557 + sg_free_table(sgt);
3558 + }
3559 +
3560 +-static int ceph_aes_encrypt(const void *key, int key_len,
3561 +- void *dst, size_t *dst_len,
3562 +- const void *src, size_t src_len)
3563 +-{
3564 +- struct scatterlist sg_in[2], prealloc_sg;
3565 +- struct sg_table sg_out;
3566 +- struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
3567 +- SKCIPHER_REQUEST_ON_STACK(req, tfm);
3568 +- int ret;
3569 +- char iv[AES_BLOCK_SIZE];
3570 +- size_t zero_padding = (0x10 - (src_len & 0x0f));
3571 +- char pad[16];
3572 +-
3573 +- if (IS_ERR(tfm))
3574 +- return PTR_ERR(tfm);
3575 +-
3576 +- memset(pad, zero_padding, zero_padding);
3577 +-
3578 +- *dst_len = src_len + zero_padding;
3579 +-
3580 +- sg_init_table(sg_in, 2);
3581 +- sg_set_buf(&sg_in[0], src, src_len);
3582 +- sg_set_buf(&sg_in[1], pad, zero_padding);
3583 +- ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
3584 +- if (ret)
3585 +- goto out_tfm;
3586 +-
3587 +- crypto_skcipher_setkey((void *)tfm, key, key_len);
3588 +- memcpy(iv, aes_iv, AES_BLOCK_SIZE);
3589 +-
3590 +- skcipher_request_set_tfm(req, tfm);
3591 +- skcipher_request_set_callback(req, 0, NULL, NULL);
3592 +- skcipher_request_set_crypt(req, sg_in, sg_out.sgl,
3593 +- src_len + zero_padding, iv);
3594 +-
3595 +- /*
3596 +- print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
3597 +- key, key_len, 1);
3598 +- print_hex_dump(KERN_ERR, "enc src: ", DUMP_PREFIX_NONE, 16, 1,
3599 +- src, src_len, 1);
3600 +- print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
3601 +- pad, zero_padding, 1);
3602 +- */
3603 +- ret = crypto_skcipher_encrypt(req);
3604 +- skcipher_request_zero(req);
3605 +- if (ret < 0) {
3606 +- pr_err("ceph_aes_crypt failed %d\n", ret);
3607 +- goto out_sg;
3608 +- }
3609 +- /*
3610 +- print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
3611 +- dst, *dst_len, 1);
3612 +- */
3613 +-
3614 +-out_sg:
3615 +- teardown_sgtable(&sg_out);
3616 +-out_tfm:
3617 +- crypto_free_skcipher(tfm);
3618 +- return ret;
3619 +-}
3620 +-
3621 +-static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
3622 +- size_t *dst_len,
3623 +- const void *src1, size_t src1_len,
3624 +- const void *src2, size_t src2_len)
3625 +-{
3626 +- struct scatterlist sg_in[3], prealloc_sg;
3627 +- struct sg_table sg_out;
3628 +- struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
3629 +- SKCIPHER_REQUEST_ON_STACK(req, tfm);
3630 +- int ret;
3631 +- char iv[AES_BLOCK_SIZE];
3632 +- size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f));
3633 +- char pad[16];
3634 +-
3635 +- if (IS_ERR(tfm))
3636 +- return PTR_ERR(tfm);
3637 +-
3638 +- memset(pad, zero_padding, zero_padding);
3639 +-
3640 +- *dst_len = src1_len + src2_len + zero_padding;
3641 +-
3642 +- sg_init_table(sg_in, 3);
3643 +- sg_set_buf(&sg_in[0], src1, src1_len);
3644 +- sg_set_buf(&sg_in[1], src2, src2_len);
3645 +- sg_set_buf(&sg_in[2], pad, zero_padding);
3646 +- ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
3647 +- if (ret)
3648 +- goto out_tfm;
3649 +-
3650 +- crypto_skcipher_setkey((void *)tfm, key, key_len);
3651 +- memcpy(iv, aes_iv, AES_BLOCK_SIZE);
3652 +-
3653 +- skcipher_request_set_tfm(req, tfm);
3654 +- skcipher_request_set_callback(req, 0, NULL, NULL);
3655 +- skcipher_request_set_crypt(req, sg_in, sg_out.sgl,
3656 +- src1_len + src2_len + zero_padding, iv);
3657 +-
3658 +- /*
3659 +- print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
3660 +- key, key_len, 1);
3661 +- print_hex_dump(KERN_ERR, "enc src1: ", DUMP_PREFIX_NONE, 16, 1,
3662 +- src1, src1_len, 1);
3663 +- print_hex_dump(KERN_ERR, "enc src2: ", DUMP_PREFIX_NONE, 16, 1,
3664 +- src2, src2_len, 1);
3665 +- print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
3666 +- pad, zero_padding, 1);
3667 +- */
3668 +- ret = crypto_skcipher_encrypt(req);
3669 +- skcipher_request_zero(req);
3670 +- if (ret < 0) {
3671 +- pr_err("ceph_aes_crypt2 failed %d\n", ret);
3672 +- goto out_sg;
3673 +- }
3674 +- /*
3675 +- print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
3676 +- dst, *dst_len, 1);
3677 +- */
3678 +-
3679 +-out_sg:
3680 +- teardown_sgtable(&sg_out);
3681 +-out_tfm:
3682 +- crypto_free_skcipher(tfm);
3683 +- return ret;
3684 +-}
3685 +-
3686 +-static int ceph_aes_decrypt(const void *key, int key_len,
3687 +- void *dst, size_t *dst_len,
3688 +- const void *src, size_t src_len)
3689 ++static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
3690 ++ void *buf, int buf_len, int in_len, int *pout_len)
3691 + {
3692 +- struct sg_table sg_in;
3693 +- struct scatterlist sg_out[2], prealloc_sg;
3694 +- struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
3695 +- SKCIPHER_REQUEST_ON_STACK(req, tfm);
3696 +- char pad[16];
3697 +- char iv[AES_BLOCK_SIZE];
3698 ++ SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
3699 ++ struct sg_table sgt;
3700 ++ struct scatterlist prealloc_sg;
3701 ++ char iv[AES_BLOCK_SIZE] __aligned(8);
3702 ++ int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
3703 ++ int crypt_len = encrypt ? in_len + pad_byte : in_len;
3704 + int ret;
3705 +- int last_byte;
3706 +-
3707 +- if (IS_ERR(tfm))
3708 +- return PTR_ERR(tfm);
3709 +
3710 +- sg_init_table(sg_out, 2);
3711 +- sg_set_buf(&sg_out[0], dst, *dst_len);
3712 +- sg_set_buf(&sg_out[1], pad, sizeof(pad));
3713 +- ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
3714 ++ WARN_ON(crypt_len > buf_len);
3715 ++ if (encrypt)
3716 ++ memset(buf + in_len, pad_byte, pad_byte);
3717 ++ ret = setup_sgtable(&sgt, &prealloc_sg, buf, crypt_len);
3718 + if (ret)
3719 +- goto out_tfm;
3720 ++ return ret;
3721 +
3722 +- crypto_skcipher_setkey((void *)tfm, key, key_len);
3723 + memcpy(iv, aes_iv, AES_BLOCK_SIZE);
3724 +-
3725 +- skcipher_request_set_tfm(req, tfm);
3726 ++ skcipher_request_set_tfm(req, key->tfm);
3727 + skcipher_request_set_callback(req, 0, NULL, NULL);
3728 +- skcipher_request_set_crypt(req, sg_in.sgl, sg_out,
3729 +- src_len, iv);
3730 ++ skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);
3731 +
3732 + /*
3733 +- print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
3734 +- key, key_len, 1);
3735 +- print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
3736 +- src, src_len, 1);
3737 ++ print_hex_dump(KERN_ERR, "key: ", DUMP_PREFIX_NONE, 16, 1,
3738 ++ key->key, key->len, 1);
3739 ++ print_hex_dump(KERN_ERR, " in: ", DUMP_PREFIX_NONE, 16, 1,
3740 ++ buf, crypt_len, 1);
3741 + */
3742 +- ret = crypto_skcipher_decrypt(req);
3743 +- skcipher_request_zero(req);
3744 +- if (ret < 0) {
3745 +- pr_err("ceph_aes_decrypt failed %d\n", ret);
3746 +- goto out_sg;
3747 +- }
3748 +-
3749 +- if (src_len <= *dst_len)
3750 +- last_byte = ((char *)dst)[src_len - 1];
3751 ++ if (encrypt)
3752 ++ ret = crypto_skcipher_encrypt(req);
3753 + else
3754 +- last_byte = pad[src_len - *dst_len - 1];
3755 +- if (last_byte <= 16 && src_len >= last_byte) {
3756 +- *dst_len = src_len - last_byte;
3757 +- } else {
3758 +- pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
3759 +- last_byte, (int)src_len);
3760 +- return -EPERM; /* bad padding */
3761 +- }
3762 +- /*
3763 +- print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
3764 +- dst, *dst_len, 1);
3765 +- */
3766 +-
3767 +-out_sg:
3768 +- teardown_sgtable(&sg_in);
3769 +-out_tfm:
3770 +- crypto_free_skcipher(tfm);
3771 +- return ret;
3772 +-}
3773 +-
3774 +-static int ceph_aes_decrypt2(const void *key, int key_len,
3775 +- void *dst1, size_t *dst1_len,
3776 +- void *dst2, size_t *dst2_len,
3777 +- const void *src, size_t src_len)
3778 +-{
3779 +- struct sg_table sg_in;
3780 +- struct scatterlist sg_out[3], prealloc_sg;
3781 +- struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
3782 +- SKCIPHER_REQUEST_ON_STACK(req, tfm);
3783 +- char pad[16];
3784 +- char iv[AES_BLOCK_SIZE];
3785 +- int ret;
3786 +- int last_byte;
3787 +-
3788 +- if (IS_ERR(tfm))
3789 +- return PTR_ERR(tfm);
3790 +-
3791 +- sg_init_table(sg_out, 3);
3792 +- sg_set_buf(&sg_out[0], dst1, *dst1_len);
3793 +- sg_set_buf(&sg_out[1], dst2, *dst2_len);
3794 +- sg_set_buf(&sg_out[2], pad, sizeof(pad));
3795 +- ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
3796 +- if (ret)
3797 +- goto out_tfm;
3798 +-
3799 +- crypto_skcipher_setkey((void *)tfm, key, key_len);
3800 +- memcpy(iv, aes_iv, AES_BLOCK_SIZE);
3801 +-
3802 +- skcipher_request_set_tfm(req, tfm);
3803 +- skcipher_request_set_callback(req, 0, NULL, NULL);
3804 +- skcipher_request_set_crypt(req, sg_in.sgl, sg_out,
3805 +- src_len, iv);
3806 +-
3807 +- /*
3808 +- print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
3809 +- key, key_len, 1);
3810 +- print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
3811 +- src, src_len, 1);
3812 +- */
3813 +- ret = crypto_skcipher_decrypt(req);
3814 ++ ret = crypto_skcipher_decrypt(req);
3815 + skcipher_request_zero(req);
3816 +- if (ret < 0) {
3817 +- pr_err("ceph_aes_decrypt failed %d\n", ret);
3818 +- goto out_sg;
3819 +- }
3820 +-
3821 +- if (src_len <= *dst1_len)
3822 +- last_byte = ((char *)dst1)[src_len - 1];
3823 +- else if (src_len <= *dst1_len + *dst2_len)
3824 +- last_byte = ((char *)dst2)[src_len - *dst1_len - 1];
3825 +- else
3826 +- last_byte = pad[src_len - *dst1_len - *dst2_len - 1];
3827 +- if (last_byte <= 16 && src_len >= last_byte) {
3828 +- src_len -= last_byte;
3829 +- } else {
3830 +- pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
3831 +- last_byte, (int)src_len);
3832 +- return -EPERM; /* bad padding */
3833 +- }
3834 +-
3835 +- if (src_len < *dst1_len) {
3836 +- *dst1_len = src_len;
3837 +- *dst2_len = 0;
3838 +- } else {
3839 +- *dst2_len = src_len - *dst1_len;
3840 ++ if (ret) {
3841 ++ pr_err("%s %scrypt failed: %d\n", __func__,
3842 ++ encrypt ? "en" : "de", ret);
3843 ++ goto out_sgt;
3844 + }
3845 + /*
3846 +- print_hex_dump(KERN_ERR, "dec out1: ", DUMP_PREFIX_NONE, 16, 1,
3847 +- dst1, *dst1_len, 1);
3848 +- print_hex_dump(KERN_ERR, "dec out2: ", DUMP_PREFIX_NONE, 16, 1,
3849 +- dst2, *dst2_len, 1);
3850 ++ print_hex_dump(KERN_ERR, "out: ", DUMP_PREFIX_NONE, 16, 1,
3851 ++ buf, crypt_len, 1);
3852 + */
3853 +
3854 +-out_sg:
3855 +- teardown_sgtable(&sg_in);
3856 +-out_tfm:
3857 +- crypto_free_skcipher(tfm);
3858 +- return ret;
3859 +-}
3860 +-
3861 +-
3862 +-int ceph_decrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
3863 +- const void *src, size_t src_len)
3864 +-{
3865 +- switch (secret->type) {
3866 +- case CEPH_CRYPTO_NONE:
3867 +- if (*dst_len < src_len)
3868 +- return -ERANGE;
3869 +- memcpy(dst, src, src_len);
3870 +- *dst_len = src_len;
3871 +- return 0;
3872 +-
3873 +- case CEPH_CRYPTO_AES:
3874 +- return ceph_aes_decrypt(secret->key, secret->len, dst,
3875 +- dst_len, src, src_len);
3876 +-
3877 +- default:
3878 +- return -EINVAL;
3879 +- }
3880 +-}
3881 +-
3882 +-int ceph_decrypt2(struct ceph_crypto_key *secret,
3883 +- void *dst1, size_t *dst1_len,
3884 +- void *dst2, size_t *dst2_len,
3885 +- const void *src, size_t src_len)
3886 +-{
3887 +- size_t t;
3888 +-
3889 +- switch (secret->type) {
3890 +- case CEPH_CRYPTO_NONE:
3891 +- if (*dst1_len + *dst2_len < src_len)
3892 +- return -ERANGE;
3893 +- t = min(*dst1_len, src_len);
3894 +- memcpy(dst1, src, t);
3895 +- *dst1_len = t;
3896 +- src += t;
3897 +- src_len -= t;
3898 +- if (src_len) {
3899 +- t = min(*dst2_len, src_len);
3900 +- memcpy(dst2, src, t);
3901 +- *dst2_len = t;
3902 ++ if (encrypt) {
3903 ++ *pout_len = crypt_len;
3904 ++ } else {
3905 ++ pad_byte = *(char *)(buf + in_len - 1);
3906 ++ if (pad_byte > 0 && pad_byte <= AES_BLOCK_SIZE &&
3907 ++ in_len >= pad_byte) {
3908 ++ *pout_len = in_len - pad_byte;
3909 ++ } else {
3910 ++ pr_err("%s got bad padding %d on in_len %d\n",
3911 ++ __func__, pad_byte, in_len);
3912 ++ ret = -EPERM;
3913 ++ goto out_sgt;
3914 + }
3915 +- return 0;
3916 +-
3917 +- case CEPH_CRYPTO_AES:
3918 +- return ceph_aes_decrypt2(secret->key, secret->len,
3919 +- dst1, dst1_len, dst2, dst2_len,
3920 +- src, src_len);
3921 +-
3922 +- default:
3923 +- return -EINVAL;
3924 + }
3925 +-}
3926 +-
3927 +-int ceph_encrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
3928 +- const void *src, size_t src_len)
3929 +-{
3930 +- switch (secret->type) {
3931 +- case CEPH_CRYPTO_NONE:
3932 +- if (*dst_len < src_len)
3933 +- return -ERANGE;
3934 +- memcpy(dst, src, src_len);
3935 +- *dst_len = src_len;
3936 +- return 0;
3937 +
3938 +- case CEPH_CRYPTO_AES:
3939 +- return ceph_aes_encrypt(secret->key, secret->len, dst,
3940 +- dst_len, src, src_len);
3941 +-
3942 +- default:
3943 +- return -EINVAL;
3944 +- }
3945 ++out_sgt:
3946 ++ teardown_sgtable(&sgt);
3947 ++ return ret;
3948 + }
3949 +
3950 +-int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
3951 +- const void *src1, size_t src1_len,
3952 +- const void *src2, size_t src2_len)
3953 ++int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt,
3954 ++ void *buf, int buf_len, int in_len, int *pout_len)
3955 + {
3956 +- switch (secret->type) {
3957 ++ switch (key->type) {
3958 + case CEPH_CRYPTO_NONE:
3959 +- if (*dst_len < src1_len + src2_len)
3960 +- return -ERANGE;
3961 +- memcpy(dst, src1, src1_len);
3962 +- memcpy(dst + src1_len, src2, src2_len);
3963 +- *dst_len = src1_len + src2_len;
3964 ++ *pout_len = in_len;
3965 + return 0;
3966 +-
3967 + case CEPH_CRYPTO_AES:
3968 +- return ceph_aes_encrypt2(secret->key, secret->len, dst, dst_len,
3969 +- src1, src1_len, src2, src2_len);
3970 +-
3971 ++ return ceph_aes_crypt(key, encrypt, buf, buf_len, in_len,
3972 ++ pout_len);
3973 + default:
3974 +- return -EINVAL;
3975 ++ return -ENOTSUPP;
3976 + }
3977 + }
3978 +
3979 +diff --git a/net/ceph/crypto.h b/net/ceph/crypto.h
3980 +index 2e9cab09f37b..58d83aa7740f 100644
3981 +--- a/net/ceph/crypto.h
3982 ++++ b/net/ceph/crypto.h
3983 +@@ -12,37 +12,19 @@ struct ceph_crypto_key {
3984 + struct ceph_timespec created;
3985 + int len;
3986 + void *key;
3987 ++ struct crypto_skcipher *tfm;
3988 + };
3989 +
3990 +-static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
3991 +-{
3992 +- if (key) {
3993 +- kfree(key->key);
3994 +- key->key = NULL;
3995 +- }
3996 +-}
3997 +-
3998 + int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
3999 + const struct ceph_crypto_key *src);
4000 + int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end);
4001 + int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end);
4002 + int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *in);
4003 ++void ceph_crypto_key_destroy(struct ceph_crypto_key *key);
4004 +
4005 + /* crypto.c */
4006 +-int ceph_decrypt(struct ceph_crypto_key *secret,
4007 +- void *dst, size_t *dst_len,
4008 +- const void *src, size_t src_len);
4009 +-int ceph_encrypt(struct ceph_crypto_key *secret,
4010 +- void *dst, size_t *dst_len,
4011 +- const void *src, size_t src_len);
4012 +-int ceph_decrypt2(struct ceph_crypto_key *secret,
4013 +- void *dst1, size_t *dst1_len,
4014 +- void *dst2, size_t *dst2_len,
4015 +- const void *src, size_t src_len);
4016 +-int ceph_encrypt2(struct ceph_crypto_key *secret,
4017 +- void *dst, size_t *dst_len,
4018 +- const void *src1, size_t src1_len,
4019 +- const void *src2, size_t src2_len);
4020 ++int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt,
4021 ++ void *buf, int buf_len, int in_len, int *pout_len);
4022 + int ceph_crypto_init(void);
4023 + void ceph_crypto_shutdown(void);
4024 +
4025 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
4026 +index a47bbc973f2d..2384b4aae064 100644
4027 +--- a/net/mac80211/rx.c
4028 ++++ b/net/mac80211/rx.c
4029 +@@ -3939,21 +3939,31 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
4030 + u64_stats_update_end(&stats->syncp);
4031 +
4032 + if (fast_rx->internal_forward) {
4033 +- struct sta_info *dsta = sta_info_get(rx->sdata, skb->data);
4034 ++ struct sk_buff *xmit_skb = NULL;
4035 ++ bool multicast = is_multicast_ether_addr(skb->data);
4036 +
4037 +- if (dsta) {
4038 ++ if (multicast) {
4039 ++ xmit_skb = skb_copy(skb, GFP_ATOMIC);
4040 ++ } else if (sta_info_get(rx->sdata, skb->data)) {
4041 ++ xmit_skb = skb;
4042 ++ skb = NULL;
4043 ++ }
4044 ++
4045 ++ if (xmit_skb) {
4046 + /*
4047 + * Send to wireless media and increase priority by 256
4048 + * to keep the received priority instead of
4049 + * reclassifying the frame (see cfg80211_classify8021d).
4050 + */
4051 +- skb->priority += 256;
4052 +- skb->protocol = htons(ETH_P_802_3);
4053 +- skb_reset_network_header(skb);
4054 +- skb_reset_mac_header(skb);
4055 +- dev_queue_xmit(skb);
4056 +- return true;
4057 ++ xmit_skb->priority += 256;
4058 ++ xmit_skb->protocol = htons(ETH_P_802_3);
4059 ++ skb_reset_network_header(xmit_skb);
4060 ++ skb_reset_mac_header(xmit_skb);
4061 ++ dev_queue_xmit(xmit_skb);
4062 + }
4063 ++
4064 ++ if (!skb)
4065 ++ return true;
4066 + }
4067 +
4068 + /* deliver to local stack */
4069 +diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
4070 +index 45662d7f0943..6fdffde28733 100644
4071 +--- a/net/sunrpc/auth_gss/svcauth_gss.c
4072 ++++ b/net/sunrpc/auth_gss/svcauth_gss.c
4073 +@@ -1489,7 +1489,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
4074 + case RPC_GSS_PROC_DESTROY:
4075 + if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
4076 + goto auth_err;
4077 +- rsci->h.expiry_time = get_seconds();
4078 ++ rsci->h.expiry_time = seconds_since_boot();
4079 + set_bit(CACHE_NEGATIVE, &rsci->h.flags);
4080 + if (resv->iov_len + 4 > PAGE_SIZE)
4081 + goto drop;
4082 +diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
4083 +index 3bc1d61694cb..9c9db55a0c1e 100644
4084 +--- a/net/sunrpc/svc_xprt.c
4085 ++++ b/net/sunrpc/svc_xprt.c
4086 +@@ -799,6 +799,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
4087 +
4088 + if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
4089 + dprintk("svc_recv: found XPT_CLOSE\n");
4090 ++ if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags))
4091 ++ xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
4092 + svc_delete_xprt(xprt);
4093 + /* Leave XPT_BUSY set on the dead xprt: */
4094 + goto out;
4095 +@@ -1020,9 +1022,11 @@ void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
4096 + le = to_be_closed.next;
4097 + list_del_init(le);
4098 + xprt = list_entry(le, struct svc_xprt, xpt_list);
4099 +- dprintk("svc_age_temp_xprts_now: closing %p\n", xprt);
4100 +- xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
4101 +- svc_close_xprt(xprt);
4102 ++ set_bit(XPT_CLOSE, &xprt->xpt_flags);
4103 ++ set_bit(XPT_KILL_TEMP, &xprt->xpt_flags);
4104 ++ dprintk("svc_age_temp_xprts_now: queuing xprt %p for closing\n",
4105 ++ xprt);
4106 ++ svc_xprt_enqueue(xprt);
4107 + }
4108 + }
4109 + EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now);
4110 +diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
4111 +index 26b26beef2d4..adbf52c6df83 100644
4112 +--- a/net/sunrpc/xprtrdma/frwr_ops.c
4113 ++++ b/net/sunrpc/xprtrdma/frwr_ops.c
4114 +@@ -421,7 +421,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
4115 + IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
4116 + IB_ACCESS_REMOTE_READ;
4117 +
4118 +- DECR_CQCOUNT(&r_xprt->rx_ep);
4119 ++ rpcrdma_set_signaled(&r_xprt->rx_ep, &reg_wr->wr);
4120 + rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
4121 + if (rc)
4122 + goto out_senderr;
4123 +@@ -486,7 +486,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
4124 + struct rpcrdma_ia *ia = &r_xprt->rx_ia;
4125 + struct rpcrdma_mw *mw, *tmp;
4126 + struct rpcrdma_frmr *f;
4127 +- int rc;
4128 ++ int count, rc;
4129 +
4130 + dprintk("RPC: %s: req %p\n", __func__, req);
4131 +
4132 +@@ -496,6 +496,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
4133 + * a single ib_post_send() call.
4134 + */
4135 + f = NULL;
4136 ++ count = 0;
4137 + invalidate_wrs = pos = prev = NULL;
4138 + list_for_each_entry(mw, &req->rl_registered, mw_list) {
4139 + if ((rep->rr_wc_flags & IB_WC_WITH_INVALIDATE) &&
4140 +@@ -505,6 +506,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
4141 + }
4142 +
4143 + pos = __frwr_prepare_linv_wr(mw);
4144 ++ count++;
4145 +
4146 + if (!invalidate_wrs)
4147 + invalidate_wrs = pos;
4148 +@@ -523,7 +525,12 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
4149 + f->fr_invwr.send_flags = IB_SEND_SIGNALED;
4150 + f->fr_cqe.done = frwr_wc_localinv_wake;
4151 + reinit_completion(&f->fr_linv_done);
4152 +- INIT_CQCOUNT(&r_xprt->rx_ep);
4153 ++
4154 ++ /* Initialize CQ count, since there is always a signaled
4155 ++ * WR being posted here. The new cqcount depends on how
4156 ++ * many SQEs are about to be consumed.
4157 ++ */
4158 ++ rpcrdma_init_cqcount(&r_xprt->rx_ep, count);
4159 +
4160 + /* Transport disconnect drains the receive CQ before it
4161 + * replaces the QP. The RPC reply handler won't call us
4162 +diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
4163 +index ad1df979b3f0..a47c9bdef5fa 100644
4164 +--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
4165 ++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
4166 +@@ -348,8 +348,6 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
4167 + atomic_inc(&rdma_stat_read);
4168 + return ret;
4169 + err:
4170 +- ib_dma_unmap_sg(xprt->sc_cm_id->device,
4171 +- frmr->sg, frmr->sg_nents, frmr->direction);
4172 + svc_rdma_put_context(ctxt, 0);
4173 + svc_rdma_put_frmr(xprt, frmr);
4174 + return ret;
4175 +diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
4176 +index ec74289af7ec..8da7f6a4dfc3 100644
4177 +--- a/net/sunrpc/xprtrdma/verbs.c
4178 ++++ b/net/sunrpc/xprtrdma/verbs.c
4179 +@@ -223,8 +223,8 @@ rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
4180 + cdata->inline_rsize = rsize;
4181 + if (wsize < cdata->inline_wsize)
4182 + cdata->inline_wsize = wsize;
4183 +- pr_info("rpcrdma: max send %u, max recv %u\n",
4184 +- cdata->inline_wsize, cdata->inline_rsize);
4185 ++ dprintk("RPC: %s: max send %u, max recv %u\n",
4186 ++ __func__, cdata->inline_wsize, cdata->inline_rsize);
4187 + rpcrdma_set_max_header_sizes(r_xprt);
4188 + }
4189 +
4190 +@@ -532,7 +532,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
4191 + ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
4192 + if (ep->rep_cqinit <= 2)
4193 + ep->rep_cqinit = 0; /* always signal? */
4194 +- INIT_CQCOUNT(ep);
4195 ++ rpcrdma_init_cqcount(ep, 0);
4196 + init_waitqueue_head(&ep->rep_connect_wait);
4197 + INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
4198 +
4199 +@@ -1311,13 +1311,7 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
4200 + dprintk("RPC: %s: posting %d s/g entries\n",
4201 + __func__, send_wr->num_sge);
4202 +
4203 +- if (DECR_CQCOUNT(ep) > 0)
4204 +- send_wr->send_flags = 0;
4205 +- else { /* Provider must take a send completion every now and then */
4206 +- INIT_CQCOUNT(ep);
4207 +- send_wr->send_flags = IB_SEND_SIGNALED;
4208 +- }
4209 +-
4210 ++ rpcrdma_set_signaled(ep, send_wr);
4211 + rc = ib_post_send(ia->ri_id->qp, send_wr, &send_wr_fail);
4212 + if (rc)
4213 + goto out_postsend_err;
4214 +diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
4215 +index 6e1bba358203..f6ae1b22da47 100644
4216 +--- a/net/sunrpc/xprtrdma/xprt_rdma.h
4217 ++++ b/net/sunrpc/xprtrdma/xprt_rdma.h
4218 +@@ -95,8 +95,24 @@ struct rpcrdma_ep {
4219 + struct delayed_work rep_connect_worker;
4220 + };
4221 +
4222 +-#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
4223 +-#define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)
4224 ++static inline void
4225 ++rpcrdma_init_cqcount(struct rpcrdma_ep *ep, int count)
4226 ++{
4227 ++ atomic_set(&ep->rep_cqcount, ep->rep_cqinit - count);
4228 ++}
4229 ++
4230 ++/* To update send queue accounting, provider must take a
4231 ++ * send completion every now and then.
4232 ++ */
4233 ++static inline void
4234 ++rpcrdma_set_signaled(struct rpcrdma_ep *ep, struct ib_send_wr *send_wr)
4235 ++{
4236 ++ send_wr->send_flags = 0;
4237 ++ if (unlikely(atomic_sub_return(1, &ep->rep_cqcount) <= 0)) {
4238 ++ rpcrdma_init_cqcount(ep, 0);
4239 ++ send_wr->send_flags = IB_SEND_SIGNALED;
4240 ++ }
4241 ++}
4242 +
4243 + /* Pre-allocate extra Work Requests for handling backward receives
4244 + * and sends. This is a fixed value because the Work Queues are
4245 +diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
4246 +index 72edf83d76b7..cffdd9cf3ebf 100644
4247 +--- a/tools/perf/Makefile.config
4248 ++++ b/tools/perf/Makefile.config
4249 +@@ -366,7 +366,7 @@ ifndef NO_SDT
4250 + endif
4251 +
4252 + ifdef PERF_HAVE_JITDUMP
4253 +- ifndef NO_DWARF
4254 ++ ifndef NO_LIBELF
4255 + $(call detected,CONFIG_JITDUMP)
4256 + CFLAGS += -DHAVE_JITDUMP
4257 + endif
4258 +diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
4259 +index d1ce29be560e..cd7bc4d104e2 100644
4260 +--- a/tools/perf/builtin-mem.c
4261 ++++ b/tools/perf/builtin-mem.c
4262 +@@ -70,8 +70,8 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
4263 + OPT_UINTEGER(0, "ldlat", &perf_mem_events__loads_ldlat, "mem-loads latency"),
4264 + OPT_INCR('v', "verbose", &verbose,
4265 + "be more verbose (show counter open errors, etc)"),
4266 +- OPT_BOOLEAN('U', "--all-user", &all_user, "collect only user level data"),
4267 +- OPT_BOOLEAN('K', "--all-kernel", &all_kernel, "collect only kernel level data"),
4268 ++ OPT_BOOLEAN('U', "all-user", &all_user, "collect only user level data"),
4269 ++ OPT_BOOLEAN('K', "all-kernel", &all_kernel, "collect only kernel level data"),
4270 + OPT_END()
4271 + };
4272 +
4273 +diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
4274 +index c298bd3e1d90..21f8a81797a0 100644
4275 +--- a/tools/perf/builtin-trace.c
4276 ++++ b/tools/perf/builtin-trace.c
4277 +@@ -1452,7 +1452,7 @@ static int trace__printf_interrupted_entry(struct trace *trace, struct perf_samp
4278 +
4279 + duration = sample->time - ttrace->entry_time;
4280 +
4281 +- printed = trace__fprintf_entry_head(trace, trace->current, duration, sample->time, trace->output);
4282 ++ printed = trace__fprintf_entry_head(trace, trace->current, duration, ttrace->entry_time, trace->output);
4283 + printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
4284 + ttrace->entry_pending = false;
4285 +
4286 +@@ -1499,7 +1499,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
4287 +
4288 + if (sc->is_exit) {
4289 + if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) {
4290 +- trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
4291 ++ trace__fprintf_entry_head(trace, thread, 1, ttrace->entry_time, trace->output);
4292 + fprintf(trace->output, "%-70s)\n", ttrace->entry_str);
4293 + }
4294 + } else {
4295 +@@ -1592,7 +1592,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
4296 + if (trace->summary_only)
4297 + goto out;
4298 +
4299 +- trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output);
4300 ++ trace__fprintf_entry_head(trace, thread, duration, ttrace->entry_time, trace->output);
4301 +
4302 + if (ttrace->entry_pending) {
4303 + fprintf(trace->output, "%-70s", ttrace->entry_str);
4304 +diff --git a/tools/perf/trace/beauty/mmap.c b/tools/perf/trace/beauty/mmap.c
4305 +index fd710ab33684..af1cfde6b97b 100644
4306 +--- a/tools/perf/trace/beauty/mmap.c
4307 ++++ b/tools/perf/trace/beauty/mmap.c
4308 +@@ -42,7 +42,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
4309 +
4310 + P_MMAP_FLAG(SHARED);
4311 + P_MMAP_FLAG(PRIVATE);
4312 ++#ifdef MAP_32BIT
4313 + P_MMAP_FLAG(32BIT);
4314 ++#endif
4315 + P_MMAP_FLAG(ANONYMOUS);
4316 + P_MMAP_FLAG(DENYWRITE);
4317 + P_MMAP_FLAG(EXECUTABLE);
4318 +diff --git a/tools/perf/util/Build b/tools/perf/util/Build
4319 +index eb60e613d795..1dc67efad634 100644
4320 +--- a/tools/perf/util/Build
4321 ++++ b/tools/perf/util/Build
4322 +@@ -120,7 +120,7 @@ libperf-y += demangle-rust.o
4323 + ifdef CONFIG_JITDUMP
4324 + libperf-$(CONFIG_LIBELF) += jitdump.o
4325 + libperf-$(CONFIG_LIBELF) += genelf.o
4326 +-libperf-$(CONFIG_LIBELF) += genelf_debug.o
4327 ++libperf-$(CONFIG_DWARF) += genelf_debug.o
4328 + endif
4329 +
4330 + CFLAGS_config.o += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
4331 +diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
4332 +index 07fd30bc2f81..ae58b493af45 100644
4333 +--- a/tools/perf/util/callchain.c
4334 ++++ b/tools/perf/util/callchain.c
4335 +@@ -193,7 +193,6 @@ int perf_callchain_config(const char *var, const char *value)
4336 +
4337 + if (!strcmp(var, "record-mode"))
4338 + return parse_callchain_record_opt(value, &callchain_param);
4339 +-#ifdef HAVE_DWARF_UNWIND_SUPPORT
4340 + if (!strcmp(var, "dump-size")) {
4341 + unsigned long size = 0;
4342 + int ret;
4343 +@@ -203,7 +202,6 @@ int perf_callchain_config(const char *var, const char *value)
4344 +
4345 + return ret;
4346 + }
4347 +-#endif
4348 + if (!strcmp(var, "print-type"))
4349 + return parse_callchain_mode(value);
4350 + if (!strcmp(var, "order"))
4351 +diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
4352 +index 13e75549c440..47cfd1080975 100644
4353 +--- a/tools/perf/util/callchain.h
4354 ++++ b/tools/perf/util/callchain.h
4355 +@@ -11,11 +11,7 @@
4356 +
4357 + #define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace):\n\n"
4358 +
4359 +-#ifdef HAVE_DWARF_UNWIND_SUPPORT
4360 + # define RECORD_MODE_HELP HELP_PAD "record_mode:\tcall graph recording mode (fp|dwarf|lbr)\n"
4361 +-#else
4362 +-# define RECORD_MODE_HELP HELP_PAD "record_mode:\tcall graph recording mode (fp|lbr)\n"
4363 +-#endif
4364 +
4365 + #define RECORD_SIZE_HELP \
4366 + HELP_PAD "record_size:\tif record_mode is 'dwarf', max size of stack recording (<bytes>)\n" \
4367 +diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
4368 +index c1ef805c6a8f..14a73acc549c 100644
4369 +--- a/tools/perf/util/genelf.c
4370 ++++ b/tools/perf/util/genelf.c
4371 +@@ -19,7 +19,9 @@
4372 + #include <limits.h>
4373 + #include <fcntl.h>
4374 + #include <err.h>
4375 ++#ifdef HAVE_DWARF_SUPPORT
4376 + #include <dwarf.h>
4377 ++#endif
4378 +
4379 + #include "perf.h"
4380 + #include "genelf.h"
4381 +@@ -157,7 +159,7 @@ gen_build_id(struct buildid_note *note, unsigned long load_addr, const void *cod
4382 + int
4383 + jit_write_elf(int fd, uint64_t load_addr, const char *sym,
4384 + const void *code, int csize,
4385 +- void *debug, int nr_debug_entries)
4386 ++ void *debug __maybe_unused, int nr_debug_entries __maybe_unused)
4387 + {
4388 + Elf *e;
4389 + Elf_Data *d;
4390 +@@ -386,11 +388,14 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
4391 + shdr->sh_size = sizeof(bnote);
4392 + shdr->sh_entsize = 0;
4393 +
4394 ++#ifdef HAVE_DWARF_SUPPORT
4395 + if (debug && nr_debug_entries) {
4396 + retval = jit_add_debug_info(e, load_addr, debug, nr_debug_entries);
4397 + if (retval)
4398 + goto error;
4399 +- } else {
4400 ++ } else
4401 ++#endif
4402 ++ {
4403 + if (elf_update(e, ELF_C_WRITE) < 0) {
4404 + warnx("elf_update 4 failed");
4405 + goto error;
4406 +diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
4407 +index 2fbeb59c4bdd..5c933ac71451 100644
4408 +--- a/tools/perf/util/genelf.h
4409 ++++ b/tools/perf/util/genelf.h
4410 +@@ -4,8 +4,10 @@
4411 + /* genelf.c */
4412 + int jit_write_elf(int fd, uint64_t code_addr, const char *sym,
4413 + const void *code, int csize, void *debug, int nr_debug_entries);
4414 ++#ifdef HAVE_DWARF_SUPPORT
4415 + /* genelf_debug.c */
4416 + int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_entries);
4417 ++#endif
4418 +
4419 + #if defined(__arm__)
4420 + #define GEN_ELF_ARCH EM_ARM
4421 +diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
4422 +index aecff69a510d..f7b35e178582 100644
4423 +--- a/tools/perf/util/symbol.c
4424 ++++ b/tools/perf/util/symbol.c
4425 +@@ -1459,7 +1459,8 @@ int dso__load(struct dso *dso, struct map *map)
4426 + * Read the build id if possible. This is required for
4427 + * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
4428 + */
4429 +- if (is_regular_file(dso->long_name) &&
4430 ++ if (!dso->has_build_id &&
4431 ++ is_regular_file(dso->long_name) &&
4432 + filename__read_build_id(dso->long_name, build_id, BUILD_ID_SIZE) > 0)
4433 + dso__set_build_id(dso, build_id);
4434 +
4435 +diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
4436 +index 9df61059a85d..a2fd6e79d5a5 100644
4437 +--- a/tools/perf/util/trace-event-scripting.c
4438 ++++ b/tools/perf/util/trace-event-scripting.c
4439 +@@ -95,7 +95,8 @@ static void register_python_scripting(struct scripting_ops *scripting_ops)
4440 + if (err)
4441 + die("error registering py script extension");
4442 +
4443 +- scripting_context = malloc(sizeof(struct scripting_context));
4444 ++ if (scripting_context == NULL)
4445 ++ scripting_context = malloc(sizeof(*scripting_context));
4446 + }
4447 +
4448 + #ifdef NO_LIBPYTHON
4449 +@@ -159,7 +160,8 @@ static void register_perl_scripting(struct scripting_ops *scripting_ops)
4450 + if (err)
4451 + die("error registering pl script extension");
4452 +
4453 +- scripting_context = malloc(sizeof(struct scripting_context));
4454 ++ if (scripting_context == NULL)
4455 ++ scripting_context = malloc(sizeof(*scripting_context));
4456 + }
4457 +
4458 + #ifdef NO_LIBPERL
4459 +diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
4460 +index c22860ab9733..30e1ac62e8cb 100644
4461 +--- a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
4462 ++++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
4463 +@@ -66,7 +66,7 @@ int pmc56_overflow(void)
4464 +
4465 + FAIL_IF(ebb_event_enable(&event));
4466 +
4467 +- mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
4468 ++ mtspr(SPRN_PMC2, pmc_sample_period(sample_period));
4469 + mtspr(SPRN_PMC5, 0);
4470 + mtspr(SPRN_PMC6, 0);
4471 +
4472 +diff --git a/tools/virtio/ringtest/run-on-all.sh b/tools/virtio/ringtest/run-on-all.sh
4473 +index 2e69ca812b4c..29b0d3920bfc 100755
4474 +--- a/tools/virtio/ringtest/run-on-all.sh
4475 ++++ b/tools/virtio/ringtest/run-on-all.sh
4476 +@@ -1,12 +1,13 @@
4477 + #!/bin/sh
4478 +
4479 ++CPUS_ONLINE=$(lscpu --online -p=cpu|grep -v -e '#')
4480 + #use last CPU for host. Why not the first?
4481 + #many devices tend to use cpu0 by default so
4482 + #it tends to be busier
4483 +-HOST_AFFINITY=$(lscpu -p=cpu | tail -1)
4484 ++HOST_AFFINITY=$(echo "${CPUS_ONLINE}"|tail -n 1)
4485 +
4486 + #run command on all cpus
4487 +-for cpu in $(seq 0 $HOST_AFFINITY)
4488 ++for cpu in $CPUS_ONLINE
4489 + do
4490 + #Don't run guest and host on same CPU
4491 + #It actually works ok if using signalling
4492 +diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
4493 +index 8cebfbc19e90..539d3f5cb619 100644
4494 +--- a/virt/kvm/arm/vgic/vgic-init.c
4495 ++++ b/virt/kvm/arm/vgic/vgic-init.c
4496 +@@ -268,15 +268,11 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
4497 + {
4498 + struct vgic_dist *dist = &kvm->arch.vgic;
4499 +
4500 +- mutex_lock(&kvm->lock);
4501 +-
4502 + dist->ready = false;
4503 + dist->initialized = false;
4504 +
4505 + kfree(dist->spis);
4506 + dist->nr_spis = 0;
4507 +-
4508 +- mutex_unlock(&kvm->lock);
4509 + }
4510 +
4511 + void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
4512 +@@ -286,7 +282,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
4513 + INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
4514 + }
4515 +
4516 +-void kvm_vgic_destroy(struct kvm *kvm)
4517 ++/* To be called with kvm->lock held */
4518 ++static void __kvm_vgic_destroy(struct kvm *kvm)
4519 + {
4520 + struct kvm_vcpu *vcpu;
4521 + int i;
4522 +@@ -297,6 +294,13 @@ void kvm_vgic_destroy(struct kvm *kvm)
4523 + kvm_vgic_vcpu_destroy(vcpu);
4524 + }
4525 +
4526 ++void kvm_vgic_destroy(struct kvm *kvm)
4527 ++{
4528 ++ mutex_lock(&kvm->lock);
4529 ++ __kvm_vgic_destroy(kvm);
4530 ++ mutex_unlock(&kvm->lock);
4531 ++}
4532 ++
4533 + /**
4534 + * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
4535 + * is a GICv2. A GICv3 must be explicitly initialized by the guest using the
4536 +@@ -348,6 +352,10 @@ int kvm_vgic_map_resources(struct kvm *kvm)
4537 + ret = vgic_v2_map_resources(kvm);
4538 + else
4539 + ret = vgic_v3_map_resources(kvm);
4540 ++
4541 ++ if (ret)
4542 ++ __kvm_vgic_destroy(kvm);
4543 ++
4544 + out:
4545 + mutex_unlock(&kvm->lock);
4546 + return ret;
4547 +diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
4548 +index 9bab86757fa4..834137e7b83f 100644
4549 +--- a/virt/kvm/arm/vgic/vgic-v2.c
4550 ++++ b/virt/kvm/arm/vgic/vgic-v2.c
4551 +@@ -293,8 +293,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
4552 + dist->ready = true;
4553 +
4554 + out:
4555 +- if (ret)
4556 +- kvm_vgic_destroy(kvm);
4557 + return ret;
4558 + }
4559 +
4560 +diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
4561 +index 5c9f9745e6ca..e6b03fd8c374 100644
4562 +--- a/virt/kvm/arm/vgic/vgic-v3.c
4563 ++++ b/virt/kvm/arm/vgic/vgic-v3.c
4564 +@@ -302,8 +302,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
4565 + dist->ready = true;
4566 +
4567 + out:
4568 +- if (ret)
4569 +- kvm_vgic_destroy(kvm);
4570 + return ret;
4571 + }
4572 +