Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Tue, 13 Nov 2018 21:19:05
Message-Id: 1542143922.b532e0ad59b32dbc7e8fda496d02d04ad124426e.mpagano@gentoo
1 commit: b532e0ad59b32dbc7e8fda496d02d04ad124426e
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Nov 13 21:18:42 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Nov 13 21:18:42 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b532e0ad
7
8 proj/linux-patches: Linux patch 4.14.81
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1080_linux-4.14.81.patch | 6990 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 6994 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 28ef8f2..fd76211 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -363,6 +363,10 @@ Patch: 1079_linux-4.14.80.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.14.80
23
24 +Patch: 1080_linux-4.14.81.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.14.81
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1080_linux-4.14.81.patch b/1080_linux-4.14.81.patch
33 new file mode 100644
34 index 0000000..ad133e4
35 --- /dev/null
36 +++ b/1080_linux-4.14.81.patch
37 @@ -0,0 +1,6990 @@
38 +diff --git a/Documentation/media/uapi/v4l/biblio.rst b/Documentation/media/uapi/v4l/biblio.rst
39 +index 1cedcfc04327..386d6cf83e9c 100644
40 +--- a/Documentation/media/uapi/v4l/biblio.rst
41 ++++ b/Documentation/media/uapi/v4l/biblio.rst
42 +@@ -226,16 +226,6 @@ xvYCC
43 +
44 + :author: International Electrotechnical Commission (http://www.iec.ch)
45 +
46 +-.. _adobergb:
47 +-
48 +-AdobeRGB
49 +-========
50 +-
51 +-
52 +-:title: Adobe© RGB (1998) Color Image Encoding Version 2005-05
53 +-
54 +-:author: Adobe Systems Incorporated (http://www.adobe.com)
55 +-
56 + .. _oprgb:
57 +
58 + opRGB
59 +diff --git a/Documentation/media/uapi/v4l/colorspaces-defs.rst b/Documentation/media/uapi/v4l/colorspaces-defs.rst
60 +index 410907fe9415..f24615544792 100644
61 +--- a/Documentation/media/uapi/v4l/colorspaces-defs.rst
62 ++++ b/Documentation/media/uapi/v4l/colorspaces-defs.rst
63 +@@ -51,8 +51,8 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
64 + - See :ref:`col-rec709`.
65 + * - ``V4L2_COLORSPACE_SRGB``
66 + - See :ref:`col-srgb`.
67 +- * - ``V4L2_COLORSPACE_ADOBERGB``
68 +- - See :ref:`col-adobergb`.
69 ++ * - ``V4L2_COLORSPACE_OPRGB``
70 ++ - See :ref:`col-oprgb`.
71 + * - ``V4L2_COLORSPACE_BT2020``
72 + - See :ref:`col-bt2020`.
73 + * - ``V4L2_COLORSPACE_DCI_P3``
74 +@@ -90,8 +90,8 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
75 + - Use the Rec. 709 transfer function.
76 + * - ``V4L2_XFER_FUNC_SRGB``
77 + - Use the sRGB transfer function.
78 +- * - ``V4L2_XFER_FUNC_ADOBERGB``
79 +- - Use the AdobeRGB transfer function.
80 ++ * - ``V4L2_XFER_FUNC_OPRGB``
81 ++ - Use the opRGB transfer function.
82 + * - ``V4L2_XFER_FUNC_SMPTE240M``
83 + - Use the SMPTE 240M transfer function.
84 + * - ``V4L2_XFER_FUNC_NONE``
85 +diff --git a/Documentation/media/uapi/v4l/colorspaces-details.rst b/Documentation/media/uapi/v4l/colorspaces-details.rst
86 +index b5d551b9cc8f..09fabf4cd412 100644
87 +--- a/Documentation/media/uapi/v4l/colorspaces-details.rst
88 ++++ b/Documentation/media/uapi/v4l/colorspaces-details.rst
89 +@@ -290,15 +290,14 @@ Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range
90 + 170M/BT.601. The Y'CbCr quantization is limited range.
91 +
92 +
93 +-.. _col-adobergb:
94 ++.. _col-oprgb:
95 +
96 +-Colorspace Adobe RGB (V4L2_COLORSPACE_ADOBERGB)
97 ++Colorspace opRGB (V4L2_COLORSPACE_OPRGB)
98 + ===============================================
99 +
100 +-The :ref:`adobergb` standard defines the colorspace used by computer
101 +-graphics that use the AdobeRGB colorspace. This is also known as the
102 +-:ref:`oprgb` standard. The default transfer function is
103 +-``V4L2_XFER_FUNC_ADOBERGB``. The default Y'CbCr encoding is
104 ++The :ref:`oprgb` standard defines the colorspace used by computer
105 ++graphics that use the opRGB colorspace. The default transfer function is
106 ++``V4L2_XFER_FUNC_OPRGB``. The default Y'CbCr encoding is
107 + ``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is limited
108 + range.
109 +
110 +@@ -312,7 +311,7 @@ The chromaticities of the primary colors and the white reference are:
111 +
112 + .. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
113 +
114 +-.. flat-table:: Adobe RGB Chromaticities
115 ++.. flat-table:: opRGB Chromaticities
116 + :header-rows: 1
117 + :stub-columns: 0
118 + :widths: 1 1 2
119 +diff --git a/Makefile b/Makefile
120 +index f4cad5e03561..2fe1424d61d2 100644
121 +--- a/Makefile
122 ++++ b/Makefile
123 +@@ -1,7 +1,7 @@
124 + # SPDX-License-Identifier: GPL-2.0
125 + VERSION = 4
126 + PATCHLEVEL = 14
127 +-SUBLEVEL = 80
128 ++SUBLEVEL = 81
129 + EXTRAVERSION =
130 + NAME = Petit Gorille
131 +
132 +diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
133 +index a5bd8f0205e8..0bf354024ef5 100644
134 +--- a/arch/arm/boot/dts/dra7.dtsi
135 ++++ b/arch/arm/boot/dts/dra7.dtsi
136 +@@ -333,7 +333,7 @@
137 + ti,hwmods = "pcie1";
138 + phys = <&pcie1_phy>;
139 + phy-names = "pcie-phy0";
140 +- ti,syscon-unaligned-access = <&scm_conf1 0x14 2>;
141 ++ ti,syscon-unaligned-access = <&scm_conf1 0x14 1>;
142 + status = "disabled";
143 + };
144 + };
145 +diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
146 +index 590ee442d0ae..3ed3d1a0fd40 100644
147 +--- a/arch/arm/boot/dts/exynos3250.dtsi
148 ++++ b/arch/arm/boot/dts/exynos3250.dtsi
149 +@@ -82,6 +82,22 @@
150 + compatible = "arm,cortex-a7";
151 + reg = <1>;
152 + clock-frequency = <1000000000>;
153 ++ clocks = <&cmu CLK_ARM_CLK>;
154 ++ clock-names = "cpu";
155 ++ #cooling-cells = <2>;
156 ++
157 ++ operating-points = <
158 ++ 1000000 1150000
159 ++ 900000 1112500
160 ++ 800000 1075000
161 ++ 700000 1037500
162 ++ 600000 1000000
163 ++ 500000 962500
164 ++ 400000 925000
165 ++ 300000 887500
166 ++ 200000 850000
167 ++ 100000 850000
168 ++ >;
169 + };
170 + };
171 +
172 +diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts
173 +index 084fcc5574ef..e4876186d5cd 100644
174 +--- a/arch/arm/boot/dts/exynos4210-origen.dts
175 ++++ b/arch/arm/boot/dts/exynos4210-origen.dts
176 +@@ -152,6 +152,8 @@
177 + reg = <0x66>;
178 + interrupt-parent = <&gpx0>;
179 + interrupts = <4 IRQ_TYPE_NONE>, <3 IRQ_TYPE_NONE>;
180 ++ pinctrl-names = "default";
181 ++ pinctrl-0 = <&max8997_irq>;
182 +
183 + max8997,pmic-buck1-dvs-voltage = <1350000>;
184 + max8997,pmic-buck2-dvs-voltage = <1100000>;
185 +@@ -289,6 +291,13 @@
186 + };
187 + };
188 +
189 ++&pinctrl_1 {
190 ++ max8997_irq: max8997-irq {
191 ++ samsung,pins = "gpx0-3", "gpx0-4";
192 ++ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
193 ++ };
194 ++};
195 ++
196 + &sdhci_0 {
197 + bus-width = <4>;
198 + pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus4 &sd0_cd>;
199 +diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
200 +index 768fb075b1fd..27e17471ab7a 100644
201 +--- a/arch/arm/boot/dts/exynos4210.dtsi
202 ++++ b/arch/arm/boot/dts/exynos4210.dtsi
203 +@@ -52,8 +52,6 @@
204 + 400000 975000
205 + 200000 950000
206 + >;
207 +- cooling-min-level = <4>;
208 +- cooling-max-level = <2>;
209 + #cooling-cells = <2>; /* min followed by max */
210 + };
211 +
212 +@@ -61,6 +59,19 @@
213 + device_type = "cpu";
214 + compatible = "arm,cortex-a9";
215 + reg = <0x901>;
216 ++ clocks = <&clock CLK_ARM_CLK>;
217 ++ clock-names = "cpu";
218 ++ clock-latency = <160000>;
219 ++
220 ++ operating-points = <
221 ++ 1200000 1250000
222 ++ 1000000 1150000
223 ++ 800000 1075000
224 ++ 500000 975000
225 ++ 400000 975000
226 ++ 200000 950000
227 ++ >;
228 ++ #cooling-cells = <2>; /* min followed by max */
229 + };
230 + };
231 +
232 +diff --git a/arch/arm/boot/dts/exynos4412.dtsi b/arch/arm/boot/dts/exynos4412.dtsi
233 +index 7ff03a7e8fb9..1a35e6336e53 100644
234 +--- a/arch/arm/boot/dts/exynos4412.dtsi
235 ++++ b/arch/arm/boot/dts/exynos4412.dtsi
236 +@@ -45,8 +45,6 @@
237 + clocks = <&clock CLK_ARM_CLK>;
238 + clock-names = "cpu";
239 + operating-points-v2 = <&cpu0_opp_table>;
240 +- cooling-min-level = <13>;
241 +- cooling-max-level = <7>;
242 + #cooling-cells = <2>; /* min followed by max */
243 + };
244 +
245 +diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
246 +index 35b1949a3e3c..9f73a8bf6e1c 100644
247 +--- a/arch/arm/boot/dts/exynos5250.dtsi
248 ++++ b/arch/arm/boot/dts/exynos5250.dtsi
249 +@@ -57,38 +57,106 @@
250 + device_type = "cpu";
251 + compatible = "arm,cortex-a15";
252 + reg = <0>;
253 +- clock-frequency = <1700000000>;
254 + clocks = <&clock CLK_ARM_CLK>;
255 + clock-names = "cpu";
256 +- clock-latency = <140000>;
257 +-
258 +- operating-points = <
259 +- 1700000 1300000
260 +- 1600000 1250000
261 +- 1500000 1225000
262 +- 1400000 1200000
263 +- 1300000 1150000
264 +- 1200000 1125000
265 +- 1100000 1100000
266 +- 1000000 1075000
267 +- 900000 1050000
268 +- 800000 1025000
269 +- 700000 1012500
270 +- 600000 1000000
271 +- 500000 975000
272 +- 400000 950000
273 +- 300000 937500
274 +- 200000 925000
275 +- >;
276 +- cooling-min-level = <15>;
277 +- cooling-max-level = <9>;
278 ++ operating-points-v2 = <&cpu0_opp_table>;
279 + #cooling-cells = <2>; /* min followed by max */
280 + };
281 + cpu@1 {
282 + device_type = "cpu";
283 + compatible = "arm,cortex-a15";
284 + reg = <1>;
285 +- clock-frequency = <1700000000>;
286 ++ clocks = <&clock CLK_ARM_CLK>;
287 ++ clock-names = "cpu";
288 ++ operating-points-v2 = <&cpu0_opp_table>;
289 ++ #cooling-cells = <2>; /* min followed by max */
290 ++ };
291 ++ };
292 ++
293 ++ cpu0_opp_table: opp_table0 {
294 ++ compatible = "operating-points-v2";
295 ++ opp-shared;
296 ++
297 ++ opp-200000000 {
298 ++ opp-hz = /bits/ 64 <200000000>;
299 ++ opp-microvolt = <925000>;
300 ++ clock-latency-ns = <140000>;
301 ++ };
302 ++ opp-300000000 {
303 ++ opp-hz = /bits/ 64 <300000000>;
304 ++ opp-microvolt = <937500>;
305 ++ clock-latency-ns = <140000>;
306 ++ };
307 ++ opp-400000000 {
308 ++ opp-hz = /bits/ 64 <400000000>;
309 ++ opp-microvolt = <950000>;
310 ++ clock-latency-ns = <140000>;
311 ++ };
312 ++ opp-500000000 {
313 ++ opp-hz = /bits/ 64 <500000000>;
314 ++ opp-microvolt = <975000>;
315 ++ clock-latency-ns = <140000>;
316 ++ };
317 ++ opp-600000000 {
318 ++ opp-hz = /bits/ 64 <600000000>;
319 ++ opp-microvolt = <1000000>;
320 ++ clock-latency-ns = <140000>;
321 ++ };
322 ++ opp-700000000 {
323 ++ opp-hz = /bits/ 64 <700000000>;
324 ++ opp-microvolt = <1012500>;
325 ++ clock-latency-ns = <140000>;
326 ++ };
327 ++ opp-800000000 {
328 ++ opp-hz = /bits/ 64 <800000000>;
329 ++ opp-microvolt = <1025000>;
330 ++ clock-latency-ns = <140000>;
331 ++ };
332 ++ opp-900000000 {
333 ++ opp-hz = /bits/ 64 <900000000>;
334 ++ opp-microvolt = <1050000>;
335 ++ clock-latency-ns = <140000>;
336 ++ };
337 ++ opp-1000000000 {
338 ++ opp-hz = /bits/ 64 <1000000000>;
339 ++ opp-microvolt = <1075000>;
340 ++ clock-latency-ns = <140000>;
341 ++ opp-suspend;
342 ++ };
343 ++ opp-1100000000 {
344 ++ opp-hz = /bits/ 64 <1100000000>;
345 ++ opp-microvolt = <1100000>;
346 ++ clock-latency-ns = <140000>;
347 ++ };
348 ++ opp-1200000000 {
349 ++ opp-hz = /bits/ 64 <1200000000>;
350 ++ opp-microvolt = <1125000>;
351 ++ clock-latency-ns = <140000>;
352 ++ };
353 ++ opp-1300000000 {
354 ++ opp-hz = /bits/ 64 <1300000000>;
355 ++ opp-microvolt = <1150000>;
356 ++ clock-latency-ns = <140000>;
357 ++ };
358 ++ opp-1400000000 {
359 ++ opp-hz = /bits/ 64 <1400000000>;
360 ++ opp-microvolt = <1200000>;
361 ++ clock-latency-ns = <140000>;
362 ++ };
363 ++ opp-1500000000 {
364 ++ opp-hz = /bits/ 64 <1500000000>;
365 ++ opp-microvolt = <1225000>;
366 ++ clock-latency-ns = <140000>;
367 ++ };
368 ++ opp-1600000000 {
369 ++ opp-hz = /bits/ 64 <1600000000>;
370 ++ opp-microvolt = <1250000>;
371 ++ clock-latency-ns = <140000>;
372 ++ };
373 ++ opp-1700000000 {
374 ++ opp-hz = /bits/ 64 <1700000000>;
375 ++ opp-microvolt = <1300000>;
376 ++ clock-latency-ns = <140000>;
377 + };
378 + };
379 +
380 +diff --git a/arch/arm/boot/dts/exynos5420-cpus.dtsi b/arch/arm/boot/dts/exynos5420-cpus.dtsi
381 +index 5c052d7ff554..7e6b55561b1d 100644
382 +--- a/arch/arm/boot/dts/exynos5420-cpus.dtsi
383 ++++ b/arch/arm/boot/dts/exynos5420-cpus.dtsi
384 +@@ -33,8 +33,6 @@
385 + clock-frequency = <1800000000>;
386 + cci-control-port = <&cci_control1>;
387 + operating-points-v2 = <&cluster_a15_opp_table>;
388 +- cooling-min-level = <0>;
389 +- cooling-max-level = <11>;
390 + #cooling-cells = <2>; /* min followed by max */
391 + };
392 +
393 +@@ -45,8 +43,6 @@
394 + clock-frequency = <1800000000>;
395 + cci-control-port = <&cci_control1>;
396 + operating-points-v2 = <&cluster_a15_opp_table>;
397 +- cooling-min-level = <0>;
398 +- cooling-max-level = <11>;
399 + #cooling-cells = <2>; /* min followed by max */
400 + };
401 +
402 +@@ -57,8 +53,6 @@
403 + clock-frequency = <1800000000>;
404 + cci-control-port = <&cci_control1>;
405 + operating-points-v2 = <&cluster_a15_opp_table>;
406 +- cooling-min-level = <0>;
407 +- cooling-max-level = <11>;
408 + #cooling-cells = <2>; /* min followed by max */
409 + };
410 +
411 +@@ -69,8 +63,6 @@
412 + clock-frequency = <1800000000>;
413 + cci-control-port = <&cci_control1>;
414 + operating-points-v2 = <&cluster_a15_opp_table>;
415 +- cooling-min-level = <0>;
416 +- cooling-max-level = <11>;
417 + #cooling-cells = <2>; /* min followed by max */
418 + };
419 +
420 +@@ -82,8 +74,6 @@
421 + clock-frequency = <1000000000>;
422 + cci-control-port = <&cci_control0>;
423 + operating-points-v2 = <&cluster_a7_opp_table>;
424 +- cooling-min-level = <0>;
425 +- cooling-max-level = <7>;
426 + #cooling-cells = <2>; /* min followed by max */
427 + };
428 +
429 +@@ -94,8 +84,6 @@
430 + clock-frequency = <1000000000>;
431 + cci-control-port = <&cci_control0>;
432 + operating-points-v2 = <&cluster_a7_opp_table>;
433 +- cooling-min-level = <0>;
434 +- cooling-max-level = <7>;
435 + #cooling-cells = <2>; /* min followed by max */
436 + };
437 +
438 +@@ -106,8 +94,6 @@
439 + clock-frequency = <1000000000>;
440 + cci-control-port = <&cci_control0>;
441 + operating-points-v2 = <&cluster_a7_opp_table>;
442 +- cooling-min-level = <0>;
443 +- cooling-max-level = <7>;
444 + #cooling-cells = <2>; /* min followed by max */
445 + };
446 +
447 +@@ -118,8 +104,6 @@
448 + clock-frequency = <1000000000>;
449 + cci-control-port = <&cci_control0>;
450 + operating-points-v2 = <&cluster_a7_opp_table>;
451 +- cooling-min-level = <0>;
452 +- cooling-max-level = <7>;
453 + #cooling-cells = <2>; /* min followed by max */
454 + };
455 + };
456 +diff --git a/arch/arm/boot/dts/exynos5422-cpus.dtsi b/arch/arm/boot/dts/exynos5422-cpus.dtsi
457 +index bf3c6f1ec4ee..c8afdf821a77 100644
458 +--- a/arch/arm/boot/dts/exynos5422-cpus.dtsi
459 ++++ b/arch/arm/boot/dts/exynos5422-cpus.dtsi
460 +@@ -32,8 +32,6 @@
461 + clock-frequency = <1000000000>;
462 + cci-control-port = <&cci_control0>;
463 + operating-points-v2 = <&cluster_a7_opp_table>;
464 +- cooling-min-level = <0>;
465 +- cooling-max-level = <11>;
466 + #cooling-cells = <2>; /* min followed by max */
467 + };
468 +
469 +@@ -44,8 +42,6 @@
470 + clock-frequency = <1000000000>;
471 + cci-control-port = <&cci_control0>;
472 + operating-points-v2 = <&cluster_a7_opp_table>;
473 +- cooling-min-level = <0>;
474 +- cooling-max-level = <11>;
475 + #cooling-cells = <2>; /* min followed by max */
476 + };
477 +
478 +@@ -56,8 +52,6 @@
479 + clock-frequency = <1000000000>;
480 + cci-control-port = <&cci_control0>;
481 + operating-points-v2 = <&cluster_a7_opp_table>;
482 +- cooling-min-level = <0>;
483 +- cooling-max-level = <11>;
484 + #cooling-cells = <2>; /* min followed by max */
485 + };
486 +
487 +@@ -68,8 +62,6 @@
488 + clock-frequency = <1000000000>;
489 + cci-control-port = <&cci_control0>;
490 + operating-points-v2 = <&cluster_a7_opp_table>;
491 +- cooling-min-level = <0>;
492 +- cooling-max-level = <11>;
493 + #cooling-cells = <2>; /* min followed by max */
494 + };
495 +
496 +@@ -81,8 +73,6 @@
497 + clock-frequency = <1800000000>;
498 + cci-control-port = <&cci_control1>;
499 + operating-points-v2 = <&cluster_a15_opp_table>;
500 +- cooling-min-level = <0>;
501 +- cooling-max-level = <15>;
502 + #cooling-cells = <2>; /* min followed by max */
503 + };
504 +
505 +@@ -93,8 +83,6 @@
506 + clock-frequency = <1800000000>;
507 + cci-control-port = <&cci_control1>;
508 + operating-points-v2 = <&cluster_a15_opp_table>;
509 +- cooling-min-level = <0>;
510 +- cooling-max-level = <15>;
511 + #cooling-cells = <2>; /* min followed by max */
512 + };
513 +
514 +@@ -105,8 +93,6 @@
515 + clock-frequency = <1800000000>;
516 + cci-control-port = <&cci_control1>;
517 + operating-points-v2 = <&cluster_a15_opp_table>;
518 +- cooling-min-level = <0>;
519 +- cooling-max-level = <15>;
520 + #cooling-cells = <2>; /* min followed by max */
521 + };
522 +
523 +@@ -117,8 +103,6 @@
524 + clock-frequency = <1800000000>;
525 + cci-control-port = <&cci_control1>;
526 + operating-points-v2 = <&cluster_a15_opp_table>;
527 +- cooling-min-level = <0>;
528 +- cooling-max-level = <15>;
529 + #cooling-cells = <2>; /* min followed by max */
530 + };
531 + };
532 +diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
533 +index 791ca15c799e..bd1985694bca 100644
534 +--- a/arch/arm/boot/dts/socfpga_arria10.dtsi
535 ++++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
536 +@@ -601,7 +601,7 @@
537 + status = "disabled";
538 + };
539 +
540 +- sdr: sdr@ffc25000 {
541 ++ sdr: sdr@ffcfb100 {
542 + compatible = "altr,sdr-ctl", "syscon";
543 + reg = <0xffcfb100 0x80>;
544 + };
545 +diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
546 +index c2b9bcb0ef61..e79f3defe002 100644
547 +--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
548 ++++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
549 +@@ -249,7 +249,7 @@
550 +
551 + sysmgr: sysmgr@ffd12000 {
552 + compatible = "altr,sys-mgr", "syscon";
553 +- reg = <0xffd12000 0x1000>;
554 ++ reg = <0xffd12000 0x228>;
555 + };
556 +
557 + /* Local timer */
558 +diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
559 +index 9a8cb96555d6..9a947afaf74c 100644
560 +--- a/arch/arm64/lib/Makefile
561 ++++ b/arch/arm64/lib/Makefile
562 +@@ -12,7 +12,7 @@ lib-y := bitops.o clear_user.o delay.o copy_from_user.o \
563 + # when supported by the CPU. Result and argument registers are handled
564 + # correctly, based on the function prototype.
565 + lib-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o
566 +-CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2 \
567 ++CFLAGS_atomic_ll_sc.o := -ffixed-x1 -ffixed-x2 \
568 + -ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6 \
569 + -ffixed-x7 -fcall-saved-x8 -fcall-saved-x9 \
570 + -fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12 \
571 +diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
572 +index f24be0b5db50..c683c369bca5 100644
573 +--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
574 ++++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
575 +@@ -67,7 +67,7 @@ void (*cvmx_override_pko_queue_priority) (int pko_port,
576 + void (*cvmx_override_ipd_port_setup) (int ipd_port);
577 +
578 + /* Port count per interface */
579 +-static int interface_port_count[5];
580 ++static int interface_port_count[9];
581 +
582 + /**
583 + * Return the number of interfaces the chip has. Each interface
584 +diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
585 +index 1b4732e20137..843825a7e6e2 100644
586 +--- a/arch/parisc/kernel/entry.S
587 ++++ b/arch/parisc/kernel/entry.S
588 +@@ -185,7 +185,7 @@
589 + bv,n 0(%r3)
590 + nop
591 + .word 0 /* checksum (will be patched) */
592 +- .word PA(os_hpmc) /* address of handler */
593 ++ .word 0 /* address of handler */
594 + .word 0 /* length of handler */
595 + .endm
596 +
597 +diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
598 +index 781c3b9a3e46..fde654115564 100644
599 +--- a/arch/parisc/kernel/hpmc.S
600 ++++ b/arch/parisc/kernel/hpmc.S
601 +@@ -85,7 +85,7 @@ END(hpmc_pim_data)
602 +
603 + .import intr_save, code
604 + .align 16
605 +-ENTRY_CFI(os_hpmc)
606 ++ENTRY(os_hpmc)
607 + .os_hpmc:
608 +
609 + /*
610 +@@ -302,7 +302,6 @@ os_hpmc_6:
611 + b .
612 + nop
613 + .align 16 /* make function length multiple of 16 bytes */
614 +-ENDPROC_CFI(os_hpmc)
615 + .os_hpmc_end:
616 +
617 +
618 +diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
619 +index 8453724b8009..9a898d68f4a0 100644
620 +--- a/arch/parisc/kernel/traps.c
621 ++++ b/arch/parisc/kernel/traps.c
622 +@@ -836,7 +836,8 @@ void __init initialize_ivt(const void *iva)
623 + if (pdc_instr(&instr) == PDC_OK)
624 + ivap[0] = instr;
625 +
626 +- /* Compute Checksum for HPMC handler */
627 ++ /* Setup IVA and compute checksum for HPMC handler */
628 ++ ivap[6] = (u32)__pa(os_hpmc);
629 + length = os_hpmc_size;
630 + ivap[7] = length;
631 +
632 +diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
633 +index 13f7854e0d49..cc700f7dda54 100644
634 +--- a/arch/parisc/mm/init.c
635 ++++ b/arch/parisc/mm/init.c
636 +@@ -495,12 +495,8 @@ static void __init map_pages(unsigned long start_vaddr,
637 + pte = pte_mkhuge(pte);
638 + }
639 +
640 +- if (address >= end_paddr) {
641 +- if (force)
642 +- break;
643 +- else
644 +- pte_val(pte) = 0;
645 +- }
646 ++ if (address >= end_paddr)
647 ++ break;
648 +
649 + set_pte(pg_table, pte);
650 +
651 +diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
652 +index fad8ddd697ac..0abf2e7fd222 100644
653 +--- a/arch/powerpc/include/asm/mpic.h
654 ++++ b/arch/powerpc/include/asm/mpic.h
655 +@@ -393,7 +393,14 @@ extern struct bus_type mpic_subsys;
656 + #define MPIC_REGSET_TSI108 MPIC_REGSET(1) /* Tsi108/109 PIC */
657 +
658 + /* Get the version of primary MPIC */
659 ++#ifdef CONFIG_MPIC
660 + extern u32 fsl_mpic_primary_get_version(void);
661 ++#else
662 ++static inline u32 fsl_mpic_primary_get_version(void)
663 ++{
664 ++ return 0;
665 ++}
666 ++#endif
667 +
668 + /* Allocate the controller structure and setup the linux irq descs
669 + * for the range if interrupts passed in. No HW initialization is
670 +diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c
671 +index 395926b8c1ed..ffba4617d108 100644
672 +--- a/arch/s390/kvm/sthyi.c
673 ++++ b/arch/s390/kvm/sthyi.c
674 +@@ -174,17 +174,19 @@ static void fill_hdr(struct sthyi_sctns *sctns)
675 + static void fill_stsi_mac(struct sthyi_sctns *sctns,
676 + struct sysinfo_1_1_1 *sysinfo)
677 + {
678 ++ sclp_ocf_cpc_name_copy(sctns->mac.infmname);
679 ++ if (*(u64 *)sctns->mac.infmname != 0)
680 ++ sctns->mac.infmval1 |= MAC_NAME_VLD;
681 ++
682 + if (stsi(sysinfo, 1, 1, 1))
683 + return;
684 +
685 +- sclp_ocf_cpc_name_copy(sctns->mac.infmname);
686 +-
687 + memcpy(sctns->mac.infmtype, sysinfo->type, sizeof(sctns->mac.infmtype));
688 + memcpy(sctns->mac.infmmanu, sysinfo->manufacturer, sizeof(sctns->mac.infmmanu));
689 + memcpy(sctns->mac.infmpman, sysinfo->plant, sizeof(sctns->mac.infmpman));
690 + memcpy(sctns->mac.infmseq, sysinfo->sequence, sizeof(sctns->mac.infmseq));
691 +
692 +- sctns->mac.infmval1 |= MAC_ID_VLD | MAC_NAME_VLD;
693 ++ sctns->mac.infmval1 |= MAC_ID_VLD;
694 + }
695 +
696 + static void fill_stsi_par(struct sthyi_sctns *sctns,
697 +diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
698 +index 666d6b5c0440..9c3fc03abe9a 100644
699 +--- a/arch/sparc/include/asm/cpudata_64.h
700 ++++ b/arch/sparc/include/asm/cpudata_64.h
701 +@@ -28,7 +28,7 @@ typedef struct {
702 + unsigned short sock_id; /* physical package */
703 + unsigned short core_id;
704 + unsigned short max_cache_id; /* groupings of highest shared cache */
705 +- unsigned short proc_id; /* strand (aka HW thread) id */
706 ++ signed short proc_id; /* strand (aka HW thread) id */
707 + } cpuinfo_sparc;
708 +
709 + DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
710 +diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
711 +index 5c1f54758312..eceb0215bdee 100644
712 +--- a/arch/sparc/kernel/perf_event.c
713 ++++ b/arch/sparc/kernel/perf_event.c
714 +@@ -24,6 +24,7 @@
715 + #include <asm/cpudata.h>
716 + #include <linux/uaccess.h>
717 + #include <linux/atomic.h>
718 ++#include <linux/sched/clock.h>
719 + #include <asm/nmi.h>
720 + #include <asm/pcr.h>
721 + #include <asm/cacheflush.h>
722 +@@ -927,6 +928,8 @@ static void read_in_all_counters(struct cpu_hw_events *cpuc)
723 + sparc_perf_event_update(cp, &cp->hw,
724 + cpuc->current_idx[i]);
725 + cpuc->current_idx[i] = PIC_NO_INDEX;
726 ++ if (cp->hw.state & PERF_HES_STOPPED)
727 ++ cp->hw.state |= PERF_HES_ARCH;
728 + }
729 + }
730 + }
731 +@@ -959,10 +962,12 @@ static void calculate_single_pcr(struct cpu_hw_events *cpuc)
732 +
733 + enc = perf_event_get_enc(cpuc->events[i]);
734 + cpuc->pcr[0] &= ~mask_for_index(idx);
735 +- if (hwc->state & PERF_HES_STOPPED)
736 ++ if (hwc->state & PERF_HES_ARCH) {
737 + cpuc->pcr[0] |= nop_for_index(idx);
738 +- else
739 ++ } else {
740 + cpuc->pcr[0] |= event_encoding(enc, idx);
741 ++ hwc->state = 0;
742 ++ }
743 + }
744 + out:
745 + cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
746 +@@ -988,6 +993,9 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
747 +
748 + cpuc->current_idx[i] = idx;
749 +
750 ++ if (cp->hw.state & PERF_HES_ARCH)
751 ++ continue;
752 ++
753 + sparc_pmu_start(cp, PERF_EF_RELOAD);
754 + }
755 + out:
756 +@@ -1079,6 +1087,8 @@ static void sparc_pmu_start(struct perf_event *event, int flags)
757 + event->hw.state = 0;
758 +
759 + sparc_pmu_enable_event(cpuc, &event->hw, idx);
760 ++
761 ++ perf_event_update_userpage(event);
762 + }
763 +
764 + static void sparc_pmu_stop(struct perf_event *event, int flags)
765 +@@ -1371,9 +1381,9 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
766 + cpuc->events[n0] = event->hw.event_base;
767 + cpuc->current_idx[n0] = PIC_NO_INDEX;
768 +
769 +- event->hw.state = PERF_HES_UPTODATE;
770 ++ event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
771 + if (!(ef_flags & PERF_EF_START))
772 +- event->hw.state |= PERF_HES_STOPPED;
773 ++ event->hw.state |= PERF_HES_ARCH;
774 +
775 + /*
776 + * If group events scheduling transaction was started,
777 +@@ -1603,6 +1613,8 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
778 + struct perf_sample_data data;
779 + struct cpu_hw_events *cpuc;
780 + struct pt_regs *regs;
781 ++ u64 finish_clock;
782 ++ u64 start_clock;
783 + int i;
784 +
785 + if (!atomic_read(&active_events))
786 +@@ -1616,6 +1628,8 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
787 + return NOTIFY_DONE;
788 + }
789 +
790 ++ start_clock = sched_clock();
791 ++
792 + regs = args->regs;
793 +
794 + cpuc = this_cpu_ptr(&cpu_hw_events);
795 +@@ -1654,6 +1668,10 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
796 + sparc_pmu_stop(event, 0);
797 + }
798 +
799 ++ finish_clock = sched_clock();
800 ++
801 ++ perf_sample_event_took(finish_clock - start_clock);
802 ++
803 + return NOTIFY_STOP;
804 + }
805 +
806 +diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
807 +index d4e6cd4577e5..bf0e82400358 100644
808 +--- a/arch/x86/boot/tools/build.c
809 ++++ b/arch/x86/boot/tools/build.c
810 +@@ -391,6 +391,13 @@ int main(int argc, char ** argv)
811 + die("Unable to mmap '%s': %m", argv[2]);
812 + /* Number of 16-byte paragraphs, including space for a 4-byte CRC */
813 + sys_size = (sz + 15 + 4) / 16;
814 ++#ifdef CONFIG_EFI_STUB
815 ++ /*
816 ++ * COFF requires minimum 32-byte alignment of sections, and
817 ++ * adding a signature is problematic without that alignment.
818 ++ */
819 ++ sys_size = (sys_size + 1) & ~1;
820 ++#endif
821 +
822 + /* Patch the setup code with the appropriate size parameters */
823 + buf[0x1f1] = setup_sectors-1;
824 +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
825 +index 8418462298e7..673d6e988196 100644
826 +--- a/arch/x86/include/asm/cpufeatures.h
827 ++++ b/arch/x86/include/asm/cpufeatures.h
828 +@@ -220,6 +220,7 @@
829 + #define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
830 + #define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
831 + #define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
832 ++#define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* Enhanced IBRS */
833 +
834 + /* Virtualization flags: Linux defined, word 8 */
835 + #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
836 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
837 +index 4015b88383ce..367cdd263a5c 100644
838 +--- a/arch/x86/include/asm/kvm_host.h
839 ++++ b/arch/x86/include/asm/kvm_host.h
840 +@@ -174,6 +174,7 @@ enum {
841 +
842 + #define DR6_BD (1 << 13)
843 + #define DR6_BS (1 << 14)
844 ++#define DR6_BT (1 << 15)
845 + #define DR6_RTM (1 << 16)
846 + #define DR6_FIXED_1 0xfffe0ff0
847 + #define DR6_INIT 0xffff0ff0
848 +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
849 +index 8b38df98548e..1b4132161c1f 100644
850 +--- a/arch/x86/include/asm/nospec-branch.h
851 ++++ b/arch/x86/include/asm/nospec-branch.h
852 +@@ -215,6 +215,7 @@ enum spectre_v2_mitigation {
853 + SPECTRE_V2_RETPOLINE_GENERIC,
854 + SPECTRE_V2_RETPOLINE_AMD,
855 + SPECTRE_V2_IBRS,
856 ++ SPECTRE_V2_IBRS_ENHANCED,
857 + };
858 +
859 + /* The Speculative Store Bypass disable variants */
860 +diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
861 +index 5f00ecb9d251..2501be609b82 100644
862 +--- a/arch/x86/include/asm/tlbflush.h
863 ++++ b/arch/x86/include/asm/tlbflush.h
864 +@@ -466,6 +466,12 @@ static inline void __native_flush_tlb_one_user(unsigned long addr)
865 + */
866 + static inline void __flush_tlb_all(void)
867 + {
868 ++ /*
869 ++ * This is to catch users with enabled preemption and the PGE feature
870 ++ * and don't trigger the warning in __native_flush_tlb().
871 ++ */
872 ++ VM_WARN_ON_ONCE(preemptible());
873 ++
874 + if (boot_cpu_has(X86_FEATURE_PGE)) {
875 + __flush_tlb_global();
876 + } else {
877 +diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
878 +index 33399426793e..cc8258a5378b 100644
879 +--- a/arch/x86/kernel/check.c
880 ++++ b/arch/x86/kernel/check.c
881 +@@ -31,6 +31,11 @@ static __init int set_corruption_check(char *arg)
882 + ssize_t ret;
883 + unsigned long val;
884 +
885 ++ if (!arg) {
886 ++ pr_err("memory_corruption_check config string not provided\n");
887 ++ return -EINVAL;
888 ++ }
889 ++
890 + ret = kstrtoul(arg, 10, &val);
891 + if (ret)
892 + return ret;
893 +@@ -45,6 +50,11 @@ static __init int set_corruption_check_period(char *arg)
894 + ssize_t ret;
895 + unsigned long val;
896 +
897 ++ if (!arg) {
898 ++ pr_err("memory_corruption_check_period config string not provided\n");
899 ++ return -EINVAL;
900 ++ }
901 ++
902 + ret = kstrtoul(arg, 10, &val);
903 + if (ret)
904 + return ret;
905 +@@ -59,6 +69,11 @@ static __init int set_corruption_check_size(char *arg)
906 + char *end;
907 + unsigned size;
908 +
909 ++ if (!arg) {
910 ++ pr_err("memory_corruption_check_size config string not provided\n");
911 ++ return -EINVAL;
912 ++ }
913 ++
914 + size = memparse(arg, &end);
915 +
916 + if (*end == '\0')
917 +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
918 +index 3e435f88621d..aa6e7f75bccc 100644
919 +--- a/arch/x86/kernel/cpu/bugs.c
920 ++++ b/arch/x86/kernel/cpu/bugs.c
921 +@@ -34,12 +34,10 @@ static void __init spectre_v2_select_mitigation(void);
922 + static void __init ssb_select_mitigation(void);
923 + static void __init l1tf_select_mitigation(void);
924 +
925 +-/*
926 +- * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
927 +- * writes to SPEC_CTRL contain whatever reserved bits have been set.
928 +- */
929 +-u64 __ro_after_init x86_spec_ctrl_base;
930 ++/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
931 ++u64 x86_spec_ctrl_base;
932 + EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
933 ++static DEFINE_MUTEX(spec_ctrl_mutex);
934 +
935 + /*
936 + * The vendor and possibly platform specific bits which can be modified in
937 +@@ -140,6 +138,7 @@ static const char *spectre_v2_strings[] = {
938 + [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline",
939 + [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
940 + [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
941 ++ [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS",
942 + };
943 +
944 + #undef pr_fmt
945 +@@ -322,6 +321,46 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
946 + return cmd;
947 + }
948 +
949 ++static bool stibp_needed(void)
950 ++{
951 ++ if (spectre_v2_enabled == SPECTRE_V2_NONE)
952 ++ return false;
953 ++
954 ++ if (!boot_cpu_has(X86_FEATURE_STIBP))
955 ++ return false;
956 ++
957 ++ return true;
958 ++}
959 ++
960 ++static void update_stibp_msr(void *info)
961 ++{
962 ++ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
963 ++}
964 ++
965 ++void arch_smt_update(void)
966 ++{
967 ++ u64 mask;
968 ++
969 ++ if (!stibp_needed())
970 ++ return;
971 ++
972 ++ mutex_lock(&spec_ctrl_mutex);
973 ++ mask = x86_spec_ctrl_base;
974 ++ if (cpu_smt_control == CPU_SMT_ENABLED)
975 ++ mask |= SPEC_CTRL_STIBP;
976 ++ else
977 ++ mask &= ~SPEC_CTRL_STIBP;
978 ++
979 ++ if (mask != x86_spec_ctrl_base) {
980 ++ pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
981 ++ cpu_smt_control == CPU_SMT_ENABLED ?
982 ++ "Enabling" : "Disabling");
983 ++ x86_spec_ctrl_base = mask;
984 ++ on_each_cpu(update_stibp_msr, NULL, 1);
985 ++ }
986 ++ mutex_unlock(&spec_ctrl_mutex);
987 ++}
988 ++
989 + static void __init spectre_v2_select_mitigation(void)
990 + {
991 + enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
992 +@@ -341,6 +380,13 @@ static void __init spectre_v2_select_mitigation(void)
993 +
994 + case SPECTRE_V2_CMD_FORCE:
995 + case SPECTRE_V2_CMD_AUTO:
996 ++ if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
997 ++ mode = SPECTRE_V2_IBRS_ENHANCED;
998 ++ /* Force it so VMEXIT will restore correctly */
999 ++ x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
1000 ++ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
1001 ++ goto specv2_set_mode;
1002 ++ }
1003 + if (IS_ENABLED(CONFIG_RETPOLINE))
1004 + goto retpoline_auto;
1005 + break;
1006 +@@ -378,6 +424,7 @@ retpoline_auto:
1007 + setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
1008 + }
1009 +
1010 ++specv2_set_mode:
1011 + spectre_v2_enabled = mode;
1012 + pr_info("%s\n", spectre_v2_strings[mode]);
1013 +
1014 +@@ -400,12 +447,22 @@ retpoline_auto:
1015 +
1016 + /*
1017 + * Retpoline means the kernel is safe because it has no indirect
1018 +- * branches. But firmware isn't, so use IBRS to protect that.
1019 ++ * branches. Enhanced IBRS protects firmware too, so, enable restricted
1020 ++ * speculation around firmware calls only when Enhanced IBRS isn't
1021 ++ * supported.
1022 ++ *
1023 ++ * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
1024 ++ * the user might select retpoline on the kernel command line and if
1025 ++ * the CPU supports Enhanced IBRS, kernel might un-intentionally not
1026 ++ * enable IBRS around firmware calls.
1027 + */
1028 +- if (boot_cpu_has(X86_FEATURE_IBRS)) {
1029 ++ if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
1030 + setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
1031 + pr_info("Enabling Restricted Speculation for firmware calls\n");
1032 + }
1033 ++
1034 ++ /* Enable STIBP if appropriate */
1035 ++ arch_smt_update();
1036 + }
1037 +
1038 + #undef pr_fmt
1039 +@@ -798,6 +855,8 @@ static ssize_t l1tf_show_state(char *buf)
1040 + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
1041 + char *buf, unsigned int bug)
1042 + {
1043 ++ int ret;
1044 ++
1045 + if (!boot_cpu_has_bug(bug))
1046 + return sprintf(buf, "Not affected\n");
1047 +
1048 +@@ -812,10 +871,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
1049 + return sprintf(buf, "Mitigation: __user pointer sanitization\n");
1050 +
1051 + case X86_BUG_SPECTRE_V2:
1052 +- return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
1053 ++ ret = sprintf(buf, "%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
1054 + boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
1055 + boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
1056 ++ (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
1057 + spectre_v2_module_string());
1058 ++ return ret;
1059 +
1060 + case X86_BUG_SPEC_STORE_BYPASS:
1061 + return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
1062 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
1063 +index 7d2a7890a823..96643e2c75b8 100644
1064 +--- a/arch/x86/kernel/cpu/common.c
1065 ++++ b/arch/x86/kernel/cpu/common.c
1066 +@@ -967,6 +967,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
1067 + setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
1068 + setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
1069 +
1070 ++ if (ia32_cap & ARCH_CAP_IBRS_ALL)
1071 ++ setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
1072 ++
1073 + if (x86_match_cpu(cpu_no_meltdown))
1074 + return;
1075 +
1076 +diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
1077 +index 23f1691670b6..61a949d84dfa 100644
1078 +--- a/arch/x86/kernel/fpu/signal.c
1079 ++++ b/arch/x86/kernel/fpu/signal.c
1080 +@@ -314,7 +314,6 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
1081 + * thread's fpu state, reconstruct fxstate from the fsave
1082 + * header. Validate and sanitize the copied state.
1083 + */
1084 +- struct fpu *fpu = &tsk->thread.fpu;
1085 + struct user_i387_ia32_struct env;
1086 + int err = 0;
1087 +
1088 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1089 +index fd46d890296c..ec588cf4fe95 100644
1090 +--- a/arch/x86/kvm/vmx.c
1091 ++++ b/arch/x86/kvm/vmx.c
1092 +@@ -2733,10 +2733,13 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit
1093 + }
1094 + } else {
1095 + if (vmcs12->exception_bitmap & (1u << nr)) {
1096 +- if (nr == DB_VECTOR)
1097 ++ if (nr == DB_VECTOR) {
1098 + *exit_qual = vcpu->arch.dr6;
1099 +- else
1100 ++ *exit_qual &= ~(DR6_FIXED_1 | DR6_BT);
1101 ++ *exit_qual ^= DR6_RTM;
1102 ++ } else {
1103 + *exit_qual = 0;
1104 ++ }
1105 + return 1;
1106 + }
1107 + }
1108 +diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
1109 +index 464f53da3a6f..835620ab435f 100644
1110 +--- a/arch/x86/mm/pageattr.c
1111 ++++ b/arch/x86/mm/pageattr.c
1112 +@@ -2037,9 +2037,13 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
1113 +
1114 + /*
1115 + * We should perform an IPI and flush all tlbs,
1116 +- * but that can deadlock->flush only current cpu:
1117 ++ * but that can deadlock->flush only current cpu.
1118 ++ * Preemption needs to be disabled around __flush_tlb_all() due to
1119 ++ * CR3 reload in __native_flush_tlb().
1120 + */
1121 ++ preempt_disable();
1122 + __flush_tlb_all();
1123 ++ preempt_enable();
1124 +
1125 + arch_flush_lazy_mmu_mode();
1126 + }
1127 +diff --git a/arch/x86/platform/olpc/olpc-xo1-rtc.c b/arch/x86/platform/olpc/olpc-xo1-rtc.c
1128 +index a2b4efddd61a..8e7ddd7e313a 100644
1129 +--- a/arch/x86/platform/olpc/olpc-xo1-rtc.c
1130 ++++ b/arch/x86/platform/olpc/olpc-xo1-rtc.c
1131 +@@ -16,6 +16,7 @@
1132 +
1133 + #include <asm/msr.h>
1134 + #include <asm/olpc.h>
1135 ++#include <asm/x86_init.h>
1136 +
1137 + static void rtc_wake_on(struct device *dev)
1138 + {
1139 +@@ -75,6 +76,8 @@ static int __init xo1_rtc_init(void)
1140 + if (r)
1141 + return r;
1142 +
1143 ++ x86_platform.legacy.rtc = 0;
1144 ++
1145 + device_init_wakeup(&xo1_rtc_device.dev, 1);
1146 + return 0;
1147 + }
1148 +diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
1149 +index 7bd3ee08393e..d6d7b29b3be0 100644
1150 +--- a/arch/x86/xen/enlighten_pvh.c
1151 ++++ b/arch/x86/xen/enlighten_pvh.c
1152 +@@ -76,7 +76,7 @@ static void __init init_pvh_bootparams(void)
1153 + * Version 2.12 supports Xen entry point but we will use default x86/PC
1154 + * environment (i.e. hardware_subarch 0).
1155 + */
1156 +- pvh_bootparams.hdr.version = 0x212;
1157 ++ pvh_bootparams.hdr.version = (2 << 8) | 12;
1158 + pvh_bootparams.hdr.type_of_loader = (9 << 4) | 0; /* Xen loader */
1159 + }
1160 +
1161 +diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
1162 +index 33a783c77d96..184b36922397 100644
1163 +--- a/arch/x86/xen/platform-pci-unplug.c
1164 ++++ b/arch/x86/xen/platform-pci-unplug.c
1165 +@@ -146,6 +146,10 @@ void xen_unplug_emulated_devices(void)
1166 + {
1167 + int r;
1168 +
1169 ++ /* PVH guests don't have emulated devices. */
1170 ++ if (xen_pvh_domain())
1171 ++ return;
1172 ++
1173 + /* user explicitly requested no unplug */
1174 + if (xen_emul_unplug & XEN_UNPLUG_NEVER)
1175 + return;
1176 +diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
1177 +index 08324c64005d..2527540051ff 100644
1178 +--- a/arch/x86/xen/spinlock.c
1179 ++++ b/arch/x86/xen/spinlock.c
1180 +@@ -9,6 +9,7 @@
1181 + #include <linux/log2.h>
1182 + #include <linux/gfp.h>
1183 + #include <linux/slab.h>
1184 ++#include <linux/atomic.h>
1185 +
1186 + #include <asm/paravirt.h>
1187 +
1188 +@@ -20,6 +21,7 @@
1189 +
1190 + static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
1191 + static DEFINE_PER_CPU(char *, irq_name);
1192 ++static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
1193 + static bool xen_pvspin = true;
1194 +
1195 + #include <asm/qspinlock.h>
1196 +@@ -41,33 +43,24 @@ static void xen_qlock_kick(int cpu)
1197 + static void xen_qlock_wait(u8 *byte, u8 val)
1198 + {
1199 + int irq = __this_cpu_read(lock_kicker_irq);
1200 ++ atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);
1201 +
1202 + /* If kicker interrupts not initialized yet, just spin */
1203 +- if (irq == -1)
1204 ++ if (irq == -1 || in_nmi())
1205 + return;
1206 +
1207 +- /* clear pending */
1208 +- xen_clear_irq_pending(irq);
1209 +- barrier();
1210 +-
1211 +- /*
1212 +- * We check the byte value after clearing pending IRQ to make sure
1213 +- * that we won't miss a wakeup event because of the clearing.
1214 +- *
1215 +- * The sync_clear_bit() call in xen_clear_irq_pending() is atomic.
1216 +- * So it is effectively a memory barrier for x86.
1217 +- */
1218 +- if (READ_ONCE(*byte) != val)
1219 +- return;
1220 ++ /* Detect reentry. */
1221 ++ atomic_inc(nest_cnt);
1222 +
1223 +- /*
1224 +- * If an interrupt happens here, it will leave the wakeup irq
1225 +- * pending, which will cause xen_poll_irq() to return
1226 +- * immediately.
1227 +- */
1228 ++ /* If irq pending already and no nested call clear it. */
1229 ++ if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
1230 ++ xen_clear_irq_pending(irq);
1231 ++ } else if (READ_ONCE(*byte) == val) {
1232 ++ /* Block until irq becomes pending (or a spurious wakeup) */
1233 ++ xen_poll_irq(irq);
1234 ++ }
1235 +
1236 +- /* Block until irq becomes pending (or perhaps a spurious wakeup) */
1237 +- xen_poll_irq(irq);
1238 ++ atomic_dec(nest_cnt);
1239 + }
1240 +
1241 + static irqreturn_t dummy_handler(int irq, void *dev_id)
1242 +diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S
1243 +index 5d7554c025fd..7ecbd3dde2ea 100644
1244 +--- a/arch/x86/xen/xen-pvh.S
1245 ++++ b/arch/x86/xen/xen-pvh.S
1246 +@@ -178,7 +178,7 @@ canary:
1247 + .fill 48, 1, 0
1248 +
1249 + early_stack:
1250 +- .fill 256, 1, 0
1251 ++ .fill BOOT_STACK_SIZE, 1, 0
1252 + early_stack_end:
1253 +
1254 + ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
1255 +diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
1256 +index 414ba686a847..c1727604ad14 100644
1257 +--- a/block/bfq-wf2q.c
1258 ++++ b/block/bfq-wf2q.c
1259 +@@ -1172,10 +1172,17 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
1260 + st = bfq_entity_service_tree(entity);
1261 + is_in_service = entity == sd->in_service_entity;
1262 +
1263 +- if (is_in_service) {
1264 +- bfq_calc_finish(entity, entity->service);
1265 ++ bfq_calc_finish(entity, entity->service);
1266 ++
1267 ++ if (is_in_service)
1268 + sd->in_service_entity = NULL;
1269 +- }
1270 ++ else
1271 ++ /*
1272 ++ * Non in-service entity: nobody will take care of
1273 ++ * resetting its service counter on expiration. Do it
1274 ++ * now.
1275 ++ */
1276 ++ entity->service = 0;
1277 +
1278 + if (entity->tree == &st->active)
1279 + bfq_active_extract(st, entity);
1280 +diff --git a/crypto/lrw.c b/crypto/lrw.c
1281 +index fdba6dd6db63..886f91f2426c 100644
1282 +--- a/crypto/lrw.c
1283 ++++ b/crypto/lrw.c
1284 +@@ -139,7 +139,12 @@ static inline int get_index128(be128 *block)
1285 + return x + ffz(val);
1286 + }
1287 +
1288 +- return x;
1289 ++ /*
1290 ++ * If we get here, then x == 128 and we are incrementing the counter
1291 ++ * from all ones to all zeros. This means we must return index 127, i.e.
1292 ++ * the one corresponding to key2*{ 1,...,1 }.
1293 ++ */
1294 ++ return 127;
1295 + }
1296 +
1297 + static int post_crypt(struct skcipher_request *req)
1298 +diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
1299 +index e339960dcac7..f7affe7cf0b4 100644
1300 +--- a/crypto/tcrypt.c
1301 ++++ b/crypto/tcrypt.c
1302 +@@ -727,6 +727,9 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
1303 + break;
1304 + }
1305 +
1306 ++ if (speed[i].klen)
1307 ++ crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen);
1308 ++
1309 + pr_info("test%3u "
1310 + "(%5u byte blocks,%5u bytes per update,%4u updates): ",
1311 + i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
1312 +diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
1313 +index 75c3cb377b98..a56d3f352765 100644
1314 +--- a/drivers/acpi/acpi_lpss.c
1315 ++++ b/drivers/acpi/acpi_lpss.c
1316 +@@ -326,9 +326,11 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
1317 + { "INT33FC", },
1318 +
1319 + /* Braswell LPSS devices */
1320 ++ { "80862286", LPSS_ADDR(lpss_dma_desc) },
1321 + { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
1322 + { "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
1323 + { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
1324 ++ { "808622C0", LPSS_ADDR(lpss_dma_desc) },
1325 + { "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },
1326 +
1327 + /* Broadwell LPSS devices */
1328 +diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
1329 +index 86c10599d9f8..ccf07674a2a0 100644
1330 +--- a/drivers/acpi/acpi_processor.c
1331 ++++ b/drivers/acpi/acpi_processor.c
1332 +@@ -642,7 +642,7 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
1333 +
1334 + status = acpi_get_type(handle, &acpi_type);
1335 + if (ACPI_FAILURE(status))
1336 +- return false;
1337 ++ return status;
1338 +
1339 + switch (acpi_type) {
1340 + case ACPI_TYPE_PROCESSOR:
1341 +@@ -662,11 +662,12 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
1342 + }
1343 +
1344 + processor_validated_ids_update(uid);
1345 +- return true;
1346 ++ return AE_OK;
1347 +
1348 + err:
1349 ++ /* Exit on error, but don't abort the namespace walk */
1350 + acpi_handle_info(handle, "Invalid processor object\n");
1351 +- return false;
1352 ++ return AE_OK;
1353 +
1354 + }
1355 +
1356 +diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
1357 +index 92da886180aa..1dacc42e2dcf 100644
1358 +--- a/drivers/block/ataflop.c
1359 ++++ b/drivers/block/ataflop.c
1360 +@@ -1935,6 +1935,11 @@ static int __init atari_floppy_init (void)
1361 + unit[i].disk = alloc_disk(1);
1362 + if (!unit[i].disk)
1363 + goto Enomem;
1364 ++
1365 ++ unit[i].disk->queue = blk_init_queue(do_fd_request,
1366 ++ &ataflop_lock);
1367 ++ if (!unit[i].disk->queue)
1368 ++ goto Enomem;
1369 + }
1370 +
1371 + if (UseTrackbuffer < 0)
1372 +@@ -1966,10 +1971,6 @@ static int __init atari_floppy_init (void)
1373 + sprintf(unit[i].disk->disk_name, "fd%d", i);
1374 + unit[i].disk->fops = &floppy_fops;
1375 + unit[i].disk->private_data = &unit[i];
1376 +- unit[i].disk->queue = blk_init_queue(do_fd_request,
1377 +- &ataflop_lock);
1378 +- if (!unit[i].disk->queue)
1379 +- goto Enomem;
1380 + set_capacity(unit[i].disk, MAX_DISK_SIZE * 2);
1381 + add_disk(unit[i].disk);
1382 + }
1383 +@@ -1984,13 +1985,17 @@ static int __init atari_floppy_init (void)
1384 +
1385 + return 0;
1386 + Enomem:
1387 +- while (i--) {
1388 +- struct request_queue *q = unit[i].disk->queue;
1389 ++ do {
1390 ++ struct gendisk *disk = unit[i].disk;
1391 +
1392 +- put_disk(unit[i].disk);
1393 +- if (q)
1394 +- blk_cleanup_queue(q);
1395 +- }
1396 ++ if (disk) {
1397 ++ if (disk->queue) {
1398 ++ blk_cleanup_queue(disk->queue);
1399 ++ disk->queue = NULL;
1400 ++ }
1401 ++ put_disk(unit[i].disk);
1402 ++ }
1403 ++ } while (i--);
1404 +
1405 + unregister_blkdev(FLOPPY_MAJOR, "fd");
1406 + return -ENOMEM;
1407 +diff --git a/drivers/block/swim.c b/drivers/block/swim.c
1408 +index e88d50f75a4a..58e308145e95 100644
1409 +--- a/drivers/block/swim.c
1410 ++++ b/drivers/block/swim.c
1411 +@@ -887,8 +887,17 @@ static int swim_floppy_init(struct swim_priv *swd)
1412 +
1413 + exit_put_disks:
1414 + unregister_blkdev(FLOPPY_MAJOR, "fd");
1415 +- while (drive--)
1416 +- put_disk(swd->unit[drive].disk);
1417 ++ do {
1418 ++ struct gendisk *disk = swd->unit[drive].disk;
1419 ++
1420 ++ if (disk) {
1421 ++ if (disk->queue) {
1422 ++ blk_cleanup_queue(disk->queue);
1423 ++ disk->queue = NULL;
1424 ++ }
1425 ++ put_disk(disk);
1426 ++ }
1427 ++ } while (drive--);
1428 + return err;
1429 + }
1430 +
1431 +diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
1432 +index 7d23225f79ed..32ac5f551e55 100644
1433 +--- a/drivers/block/xen-blkfront.c
1434 ++++ b/drivers/block/xen-blkfront.c
1435 +@@ -1910,6 +1910,7 @@ static int negotiate_mq(struct blkfront_info *info)
1436 + info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
1437 + if (!info->rinfo) {
1438 + xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
1439 ++ info->nr_rings = 0;
1440 + return -ENOMEM;
1441 + }
1442 +
1443 +@@ -2471,6 +2472,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
1444 +
1445 + dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
1446 +
1447 ++ if (!info)
1448 ++ return 0;
1449 ++
1450 + blkif_free(info, 0);
1451 +
1452 + mutex_lock(&info->mutex);
1453 +diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
1454 +index cc4bdefa6648..67315cb28826 100644
1455 +--- a/drivers/bluetooth/btbcm.c
1456 ++++ b/drivers/bluetooth/btbcm.c
1457 +@@ -325,6 +325,7 @@ static const struct {
1458 + { 0x4103, "BCM4330B1" }, /* 002.001.003 */
1459 + { 0x410e, "BCM43341B0" }, /* 002.001.014 */
1460 + { 0x4406, "BCM4324B3" }, /* 002.004.006 */
1461 ++ { 0x6109, "BCM4335C0" }, /* 003.001.009 */
1462 + { 0x610c, "BCM4354" }, /* 003.001.012 */
1463 + { 0x2209, "BCM43430A1" }, /* 001.002.009 */
1464 + { }
1465 +diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
1466 +index 932678617dfa..0904ab442d31 100644
1467 +--- a/drivers/char/ipmi/ipmi_ssif.c
1468 ++++ b/drivers/char/ipmi/ipmi_ssif.c
1469 +@@ -621,8 +621,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
1470 + flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
1471 + ssif_info->waiting_alert = true;
1472 + ssif_info->rtc_us_timer = SSIF_MSG_USEC;
1473 +- mod_timer(&ssif_info->retry_timer,
1474 +- jiffies + SSIF_MSG_JIFFIES);
1475 ++ if (!ssif_info->stopping)
1476 ++ mod_timer(&ssif_info->retry_timer,
1477 ++ jiffies + SSIF_MSG_JIFFIES);
1478 + ipmi_ssif_unlock_cond(ssif_info, flags);
1479 + return;
1480 + }
1481 +@@ -954,8 +955,9 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
1482 + ssif_info->waiting_alert = true;
1483 + ssif_info->retries_left = SSIF_RECV_RETRIES;
1484 + ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
1485 +- mod_timer(&ssif_info->retry_timer,
1486 +- jiffies + SSIF_MSG_PART_JIFFIES);
1487 ++ if (!ssif_info->stopping)
1488 ++ mod_timer(&ssif_info->retry_timer,
1489 ++ jiffies + SSIF_MSG_PART_JIFFIES);
1490 + ipmi_ssif_unlock_cond(ssif_info, flags);
1491 + }
1492 + }
1493 +diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
1494 +index 89d5915b1a3f..6e93df272c20 100644
1495 +--- a/drivers/char/tpm/tpm-interface.c
1496 ++++ b/drivers/char/tpm/tpm-interface.c
1497 +@@ -653,7 +653,8 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_space *space,
1498 + return len;
1499 +
1500 + err = be32_to_cpu(header->return_code);
1501 +- if (err != 0 && desc)
1502 ++ if (err != 0 && err != TPM_ERR_DISABLED && err != TPM_ERR_DEACTIVATED
1503 ++ && desc)
1504 + dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err,
1505 + desc);
1506 + if (err)
1507 +diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
1508 +index 656e8af95d52..2cffaf567d99 100644
1509 +--- a/drivers/char/tpm/xen-tpmfront.c
1510 ++++ b/drivers/char/tpm/xen-tpmfront.c
1511 +@@ -203,7 +203,7 @@ static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
1512 + return -ENOMEM;
1513 + }
1514 +
1515 +- rv = xenbus_grant_ring(dev, &priv->shr, 1, &gref);
1516 ++ rv = xenbus_grant_ring(dev, priv->shr, 1, &gref);
1517 + if (rv < 0)
1518 + return rv;
1519 +
1520 +diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
1521 +index d83ab94d041a..ca6ee9f389b6 100644
1522 +--- a/drivers/cpufreq/cpufreq-dt.c
1523 ++++ b/drivers/cpufreq/cpufreq-dt.c
1524 +@@ -32,6 +32,7 @@ struct private_data {
1525 + struct device *cpu_dev;
1526 + struct thermal_cooling_device *cdev;
1527 + const char *reg_name;
1528 ++ bool have_static_opps;
1529 + };
1530 +
1531 + static struct freq_attr *cpufreq_dt_attr[] = {
1532 +@@ -196,6 +197,15 @@ static int cpufreq_init(struct cpufreq_policy *policy)
1533 + }
1534 + }
1535 +
1536 ++ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1537 ++ if (!priv) {
1538 ++ ret = -ENOMEM;
1539 ++ goto out_put_regulator;
1540 ++ }
1541 ++
1542 ++ priv->reg_name = name;
1543 ++ priv->opp_table = opp_table;
1544 ++
1545 + /*
1546 + * Initialize OPP tables for all policy->cpus. They will be shared by
1547 + * all CPUs which have marked their CPUs shared with OPP bindings.
1548 +@@ -206,7 +216,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
1549 + *
1550 + * OPPs might be populated at runtime, don't check for error here
1551 + */
1552 +- dev_pm_opp_of_cpumask_add_table(policy->cpus);
1553 ++ if (!dev_pm_opp_of_cpumask_add_table(policy->cpus))
1554 ++ priv->have_static_opps = true;
1555 +
1556 + /*
1557 + * But we need OPP table to function so if it is not there let's
1558 +@@ -232,19 +243,10 @@ static int cpufreq_init(struct cpufreq_policy *policy)
1559 + __func__, ret);
1560 + }
1561 +
1562 +- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1563 +- if (!priv) {
1564 +- ret = -ENOMEM;
1565 +- goto out_free_opp;
1566 +- }
1567 +-
1568 +- priv->reg_name = name;
1569 +- priv->opp_table = opp_table;
1570 +-
1571 + ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
1572 + if (ret) {
1573 + dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
1574 +- goto out_free_priv;
1575 ++ goto out_free_opp;
1576 + }
1577 +
1578 + priv->cpu_dev = cpu_dev;
1579 +@@ -280,10 +282,11 @@ static int cpufreq_init(struct cpufreq_policy *policy)
1580 +
1581 + out_free_cpufreq_table:
1582 + dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
1583 +-out_free_priv:
1584 +- kfree(priv);
1585 + out_free_opp:
1586 +- dev_pm_opp_of_cpumask_remove_table(policy->cpus);
1587 ++ if (priv->have_static_opps)
1588 ++ dev_pm_opp_of_cpumask_remove_table(policy->cpus);
1589 ++ kfree(priv);
1590 ++out_put_regulator:
1591 + if (name)
1592 + dev_pm_opp_put_regulators(opp_table);
1593 + out_put_clk:
1594 +@@ -298,7 +301,8 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
1595 +
1596 + cpufreq_cooling_unregister(priv->cdev);
1597 + dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
1598 +- dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
1599 ++ if (priv->have_static_opps)
1600 ++ dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
1601 + if (priv->reg_name)
1602 + dev_pm_opp_put_regulators(priv->opp_table);
1603 +
1604 +diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
1605 +index f20f20a77d4d..4268f87e99fc 100644
1606 +--- a/drivers/cpufreq/cpufreq_conservative.c
1607 ++++ b/drivers/cpufreq/cpufreq_conservative.c
1608 +@@ -80,8 +80,10 @@ static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
1609 + * changed in the meantime, so fall back to current frequency in that
1610 + * case.
1611 + */
1612 +- if (requested_freq > policy->max || requested_freq < policy->min)
1613 ++ if (requested_freq > policy->max || requested_freq < policy->min) {
1614 + requested_freq = policy->cur;
1615 ++ dbs_info->requested_freq = requested_freq;
1616 ++ }
1617 +
1618 + freq_step = get_freq_step(cs_tuners, policy);
1619 +
1620 +@@ -92,7 +94,7 @@ static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
1621 + if (policy_dbs->idle_periods < UINT_MAX) {
1622 + unsigned int freq_steps = policy_dbs->idle_periods * freq_step;
1623 +
1624 +- if (requested_freq > freq_steps)
1625 ++ if (requested_freq > policy->min + freq_steps)
1626 + requested_freq -= freq_steps;
1627 + else
1628 + requested_freq = policy->min;
1629 +diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
1630 +index fee363865d88..e5513cc59ec3 100644
1631 +--- a/drivers/crypto/caam/regs.h
1632 ++++ b/drivers/crypto/caam/regs.h
1633 +@@ -70,22 +70,22 @@
1634 + extern bool caam_little_end;
1635 + extern bool caam_imx;
1636 +
1637 +-#define caam_to_cpu(len) \
1638 +-static inline u##len caam##len ## _to_cpu(u##len val) \
1639 +-{ \
1640 +- if (caam_little_end) \
1641 +- return le##len ## _to_cpu(val); \
1642 +- else \
1643 +- return be##len ## _to_cpu(val); \
1644 ++#define caam_to_cpu(len) \
1645 ++static inline u##len caam##len ## _to_cpu(u##len val) \
1646 ++{ \
1647 ++ if (caam_little_end) \
1648 ++ return le##len ## _to_cpu((__force __le##len)val); \
1649 ++ else \
1650 ++ return be##len ## _to_cpu((__force __be##len)val); \
1651 + }
1652 +
1653 +-#define cpu_to_caam(len) \
1654 +-static inline u##len cpu_to_caam##len(u##len val) \
1655 +-{ \
1656 +- if (caam_little_end) \
1657 +- return cpu_to_le##len(val); \
1658 +- else \
1659 +- return cpu_to_be##len(val); \
1660 ++#define cpu_to_caam(len) \
1661 ++static inline u##len cpu_to_caam##len(u##len val) \
1662 ++{ \
1663 ++ if (caam_little_end) \
1664 ++ return (__force u##len)cpu_to_le##len(val); \
1665 ++ else \
1666 ++ return (__force u##len)cpu_to_be##len(val); \
1667 + }
1668 +
1669 + caam_to_cpu(16)
1670 +diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
1671 +index 7373b7a555ec..803cfb4523b0 100644
1672 +--- a/drivers/dma/dma-jz4780.c
1673 ++++ b/drivers/dma/dma-jz4780.c
1674 +@@ -754,6 +754,11 @@ static int jz4780_dma_probe(struct platform_device *pdev)
1675 + struct resource *res;
1676 + int i, ret;
1677 +
1678 ++ if (!dev->of_node) {
1679 ++ dev_err(dev, "This driver must be probed from devicetree\n");
1680 ++ return -EINVAL;
1681 ++ }
1682 ++
1683 + jzdma = devm_kzalloc(dev, sizeof(*jzdma), GFP_KERNEL);
1684 + if (!jzdma)
1685 + return -ENOMEM;
1686 +diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
1687 +index 854deb0da07c..68680e4151ea 100644
1688 +--- a/drivers/dma/ioat/init.c
1689 ++++ b/drivers/dma/ioat/init.c
1690 +@@ -1205,8 +1205,15 @@ static void ioat_shutdown(struct pci_dev *pdev)
1691 +
1692 + spin_lock_bh(&ioat_chan->prep_lock);
1693 + set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
1694 +- del_timer_sync(&ioat_chan->timer);
1695 + spin_unlock_bh(&ioat_chan->prep_lock);
1696 ++ /*
1697 ++ * Synchronization rule for del_timer_sync():
1698 ++ * - The caller must not hold locks which would prevent
1699 ++ * completion of the timer's handler.
1700 ++ * So prep_lock cannot be held before calling it.
1701 ++ */
1702 ++ del_timer_sync(&ioat_chan->timer);
1703 ++
1704 + /* this should quiesce then reset */
1705 + ioat_reset_hw(ioat_chan);
1706 + }
1707 +diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
1708 +index 4cf0d4d0cecf..25610286979f 100644
1709 +--- a/drivers/dma/ppc4xx/adma.c
1710 ++++ b/drivers/dma/ppc4xx/adma.c
1711 +@@ -4360,7 +4360,7 @@ static ssize_t enable_store(struct device_driver *dev, const char *buf,
1712 + }
1713 + static DRIVER_ATTR_RW(enable);
1714 +
1715 +-static ssize_t poly_store(struct device_driver *dev, char *buf)
1716 ++static ssize_t poly_show(struct device_driver *dev, char *buf)
1717 + {
1718 + ssize_t size = 0;
1719 + u32 reg;
1720 +diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
1721 +index 786fc8fcc38e..32192e98159b 100644
1722 +--- a/drivers/dma/stm32-dma.c
1723 ++++ b/drivers/dma/stm32-dma.c
1724 +@@ -429,6 +429,8 @@ static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
1725 + dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr);
1726 + }
1727 +
1728 ++static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan);
1729 ++
1730 + static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
1731 + {
1732 + struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
1733 +@@ -471,6 +473,9 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
1734 + if (status)
1735 + stm32_dma_irq_clear(chan, status);
1736 +
1737 ++ if (chan->desc->cyclic)
1738 ++ stm32_dma_configure_next_sg(chan);
1739 ++
1740 + stm32_dma_dump_reg(chan);
1741 +
1742 + /* Start DMA */
1743 +@@ -564,8 +569,7 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
1744 + if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
1745 + dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
1746 + stm32_dma_start_transfer(chan);
1747 +- if (chan->desc->cyclic)
1748 +- stm32_dma_configure_next_sg(chan);
1749 ++
1750 + }
1751 + spin_unlock_irqrestore(&chan->vchan.lock, flags);
1752 + }
1753 +diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
1754 +index 59ce32e405ac..667f5ba0403c 100644
1755 +--- a/drivers/edac/amd64_edac.c
1756 ++++ b/drivers/edac/amd64_edac.c
1757 +@@ -2200,6 +2200,15 @@ static struct amd64_family_type family_types[] = {
1758 + .dbam_to_cs = f17_base_addr_to_cs_size,
1759 + }
1760 + },
1761 ++ [F17_M10H_CPUS] = {
1762 ++ .ctl_name = "F17h_M10h",
1763 ++ .f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
1764 ++ .f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
1765 ++ .ops = {
1766 ++ .early_channel_count = f17_early_channel_count,
1767 ++ .dbam_to_cs = f17_base_addr_to_cs_size,
1768 ++ }
1769 ++ },
1770 + };
1771 +
1772 + /*
1773 +@@ -3188,6 +3197,11 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
1774 + break;
1775 +
1776 + case 0x17:
1777 ++ if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
1778 ++ fam_type = &family_types[F17_M10H_CPUS];
1779 ++ pvt->ops = &family_types[F17_M10H_CPUS].ops;
1780 ++ break;
1781 ++ }
1782 + fam_type = &family_types[F17_CPUS];
1783 + pvt->ops = &family_types[F17_CPUS].ops;
1784 + break;
1785 +diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
1786 +index 1d4b74e9a037..4242f8e39c18 100644
1787 +--- a/drivers/edac/amd64_edac.h
1788 ++++ b/drivers/edac/amd64_edac.h
1789 +@@ -115,6 +115,8 @@
1790 + #define PCI_DEVICE_ID_AMD_16H_M30H_NB_F2 0x1582
1791 + #define PCI_DEVICE_ID_AMD_17H_DF_F0 0x1460
1792 + #define PCI_DEVICE_ID_AMD_17H_DF_F6 0x1466
1793 ++#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F0 0x15e8
1794 ++#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee
1795 +
1796 + /*
1797 + * Function 1 - Address Map
1798 +@@ -281,6 +283,7 @@ enum amd_families {
1799 + F16_CPUS,
1800 + F16_M30H_CPUS,
1801 + F17_CPUS,
1802 ++ F17_M10H_CPUS,
1803 + NUM_FAMILIES,
1804 + };
1805 +
1806 +diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
1807 +index 6c7d5f20eacb..2054a24b41d7 100644
1808 +--- a/drivers/edac/i7core_edac.c
1809 ++++ b/drivers/edac/i7core_edac.c
1810 +@@ -1711,6 +1711,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
1811 + u32 errnum = find_first_bit(&error, 32);
1812 +
1813 + if (uncorrected_error) {
1814 ++ core_err_cnt = 1;
1815 + if (ripv)
1816 + tp_event = HW_EVENT_ERR_FATAL;
1817 + else
1818 +diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
1819 +index 0dc0d595c47c..b0b390a1da15 100644
1820 +--- a/drivers/edac/sb_edac.c
1821 ++++ b/drivers/edac/sb_edac.c
1822 +@@ -2891,6 +2891,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
1823 + recoverable = GET_BITFIELD(m->status, 56, 56);
1824 +
1825 + if (uncorrected_error) {
1826 ++ core_err_cnt = 1;
1827 + if (ripv) {
1828 + type = "FATAL";
1829 + tp_event = HW_EVENT_ERR_FATAL;
1830 +diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
1831 +index 16dea97568a1..5dafd4fa8f5e 100644
1832 +--- a/drivers/edac/skx_edac.c
1833 ++++ b/drivers/edac/skx_edac.c
1834 +@@ -604,7 +604,7 @@ sad_found:
1835 + break;
1836 + case 2:
1837 + lchan = (addr >> shift) % 2;
1838 +- lchan = (lchan << 1) | ~lchan;
1839 ++ lchan = (lchan << 1) | !lchan;
1840 + break;
1841 + case 3:
1842 + lchan = ((addr >> shift) % 2) << 1;
1843 +@@ -895,6 +895,7 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
1844 + recoverable = GET_BITFIELD(m->status, 56, 56);
1845 +
1846 + if (uncorrected_error) {
1847 ++ core_err_cnt = 1;
1848 + if (ripv) {
1849 + type = "FATAL";
1850 + tp_event = HW_EVENT_ERR_FATAL;
1851 +diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
1852 +index cf307bdc3d53..89761551c15d 100644
1853 +--- a/drivers/hid/usbhid/hiddev.c
1854 ++++ b/drivers/hid/usbhid/hiddev.c
1855 +@@ -512,14 +512,24 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
1856 + if (cmd == HIDIOCGCOLLECTIONINDEX) {
1857 + if (uref->usage_index >= field->maxusage)
1858 + goto inval;
1859 ++ uref->usage_index =
1860 ++ array_index_nospec(uref->usage_index,
1861 ++ field->maxusage);
1862 + } else if (uref->usage_index >= field->report_count)
1863 + goto inval;
1864 + }
1865 +
1866 +- if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
1867 +- (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
1868 +- uref->usage_index + uref_multi->num_values > field->report_count))
1869 +- goto inval;
1870 ++ if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) {
1871 ++ if (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
1872 ++ uref->usage_index + uref_multi->num_values >
1873 ++ field->report_count)
1874 ++ goto inval;
1875 ++
1876 ++ uref->usage_index =
1877 ++ array_index_nospec(uref->usage_index,
1878 ++ field->report_count -
1879 ++ uref_multi->num_values);
1880 ++ }
1881 +
1882 + switch (cmd) {
1883 + case HIDIOCGUSAGE:
1884 +diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
1885 +index 1700b4e7758d..752c52f7353d 100644
1886 +--- a/drivers/hv/channel_mgmt.c
1887 ++++ b/drivers/hv/channel_mgmt.c
1888 +@@ -599,16 +599,18 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
1889 + bool perf_chn = vmbus_devs[dev_type].perf_device;
1890 + struct vmbus_channel *primary = channel->primary_channel;
1891 + int next_node;
1892 +- struct cpumask available_mask;
1893 ++ cpumask_var_t available_mask;
1894 + struct cpumask *alloced_mask;
1895 +
1896 + if ((vmbus_proto_version == VERSION_WS2008) ||
1897 +- (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
1898 ++ (vmbus_proto_version == VERSION_WIN7) || (!perf_chn) ||
1899 ++ !alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
1900 + /*
1901 + * Prior to win8, all channel interrupts are
1902 + * delivered on cpu 0.
1903 + * Also if the channel is not a performance critical
1904 + * channel, bind it to cpu 0.
1905 ++ * In case alloc_cpumask_var() fails, bind it to cpu 0.
1906 + */
1907 + channel->numa_node = 0;
1908 + channel->target_cpu = 0;
1909 +@@ -646,7 +648,7 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
1910 + cpumask_clear(alloced_mask);
1911 + }
1912 +
1913 +- cpumask_xor(&available_mask, alloced_mask,
1914 ++ cpumask_xor(available_mask, alloced_mask,
1915 + cpumask_of_node(primary->numa_node));
1916 +
1917 + cur_cpu = -1;
1918 +@@ -664,10 +666,10 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
1919 + }
1920 +
1921 + while (true) {
1922 +- cur_cpu = cpumask_next(cur_cpu, &available_mask);
1923 ++ cur_cpu = cpumask_next(cur_cpu, available_mask);
1924 + if (cur_cpu >= nr_cpu_ids) {
1925 + cur_cpu = -1;
1926 +- cpumask_copy(&available_mask,
1927 ++ cpumask_copy(available_mask,
1928 + cpumask_of_node(primary->numa_node));
1929 + continue;
1930 + }
1931 +@@ -697,6 +699,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
1932 +
1933 + channel->target_cpu = cur_cpu;
1934 + channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu);
1935 ++
1936 ++ free_cpumask_var(available_mask);
1937 + }
1938 +
1939 + static void vmbus_wait_for_unload(void)
1940 +diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
1941 +index 7718e58dbda5..7688dab32f6e 100644
1942 +--- a/drivers/hwmon/pmbus/pmbus.c
1943 ++++ b/drivers/hwmon/pmbus/pmbus.c
1944 +@@ -118,6 +118,8 @@ static int pmbus_identify(struct i2c_client *client,
1945 + } else {
1946 + info->pages = 1;
1947 + }
1948 ++
1949 ++ pmbus_clear_faults(client);
1950 + }
1951 +
1952 + if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
1953 +diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
1954 +index a139940cd991..924f3ca41c65 100644
1955 +--- a/drivers/hwmon/pmbus/pmbus_core.c
1956 ++++ b/drivers/hwmon/pmbus/pmbus_core.c
1957 +@@ -1802,7 +1802,10 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
1958 + if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK))
1959 + client->flags |= I2C_CLIENT_PEC;
1960 +
1961 +- pmbus_clear_faults(client);
1962 ++ if (data->info->pages)
1963 ++ pmbus_clear_faults(client);
1964 ++ else
1965 ++ pmbus_clear_fault_page(client, -1);
1966 +
1967 + if (info->identify) {
1968 + ret = (*info->identify)(client, info);
1969 +diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
1970 +index 70cc0d134f3c..ca250e7ac511 100644
1971 +--- a/drivers/hwmon/pwm-fan.c
1972 ++++ b/drivers/hwmon/pwm-fan.c
1973 +@@ -290,9 +290,19 @@ static int pwm_fan_remove(struct platform_device *pdev)
1974 + static int pwm_fan_suspend(struct device *dev)
1975 + {
1976 + struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
1977 ++ struct pwm_args args;
1978 ++ int ret;
1979 ++
1980 ++ pwm_get_args(ctx->pwm, &args);
1981 ++
1982 ++ if (ctx->pwm_value) {
1983 ++ ret = pwm_config(ctx->pwm, 0, args.period);
1984 ++ if (ret < 0)
1985 ++ return ret;
1986 +
1987 +- if (ctx->pwm_value)
1988 + pwm_disable(ctx->pwm);
1989 ++ }
1990 ++
1991 + return 0;
1992 + }
1993 +
1994 +diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
1995 +index 56ecd7aff5eb..d14a9cb7959a 100644
1996 +--- a/drivers/hwtracing/coresight/coresight-etb10.c
1997 ++++ b/drivers/hwtracing/coresight/coresight-etb10.c
1998 +@@ -155,6 +155,10 @@ static int etb_enable(struct coresight_device *csdev, u32 mode)
1999 + if (val == CS_MODE_PERF)
2000 + return -EBUSY;
2001 +
2002 ++ /* Don't let perf disturb sysFS sessions */
2003 ++ if (val == CS_MODE_SYSFS && mode == CS_MODE_PERF)
2004 ++ return -EBUSY;
2005 ++
2006 + /* Nothing to do, the tracer is already enabled. */
2007 + if (val == CS_MODE_SYSFS)
2008 + goto out;
2009 +diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
2010 +index 7f044df1ea07..3415733a9364 100644
2011 +--- a/drivers/i2c/busses/i2c-rcar.c
2012 ++++ b/drivers/i2c/busses/i2c-rcar.c
2013 +@@ -761,8 +761,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
2014 +
2015 + time_left = wait_event_timeout(priv->wait, priv->flags & ID_DONE,
2016 + num * adap->timeout);
2017 +- if (!time_left) {
2018 ++
2019 ++ /* cleanup DMA if it couldn't complete properly due to an error */
2020 ++ if (priv->dma_direction != DMA_NONE)
2021 + rcar_i2c_cleanup_dma(priv);
2022 ++
2023 ++ if (!time_left) {
2024 + rcar_i2c_init(priv);
2025 + ret = -ETIMEDOUT;
2026 + } else if (priv->flags & ID_NACK) {
2027 +diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
2028 +index 15109728cae7..cd686179aa92 100644
2029 +--- a/drivers/iio/adc/at91_adc.c
2030 ++++ b/drivers/iio/adc/at91_adc.c
2031 +@@ -248,12 +248,14 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
2032 + struct iio_poll_func *pf = p;
2033 + struct iio_dev *idev = pf->indio_dev;
2034 + struct at91_adc_state *st = iio_priv(idev);
2035 ++ struct iio_chan_spec const *chan;
2036 + int i, j = 0;
2037 +
2038 + for (i = 0; i < idev->masklength; i++) {
2039 + if (!test_bit(i, idev->active_scan_mask))
2040 + continue;
2041 +- st->buffer[j] = at91_adc_readl(st, AT91_ADC_CHAN(st, i));
2042 ++ chan = idev->channels + i;
2043 ++ st->buffer[j] = at91_adc_readl(st, AT91_ADC_CHAN(st, chan->channel));
2044 + j++;
2045 + }
2046 +
2047 +@@ -279,6 +281,8 @@ static void handle_adc_eoc_trigger(int irq, struct iio_dev *idev)
2048 + iio_trigger_poll(idev->trig);
2049 + } else {
2050 + st->last_value = at91_adc_readl(st, AT91_ADC_CHAN(st, st->chnb));
2051 ++ /* Needed to ACK the DRDY interruption */
2052 ++ at91_adc_readl(st, AT91_ADC_LCDR);
2053 + st->done = true;
2054 + wake_up_interruptible(&st->wq_data_avail);
2055 + }
2056 +diff --git a/drivers/iio/adc/fsl-imx25-gcq.c b/drivers/iio/adc/fsl-imx25-gcq.c
2057 +index ea264fa9e567..929c617db364 100644
2058 +--- a/drivers/iio/adc/fsl-imx25-gcq.c
2059 ++++ b/drivers/iio/adc/fsl-imx25-gcq.c
2060 +@@ -209,12 +209,14 @@ static int mx25_gcq_setup_cfgs(struct platform_device *pdev,
2061 + ret = of_property_read_u32(child, "reg", &reg);
2062 + if (ret) {
2063 + dev_err(dev, "Failed to get reg property\n");
2064 ++ of_node_put(child);
2065 + return ret;
2066 + }
2067 +
2068 + if (reg >= MX25_NUM_CFGS) {
2069 + dev_err(dev,
2070 + "reg value is greater than the number of available configuration registers\n");
2071 ++ of_node_put(child);
2072 + return -EINVAL;
2073 + }
2074 +
2075 +@@ -228,6 +230,7 @@ static int mx25_gcq_setup_cfgs(struct platform_device *pdev,
2076 + if (IS_ERR(priv->vref[refp])) {
2077 + dev_err(dev, "Error, trying to use external voltage reference without a vref-%s regulator.",
2078 + mx25_gcq_refp_names[refp]);
2079 ++ of_node_put(child);
2080 + return PTR_ERR(priv->vref[refp]);
2081 + }
2082 + priv->channel_vref_mv[reg] =
2083 +@@ -240,6 +243,7 @@ static int mx25_gcq_setup_cfgs(struct platform_device *pdev,
2084 + break;
2085 + default:
2086 + dev_err(dev, "Invalid positive reference %d\n", refp);
2087 ++ of_node_put(child);
2088 + return -EINVAL;
2089 + }
2090 +
2091 +@@ -254,10 +258,12 @@ static int mx25_gcq_setup_cfgs(struct platform_device *pdev,
2092 +
2093 + if ((refp & MX25_ADCQ_CFG_REFP_MASK) != refp) {
2094 + dev_err(dev, "Invalid fsl,adc-refp property value\n");
2095 ++ of_node_put(child);
2096 + return -EINVAL;
2097 + }
2098 + if ((refn & MX25_ADCQ_CFG_REFN_MASK) != refn) {
2099 + dev_err(dev, "Invalid fsl,adc-refn property value\n");
2100 ++ of_node_put(child);
2101 + return -EINVAL;
2102 + }
2103 +
2104 +diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
2105 +index 3f9399c27869..efc762da2ba8 100644
2106 +--- a/drivers/iio/dac/ad5064.c
2107 ++++ b/drivers/iio/dac/ad5064.c
2108 +@@ -809,6 +809,40 @@ static int ad5064_set_config(struct ad5064_state *st, unsigned int val)
2109 + return ad5064_write(st, cmd, 0, val, 0);
2110 + }
2111 +
2112 ++static int ad5064_request_vref(struct ad5064_state *st, struct device *dev)
2113 ++{
2114 ++ unsigned int i;
2115 ++ int ret;
2116 ++
2117 ++ for (i = 0; i < ad5064_num_vref(st); ++i)
2118 ++ st->vref_reg[i].supply = ad5064_vref_name(st, i);
2119 ++
2120 ++ if (!st->chip_info->internal_vref)
2121 ++ return devm_regulator_bulk_get(dev, ad5064_num_vref(st),
2122 ++ st->vref_reg);
2123 ++
2124 ++ /*
2125 ++ * This assumes that when the regulator has an internal VREF
2126 ++ * there is only one external VREF connection, which is
2127 ++ * currently the case for all supported devices.
2128 ++ */
2129 ++ st->vref_reg[0].consumer = devm_regulator_get_optional(dev, "vref");
2130 ++ if (!IS_ERR(st->vref_reg[0].consumer))
2131 ++ return 0;
2132 ++
2133 ++ ret = PTR_ERR(st->vref_reg[0].consumer);
2134 ++ if (ret != -ENODEV)
2135 ++ return ret;
2136 ++
2137 ++ /* If no external regulator was supplied use the internal VREF */
2138 ++ st->use_internal_vref = true;
2139 ++ ret = ad5064_set_config(st, AD5064_CONFIG_INT_VREF_ENABLE);
2140 ++ if (ret)
2141 ++ dev_err(dev, "Failed to enable internal vref: %d\n", ret);
2142 ++
2143 ++ return ret;
2144 ++}
2145 ++
2146 + static int ad5064_probe(struct device *dev, enum ad5064_type type,
2147 + const char *name, ad5064_write_func write)
2148 + {
2149 +@@ -829,22 +863,11 @@ static int ad5064_probe(struct device *dev, enum ad5064_type type,
2150 + st->dev = dev;
2151 + st->write = write;
2152 +
2153 +- for (i = 0; i < ad5064_num_vref(st); ++i)
2154 +- st->vref_reg[i].supply = ad5064_vref_name(st, i);
2155 ++ ret = ad5064_request_vref(st, dev);
2156 ++ if (ret)
2157 ++ return ret;
2158 +
2159 +- ret = devm_regulator_bulk_get(dev, ad5064_num_vref(st),
2160 +- st->vref_reg);
2161 +- if (ret) {
2162 +- if (!st->chip_info->internal_vref)
2163 +- return ret;
2164 +- st->use_internal_vref = true;
2165 +- ret = ad5064_set_config(st, AD5064_CONFIG_INT_VREF_ENABLE);
2166 +- if (ret) {
2167 +- dev_err(dev, "Failed to enable internal vref: %d\n",
2168 +- ret);
2169 +- return ret;
2170 +- }
2171 +- } else {
2172 ++ if (!st->use_internal_vref) {
2173 + ret = regulator_bulk_enable(ad5064_num_vref(st), st->vref_reg);
2174 + if (ret)
2175 + return ret;
2176 +diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
2177 +index 0a1e96c25ca3..f75f99476ad0 100644
2178 +--- a/drivers/infiniband/core/sysfs.c
2179 ++++ b/drivers/infiniband/core/sysfs.c
2180 +@@ -489,7 +489,7 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
2181 + ret = get_perf_mad(p->ibdev, p->port_num, tab_attr->attr_id, &data,
2182 + 40 + offset / 8, sizeof(data));
2183 + if (ret < 0)
2184 +- return sprintf(buf, "N/A (no PMA)\n");
2185 ++ return ret;
2186 +
2187 + switch (width) {
2188 + case 4:
2189 +@@ -1012,10 +1012,12 @@ static int add_port(struct ib_device *device, int port_num,
2190 + goto err_put;
2191 + }
2192 +
2193 +- p->pma_table = get_counter_table(device, port_num);
2194 +- ret = sysfs_create_group(&p->kobj, p->pma_table);
2195 +- if (ret)
2196 +- goto err_put_gid_attrs;
2197 ++ if (device->process_mad) {
2198 ++ p->pma_table = get_counter_table(device, port_num);
2199 ++ ret = sysfs_create_group(&p->kobj, p->pma_table);
2200 ++ if (ret)
2201 ++ goto err_put_gid_attrs;
2202 ++ }
2203 +
2204 + p->gid_group.name = "gids";
2205 + p->gid_group.attrs = alloc_group_attrs(show_port_gid, attr.gid_tbl_len);
2206 +@@ -1128,7 +1130,8 @@ err_free_gid:
2207 + p->gid_group.attrs = NULL;
2208 +
2209 + err_remove_pma:
2210 +- sysfs_remove_group(&p->kobj, p->pma_table);
2211 ++ if (p->pma_table)
2212 ++ sysfs_remove_group(&p->kobj, p->pma_table);
2213 +
2214 + err_put_gid_attrs:
2215 + kobject_put(&p->gid_attr_group->kobj);
2216 +@@ -1240,7 +1243,9 @@ static void free_port_list_attributes(struct ib_device *device)
2217 + kfree(port->hw_stats);
2218 + free_hsag(&port->kobj, port->hw_stats_ag);
2219 + }
2220 +- sysfs_remove_group(p, port->pma_table);
2221 ++
2222 ++ if (port->pma_table)
2223 ++ sysfs_remove_group(p, port->pma_table);
2224 + sysfs_remove_group(p, &port->pkey_group);
2225 + sysfs_remove_group(p, &port->gid_group);
2226 + sysfs_remove_group(&port->gid_attr_group->kobj,
2227 +diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
2228 +index 8d91733009a4..ad74988837c9 100644
2229 +--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
2230 ++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
2231 +@@ -311,8 +311,17 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
2232 + bnxt_qplib_release_cq_locks(qp, &flags);
2233 + break;
2234 + default:
2235 +- /* Command Response */
2236 +- spin_lock_irqsave(&cmdq->lock, flags);
2237 ++ /*
2238 ++ * Command Response
2239 ++ * cmdq->lock needs to be acquired to synchronie
2240 ++ * the command send and completion reaping. This function
2241 ++ * is always called with creq->lock held. Using
2242 ++ * the nested variant of spin_lock.
2243 ++ *
2244 ++ */
2245 ++
2246 ++ spin_lock_irqsave_nested(&cmdq->lock, flags,
2247 ++ SINGLE_DEPTH_NESTING);
2248 + cookie = le16_to_cpu(qp_event->cookie);
2249 + mcookie = qp_event->cookie;
2250 + blocked = cookie & RCFW_CMD_IS_BLOCKING;
2251 +diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
2252 +index 9866c5d1b99f..e88bb71056cd 100644
2253 +--- a/drivers/infiniband/hw/mlx5/mr.c
2254 ++++ b/drivers/infiniband/hw/mlx5/mr.c
2255 +@@ -675,7 +675,6 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
2256 + init_completion(&ent->compl);
2257 + INIT_WORK(&ent->work, cache_work_func);
2258 + INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
2259 +- queue_work(cache->wq, &ent->work);
2260 +
2261 + if (i > MR_CACHE_LAST_STD_ENTRY) {
2262 + mlx5_odp_init_mr_cache_entry(ent);
2263 +@@ -694,6 +693,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
2264 + ent->limit = dev->mdev->profile->mr_cache[i].limit;
2265 + else
2266 + ent->limit = 0;
2267 ++ queue_work(cache->wq, &ent->work);
2268 + }
2269 +
2270 + err = mlx5_mr_cache_debugfs_init(dev);
2271 +diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
2272 +index bd43c1c7a42f..4d84b010b3ee 100644
2273 +--- a/drivers/infiniband/sw/rxe/rxe_resp.c
2274 ++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
2275 +@@ -683,6 +683,7 @@ static enum resp_states read_reply(struct rxe_qp *qp,
2276 + rxe_advance_resp_resource(qp);
2277 +
2278 + res->type = RXE_READ_MASK;
2279 ++ res->replay = 0;
2280 +
2281 + res->read.va = qp->resp.va;
2282 + res->read.va_org = qp->resp.va;
2283 +@@ -753,7 +754,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
2284 + state = RESPST_DONE;
2285 + } else {
2286 + qp->resp.res = NULL;
2287 +- qp->resp.opcode = -1;
2288 ++ if (!res->replay)
2289 ++ qp->resp.opcode = -1;
2290 + if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
2291 + qp->resp.psn = res->cur_psn;
2292 + state = RESPST_CLEANUP;
2293 +@@ -815,6 +817,7 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
2294 +
2295 + /* next expected psn, read handles this separately */
2296 + qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
2297 ++ qp->resp.ack_psn = qp->resp.psn;
2298 +
2299 + qp->resp.opcode = pkt->opcode;
2300 + qp->resp.status = IB_WC_SUCCESS;
2301 +@@ -1071,7 +1074,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
2302 + struct rxe_pkt_info *pkt)
2303 + {
2304 + enum resp_states rc;
2305 +- u32 prev_psn = (qp->resp.psn - 1) & BTH_PSN_MASK;
2306 ++ u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;
2307 +
2308 + if (pkt->mask & RXE_SEND_MASK ||
2309 + pkt->mask & RXE_WRITE_MASK) {
2310 +@@ -1114,6 +1117,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
2311 + res->state = (pkt->psn == res->first_psn) ?
2312 + rdatm_res_state_new :
2313 + rdatm_res_state_replay;
2314 ++ res->replay = 1;
2315 +
2316 + /* Reset the resource, except length. */
2317 + res->read.va_org = iova;
2318 +diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
2319 +index 1019f5e7dbdd..59f6a24db064 100644
2320 +--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
2321 ++++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
2322 +@@ -173,6 +173,7 @@ enum rdatm_res_state {
2323 +
2324 + struct resp_res {
2325 + int type;
2326 ++ int replay;
2327 + u32 first_psn;
2328 + u32 last_psn;
2329 + u32 cur_psn;
2330 +@@ -197,6 +198,7 @@ struct rxe_resp_info {
2331 + enum rxe_qp_state state;
2332 + u32 msn;
2333 + u32 psn;
2334 ++ u32 ack_psn;
2335 + int opcode;
2336 + int drop_msg;
2337 + int goto_error;
2338 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
2339 +index 9939f32d0154..0e85b3445c07 100644
2340 +--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
2341 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
2342 +@@ -1427,11 +1427,15 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
2343 + spin_unlock_irqrestore(&priv->lock, flags);
2344 + netif_tx_unlock_bh(dev);
2345 +
2346 +- if (skb->protocol == htons(ETH_P_IP))
2347 ++ if (skb->protocol == htons(ETH_P_IP)) {
2348 ++ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
2349 + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
2350 ++ }
2351 + #if IS_ENABLED(CONFIG_IPV6)
2352 +- else if (skb->protocol == htons(ETH_P_IPV6))
2353 ++ else if (skb->protocol == htons(ETH_P_IPV6)) {
2354 ++ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
2355 + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
2356 ++ }
2357 + #endif
2358 + dev_kfree_skb_any(skb);
2359 +
2360 +diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
2361 +index 2c436376f13e..15b5856475fc 100644
2362 +--- a/drivers/iommu/arm-smmu.c
2363 ++++ b/drivers/iommu/arm-smmu.c
2364 +@@ -475,6 +475,9 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
2365 + bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
2366 + void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
2367 +
2368 ++ if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
2369 ++ wmb();
2370 ++
2371 + if (stage1) {
2372 + reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
2373 +
2374 +@@ -516,6 +519,9 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
2375 + struct arm_smmu_domain *smmu_domain = cookie;
2376 + void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);
2377 +
2378 ++ if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
2379 ++ wmb();
2380 ++
2381 + writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
2382 + }
2383 +
2384 +diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
2385 +index cb556e06673e..5d0912bf9eab 100644
2386 +--- a/drivers/lightnvm/pblk-recovery.c
2387 ++++ b/drivers/lightnvm/pblk-recovery.c
2388 +@@ -1001,12 +1001,14 @@ next:
2389 + }
2390 + }
2391 +
2392 +- spin_lock(&l_mg->free_lock);
2393 + if (!open_lines) {
2394 ++ spin_lock(&l_mg->free_lock);
2395 + WARN_ON_ONCE(!test_and_clear_bit(meta_line,
2396 + &l_mg->meta_bitmap));
2397 ++ spin_unlock(&l_mg->free_lock);
2398 + pblk_line_replace_data(pblk);
2399 + } else {
2400 ++ spin_lock(&l_mg->free_lock);
2401 + /* Allocate next line for preparation */
2402 + l_mg->data_next = pblk_line_get(pblk);
2403 + if (l_mg->data_next) {
2404 +@@ -1014,8 +1016,8 @@ next:
2405 + l_mg->data_next->type = PBLK_LINETYPE_DATA;
2406 + is_next = 1;
2407 + }
2408 ++ spin_unlock(&l_mg->free_lock);
2409 + }
2410 +- spin_unlock(&l_mg->free_lock);
2411 +
2412 + if (is_next) {
2413 + pblk_line_erase(pblk, l_mg->data_next);
2414 +diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
2415 +index 89d088cf95d9..9406326216f1 100644
2416 +--- a/drivers/md/bcache/btree.c
2417 ++++ b/drivers/md/bcache/btree.c
2418 +@@ -2371,7 +2371,7 @@ static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2419 + struct keybuf *buf = refill->buf;
2420 + int ret = MAP_CONTINUE;
2421 +
2422 +- if (bkey_cmp(k, refill->end) >= 0) {
2423 ++ if (bkey_cmp(k, refill->end) > 0) {
2424 + ret = MAP_DONE;
2425 + goto out;
2426 + }
2427 +diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
2428 +index 5b63afff46d5..69b336d8c05a 100644
2429 +--- a/drivers/md/bcache/request.c
2430 ++++ b/drivers/md/bcache/request.c
2431 +@@ -792,7 +792,7 @@ static void cached_dev_read_done_bh(struct closure *cl)
2432 +
2433 + bch_mark_cache_accounting(s->iop.c, s->d,
2434 + !s->cache_missed, s->iop.bypass);
2435 +- trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
2436 ++ trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);
2437 +
2438 + if (s->iop.status)
2439 + continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
2440 +diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
2441 +index e52676fa9832..ca948155191a 100644
2442 +--- a/drivers/md/dm-ioctl.c
2443 ++++ b/drivers/md/dm-ioctl.c
2444 +@@ -1719,8 +1719,7 @@ static void free_params(struct dm_ioctl *param, size_t param_size, int param_fla
2445 + }
2446 +
2447 + static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
2448 +- int ioctl_flags,
2449 +- struct dm_ioctl **param, int *param_flags)
2450 ++ int ioctl_flags, struct dm_ioctl **param, int *param_flags)
2451 + {
2452 + struct dm_ioctl *dmi;
2453 + int secure_data;
2454 +@@ -1761,18 +1760,13 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
2455 +
2456 + *param_flags |= DM_PARAMS_MALLOC;
2457 +
2458 +- if (copy_from_user(dmi, user, param_kernel->data_size))
2459 +- goto bad;
2460 ++ /* Copy from param_kernel (which was already copied from user) */
2461 ++ memcpy(dmi, param_kernel, minimum_data_size);
2462 +
2463 +-data_copied:
2464 +- /*
2465 +- * Abort if something changed the ioctl data while it was being copied.
2466 +- */
2467 +- if (dmi->data_size != param_kernel->data_size) {
2468 +- DMERR("rejecting ioctl: data size modified while processing parameters");
2469 ++ if (copy_from_user(&dmi->data, (char __user *)user + minimum_data_size,
2470 ++ param_kernel->data_size - minimum_data_size))
2471 + goto bad;
2472 +- }
2473 +-
2474 ++data_copied:
2475 + /* Wipe the user buffer so we do not return it to userspace */
2476 + if (secure_data && clear_user(user, param_kernel->data_size))
2477 + goto bad;
2478 +diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
2479 +index 70485de37b66..34968ca6b84a 100644
2480 +--- a/drivers/md/dm-zoned-metadata.c
2481 ++++ b/drivers/md/dm-zoned-metadata.c
2482 +@@ -99,7 +99,7 @@ struct dmz_mblock {
2483 + struct rb_node node;
2484 + struct list_head link;
2485 + sector_t no;
2486 +- atomic_t ref;
2487 ++ unsigned int ref;
2488 + unsigned long state;
2489 + struct page *page;
2490 + void *data;
2491 +@@ -296,7 +296,7 @@ static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
2492 +
2493 + RB_CLEAR_NODE(&mblk->node);
2494 + INIT_LIST_HEAD(&mblk->link);
2495 +- atomic_set(&mblk->ref, 0);
2496 ++ mblk->ref = 0;
2497 + mblk->state = 0;
2498 + mblk->no = mblk_no;
2499 + mblk->data = page_address(mblk->page);
2500 +@@ -339,10 +339,11 @@ static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
2501 + }
2502 +
2503 + /*
2504 +- * Lookup a metadata block in the rbtree.
2505 ++ * Lookup a metadata block in the rbtree. If the block is found, increment
2506 ++ * its reference count.
2507 + */
2508 +-static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd,
2509 +- sector_t mblk_no)
2510 ++static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd,
2511 ++ sector_t mblk_no)
2512 + {
2513 + struct rb_root *root = &zmd->mblk_rbtree;
2514 + struct rb_node *node = root->rb_node;
2515 +@@ -350,8 +351,17 @@ static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd,
2516 +
2517 + while (node) {
2518 + mblk = container_of(node, struct dmz_mblock, node);
2519 +- if (mblk->no == mblk_no)
2520 ++ if (mblk->no == mblk_no) {
2521 ++ /*
2522 ++ * If this is the first reference to the block,
2523 ++ * remove it from the LRU list.
2524 ++ */
2525 ++ mblk->ref++;
2526 ++ if (mblk->ref == 1 &&
2527 ++ !test_bit(DMZ_META_DIRTY, &mblk->state))
2528 ++ list_del_init(&mblk->link);
2529 + return mblk;
2530 ++ }
2531 + node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
2532 + }
2533 +
2534 +@@ -382,32 +392,47 @@ static void dmz_mblock_bio_end_io(struct bio *bio)
2535 + }
2536 +
2537 + /*
2538 +- * Read a metadata block from disk.
2539 ++ * Read an uncached metadata block from disk and add it to the cache.
2540 + */
2541 +-static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd,
2542 +- sector_t mblk_no)
2543 ++static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
2544 ++ sector_t mblk_no)
2545 + {
2546 +- struct dmz_mblock *mblk;
2547 ++ struct dmz_mblock *mblk, *m;
2548 + sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
2549 + struct bio *bio;
2550 +
2551 +- /* Get block and insert it */
2552 ++ /* Get a new block and a BIO to read it */
2553 + mblk = dmz_alloc_mblock(zmd, mblk_no);
2554 + if (!mblk)
2555 + return NULL;
2556 +
2557 +- spin_lock(&zmd->mblk_lock);
2558 +- atomic_inc(&mblk->ref);
2559 +- set_bit(DMZ_META_READING, &mblk->state);
2560 +- dmz_insert_mblock(zmd, mblk);
2561 +- spin_unlock(&zmd->mblk_lock);
2562 +-
2563 + bio = bio_alloc(GFP_NOIO, 1);
2564 + if (!bio) {
2565 + dmz_free_mblock(zmd, mblk);
2566 + return NULL;
2567 + }
2568 +
2569 ++ spin_lock(&zmd->mblk_lock);
2570 ++
2571 ++ /*
2572 ++ * Make sure that another context did not start reading
2573 ++ * the block already.
2574 ++ */
2575 ++ m = dmz_get_mblock_fast(zmd, mblk_no);
2576 ++ if (m) {
2577 ++ spin_unlock(&zmd->mblk_lock);
2578 ++ dmz_free_mblock(zmd, mblk);
2579 ++ bio_put(bio);
2580 ++ return m;
2581 ++ }
2582 ++
2583 ++ mblk->ref++;
2584 ++ set_bit(DMZ_META_READING, &mblk->state);
2585 ++ dmz_insert_mblock(zmd, mblk);
2586 ++
2587 ++ spin_unlock(&zmd->mblk_lock);
2588 ++
2589 ++ /* Submit read BIO */
2590 + bio->bi_iter.bi_sector = dmz_blk2sect(block);
2591 + bio_set_dev(bio, zmd->dev->bdev);
2592 + bio->bi_private = mblk;
2593 +@@ -484,7 +509,8 @@ static void dmz_release_mblock(struct dmz_metadata *zmd,
2594 +
2595 + spin_lock(&zmd->mblk_lock);
2596 +
2597 +- if (atomic_dec_and_test(&mblk->ref)) {
2598 ++ mblk->ref--;
2599 ++ if (mblk->ref == 0) {
2600 + if (test_bit(DMZ_META_ERROR, &mblk->state)) {
2601 + rb_erase(&mblk->node, &zmd->mblk_rbtree);
2602 + dmz_free_mblock(zmd, mblk);
2603 +@@ -508,18 +534,12 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
2604 +
2605 + /* Check rbtree */
2606 + spin_lock(&zmd->mblk_lock);
2607 +- mblk = dmz_lookup_mblock(zmd, mblk_no);
2608 +- if (mblk) {
2609 +- /* Cache hit: remove block from LRU list */
2610 +- if (atomic_inc_return(&mblk->ref) == 1 &&
2611 +- !test_bit(DMZ_META_DIRTY, &mblk->state))
2612 +- list_del_init(&mblk->link);
2613 +- }
2614 ++ mblk = dmz_get_mblock_fast(zmd, mblk_no);
2615 + spin_unlock(&zmd->mblk_lock);
2616 +
2617 + if (!mblk) {
2618 + /* Cache miss: read the block from disk */
2619 +- mblk = dmz_fetch_mblock(zmd, mblk_no);
2620 ++ mblk = dmz_get_mblock_slow(zmd, mblk_no);
2621 + if (!mblk)
2622 + return ERR_PTR(-ENOMEM);
2623 + }
2624 +@@ -753,7 +773,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
2625 +
2626 + spin_lock(&zmd->mblk_lock);
2627 + clear_bit(DMZ_META_DIRTY, &mblk->state);
2628 +- if (atomic_read(&mblk->ref) == 0)
2629 ++ if (mblk->ref == 0)
2630 + list_add_tail(&mblk->link, &zmd->mblk_lru_list);
2631 + spin_unlock(&zmd->mblk_lock);
2632 + }
2633 +@@ -2308,7 +2328,7 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
2634 + mblk = list_first_entry(&zmd->mblk_dirty_list,
2635 + struct dmz_mblock, link);
2636 + dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
2637 +- (u64)mblk->no, atomic_read(&mblk->ref));
2638 ++ (u64)mblk->no, mblk->ref);
2639 + list_del_init(&mblk->link);
2640 + rb_erase(&mblk->node, &zmd->mblk_rbtree);
2641 + dmz_free_mblock(zmd, mblk);
2642 +@@ -2326,8 +2346,8 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
2643 + root = &zmd->mblk_rbtree;
2644 + rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
2645 + dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
2646 +- (u64)mblk->no, atomic_read(&mblk->ref));
2647 +- atomic_set(&mblk->ref, 0);
2648 ++ (u64)mblk->no, mblk->ref);
2649 ++ mblk->ref = 0;
2650 + dmz_free_mblock(zmd, mblk);
2651 + }
2652 +
2653 +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
2654 +index 78d830763704..205f86f1a6cb 100644
2655 +--- a/drivers/md/raid1.c
2656 ++++ b/drivers/md/raid1.c
2657 +@@ -1725,6 +1725,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
2658 + */
2659 + if (rdev->saved_raid_disk >= 0 &&
2660 + rdev->saved_raid_disk >= first &&
2661 ++ rdev->saved_raid_disk < conf->raid_disks &&
2662 + conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
2663 + first = last = rdev->saved_raid_disk;
2664 +
2665 +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
2666 +index 927b60e9d3ca..e786546bf3b8 100644
2667 +--- a/drivers/md/raid10.c
2668 ++++ b/drivers/md/raid10.c
2669 +@@ -1775,6 +1775,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
2670 + first = last = rdev->raid_disk;
2671 +
2672 + if (rdev->saved_raid_disk >= first &&
2673 ++ rdev->saved_raid_disk < conf->geo.raid_disks &&
2674 + conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
2675 + mirror = rdev->saved_raid_disk;
2676 + else
2677 +diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
2678 +index a772976cfe26..a1aacd6fb96f 100644
2679 +--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
2680 ++++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
2681 +@@ -1765,7 +1765,7 @@ typedef struct { u16 __; u8 _; } __packed x24;
2682 + pos[7] = (chr & (0x01 << 0) ? fg : bg); \
2683 + } \
2684 + \
2685 +- pos += (tpg->hflip ? -8 : 8) / hdiv; \
2686 ++ pos += (tpg->hflip ? -8 : 8) / (int)hdiv; \
2687 + } \
2688 + } \
2689 + } while (0)
2690 +diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
2691 +index 59b0c1fce9be..4d3e97f97c76 100644
2692 +--- a/drivers/media/i2c/tvp5150.c
2693 ++++ b/drivers/media/i2c/tvp5150.c
2694 +@@ -1530,7 +1530,7 @@ static int tvp5150_probe(struct i2c_client *c,
2695 + 27000000, 1, 27000000);
2696 + v4l2_ctrl_new_std_menu_items(&core->hdl, &tvp5150_ctrl_ops,
2697 + V4L2_CID_TEST_PATTERN,
2698 +- ARRAY_SIZE(tvp5150_test_patterns),
2699 ++ ARRAY_SIZE(tvp5150_test_patterns) - 1,
2700 + 0, 0, tvp5150_test_patterns);
2701 + sd->ctrl_handler = &core->hdl;
2702 + if (core->hdl.error) {
2703 +diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
2704 +index 11a59854a0a6..9747e23aad27 100644
2705 +--- a/drivers/media/usb/em28xx/em28xx-cards.c
2706 ++++ b/drivers/media/usb/em28xx/em28xx-cards.c
2707 +@@ -2112,13 +2112,13 @@ struct em28xx_board em28xx_boards[] = {
2708 + .input = { {
2709 + .type = EM28XX_VMUX_COMPOSITE,
2710 + .vmux = TVP5150_COMPOSITE1,
2711 +- .amux = EM28XX_AUDIO_SRC_LINE,
2712 ++ .amux = EM28XX_AMUX_LINE_IN,
2713 + .gpio = terratec_av350_unmute_gpio,
2714 +
2715 + }, {
2716 + .type = EM28XX_VMUX_SVIDEO,
2717 + .vmux = TVP5150_SVIDEO,
2718 +- .amux = EM28XX_AUDIO_SRC_LINE,
2719 ++ .amux = EM28XX_AMUX_LINE_IN,
2720 + .gpio = terratec_av350_unmute_gpio,
2721 + } },
2722 + },
2723 +diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
2724 +index 8d253a5df0a9..92a74bc34527 100644
2725 +--- a/drivers/media/usb/em28xx/em28xx-video.c
2726 ++++ b/drivers/media/usb/em28xx/em28xx-video.c
2727 +@@ -900,6 +900,8 @@ static int em28xx_enable_analog_tuner(struct em28xx *dev)
2728 + if (!mdev || !v4l2->decoder)
2729 + return 0;
2730 +
2731 ++ dev->v4l2->field_count = 0;
2732 ++
2733 + /*
2734 + * This will find the tuner that is connected into the decoder.
2735 + * Technically, this is not 100% correct, as the device may be
2736 +@@ -1445,9 +1447,9 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
2737 +
2738 + fmt = format_by_fourcc(f->fmt.pix.pixelformat);
2739 + if (!fmt) {
2740 +- em28xx_videodbg("Fourcc format (%08x) invalid.\n",
2741 +- f->fmt.pix.pixelformat);
2742 +- return -EINVAL;
2743 ++ fmt = &format[0];
2744 ++ em28xx_videodbg("Fourcc format (%08x) invalid. Using default (%08x).\n",
2745 ++ f->fmt.pix.pixelformat, fmt->fourcc);
2746 + }
2747 +
2748 + if (dev->board.is_em2800) {
2749 +diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
2750 +index 29b7164a823b..d28ebe7ecd21 100644
2751 +--- a/drivers/mfd/menelaus.c
2752 ++++ b/drivers/mfd/menelaus.c
2753 +@@ -1094,6 +1094,7 @@ static void menelaus_rtc_alarm_work(struct menelaus_chip *m)
2754 + static inline void menelaus_rtc_init(struct menelaus_chip *m)
2755 + {
2756 + int alarm = (m->client->irq > 0);
2757 ++ int err;
2758 +
2759 + /* assume 32KDETEN pin is pulled high */
2760 + if (!(menelaus_read_reg(MENELAUS_OSC_CTRL) & 0x80)) {
2761 +@@ -1101,6 +1102,12 @@ static inline void menelaus_rtc_init(struct menelaus_chip *m)
2762 + return;
2763 + }
2764 +
2765 ++ m->rtc = devm_rtc_allocate_device(&m->client->dev);
2766 ++ if (IS_ERR(m->rtc))
2767 ++ return;
2768 ++
2769 ++ m->rtc->ops = &menelaus_rtc_ops;
2770 ++
2771 + /* support RTC alarm; it can issue wakeups */
2772 + if (alarm) {
2773 + if (menelaus_add_irq_work(MENELAUS_RTCALM_IRQ,
2774 +@@ -1125,10 +1132,8 @@ static inline void menelaus_rtc_init(struct menelaus_chip *m)
2775 + menelaus_write_reg(MENELAUS_RTC_CTRL, m->rtc_control);
2776 + }
2777 +
2778 +- m->rtc = rtc_device_register(DRIVER_NAME,
2779 +- &m->client->dev,
2780 +- &menelaus_rtc_ops, THIS_MODULE);
2781 +- if (IS_ERR(m->rtc)) {
2782 ++ err = rtc_register_device(m->rtc);
2783 ++ if (err) {
2784 + if (alarm) {
2785 + menelaus_remove_irq_work(MENELAUS_RTCALM_IRQ);
2786 + device_init_wakeup(&m->client->dev, 0);
2787 +diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
2788 +index 5813b5f25006..135e02f257c1 100644
2789 +--- a/drivers/misc/genwqe/card_base.h
2790 ++++ b/drivers/misc/genwqe/card_base.h
2791 +@@ -403,7 +403,7 @@ struct genwqe_file {
2792 + struct file *filp;
2793 +
2794 + struct fasync_struct *async_queue;
2795 +- struct task_struct *owner;
2796 ++ struct pid *opener;
2797 + struct list_head list; /* entry in list of open files */
2798 +
2799 + spinlock_t map_lock; /* lock for dma_mappings */
2800 +diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
2801 +index dd4617764f14..dbd5eaa69311 100644
2802 +--- a/drivers/misc/genwqe/card_dev.c
2803 ++++ b/drivers/misc/genwqe/card_dev.c
2804 +@@ -52,7 +52,7 @@ static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
2805 + {
2806 + unsigned long flags;
2807 +
2808 +- cfile->owner = current;
2809 ++ cfile->opener = get_pid(task_tgid(current));
2810 + spin_lock_irqsave(&cd->file_lock, flags);
2811 + list_add(&cfile->list, &cd->file_list);
2812 + spin_unlock_irqrestore(&cd->file_lock, flags);
2813 +@@ -65,6 +65,7 @@ static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
2814 + spin_lock_irqsave(&cd->file_lock, flags);
2815 + list_del(&cfile->list);
2816 + spin_unlock_irqrestore(&cd->file_lock, flags);
2817 ++ put_pid(cfile->opener);
2818 +
2819 + return 0;
2820 + }
2821 +@@ -275,7 +276,7 @@ static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig)
2822 + return files;
2823 + }
2824 +
2825 +-static int genwqe_force_sig(struct genwqe_dev *cd, int sig)
2826 ++static int genwqe_terminate(struct genwqe_dev *cd)
2827 + {
2828 + unsigned int files = 0;
2829 + unsigned long flags;
2830 +@@ -283,7 +284,7 @@ static int genwqe_force_sig(struct genwqe_dev *cd, int sig)
2831 +
2832 + spin_lock_irqsave(&cd->file_lock, flags);
2833 + list_for_each_entry(cfile, &cd->file_list, list) {
2834 +- force_sig(sig, cfile->owner);
2835 ++ kill_pid(cfile->opener, SIGKILL, 1);
2836 + files++;
2837 + }
2838 + spin_unlock_irqrestore(&cd->file_lock, flags);
2839 +@@ -1356,7 +1357,7 @@ static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
2840 + dev_warn(&pci_dev->dev,
2841 + "[%s] send SIGKILL and wait ...\n", __func__);
2842 +
2843 +- rc = genwqe_force_sig(cd, SIGKILL); /* force terminate */
2844 ++ rc = genwqe_terminate(cd);
2845 + if (rc) {
2846 + /* Give kill_timout more seconds to end processes */
2847 + for (i = 0; (i < genwqe_kill_timeout) &&
2848 +diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
2849 +index d7eaf1eb11e7..003bfba40758 100644
2850 +--- a/drivers/misc/vmw_vmci/vmci_driver.c
2851 ++++ b/drivers/misc/vmw_vmci/vmci_driver.c
2852 +@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
2853 +
2854 + MODULE_AUTHOR("VMware, Inc.");
2855 + MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
2856 +-MODULE_VERSION("1.1.5.0-k");
2857 ++MODULE_VERSION("1.1.6.0-k");
2858 + MODULE_LICENSE("GPL v2");
2859 +diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c
2860 +index 1ab6e8737a5f..da1ee2e1ba99 100644
2861 +--- a/drivers/misc/vmw_vmci/vmci_resource.c
2862 ++++ b/drivers/misc/vmw_vmci/vmci_resource.c
2863 +@@ -57,7 +57,8 @@ static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle,
2864 +
2865 + if (r->type == type &&
2866 + rid == handle.resource &&
2867 +- (cid == handle.context || cid == VMCI_INVALID_ID)) {
2868 ++ (cid == handle.context || cid == VMCI_INVALID_ID ||
2869 ++ handle.context == VMCI_INVALID_ID)) {
2870 + resource = r;
2871 + break;
2872 + }
2873 +diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
2874 +index 14273ca00641..44a809a20d3a 100644
2875 +--- a/drivers/mmc/host/sdhci-pci-o2micro.c
2876 ++++ b/drivers/mmc/host/sdhci-pci-o2micro.c
2877 +@@ -334,6 +334,9 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
2878 + pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
2879 + break;
2880 + case PCI_DEVICE_ID_O2_SEABIRD0:
2881 ++ if (chip->pdev->revision == 0x01)
2882 ++ chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;
2883 ++ /* fall through */
2884 + case PCI_DEVICE_ID_O2_SEABIRD1:
2885 + /* UnLock WP */
2886 + ret = pci_read_config_byte(chip->pdev,
2887 +diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
2888 +index 148744418e82..32a2f947a454 100644
2889 +--- a/drivers/mtd/nand/atmel/nand-controller.c
2890 ++++ b/drivers/mtd/nand/atmel/nand-controller.c
2891 +@@ -2079,6 +2079,10 @@ atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
2892 + nand_np = dev->of_node;
2893 + nfc_np = of_find_compatible_node(dev->of_node, NULL,
2894 + "atmel,sama5d3-nfc");
2895 ++ if (!nfc_np) {
2896 ++ dev_err(dev, "Could not find device node for sama5d3-nfc\n");
2897 ++ return -ENODEV;
2898 ++ }
2899 +
2900 + nc->clk = of_clk_get(nfc_np, 0);
2901 + if (IS_ERR(nc->clk)) {
2902 +diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
2903 +index f17d22435bfc..62f5763482b3 100644
2904 +--- a/drivers/mtd/spi-nor/fsl-quadspi.c
2905 ++++ b/drivers/mtd/spi-nor/fsl-quadspi.c
2906 +@@ -468,6 +468,7 @@ static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
2907 + {
2908 + switch (cmd) {
2909 + case SPINOR_OP_READ_1_1_4:
2910 ++ case SPINOR_OP_READ_1_1_4_4B:
2911 + return SEQID_READ;
2912 + case SPINOR_OP_WREN:
2913 + return SEQID_WREN;
2914 +diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c
2915 +index 436668bd50dc..e53ce9610fee 100644
2916 +--- a/drivers/net/dsa/mv88e6xxx/phy.c
2917 ++++ b/drivers/net/dsa/mv88e6xxx/phy.c
2918 +@@ -110,6 +110,9 @@ int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy,
2919 + err = mv88e6xxx_phy_page_get(chip, phy, page);
2920 + if (!err) {
2921 + err = mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page);
2922 ++ if (!err)
2923 ++ err = mv88e6xxx_phy_write(chip, phy, reg, val);
2924 ++
2925 + mv88e6xxx_phy_page_put(chip, phy);
2926 + }
2927 +
2928 +diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2929 +index 90be4385bf36..e238f6e85ab6 100644
2930 +--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2931 ++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2932 +@@ -3427,6 +3427,10 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2933 + skb_checksum_help(skb);
2934 + goto no_csum;
2935 + }
2936 ++
2937 ++ if (first->protocol == htons(ETH_P_IP))
2938 ++ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2939 ++
2940 + /* update TX checksum flag */
2941 + first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
2942 + vlan_macip_lens = skb_checksum_start_offset(skb) -
2943 +diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
2944 +index 6c9f29c2e975..90a6c4fbc113 100644
2945 +--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
2946 ++++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
2947 +@@ -96,6 +96,7 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
2948 + {
2949 + struct nfp_pf *pf = devlink_priv(devlink);
2950 + struct nfp_eth_table_port eth_port;
2951 ++ unsigned int lanes;
2952 + int ret;
2953 +
2954 + if (count < 2)
2955 +@@ -114,8 +115,12 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
2956 + goto out;
2957 + }
2958 +
2959 +- ret = nfp_devlink_set_lanes(pf, eth_port.index,
2960 +- eth_port.port_lanes / count);
2961 ++ /* Special case the 100G CXP -> 2x40G split */
2962 ++ lanes = eth_port.port_lanes / count;
2963 ++ if (eth_port.lanes == 10 && count == 2)
2964 ++ lanes = 8 / count;
2965 ++
2966 ++ ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
2967 + out:
2968 + mutex_unlock(&pf->lock);
2969 +
2970 +@@ -127,6 +132,7 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index)
2971 + {
2972 + struct nfp_pf *pf = devlink_priv(devlink);
2973 + struct nfp_eth_table_port eth_port;
2974 ++ unsigned int lanes;
2975 + int ret;
2976 +
2977 + mutex_lock(&pf->lock);
2978 +@@ -142,7 +148,12 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index)
2979 + goto out;
2980 + }
2981 +
2982 +- ret = nfp_devlink_set_lanes(pf, eth_port.index, eth_port.port_lanes);
2983 ++ /* Special case the 100G CXP -> 2x40G unsplit */
2984 ++ lanes = eth_port.port_lanes;
2985 ++ if (eth_port.port_lanes == 8)
2986 ++ lanes = 10;
2987 ++
2988 ++ ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
2989 + out:
2990 + mutex_unlock(&pf->lock);
2991 +
2992 +diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
2993 +index 2991179c2fd0..080d00520362 100644
2994 +--- a/drivers/net/ethernet/qlogic/qla3xxx.c
2995 ++++ b/drivers/net/ethernet/qlogic/qla3xxx.c
2996 +@@ -380,8 +380,6 @@ static void fm93c56a_select(struct ql3_adapter *qdev)
2997 +
2998 + qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
2999 + ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
3000 +- ql_write_nvram_reg(qdev, spir,
3001 +- ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
3002 + }
3003 +
3004 + /*
3005 +diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
3006 +index 79f28b9186c6..70ce7da26d1f 100644
3007 +--- a/drivers/net/phy/phylink.c
3008 ++++ b/drivers/net/phy/phylink.c
3009 +@@ -747,6 +747,9 @@ void phylink_start(struct phylink *pl)
3010 + phylink_an_mode_str(pl->link_an_mode),
3011 + phy_modes(pl->link_config.interface));
3012 +
3013 ++ /* Always set the carrier off */
3014 ++ netif_carrier_off(pl->netdev);
3015 ++
3016 + /* Apply the link configuration to the MAC when starting. This allows
3017 + * a fixed-link to start with the correct parameters, and also
3018 + * ensures that we set the appropriate advertisment for Serdes links.
3019 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
3020 +index e0baea2dfd3c..7f8c7e3aa356 100644
3021 +--- a/drivers/net/tun.c
3022 ++++ b/drivers/net/tun.c
3023 +@@ -1814,6 +1814,8 @@ static void tun_setup(struct net_device *dev)
3024 + static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
3025 + struct netlink_ext_ack *extack)
3026 + {
3027 ++ if (!data)
3028 ++ return 0;
3029 + return -EINVAL;
3030 + }
3031 +
3032 +diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
3033 +index 2ab5311659ea..8cb47858eb00 100644
3034 +--- a/drivers/net/wireless/ath/ath10k/wmi.c
3035 ++++ b/drivers/net/wireless/ath/ath10k/wmi.c
3036 +@@ -1852,6 +1852,12 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
3037 + if (ret)
3038 + dev_kfree_skb_any(skb);
3039 +
3040 ++ if (ret == -EAGAIN) {
3041 ++ ath10k_warn(ar, "wmi command %d timeout, restarting hardware\n",
3042 ++ cmd_id);
3043 ++ queue_work(ar->workqueue, &ar->restart_work);
3044 ++ }
3045 ++
3046 + return ret;
3047 + }
3048 +
3049 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
3050 +index d8b79cb72b58..e7584b842dce 100644
3051 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
3052 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
3053 +@@ -77,6 +77,8 @@ static u16 d11ac_bw(enum brcmu_chan_bw bw)
3054 + return BRCMU_CHSPEC_D11AC_BW_40;
3055 + case BRCMU_CHAN_BW_80:
3056 + return BRCMU_CHSPEC_D11AC_BW_80;
3057 ++ case BRCMU_CHAN_BW_160:
3058 ++ return BRCMU_CHSPEC_D11AC_BW_160;
3059 + default:
3060 + WARN_ON(1);
3061 + }
3062 +@@ -190,8 +192,38 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
3063 + break;
3064 + }
3065 + break;
3066 +- case BRCMU_CHSPEC_D11AC_BW_8080:
3067 + case BRCMU_CHSPEC_D11AC_BW_160:
3068 ++ switch (ch->sb) {
3069 ++ case BRCMU_CHAN_SB_LLL:
3070 ++ ch->control_ch_num -= CH_70MHZ_APART;
3071 ++ break;
3072 ++ case BRCMU_CHAN_SB_LLU:
3073 ++ ch->control_ch_num -= CH_50MHZ_APART;
3074 ++ break;
3075 ++ case BRCMU_CHAN_SB_LUL:
3076 ++ ch->control_ch_num -= CH_30MHZ_APART;
3077 ++ break;
3078 ++ case BRCMU_CHAN_SB_LUU:
3079 ++ ch->control_ch_num -= CH_10MHZ_APART;
3080 ++ break;
3081 ++ case BRCMU_CHAN_SB_ULL:
3082 ++ ch->control_ch_num += CH_10MHZ_APART;
3083 ++ break;
3084 ++ case BRCMU_CHAN_SB_ULU:
3085 ++ ch->control_ch_num += CH_30MHZ_APART;
3086 ++ break;
3087 ++ case BRCMU_CHAN_SB_UUL:
3088 ++ ch->control_ch_num += CH_50MHZ_APART;
3089 ++ break;
3090 ++ case BRCMU_CHAN_SB_UUU:
3091 ++ ch->control_ch_num += CH_70MHZ_APART;
3092 ++ break;
3093 ++ default:
3094 ++ WARN_ON_ONCE(1);
3095 ++ break;
3096 ++ }
3097 ++ break;
3098 ++ case BRCMU_CHSPEC_D11AC_BW_8080:
3099 + default:
3100 + WARN_ON_ONCE(1);
3101 + break;
3102 +diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
3103 +index 7b9a77981df1..75b2a0438cfa 100644
3104 +--- a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
3105 ++++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
3106 +@@ -29,6 +29,8 @@
3107 + #define CH_UPPER_SB 0x01
3108 + #define CH_LOWER_SB 0x02
3109 + #define CH_EWA_VALID 0x04
3110 ++#define CH_70MHZ_APART 14
3111 ++#define CH_50MHZ_APART 10
3112 + #define CH_30MHZ_APART 6
3113 + #define CH_20MHZ_APART 4
3114 + #define CH_10MHZ_APART 2
3115 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
3116 +index db1fab9aa1c6..80a653950e86 100644
3117 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
3118 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
3119 +@@ -1225,12 +1225,15 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
3120 + iwl_mvm_del_aux_sta(mvm);
3121 +
3122 + /*
3123 +- * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
3124 +- * won't be called in this case).
3125 ++ * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the
3126 ++ * hw (as restart_complete() won't be called in this case) and mac80211
3127 ++ * won't execute the restart.
3128 + * But make sure to cleanup interfaces that have gone down before/during
3129 + * HW restart was requested.
3130 + */
3131 +- if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
3132 ++ if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
3133 ++ test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
3134 ++ &mvm->status))
3135 + ieee80211_iterate_interfaces(mvm->hw, 0,
3136 + iwl_mvm_cleanup_iterator, mvm);
3137 +
3138 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
3139 +index 386fdee23eb0..bd48cd0eb395 100644
3140 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
3141 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
3142 +@@ -1226,7 +1226,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3143 + !(info->flags & IEEE80211_TX_STAT_AMPDU))
3144 + return;
3145 +
3146 +- rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate);
3147 ++ if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band,
3148 ++ &tx_resp_rate)) {
3149 ++ WARN_ON_ONCE(1);
3150 ++ return;
3151 ++ }
3152 +
3153 + #ifdef CONFIG_MAC80211_DEBUGFS
3154 + /* Disable last tx check if we are debugging with fixed rate but
3155 +@@ -1277,7 +1281,10 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3156 + */
3157 + table = &lq_sta->lq;
3158 + lq_hwrate = le32_to_cpu(table->rs_table[0]);
3159 +- rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
3160 ++ if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) {
3161 ++ WARN_ON_ONCE(1);
3162 ++ return;
3163 ++ }
3164 +
3165 + /* Here we actually compare this rate to the latest LQ command */
3166 + if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
3167 +@@ -1379,8 +1386,12 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3168 + /* Collect data for each rate used during failed TX attempts */
3169 + for (i = 0; i <= retries; ++i) {
3170 + lq_hwrate = le32_to_cpu(table->rs_table[i]);
3171 +- rs_rate_from_ucode_rate(lq_hwrate, info->band,
3172 +- &lq_rate);
3173 ++ if (rs_rate_from_ucode_rate(lq_hwrate, info->band,
3174 ++ &lq_rate)) {
3175 ++ WARN_ON_ONCE(1);
3176 ++ return;
3177 ++ }
3178 ++
3179 + /*
3180 + * Only collect stats if retried rate is in the same RS
3181 + * table as active/search.
3182 +@@ -3244,7 +3255,10 @@ static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
3183 + for (i = 0; i < num_rates; i++)
3184 + lq_cmd->rs_table[i] = ucode_rate_le32;
3185 +
3186 +- rs_rate_from_ucode_rate(ucode_rate, band, &rate);
3187 ++ if (rs_rate_from_ucode_rate(ucode_rate, band, &rate)) {
3188 ++ WARN_ON_ONCE(1);
3189 ++ return;
3190 ++ }
3191 +
3192 + if (is_mimo(&rate))
3193 + lq_cmd->mimo_delim = num_rates - 1;
3194 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
3195 +index 6c014c273922..62a6e293cf12 100644
3196 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
3197 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
3198 +@@ -1345,6 +1345,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
3199 + while (!skb_queue_empty(&skbs)) {
3200 + struct sk_buff *skb = __skb_dequeue(&skbs);
3201 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3202 ++ struct ieee80211_hdr *hdr = (void *)skb->data;
3203 + bool flushed = false;
3204 +
3205 + skb_freed++;
3206 +@@ -1389,11 +1390,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
3207 + info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
3208 + info->flags &= ~IEEE80211_TX_CTL_AMPDU;
3209 +
3210 +- /* W/A FW bug: seq_ctl is wrong when the status isn't success */
3211 +- if (status != TX_STATUS_SUCCESS) {
3212 +- struct ieee80211_hdr *hdr = (void *)skb->data;
3213 ++ /* W/A FW bug: seq_ctl is wrong upon failure / BAR frame */
3214 ++ if (ieee80211_is_back_req(hdr->frame_control))
3215 ++ seq_ctl = 0;
3216 ++ else if (status != TX_STATUS_SUCCESS)
3217 + seq_ctl = le16_to_cpu(hdr->seq_ctrl);
3218 +- }
3219 +
3220 + if (unlikely(!seq_ctl)) {
3221 + struct ieee80211_hdr *hdr = (void *)skb->data;
3222 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
3223 +index ca99c3cf41c2..5a15362ef671 100644
3224 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
3225 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
3226 +@@ -1049,6 +1049,14 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
3227 + kfree(trans_pcie->rxq);
3228 + }
3229 +
3230 ++static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
3231 ++ struct iwl_rb_allocator *rba)
3232 ++{
3233 ++ spin_lock(&rba->lock);
3234 ++ list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
3235 ++ spin_unlock(&rba->lock);
3236 ++}
3237 ++
3238 + /*
3239 + * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
3240 + *
3241 +@@ -1080,9 +1088,7 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
3242 + if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
3243 + /* Move the 2 RBDs to the allocator ownership.
3244 + Allocator has another 6 from pool for the request completion*/
3245 +- spin_lock(&rba->lock);
3246 +- list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
3247 +- spin_unlock(&rba->lock);
3248 ++ iwl_pcie_rx_move_to_allocator(rxq, rba);
3249 +
3250 + atomic_inc(&rba->req_pending);
3251 + queue_work(rba->alloc_wq, &rba->rx_alloc);
3252 +@@ -1260,10 +1266,18 @@ restart:
3253 + IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
3254 +
3255 + while (i != r) {
3256 ++ struct iwl_rb_allocator *rba = &trans_pcie->rba;
3257 + struct iwl_rx_mem_buffer *rxb;
3258 +-
3259 +- if (unlikely(rxq->used_count == rxq->queue_size / 2))
3260 ++ /* number of RBDs still waiting for page allocation */
3261 ++ u32 rb_pending_alloc =
3262 ++ atomic_read(&trans_pcie->rba.req_pending) *
3263 ++ RX_CLAIM_REQ_ALLOC;
3264 ++
3265 ++ if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
3266 ++ !emergency)) {
3267 ++ iwl_pcie_rx_move_to_allocator(rxq, rba);
3268 + emergency = true;
3269 ++ }
3270 +
3271 + if (trans->cfg->mq_rx_supported) {
3272 + /*
3273 +@@ -1306,17 +1320,13 @@ restart:
3274 + iwl_pcie_rx_allocator_get(trans, rxq);
3275 +
3276 + if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
3277 +- struct iwl_rb_allocator *rba = &trans_pcie->rba;
3278 +-
3279 + /* Add the remaining empty RBDs for allocator use */
3280 +- spin_lock(&rba->lock);
3281 +- list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
3282 +- spin_unlock(&rba->lock);
3283 ++ iwl_pcie_rx_move_to_allocator(rxq, rba);
3284 + } else if (emergency) {
3285 + count++;
3286 + if (count == 8) {
3287 + count = 0;
3288 +- if (rxq->used_count < rxq->queue_size / 3)
3289 ++ if (rb_pending_alloc < rxq->queue_size / 3)
3290 + emergency = false;
3291 +
3292 + rxq->read = i;
3293 +diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
3294 +index 16e54c757dd0..e4ae2b5a71c2 100644
3295 +--- a/drivers/net/wireless/marvell/libertas/if_usb.c
3296 ++++ b/drivers/net/wireless/marvell/libertas/if_usb.c
3297 +@@ -456,8 +456,6 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
3298 + MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn,
3299 + cardp);
3300 +
3301 +- cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
3302 +-
3303 + lbs_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb);
3304 + if ((ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC))) {
3305 + lbs_deb_usbd(&cardp->udev->dev, "Submit Rx URB failed: %d\n", ret);
3306 +diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
3307 +index fb5ab5812a22..a6746a1f20ae 100644
3308 +--- a/drivers/nvdimm/bus.c
3309 ++++ b/drivers/nvdimm/bus.c
3310 +@@ -484,6 +484,8 @@ static void nd_async_device_register(void *d, async_cookie_t cookie)
3311 + put_device(dev);
3312 + }
3313 + put_device(dev);
3314 ++ if (dev->parent)
3315 ++ put_device(dev->parent);
3316 + }
3317 +
3318 + static void nd_async_device_unregister(void *d, async_cookie_t cookie)
3319 +@@ -503,6 +505,8 @@ void __nd_device_register(struct device *dev)
3320 + if (!dev)
3321 + return;
3322 + dev->bus = &nvdimm_bus_type;
3323 ++ if (dev->parent)
3324 ++ get_device(dev->parent);
3325 + get_device(dev);
3326 + async_schedule_domain(nd_async_device_register, dev,
3327 + &nd_async_domain);
3328 +diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
3329 +index abaf38c61220..050deb56ee62 100644
3330 +--- a/drivers/nvdimm/region_devs.c
3331 ++++ b/drivers/nvdimm/region_devs.c
3332 +@@ -513,10 +513,17 @@ static ssize_t region_badblocks_show(struct device *dev,
3333 + struct device_attribute *attr, char *buf)
3334 + {
3335 + struct nd_region *nd_region = to_nd_region(dev);
3336 ++ ssize_t rc;
3337 +
3338 +- return badblocks_show(&nd_region->bb, buf, 0);
3339 +-}
3340 ++ device_lock(dev);
3341 ++ if (dev->driver)
3342 ++ rc = badblocks_show(&nd_region->bb, buf, 0);
3343 ++ else
3344 ++ rc = -ENXIO;
3345 ++ device_unlock(dev);
3346 +
3347 ++ return rc;
3348 ++}
3349 + static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);
3350 +
3351 + static ssize_t resource_show(struct device *dev,
3352 +diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c
3353 +index 362607f727ee..06eae132aff7 100644
3354 +--- a/drivers/pci/dwc/pci-dra7xx.c
3355 ++++ b/drivers/pci/dwc/pci-dra7xx.c
3356 +@@ -546,7 +546,7 @@ static const struct of_device_id of_dra7xx_pcie_match[] = {
3357 + };
3358 +
3359 + /*
3360 +- * dra7xx_pcie_ep_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
3361 ++ * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
3362 + * @dra7xx: the dra7xx device where the workaround should be applied
3363 + *
3364 + * Access to the PCIe slave port that are not 32-bit aligned will result
3365 +@@ -556,7 +556,7 @@ static const struct of_device_id of_dra7xx_pcie_match[] = {
3366 + *
3367 + * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
3368 + */
3369 +-static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev)
3370 ++static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
3371 + {
3372 + int ret;
3373 + struct device_node *np = dev->of_node;
3374 +@@ -707,6 +707,11 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
3375 + case DW_PCIE_RC_TYPE:
3376 + dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
3377 + DEVICE_TYPE_RC);
3378 ++
3379 ++ ret = dra7xx_pcie_unaligned_memaccess(dev);
3380 ++ if (ret)
3381 ++ dev_err(dev, "WA for Errata i870 not applied\n");
3382 ++
3383 + ret = dra7xx_add_pcie_port(dra7xx, pdev);
3384 + if (ret < 0)
3385 + goto err_gpio;
3386 +@@ -715,7 +720,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
3387 + dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
3388 + DEVICE_TYPE_EP);
3389 +
3390 +- ret = dra7xx_pcie_ep_unaligned_memaccess(dev);
3391 ++ ret = dra7xx_pcie_unaligned_memaccess(dev);
3392 + if (ret)
3393 + goto err_gpio;
3394 +
3395 +diff --git a/drivers/pci/host/pcie-mediatek.c b/drivers/pci/host/pcie-mediatek.c
3396 +index db93efdf1d63..c896bb9ef968 100644
3397 +--- a/drivers/pci/host/pcie-mediatek.c
3398 ++++ b/drivers/pci/host/pcie-mediatek.c
3399 +@@ -333,6 +333,17 @@ static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus,
3400 + {
3401 + struct mtk_pcie *pcie = bus->sysdata;
3402 + struct mtk_pcie_port *port;
3403 ++ struct pci_dev *dev = NULL;
3404 ++
3405 ++ /*
3406 ++ * Walk the bus hierarchy to get the devfn value
3407 ++ * of the port in the root bus.
3408 ++ */
3409 ++ while (bus && bus->number) {
3410 ++ dev = bus->self;
3411 ++ bus = dev->bus;
3412 ++ devfn = dev->devfn;
3413 ++ }
3414 +
3415 + list_for_each_entry(port, &pcie->ports, list)
3416 + if (port->slot == PCI_SLOT(devfn))
3417 +diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c
3418 +index 509893bc3e63..2537b022f42d 100644
3419 +--- a/drivers/pci/host/vmd.c
3420 ++++ b/drivers/pci/host/vmd.c
3421 +@@ -183,9 +183,20 @@ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *d
3422 + int i, best = 1;
3423 + unsigned long flags;
3424 +
3425 +- if (pci_is_bridge(msi_desc_to_pci_dev(desc)) || vmd->msix_count == 1)
3426 ++ if (vmd->msix_count == 1)
3427 + return &vmd->irqs[0];
3428 +
3429 ++ /*
3430 ++ * White list for fast-interrupt handlers. All others will share the
3431 ++ * "slow" interrupt vector.
3432 ++ */
3433 ++ switch (msi_desc_to_pci_dev(desc)->class) {
3434 ++ case PCI_CLASS_STORAGE_EXPRESS:
3435 ++ break;
3436 ++ default:
3437 ++ return &vmd->irqs[0];
3438 ++ }
3439 ++
3440 + raw_spin_lock_irqsave(&list_lock, flags);
3441 + for (i = 1; i < vmd->msix_count; i++)
3442 + if (vmd->irqs[i].count < vmd->irqs[best].count)
3443 +diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
3444 +index 496ed9130600..536e9a5cd2b1 100644
3445 +--- a/drivers/pci/msi.c
3446 ++++ b/drivers/pci/msi.c
3447 +@@ -958,7 +958,6 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
3448 + }
3449 + }
3450 + }
3451 +- WARN_ON(!!dev->msix_enabled);
3452 +
3453 + /* Check whether driver already requested for MSI irq */
3454 + if (dev->msi_enabled) {
3455 +@@ -1028,8 +1027,6 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
3456 + if (!pci_msi_supported(dev, minvec))
3457 + return -EINVAL;
3458 +
3459 +- WARN_ON(!!dev->msi_enabled);
3460 +-
3461 + /* Check whether driver already requested MSI-X irqs */
3462 + if (dev->msix_enabled) {
3463 + dev_info(&dev->dev,
3464 +@@ -1040,6 +1037,9 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
3465 + if (maxvec < minvec)
3466 + return -ERANGE;
3467 +
3468 ++ if (WARN_ON_ONCE(dev->msi_enabled))
3469 ++ return -EINVAL;
3470 ++
3471 + nvec = pci_msi_vec_count(dev);
3472 + if (nvec < 0)
3473 + return nvec;
3474 +@@ -1088,6 +1088,9 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
3475 + if (maxvec < minvec)
3476 + return -ERANGE;
3477 +
3478 ++ if (WARN_ON_ONCE(dev->msix_enabled))
3479 ++ return -EINVAL;
3480 ++
3481 + for (;;) {
3482 + if (affd) {
3483 + nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
3484 +diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
3485 +index 4708eb9df71b..a3cedf8de863 100644
3486 +--- a/drivers/pci/pci-acpi.c
3487 ++++ b/drivers/pci/pci-acpi.c
3488 +@@ -738,19 +738,33 @@ static void pci_acpi_setup(struct device *dev)
3489 + return;
3490 +
3491 + device_set_wakeup_capable(dev, true);
3492 ++ /*
3493 ++ * For bridges that can do D3 we enable wake automatically (as
3494 ++ * we do for the power management itself in that case). The
3495 ++ * reason is that the bridge may have additional methods such as
3496 ++ * _DSW that need to be called.
3497 ++ */
3498 ++ if (pci_dev->bridge_d3)
3499 ++ device_wakeup_enable(dev);
3500 ++
3501 + acpi_pci_wakeup(pci_dev, false);
3502 + }
3503 +
3504 + static void pci_acpi_cleanup(struct device *dev)
3505 + {
3506 + struct acpi_device *adev = ACPI_COMPANION(dev);
3507 ++ struct pci_dev *pci_dev = to_pci_dev(dev);
3508 +
3509 + if (!adev)
3510 + return;
3511 +
3512 + pci_acpi_remove_pm_notifier(adev);
3513 +- if (adev->wakeup.flags.valid)
3514 ++ if (adev->wakeup.flags.valid) {
3515 ++ if (pci_dev->bridge_d3)
3516 ++ device_wakeup_disable(dev);
3517 ++
3518 + device_set_wakeup_capable(dev, false);
3519 ++ }
3520 + }
3521 +
3522 + static bool pci_acpi_bus_match(struct device *dev)
3523 +diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
3524 +index 633e55c57b13..c0e1985e4c75 100644
3525 +--- a/drivers/pci/pcie/aspm.c
3526 ++++ b/drivers/pci/pcie/aspm.c
3527 +@@ -937,7 +937,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
3528 + * All PCIe functions are in one slot, remove one function will remove
3529 + * the whole slot, so just wait until we are the last function left.
3530 + */
3531 +- if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices))
3532 ++ if (!list_empty(&parent->subordinate->devices))
3533 + goto out;
3534 +
3535 + link = parent->link_state;
3536 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
3537 +index 35c9b2f4b293..d442afa195ab 100644
3538 +--- a/drivers/pci/quirks.c
3539 ++++ b/drivers/pci/quirks.c
3540 +@@ -3163,7 +3163,11 @@ static void disable_igfx_irq(struct pci_dev *dev)
3541 +
3542 + pci_iounmap(dev, regs);
3543 + }
3544 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0042, disable_igfx_irq);
3545 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0046, disable_igfx_irq);
3546 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x004a, disable_igfx_irq);
3547 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
3548 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0106, disable_igfx_irq);
3549 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
3550 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
3551 +
3552 +diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
3553 +index 2fa0dbde36b7..0911217467bc 100644
3554 +--- a/drivers/pci/remove.c
3555 ++++ b/drivers/pci/remove.c
3556 +@@ -24,9 +24,6 @@ static void pci_stop_dev(struct pci_dev *dev)
3557 + pci_remove_sysfs_dev_files(dev);
3558 + dev->is_added = 0;
3559 + }
3560 +-
3561 +- if (dev->bus->self)
3562 +- pcie_aspm_exit_link_state(dev);
3563 + }
3564 +
3565 + static void pci_destroy_dev(struct pci_dev *dev)
3566 +@@ -40,6 +37,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
3567 + list_del(&dev->bus_list);
3568 + up_write(&pci_bus_sem);
3569 +
3570 ++ pcie_aspm_exit_link_state(dev);
3571 + pci_bridge_d3_update(dev);
3572 + pci_free_resources(dev);
3573 + put_device(&dev->dev);
3574 +diff --git a/drivers/pcmcia/ricoh.h b/drivers/pcmcia/ricoh.h
3575 +index 01098c841f87..8ac7b138c094 100644
3576 +--- a/drivers/pcmcia/ricoh.h
3577 ++++ b/drivers/pcmcia/ricoh.h
3578 +@@ -119,6 +119,10 @@
3579 + #define RL5C4XX_MISC_CONTROL 0x2F /* 8 bit */
3580 + #define RL5C4XX_ZV_ENABLE 0x08
3581 +
3582 ++/* Misc Control 3 Register */
3583 ++#define RL5C4XX_MISC3 0x00A2 /* 16 bit */
3584 ++#define RL5C47X_MISC3_CB_CLKRUN_DIS BIT(1)
3585 ++
3586 + #ifdef __YENTA_H
3587 +
3588 + #define rl_misc(socket) ((socket)->private[0])
3589 +@@ -156,6 +160,35 @@ static void ricoh_set_zv(struct yenta_socket *socket)
3590 + }
3591 + }
3592 +
3593 ++static void ricoh_set_clkrun(struct yenta_socket *socket, bool quiet)
3594 ++{
3595 ++ u16 misc3;
3596 ++
3597 ++ /*
3598 ++ * RL5C475II likely has this setting, too, however no datasheet
3599 ++ * is publicly available for this chip
3600 ++ */
3601 ++ if (socket->dev->device != PCI_DEVICE_ID_RICOH_RL5C476 &&
3602 ++ socket->dev->device != PCI_DEVICE_ID_RICOH_RL5C478)
3603 ++ return;
3604 ++
3605 ++ if (socket->dev->revision < 0x80)
3606 ++ return;
3607 ++
3608 ++ misc3 = config_readw(socket, RL5C4XX_MISC3);
3609 ++ if (misc3 & RL5C47X_MISC3_CB_CLKRUN_DIS) {
3610 ++ if (!quiet)
3611 ++ dev_dbg(&socket->dev->dev,
3612 ++ "CLKRUN feature already disabled\n");
3613 ++ } else if (disable_clkrun) {
3614 ++ if (!quiet)
3615 ++ dev_info(&socket->dev->dev,
3616 ++ "Disabling CLKRUN feature\n");
3617 ++ misc3 |= RL5C47X_MISC3_CB_CLKRUN_DIS;
3618 ++ config_writew(socket, RL5C4XX_MISC3, misc3);
3619 ++ }
3620 ++}
3621 ++
3622 + static void ricoh_save_state(struct yenta_socket *socket)
3623 + {
3624 + rl_misc(socket) = config_readw(socket, RL5C4XX_MISC);
3625 +@@ -172,6 +205,7 @@ static void ricoh_restore_state(struct yenta_socket *socket)
3626 + config_writew(socket, RL5C4XX_16BIT_IO_0, rl_io(socket));
3627 + config_writew(socket, RL5C4XX_16BIT_MEM_0, rl_mem(socket));
3628 + config_writew(socket, RL5C4XX_CONFIG, rl_config(socket));
3629 ++ ricoh_set_clkrun(socket, true);
3630 + }
3631 +
3632 +
3633 +@@ -197,6 +231,7 @@ static int ricoh_override(struct yenta_socket *socket)
3634 + config_writew(socket, RL5C4XX_CONFIG, config);
3635 +
3636 + ricoh_set_zv(socket);
3637 ++ ricoh_set_clkrun(socket, false);
3638 +
3639 + return 0;
3640 + }
3641 +diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
3642 +index 5d6d9b1549bc..5034422a1d96 100644
3643 +--- a/drivers/pcmcia/yenta_socket.c
3644 ++++ b/drivers/pcmcia/yenta_socket.c
3645 +@@ -26,7 +26,8 @@
3646 +
3647 + static bool disable_clkrun;
3648 + module_param(disable_clkrun, bool, 0444);
3649 +-MODULE_PARM_DESC(disable_clkrun, "If PC card doesn't function properly, please try this option");
3650 ++MODULE_PARM_DESC(disable_clkrun,
3651 ++ "If PC card doesn't function properly, please try this option (TI and Ricoh bridges only)");
3652 +
3653 + static bool isa_probe = 1;
3654 + module_param(isa_probe, bool, 0444);
3655 +diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
3656 +index 6556dbeae65e..ac251c62bc66 100644
3657 +--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
3658 ++++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
3659 +@@ -319,6 +319,8 @@ static int pmic_mpp_set_mux(struct pinctrl_dev *pctldev, unsigned function,
3660 + pad->function = function;
3661 +
3662 + ret = pmic_mpp_write_mode_ctl(state, pad);
3663 ++ if (ret < 0)
3664 ++ return ret;
3665 +
3666 + val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT;
3667 +
3668 +@@ -343,13 +345,12 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev,
3669 +
3670 + switch (param) {
3671 + case PIN_CONFIG_BIAS_DISABLE:
3672 +- arg = pad->pullup == PMIC_MPP_PULL_UP_OPEN;
3673 ++ if (pad->pullup != PMIC_MPP_PULL_UP_OPEN)
3674 ++ return -EINVAL;
3675 ++ arg = 1;
3676 + break;
3677 + case PIN_CONFIG_BIAS_PULL_UP:
3678 + switch (pad->pullup) {
3679 +- case PMIC_MPP_PULL_UP_OPEN:
3680 +- arg = 0;
3681 +- break;
3682 + case PMIC_MPP_PULL_UP_0P6KOHM:
3683 + arg = 600;
3684 + break;
3685 +@@ -364,13 +365,17 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev,
3686 + }
3687 + break;
3688 + case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
3689 +- arg = !pad->is_enabled;
3690 ++ if (pad->is_enabled)
3691 ++ return -EINVAL;
3692 ++ arg = 1;
3693 + break;
3694 + case PIN_CONFIG_POWER_SOURCE:
3695 + arg = pad->power_source;
3696 + break;
3697 + case PIN_CONFIG_INPUT_ENABLE:
3698 +- arg = pad->input_enabled;
3699 ++ if (!pad->input_enabled)
3700 ++ return -EINVAL;
3701 ++ arg = 1;
3702 + break;
3703 + case PIN_CONFIG_OUTPUT:
3704 + arg = pad->out_value;
3705 +@@ -382,7 +387,9 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev,
3706 + arg = pad->amux_input;
3707 + break;
3708 + case PMIC_MPP_CONF_PAIRED:
3709 +- arg = pad->paired;
3710 ++ if (!pad->paired)
3711 ++ return -EINVAL;
3712 ++ arg = 1;
3713 + break;
3714 + case PIN_CONFIG_DRIVE_STRENGTH:
3715 + arg = pad->drive_strength;
3716 +@@ -455,7 +462,7 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
3717 + pad->dtest = arg;
3718 + break;
3719 + case PIN_CONFIG_DRIVE_STRENGTH:
3720 +- arg = pad->drive_strength;
3721 ++ pad->drive_strength = arg;
3722 + break;
3723 + case PMIC_MPP_CONF_AMUX_ROUTE:
3724 + if (arg >= PMIC_MPP_AMUX_ROUTE_ABUS4)
3725 +@@ -502,6 +509,10 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
3726 + if (ret < 0)
3727 + return ret;
3728 +
3729 ++ ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_SINK_CTL, pad->drive_strength);
3730 ++ if (ret < 0)
3731 ++ return ret;
3732 ++
3733 + val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT;
3734 +
3735 + return pmic_mpp_write(state, pad, PMIC_MPP_REG_EN_CTL, val);
3736 +diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
3737 +index f53e32a9d8fc..0e153bae322e 100644
3738 +--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
3739 ++++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
3740 +@@ -260,22 +260,32 @@ static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
3741 +
3742 + switch (param) {
3743 + case PIN_CONFIG_BIAS_DISABLE:
3744 +- arg = pin->bias == PM8XXX_GPIO_BIAS_NP;
3745 ++ if (pin->bias != PM8XXX_GPIO_BIAS_NP)
3746 ++ return -EINVAL;
3747 ++ arg = 1;
3748 + break;
3749 + case PIN_CONFIG_BIAS_PULL_DOWN:
3750 +- arg = pin->bias == PM8XXX_GPIO_BIAS_PD;
3751 ++ if (pin->bias != PM8XXX_GPIO_BIAS_PD)
3752 ++ return -EINVAL;
3753 ++ arg = 1;
3754 + break;
3755 + case PIN_CONFIG_BIAS_PULL_UP:
3756 +- arg = pin->bias <= PM8XXX_GPIO_BIAS_PU_1P5_30;
3757 ++ if (pin->bias > PM8XXX_GPIO_BIAS_PU_1P5_30)
3758 ++ return -EINVAL;
3759 ++ arg = 1;
3760 + break;
3761 + case PM8XXX_QCOM_PULL_UP_STRENGTH:
3762 + arg = pin->pull_up_strength;
3763 + break;
3764 + case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
3765 +- arg = pin->disable;
3766 ++ if (!pin->disable)
3767 ++ return -EINVAL;
3768 ++ arg = 1;
3769 + break;
3770 + case PIN_CONFIG_INPUT_ENABLE:
3771 +- arg = pin->mode == PM8XXX_GPIO_MODE_INPUT;
3772 ++ if (pin->mode != PM8XXX_GPIO_MODE_INPUT)
3773 ++ return -EINVAL;
3774 ++ arg = 1;
3775 + break;
3776 + case PIN_CONFIG_OUTPUT:
3777 + if (pin->mode & PM8XXX_GPIO_MODE_OUTPUT)
3778 +@@ -290,10 +300,14 @@ static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
3779 + arg = pin->output_strength;
3780 + break;
3781 + case PIN_CONFIG_DRIVE_PUSH_PULL:
3782 +- arg = !pin->open_drain;
3783 ++ if (pin->open_drain)
3784 ++ return -EINVAL;
3785 ++ arg = 1;
3786 + break;
3787 + case PIN_CONFIG_DRIVE_OPEN_DRAIN:
3788 +- arg = pin->open_drain;
3789 ++ if (!pin->open_drain)
3790 ++ return -EINVAL;
3791 ++ arg = 1;
3792 + break;
3793 + default:
3794 + return -EINVAL;
3795 +diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
3796 +index f1a2147a6d84..72d02bfeda9e 100644
3797 +--- a/drivers/rpmsg/qcom_smd.c
3798 ++++ b/drivers/rpmsg/qcom_smd.c
3799 +@@ -1049,8 +1049,10 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
3800 +
3801 + channel->edge = edge;
3802 + channel->name = kstrdup(name, GFP_KERNEL);
3803 +- if (!channel->name)
3804 +- return ERR_PTR(-ENOMEM);
3805 ++ if (!channel->name) {
3806 ++ ret = -ENOMEM;
3807 ++ goto free_channel;
3808 ++ }
3809 +
3810 + mutex_init(&channel->tx_lock);
3811 + spin_lock_init(&channel->recv_lock);
3812 +@@ -1099,6 +1101,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
3813 +
3814 + free_name_and_channel:
3815 + kfree(channel->name);
3816 ++free_channel:
3817 + kfree(channel);
3818 +
3819 + return ERR_PTR(ret);
3820 +diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
3821 +index c3fc34b9964d..9e5d3f7d29ae 100644
3822 +--- a/drivers/scsi/esp_scsi.c
3823 ++++ b/drivers/scsi/esp_scsi.c
3824 +@@ -1338,6 +1338,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
3825 +
3826 + bytes_sent = esp->data_dma_len;
3827 + bytes_sent -= ecount;
3828 ++ bytes_sent -= esp->send_cmd_residual;
3829 +
3830 + /*
3831 + * The am53c974 has a DMA 'pecularity'. The doc states:
3832 +diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
3833 +index 8163dca2071b..a77772777a30 100644
3834 +--- a/drivers/scsi/esp_scsi.h
3835 ++++ b/drivers/scsi/esp_scsi.h
3836 +@@ -540,6 +540,8 @@ struct esp {
3837 +
3838 + void *dma;
3839 + int dmarev;
3840 ++
3841 ++ u32 send_cmd_residual;
3842 + };
3843 +
3844 + /* A front-end driver for the ESP chip should do the following in
3845 +diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
3846 +index 1a6f122bb25d..4ade13d72deb 100644
3847 +--- a/drivers/scsi/lpfc/lpfc_scsi.c
3848 ++++ b/drivers/scsi/lpfc/lpfc_scsi.c
3849 +@@ -4149,9 +4149,17 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3850 +
3851 + lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
3852 +
3853 +- spin_lock_irqsave(&phba->hbalock, flags);
3854 +- lpfc_cmd->pCmd = NULL;
3855 +- spin_unlock_irqrestore(&phba->hbalock, flags);
3856 ++ /* If pCmd was set to NULL from abort path, do not call scsi_done */
3857 ++ if (xchg(&lpfc_cmd->pCmd, NULL) == NULL) {
3858 ++ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3859 ++ "0711 FCP cmd already NULL, sid: 0x%06x, "
3860 ++ "did: 0x%06x, oxid: 0x%04x\n",
3861 ++ vport->fc_myDID,
3862 ++ (pnode) ? pnode->nlp_DID : 0,
3863 ++ phba->sli_rev == LPFC_SLI_REV4 ?
3864 ++ lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff);
3865 ++ return;
3866 ++ }
3867 +
3868 + /* The sdev is not guaranteed to be valid post scsi_done upcall. */
3869 + cmd->scsi_done(cmd);
3870 +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
3871 +index dc83498024dc..24b6e56f6e97 100644
3872 +--- a/drivers/scsi/lpfc/lpfc_sli.c
3873 ++++ b/drivers/scsi/lpfc/lpfc_sli.c
3874 +@@ -3585,6 +3585,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3875 + struct hbq_dmabuf *dmabuf;
3876 + struct lpfc_cq_event *cq_event;
3877 + unsigned long iflag;
3878 ++ int count = 0;
3879 +
3880 + spin_lock_irqsave(&phba->hbalock, iflag);
3881 + phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3882 +@@ -3606,16 +3607,22 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3883 + if (irspiocbq)
3884 + lpfc_sli_sp_handle_rspiocb(phba, pring,
3885 + irspiocbq);
3886 ++ count++;
3887 + break;
3888 + case CQE_CODE_RECEIVE:
3889 + case CQE_CODE_RECEIVE_V1:
3890 + dmabuf = container_of(cq_event, struct hbq_dmabuf,
3891 + cq_event);
3892 + lpfc_sli4_handle_received_buffer(phba, dmabuf);
3893 ++ count++;
3894 + break;
3895 + default:
3896 + break;
3897 + }
3898 ++
3899 ++ /* Limit the number of events to 64 to avoid soft lockups */
3900 ++ if (count == 64)
3901 ++ break;
3902 + }
3903 + }
3904 +
3905 +diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
3906 +index eb551f3cc471..71879f2207e0 100644
3907 +--- a/drivers/scsi/mac_esp.c
3908 ++++ b/drivers/scsi/mac_esp.c
3909 +@@ -427,6 +427,8 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
3910 + scsi_esp_cmd(esp, ESP_CMD_TI);
3911 + }
3912 + }
3913 ++
3914 ++ esp->send_cmd_residual = esp_count;
3915 + }
3916 +
3917 + static int mac_esp_irq_pending(struct esp *esp)
3918 +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
3919 +index d55c365be238..d0abee3e6ed9 100644
3920 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c
3921 ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
3922 +@@ -7361,6 +7361,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
3923 + get_user(user_sense_off, &cioc->sense_off))
3924 + return -EFAULT;
3925 +
3926 ++ if (local_sense_off != user_sense_off)
3927 ++ return -EINVAL;
3928 ++
3929 + if (local_sense_len) {
3930 + void __user **sense_ioc_ptr =
3931 + (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
3932 +diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
3933 +index 0453ff6839a7..7e9ef3431bea 100644
3934 +--- a/drivers/soc/tegra/pmc.c
3935 ++++ b/drivers/soc/tegra/pmc.c
3936 +@@ -1321,7 +1321,7 @@ static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
3937 + if (!pmc->soc->has_tsense_reset)
3938 + return;
3939 +
3940 +- np = of_find_node_by_name(pmc->dev->of_node, "i2c-thermtrip");
3941 ++ np = of_get_child_by_name(pmc->dev->of_node, "i2c-thermtrip");
3942 + if (!np) {
3943 + dev_warn(dev, "i2c-thermtrip node not found, %s.\n", disabled);
3944 + return;
3945 +diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
3946 +index 6573152ce893..0316fae20cfe 100644
3947 +--- a/drivers/spi/spi-bcm-qspi.c
3948 ++++ b/drivers/spi/spi-bcm-qspi.c
3949 +@@ -88,7 +88,7 @@
3950 + #define BSPI_BPP_MODE_SELECT_MASK BIT(8)
3951 + #define BSPI_BPP_ADDR_SELECT_MASK BIT(16)
3952 +
3953 +-#define BSPI_READ_LENGTH 512
3954 ++#define BSPI_READ_LENGTH 256
3955 +
3956 + /* MSPI register offsets */
3957 + #define MSPI_SPCR0_LSB 0x000
3958 +diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
3959 +index e5cc07357746..ce28c910ee48 100644
3960 +--- a/drivers/spi/spi-ep93xx.c
3961 ++++ b/drivers/spi/spi-ep93xx.c
3962 +@@ -246,6 +246,19 @@ static int ep93xx_spi_read_write(struct spi_master *master)
3963 + return -EINPROGRESS;
3964 + }
3965 +
3966 ++static enum dma_transfer_direction
3967 ++ep93xx_dma_data_to_trans_dir(enum dma_data_direction dir)
3968 ++{
3969 ++ switch (dir) {
3970 ++ case DMA_TO_DEVICE:
3971 ++ return DMA_MEM_TO_DEV;
3972 ++ case DMA_FROM_DEVICE:
3973 ++ return DMA_DEV_TO_MEM;
3974 ++ default:
3975 ++ return DMA_TRANS_NONE;
3976 ++ }
3977 ++}
3978 ++
3979 + /**
3980 + * ep93xx_spi_dma_prepare() - prepares a DMA transfer
3981 + * @master: SPI master
3982 +@@ -257,7 +270,7 @@ static int ep93xx_spi_read_write(struct spi_master *master)
3983 + */
3984 + static struct dma_async_tx_descriptor *
3985 + ep93xx_spi_dma_prepare(struct spi_master *master,
3986 +- enum dma_transfer_direction dir)
3987 ++ enum dma_data_direction dir)
3988 + {
3989 + struct ep93xx_spi *espi = spi_master_get_devdata(master);
3990 + struct spi_transfer *xfer = master->cur_msg->state;
3991 +@@ -277,9 +290,9 @@ ep93xx_spi_dma_prepare(struct spi_master *master,
3992 + buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
3993 +
3994 + memset(&conf, 0, sizeof(conf));
3995 +- conf.direction = dir;
3996 ++ conf.direction = ep93xx_dma_data_to_trans_dir(dir);
3997 +
3998 +- if (dir == DMA_DEV_TO_MEM) {
3999 ++ if (dir == DMA_FROM_DEVICE) {
4000 + chan = espi->dma_rx;
4001 + buf = xfer->rx_buf;
4002 + sgt = &espi->rx_sgt;
4003 +@@ -343,7 +356,8 @@ ep93xx_spi_dma_prepare(struct spi_master *master,
4004 + if (!nents)
4005 + return ERR_PTR(-ENOMEM);
4006 +
4007 +- txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
4008 ++ txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, conf.direction,
4009 ++ DMA_CTRL_ACK);
4010 + if (!txd) {
4011 + dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
4012 + return ERR_PTR(-ENOMEM);
4013 +@@ -360,13 +374,13 @@ ep93xx_spi_dma_prepare(struct spi_master *master,
4014 + * unmapped.
4015 + */
4016 + static void ep93xx_spi_dma_finish(struct spi_master *master,
4017 +- enum dma_transfer_direction dir)
4018 ++ enum dma_data_direction dir)
4019 + {
4020 + struct ep93xx_spi *espi = spi_master_get_devdata(master);
4021 + struct dma_chan *chan;
4022 + struct sg_table *sgt;
4023 +
4024 +- if (dir == DMA_DEV_TO_MEM) {
4025 ++ if (dir == DMA_FROM_DEVICE) {
4026 + chan = espi->dma_rx;
4027 + sgt = &espi->rx_sgt;
4028 + } else {
4029 +@@ -381,8 +395,8 @@ static void ep93xx_spi_dma_callback(void *callback_param)
4030 + {
4031 + struct spi_master *master = callback_param;
4032 +
4033 +- ep93xx_spi_dma_finish(master, DMA_MEM_TO_DEV);
4034 +- ep93xx_spi_dma_finish(master, DMA_DEV_TO_MEM);
4035 ++ ep93xx_spi_dma_finish(master, DMA_TO_DEVICE);
4036 ++ ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE);
4037 +
4038 + spi_finalize_current_transfer(master);
4039 + }
4040 +@@ -392,15 +406,15 @@ static int ep93xx_spi_dma_transfer(struct spi_master *master)
4041 + struct ep93xx_spi *espi = spi_master_get_devdata(master);
4042 + struct dma_async_tx_descriptor *rxd, *txd;
4043 +
4044 +- rxd = ep93xx_spi_dma_prepare(master, DMA_DEV_TO_MEM);
4045 ++ rxd = ep93xx_spi_dma_prepare(master, DMA_FROM_DEVICE);
4046 + if (IS_ERR(rxd)) {
4047 + dev_err(&master->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
4048 + return PTR_ERR(rxd);
4049 + }
4050 +
4051 +- txd = ep93xx_spi_dma_prepare(master, DMA_MEM_TO_DEV);
4052 ++ txd = ep93xx_spi_dma_prepare(master, DMA_TO_DEVICE);
4053 + if (IS_ERR(txd)) {
4054 +- ep93xx_spi_dma_finish(master, DMA_DEV_TO_MEM);
4055 ++ ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE);
4056 + dev_err(&master->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
4057 + return PTR_ERR(txd);
4058 + }
4059 +diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c
4060 +index 3be9519654e5..cf3fad2cb871 100644
4061 +--- a/drivers/tc/tc.c
4062 ++++ b/drivers/tc/tc.c
4063 +@@ -2,7 +2,7 @@
4064 + * TURBOchannel bus services.
4065 + *
4066 + * Copyright (c) Harald Koerfgen, 1998
4067 +- * Copyright (c) 2001, 2003, 2005, 2006 Maciej W. Rozycki
4068 ++ * Copyright (c) 2001, 2003, 2005, 2006, 2018 Maciej W. Rozycki
4069 + * Copyright (c) 2005 James Simmons
4070 + *
4071 + * This file is subject to the terms and conditions of the GNU
4072 +@@ -10,6 +10,7 @@
4073 + * directory of this archive for more details.
4074 + */
4075 + #include <linux/compiler.h>
4076 ++#include <linux/dma-mapping.h>
4077 + #include <linux/errno.h>
4078 + #include <linux/init.h>
4079 + #include <linux/ioport.h>
4080 +@@ -92,6 +93,11 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus)
4081 + tdev->dev.bus = &tc_bus_type;
4082 + tdev->slot = slot;
4083 +
4084 ++ /* TURBOchannel has 34-bit DMA addressing (16GiB space). */
4085 ++ tdev->dma_mask = DMA_BIT_MASK(34);
4086 ++ tdev->dev.dma_mask = &tdev->dma_mask;
4087 ++ tdev->dev.coherent_dma_mask = DMA_BIT_MASK(34);
4088 ++
4089 + for (i = 0; i < 8; i++) {
4090 + tdev->firmware[i] =
4091 + readb(module + offset + TC_FIRM_VER + 4 * i);
4092 +diff --git a/drivers/thermal/da9062-thermal.c b/drivers/thermal/da9062-thermal.c
4093 +index dd8dd947b7f0..01b0cb994457 100644
4094 +--- a/drivers/thermal/da9062-thermal.c
4095 ++++ b/drivers/thermal/da9062-thermal.c
4096 +@@ -106,7 +106,7 @@ static void da9062_thermal_poll_on(struct work_struct *work)
4097 + THERMAL_EVENT_UNSPECIFIED);
4098 +
4099 + delay = msecs_to_jiffies(thermal->zone->passive_delay);
4100 +- schedule_delayed_work(&thermal->work, delay);
4101 ++ queue_delayed_work(system_freezable_wq, &thermal->work, delay);
4102 + return;
4103 + }
4104 +
4105 +@@ -125,7 +125,7 @@ static irqreturn_t da9062_thermal_irq_handler(int irq, void *data)
4106 + struct da9062_thermal *thermal = data;
4107 +
4108 + disable_irq_nosync(thermal->irq);
4109 +- schedule_delayed_work(&thermal->work, 0);
4110 ++ queue_delayed_work(system_freezable_wq, &thermal->work, 0);
4111 +
4112 + return IRQ_HANDLED;
4113 + }
4114 +diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
4115 +index a260cde743e2..2db68dfe497d 100644
4116 +--- a/drivers/tty/serial/kgdboc.c
4117 ++++ b/drivers/tty/serial/kgdboc.c
4118 +@@ -133,6 +133,11 @@ static void kgdboc_unregister_kbd(void)
4119 +
4120 + static int kgdboc_option_setup(char *opt)
4121 + {
4122 ++ if (!opt) {
4123 ++ pr_err("kgdboc: config string not provided\n");
4124 ++ return -EINVAL;
4125 ++ }
4126 ++
4127 + if (strlen(opt) >= MAX_CONFIG_LEN) {
4128 + printk(KERN_ERR "kgdboc: config string too long\n");
4129 + return -ENOSPC;
4130 +diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
4131 +index 41784798c789..0a730136646d 100644
4132 +--- a/drivers/uio/uio.c
4133 ++++ b/drivers/uio/uio.c
4134 +@@ -249,6 +249,8 @@ static struct class uio_class = {
4135 + .dev_groups = uio_groups,
4136 + };
4137 +
4138 ++bool uio_class_registered;
4139 ++
4140 + /*
4141 + * device functions
4142 + */
4143 +@@ -780,6 +782,9 @@ static int init_uio_class(void)
4144 + printk(KERN_ERR "class_register failed for uio\n");
4145 + goto err_class_register;
4146 + }
4147 ++
4148 ++ uio_class_registered = true;
4149 ++
4150 + return 0;
4151 +
4152 + err_class_register:
4153 +@@ -790,6 +795,7 @@ exit:
4154 +
4155 + static void release_uio_class(void)
4156 + {
4157 ++ uio_class_registered = false;
4158 + class_unregister(&uio_class);
4159 + uio_major_cleanup();
4160 + }
4161 +@@ -809,6 +815,9 @@ int __uio_register_device(struct module *owner,
4162 + struct uio_device *idev;
4163 + int ret = 0;
4164 +
4165 ++ if (!uio_class_registered)
4166 ++ return -EPROBE_DEFER;
4167 ++
4168 + if (!parent || !info || !info->name || !info->version)
4169 + return -EINVAL;
4170 +
4171 +diff --git a/drivers/usb/chipidea/otg.h b/drivers/usb/chipidea/otg.h
4172 +index 9ecb598e48f0..a5557c70034a 100644
4173 +--- a/drivers/usb/chipidea/otg.h
4174 ++++ b/drivers/usb/chipidea/otg.h
4175 +@@ -20,7 +20,8 @@ void ci_handle_vbus_change(struct ci_hdrc *ci);
4176 + static inline void ci_otg_queue_work(struct ci_hdrc *ci)
4177 + {
4178 + disable_irq_nosync(ci->irq);
4179 +- queue_work(ci->wq, &ci->work);
4180 ++ if (queue_work(ci->wq, &ci->work) == false)
4181 ++ enable_irq(ci->irq);
4182 + }
4183 +
4184 + #endif /* __DRIVERS_USB_CHIPIDEA_OTG_H */
4185 +diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
4186 +index a884c022df7a..cb66f982c313 100644
4187 +--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
4188 ++++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
4189 +@@ -2071,6 +2071,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
4190 +
4191 + udc->errata = match->data;
4192 + udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc");
4193 ++ if (IS_ERR(udc->pmc))
4194 ++ udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9rl-pmc");
4195 + if (IS_ERR(udc->pmc))
4196 + udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9x5-pmc");
4197 + if (udc->errata && IS_ERR(udc->pmc))
4198 +diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
4199 +index 36a706f475d2..ade0723787e5 100644
4200 +--- a/drivers/usb/gadget/udc/renesas_usb3.c
4201 ++++ b/drivers/usb/gadget/udc/renesas_usb3.c
4202 +@@ -2374,6 +2374,9 @@ static ssize_t renesas_usb3_b_device_write(struct file *file,
4203 + else
4204 + usb3->forced_b_device = false;
4205 +
4206 ++ if (usb3->workaround_for_vbus)
4207 ++ usb3_disconnect(usb3);
4208 ++
4209 + /* Let this driver call usb3_connect() anyway */
4210 + usb3_check_id(usb3);
4211 +
4212 +diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
4213 +index 5302f988e7e6..e0ebd3d513c6 100644
4214 +--- a/drivers/usb/host/ohci-at91.c
4215 ++++ b/drivers/usb/host/ohci-at91.c
4216 +@@ -550,6 +550,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
4217 + pdata->overcurrent_pin[i] =
4218 + devm_gpiod_get_index_optional(&pdev->dev, "atmel,oc",
4219 + i, GPIOD_IN);
4220 ++ if (!pdata->overcurrent_pin[i])
4221 ++ continue;
4222 + if (IS_ERR(pdata->overcurrent_pin[i])) {
4223 + err = PTR_ERR(pdata->overcurrent_pin[i]);
4224 + dev_err(&pdev->dev, "unable to claim gpio \"overcurrent\": %d\n", err);
4225 +diff --git a/drivers/usb/usbip/vudc_main.c b/drivers/usb/usbip/vudc_main.c
4226 +index 9e655714e389..916e2eefc886 100644
4227 +--- a/drivers/usb/usbip/vudc_main.c
4228 ++++ b/drivers/usb/usbip/vudc_main.c
4229 +@@ -85,6 +85,10 @@ static int __init init(void)
4230 + cleanup:
4231 + list_for_each_entry_safe(udc_dev, udc_dev2, &vudc_devices, dev_entry) {
4232 + list_del(&udc_dev->dev_entry);
4233 ++ /*
4234 ++ * Just do platform_device_del() here, put_vudc_device()
4235 ++ * calls the platform_device_put()
4236 ++ */
4237 + platform_device_del(udc_dev->pdev);
4238 + put_vudc_device(udc_dev);
4239 + }
4240 +@@ -101,7 +105,11 @@ static void __exit cleanup(void)
4241 +
4242 + list_for_each_entry_safe(udc_dev, udc_dev2, &vudc_devices, dev_entry) {
4243 + list_del(&udc_dev->dev_entry);
4244 +- platform_device_unregister(udc_dev->pdev);
4245 ++ /*
4246 ++ * Just do platform_device_del() here, put_vudc_device()
4247 ++ * calls the platform_device_put()
4248 ++ */
4249 ++ platform_device_del(udc_dev->pdev);
4250 + put_vudc_device(udc_dev);
4251 + }
4252 + platform_driver_unregister(&vudc_driver);
4253 +diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
4254 +index 83fc9aab34e8..3099052e1243 100644
4255 +--- a/drivers/w1/masters/omap_hdq.c
4256 ++++ b/drivers/w1/masters/omap_hdq.c
4257 +@@ -763,6 +763,8 @@ static int omap_hdq_remove(struct platform_device *pdev)
4258 + /* remove module dependency */
4259 + pm_runtime_disable(&pdev->dev);
4260 +
4261 ++ w1_remove_master_device(&omap_w1_master);
4262 ++
4263 + return 0;
4264 + }
4265 +
4266 +diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
4267 +index f98b8c135db9..95dbee89b758 100644
4268 +--- a/drivers/xen/swiotlb-xen.c
4269 ++++ b/drivers/xen/swiotlb-xen.c
4270 +@@ -317,6 +317,9 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
4271 + */
4272 + flags &= ~(__GFP_DMA | __GFP_HIGHMEM);
4273 +
4274 ++ /* Convert the size to actually allocated. */
4275 ++ size = 1UL << (order + XEN_PAGE_SHIFT);
4276 ++
4277 + /* On ARM this function returns an ioremap'ped virtual address for
4278 + * which virt_to_phys doesn't return the corresponding physical
4279 + * address. In fact on ARM virt_to_phys only works for kernel direct
4280 +@@ -365,6 +368,9 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
4281 + * physical address */
4282 + phys = xen_bus_to_phys(dev_addr);
4283 +
4284 ++ /* Convert the size to actually allocated. */
4285 ++ size = 1UL << (order + XEN_PAGE_SHIFT);
4286 ++
4287 + if (((dev_addr + size - 1 <= dma_mask)) ||
4288 + range_straddles_page_boundary(phys, size))
4289 + xen_destroy_contiguous_region(phys, order);
4290 +diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
4291 +index 294f35ce9e46..cf8ef8cee5a0 100644
4292 +--- a/drivers/xen/xen-balloon.c
4293 ++++ b/drivers/xen/xen-balloon.c
4294 +@@ -75,12 +75,15 @@ static void watch_target(struct xenbus_watch *watch,
4295 +
4296 + if (!watch_fired) {
4297 + watch_fired = true;
4298 +- err = xenbus_scanf(XBT_NIL, "memory", "static-max", "%llu",
4299 +- &static_max);
4300 +- if (err != 1)
4301 +- static_max = new_target;
4302 +- else
4303 ++
4304 ++ if ((xenbus_scanf(XBT_NIL, "memory", "static-max",
4305 ++ "%llu", &static_max) == 1) ||
4306 ++ (xenbus_scanf(XBT_NIL, "memory", "memory_static_max",
4307 ++ "%llu", &static_max) == 1))
4308 + static_max >>= PAGE_SHIFT - 10;
4309 ++ else
4310 ++ static_max = new_target;
4311 ++
4312 + target_diff = (xen_pv_domain() || xen_initial_domain()) ? 0
4313 + : static_max - balloon_stats.target_pages;
4314 + }
4315 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
4316 +index f96f72659693..2cb3569ac548 100644
4317 +--- a/fs/btrfs/extent-tree.c
4318 ++++ b/fs/btrfs/extent-tree.c
4319 +@@ -7573,6 +7573,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
4320 + struct btrfs_block_group_cache *block_group = NULL;
4321 + u64 search_start = 0;
4322 + u64 max_extent_size = 0;
4323 ++ u64 max_free_space = 0;
4324 + u64 empty_cluster = 0;
4325 + struct btrfs_space_info *space_info;
4326 + int loop = 0;
4327 +@@ -7867,8 +7868,8 @@ unclustered_alloc:
4328 + spin_lock(&ctl->tree_lock);
4329 + if (ctl->free_space <
4330 + num_bytes + empty_cluster + empty_size) {
4331 +- if (ctl->free_space > max_extent_size)
4332 +- max_extent_size = ctl->free_space;
4333 ++ max_free_space = max(max_free_space,
4334 ++ ctl->free_space);
4335 + spin_unlock(&ctl->tree_lock);
4336 + goto loop;
4337 + }
4338 +@@ -8037,6 +8038,8 @@ loop:
4339 + }
4340 + out:
4341 + if (ret == -ENOSPC) {
4342 ++ if (!max_extent_size)
4343 ++ max_extent_size = max_free_space;
4344 + spin_lock(&space_info->lock);
4345 + space_info->max_extent_size = max_extent_size;
4346 + spin_unlock(&space_info->lock);
4347 +@@ -8398,6 +8401,19 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4348 + if (IS_ERR(buf))
4349 + return buf;
4350 +
4351 ++ /*
4352 ++ * Extra safety check in case the extent tree is corrupted and extent
4353 ++ * allocator chooses to use a tree block which is already used and
4354 ++ * locked.
4355 ++ */
4356 ++ if (buf->lock_owner == current->pid) {
4357 ++ btrfs_err_rl(fs_info,
4358 ++"tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
4359 ++ buf->start, btrfs_header_owner(buf), current->pid);
4360 ++ free_extent_buffer(buf);
4361 ++ return ERR_PTR(-EUCLEAN);
4362 ++ }
4363 ++
4364 + btrfs_set_header_generation(buf, trans->transid);
4365 + btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
4366 + btrfs_tree_lock(buf);
4367 +@@ -9028,15 +9044,14 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
4368 + if (eb == root->node) {
4369 + if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4370 + parent = eb->start;
4371 +- else
4372 +- BUG_ON(root->root_key.objectid !=
4373 +- btrfs_header_owner(eb));
4374 ++ else if (root->root_key.objectid != btrfs_header_owner(eb))
4375 ++ goto owner_mismatch;
4376 + } else {
4377 + if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4378 + parent = path->nodes[level + 1]->start;
4379 +- else
4380 +- BUG_ON(root->root_key.objectid !=
4381 +- btrfs_header_owner(path->nodes[level + 1]));
4382 ++ else if (root->root_key.objectid !=
4383 ++ btrfs_header_owner(path->nodes[level + 1]))
4384 ++ goto owner_mismatch;
4385 + }
4386 +
4387 + btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
4388 +@@ -9044,6 +9059,11 @@ out:
4389 + wc->refs[level] = 0;
4390 + wc->flags[level] = 0;
4391 + return 0;
4392 ++
4393 ++owner_mismatch:
4394 ++ btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu",
4395 ++ btrfs_header_owner(eb), root->root_key.objectid);
4396 ++ return -EUCLEAN;
4397 + }
4398 +
4399 + static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
4400 +@@ -9097,6 +9117,8 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
4401 + ret = walk_up_proc(trans, root, path, wc);
4402 + if (ret > 0)
4403 + return 0;
4404 ++ if (ret < 0)
4405 ++ return ret;
4406 +
4407 + if (path->locks[level]) {
4408 + btrfs_tree_unlock_rw(path->nodes[level],
4409 +@@ -9862,6 +9884,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
4410 +
4411 + block_group = btrfs_lookup_first_block_group(info, last);
4412 + while (block_group) {
4413 ++ wait_block_group_cache_done(block_group);
4414 + spin_lock(&block_group->lock);
4415 + if (block_group->iref)
4416 + break;
4417 +@@ -10250,7 +10273,7 @@ error:
4418 + void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
4419 + struct btrfs_fs_info *fs_info)
4420 + {
4421 +- struct btrfs_block_group_cache *block_group, *tmp;
4422 ++ struct btrfs_block_group_cache *block_group;
4423 + struct btrfs_root *extent_root = fs_info->extent_root;
4424 + struct btrfs_block_group_item item;
4425 + struct btrfs_key key;
4426 +@@ -10258,7 +10281,10 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
4427 + bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
4428 +
4429 + trans->can_flush_pending_bgs = false;
4430 +- list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
4431 ++ while (!list_empty(&trans->new_bgs)) {
4432 ++ block_group = list_first_entry(&trans->new_bgs,
4433 ++ struct btrfs_block_group_cache,
4434 ++ bg_list);
4435 + if (ret)
4436 + goto next;
4437 +
4438 +@@ -10957,6 +10983,10 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
4439 +
4440 + *trimmed = 0;
4441 +
4442 ++ /* Discard not supported = nothing to do. */
4443 ++ if (!blk_queue_discard(bdev_get_queue(device->bdev)))
4444 ++ return 0;
4445 ++
4446 + /* Not writeable = nothing to do. */
4447 + if (!device->writeable)
4448 + return 0;
4449 +@@ -11018,6 +11048,15 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
4450 + return ret;
4451 + }
4452 +
4453 ++/*
4454 ++ * Trim the whole filesystem by:
4455 ++ * 1) trimming the free space in each block group
4456 ++ * 2) trimming the unallocated space on each device
4457 ++ *
4458 ++ * This will also continue trimming even if a block group or device encounters
4459 ++ * an error. The return value will be the last error, or 0 if nothing bad
4460 ++ * happens.
4461 ++ */
4462 + int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
4463 + {
4464 + struct btrfs_block_group_cache *cache = NULL;
4465 +@@ -11027,18 +11066,14 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
4466 + u64 start;
4467 + u64 end;
4468 + u64 trimmed = 0;
4469 +- u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
4470 ++ u64 bg_failed = 0;
4471 ++ u64 dev_failed = 0;
4472 ++ int bg_ret = 0;
4473 ++ int dev_ret = 0;
4474 + int ret = 0;
4475 +
4476 +- /*
4477 +- * try to trim all FS space, our block group may start from non-zero.
4478 +- */
4479 +- if (range->len == total_bytes)
4480 +- cache = btrfs_lookup_first_block_group(fs_info, range->start);
4481 +- else
4482 +- cache = btrfs_lookup_block_group(fs_info, range->start);
4483 +-
4484 +- while (cache) {
4485 ++ cache = btrfs_lookup_first_block_group(fs_info, range->start);
4486 ++ for (; cache; cache = next_block_group(fs_info, cache)) {
4487 + if (cache->key.objectid >= (range->start + range->len)) {
4488 + btrfs_put_block_group(cache);
4489 + break;
4490 +@@ -11052,13 +11087,15 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
4491 + if (!block_group_cache_done(cache)) {
4492 + ret = cache_block_group(cache, 0);
4493 + if (ret) {
4494 +- btrfs_put_block_group(cache);
4495 +- break;
4496 ++ bg_failed++;
4497 ++ bg_ret = ret;
4498 ++ continue;
4499 + }
4500 + ret = wait_block_group_cache_done(cache);
4501 + if (ret) {
4502 +- btrfs_put_block_group(cache);
4503 +- break;
4504 ++ bg_failed++;
4505 ++ bg_ret = ret;
4506 ++ continue;
4507 + }
4508 + }
4509 + ret = btrfs_trim_block_group(cache,
4510 +@@ -11069,28 +11106,40 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
4511 +
4512 + trimmed += group_trimmed;
4513 + if (ret) {
4514 +- btrfs_put_block_group(cache);
4515 +- break;
4516 ++ bg_failed++;
4517 ++ bg_ret = ret;
4518 ++ continue;
4519 + }
4520 + }
4521 +-
4522 +- cache = next_block_group(fs_info, cache);
4523 + }
4524 +
4525 ++ if (bg_failed)
4526 ++ btrfs_warn(fs_info,
4527 ++ "failed to trim %llu block group(s), last error %d",
4528 ++ bg_failed, bg_ret);
4529 + mutex_lock(&fs_info->fs_devices->device_list_mutex);
4530 +- devices = &fs_info->fs_devices->alloc_list;
4531 +- list_for_each_entry(device, devices, dev_alloc_list) {
4532 ++ devices = &fs_info->fs_devices->devices;
4533 ++ list_for_each_entry(device, devices, dev_list) {
4534 + ret = btrfs_trim_free_extents(device, range->minlen,
4535 + &group_trimmed);
4536 +- if (ret)
4537 ++ if (ret) {
4538 ++ dev_failed++;
4539 ++ dev_ret = ret;
4540 + break;
4541 ++ }
4542 +
4543 + trimmed += group_trimmed;
4544 + }
4545 + mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4546 +
4547 ++ if (dev_failed)
4548 ++ btrfs_warn(fs_info,
4549 ++ "failed to trim %llu device(s), last error %d",
4550 ++ dev_failed, dev_ret);
4551 + range->len = trimmed;
4552 +- return ret;
4553 ++ if (bg_ret)
4554 ++ return bg_ret;
4555 ++ return dev_ret;
4556 + }
4557 +
4558 + /*
4559 +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
4560 +index 5690feded0de..57e25e83b81a 100644
4561 +--- a/fs/btrfs/file.c
4562 ++++ b/fs/btrfs/file.c
4563 +@@ -2078,6 +2078,14 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
4564 + goto out;
4565 +
4566 + inode_lock(inode);
4567 ++
4568 ++ /*
4569 ++ * We take the dio_sem here because the tree log stuff can race with
4570 ++ * lockless dio writes and get an extent map logged for an extent we
4571 ++ * never waited on. We need it this high up for lockdep reasons.
4572 ++ */
4573 ++ down_write(&BTRFS_I(inode)->dio_sem);
4574 ++
4575 + atomic_inc(&root->log_batch);
4576 + full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4577 + &BTRFS_I(inode)->runtime_flags);
4578 +@@ -2129,6 +2137,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
4579 + ret = start_ordered_ops(inode, start, end);
4580 + }
4581 + if (ret) {
4582 ++ up_write(&BTRFS_I(inode)->dio_sem);
4583 + inode_unlock(inode);
4584 + goto out;
4585 + }
4586 +@@ -2184,6 +2193,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
4587 + * checked called fsync.
4588 + */
4589 + ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
4590 ++ up_write(&BTRFS_I(inode)->dio_sem);
4591 + inode_unlock(inode);
4592 + goto out;
4593 + }
4594 +@@ -2208,6 +2218,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
4595 + trans = btrfs_start_transaction(root, 0);
4596 + if (IS_ERR(trans)) {
4597 + ret = PTR_ERR(trans);
4598 ++ up_write(&BTRFS_I(inode)->dio_sem);
4599 + inode_unlock(inode);
4600 + goto out;
4601 + }
4602 +@@ -2229,6 +2240,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
4603 + * file again, but that will end up using the synchronization
4604 + * inside btrfs_sync_log to keep things safe.
4605 + */
4606 ++ up_write(&BTRFS_I(inode)->dio_sem);
4607 + inode_unlock(inode);
4608 +
4609 + /*
4610 +diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
4611 +index 4426d1c73e50..9f31b81a5e27 100644
4612 +--- a/fs/btrfs/free-space-cache.c
4613 ++++ b/fs/btrfs/free-space-cache.c
4614 +@@ -22,6 +22,7 @@
4615 + #include <linux/slab.h>
4616 + #include <linux/math64.h>
4617 + #include <linux/ratelimit.h>
4618 ++#include <linux/sched/mm.h>
4619 + #include "ctree.h"
4620 + #include "free-space-cache.h"
4621 + #include "transaction.h"
4622 +@@ -59,6 +60,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
4623 + struct btrfs_free_space_header *header;
4624 + struct extent_buffer *leaf;
4625 + struct inode *inode = NULL;
4626 ++ unsigned nofs_flag;
4627 + int ret;
4628 +
4629 + key.objectid = BTRFS_FREE_SPACE_OBJECTID;
4630 +@@ -80,7 +82,13 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
4631 + btrfs_disk_key_to_cpu(&location, &disk_key);
4632 + btrfs_release_path(path);
4633 +
4634 ++ /*
4635 ++ * We are often under a trans handle at this point, so we need to make
4636 ++ * sure NOFS is set to keep us from deadlocking.
4637 ++ */
4638 ++ nofs_flag = memalloc_nofs_save();
4639 + inode = btrfs_iget(fs_info->sb, &location, root, NULL);
4640 ++ memalloc_nofs_restore(nofs_flag);
4641 + if (IS_ERR(inode))
4642 + return inode;
4643 + if (is_bad_inode(inode)) {
4644 +@@ -1702,6 +1710,8 @@ static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
4645 + bitmap_clear(info->bitmap, start, count);
4646 +
4647 + info->bytes -= bytes;
4648 ++ if (info->max_extent_size > ctl->unit)
4649 ++ info->max_extent_size = 0;
4650 + }
4651 +
4652 + static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
4653 +@@ -1785,6 +1795,13 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
4654 + return -1;
4655 + }
4656 +
4657 ++static inline u64 get_max_extent_size(struct btrfs_free_space *entry)
4658 ++{
4659 ++ if (entry->bitmap)
4660 ++ return entry->max_extent_size;
4661 ++ return entry->bytes;
4662 ++}
4663 ++
4664 + /* Cache the size of the max extent in bytes */
4665 + static struct btrfs_free_space *
4666 + find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
4667 +@@ -1806,8 +1823,8 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
4668 + for (node = &entry->offset_index; node; node = rb_next(node)) {
4669 + entry = rb_entry(node, struct btrfs_free_space, offset_index);
4670 + if (entry->bytes < *bytes) {
4671 +- if (entry->bytes > *max_extent_size)
4672 +- *max_extent_size = entry->bytes;
4673 ++ *max_extent_size = max(get_max_extent_size(entry),
4674 ++ *max_extent_size);
4675 + continue;
4676 + }
4677 +
4678 +@@ -1825,8 +1842,8 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
4679 + }
4680 +
4681 + if (entry->bytes < *bytes + align_off) {
4682 +- if (entry->bytes > *max_extent_size)
4683 +- *max_extent_size = entry->bytes;
4684 ++ *max_extent_size = max(get_max_extent_size(entry),
4685 ++ *max_extent_size);
4686 + continue;
4687 + }
4688 +
4689 +@@ -1838,8 +1855,10 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
4690 + *offset = tmp;
4691 + *bytes = size;
4692 + return entry;
4693 +- } else if (size > *max_extent_size) {
4694 +- *max_extent_size = size;
4695 ++ } else {
4696 ++ *max_extent_size =
4697 ++ max(get_max_extent_size(entry),
4698 ++ *max_extent_size);
4699 + }
4700 + continue;
4701 + }
4702 +@@ -2463,6 +2482,7 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
4703 + struct rb_node *n;
4704 + int count = 0;
4705 +
4706 ++ spin_lock(&ctl->tree_lock);
4707 + for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
4708 + info = rb_entry(n, struct btrfs_free_space, offset_index);
4709 + if (info->bytes >= bytes && !block_group->ro)
4710 +@@ -2471,6 +2491,7 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
4711 + info->offset, info->bytes,
4712 + (info->bitmap) ? "yes" : "no");
4713 + }
4714 ++ spin_unlock(&ctl->tree_lock);
4715 + btrfs_info(fs_info, "block group has cluster?: %s",
4716 + list_empty(&block_group->cluster_list) ? "no" : "yes");
4717 + btrfs_info(fs_info,
4718 +@@ -2699,8 +2720,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
4719 +
4720 + err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
4721 + if (err) {
4722 +- if (search_bytes > *max_extent_size)
4723 +- *max_extent_size = search_bytes;
4724 ++ *max_extent_size = max(get_max_extent_size(entry),
4725 ++ *max_extent_size);
4726 + return 0;
4727 + }
4728 +
4729 +@@ -2737,8 +2758,9 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
4730 +
4731 + entry = rb_entry(node, struct btrfs_free_space, offset_index);
4732 + while (1) {
4733 +- if (entry->bytes < bytes && entry->bytes > *max_extent_size)
4734 +- *max_extent_size = entry->bytes;
4735 ++ if (entry->bytes < bytes)
4736 ++ *max_extent_size = max(get_max_extent_size(entry),
4737 ++ *max_extent_size);
4738 +
4739 + if (entry->bytes < bytes ||
4740 + (!entry->bitmap && entry->offset < min_start)) {
4741 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
4742 +index e8bfafa25a71..90568a21fa77 100644
4743 +--- a/fs/btrfs/inode.c
4744 ++++ b/fs/btrfs/inode.c
4745 +@@ -524,6 +524,7 @@ again:
4746 + pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
4747 + if (!pages) {
4748 + /* just bail out to the uncompressed code */
4749 ++ nr_pages = 0;
4750 + goto cont;
4751 + }
4752 +
4753 +@@ -2965,6 +2966,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
4754 + bool truncated = false;
4755 + bool range_locked = false;
4756 + bool clear_new_delalloc_bytes = false;
4757 ++ bool clear_reserved_extent = true;
4758 +
4759 + if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
4760 + !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
4761 +@@ -3068,10 +3070,12 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
4762 + logical_len, logical_len,
4763 + compress_type, 0, 0,
4764 + BTRFS_FILE_EXTENT_REG);
4765 +- if (!ret)
4766 ++ if (!ret) {
4767 ++ clear_reserved_extent = false;
4768 + btrfs_release_delalloc_bytes(fs_info,
4769 + ordered_extent->start,
4770 + ordered_extent->disk_len);
4771 ++ }
4772 + }
4773 + unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
4774 + ordered_extent->file_offset, ordered_extent->len,
4775 +@@ -3131,8 +3135,13 @@ out:
4776 + * wrong we need to return the space for this ordered extent
4777 + * back to the allocator. We only free the extent in the
4778 + * truncated case if we didn't write out the extent at all.
4779 ++ *
4780 ++ * If we made it past insert_reserved_file_extent before we
4781 ++ * errored out then we don't need to do this as the accounting
4782 ++ * has already been done.
4783 + */
4784 + if ((ret || !logical_len) &&
4785 ++ clear_reserved_extent &&
4786 + !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
4787 + !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
4788 + btrfs_free_reserved_extent(fs_info,
4789 +@@ -5326,11 +5335,13 @@ static void evict_inode_truncate_pages(struct inode *inode)
4790 + struct extent_state *cached_state = NULL;
4791 + u64 start;
4792 + u64 end;
4793 ++ unsigned state_flags;
4794 +
4795 + node = rb_first(&io_tree->state);
4796 + state = rb_entry(node, struct extent_state, rb_node);
4797 + start = state->start;
4798 + end = state->end;
4799 ++ state_flags = state->state;
4800 + spin_unlock(&io_tree->lock);
4801 +
4802 + lock_extent_bits(io_tree, start, end, &cached_state);
4803 +@@ -5343,7 +5354,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
4804 + *
4805 + * Note, end is the bytenr of last byte, so we need + 1 here.
4806 + */
4807 +- if (state->state & EXTENT_DELALLOC)
4808 ++ if (state_flags & EXTENT_DELALLOC)
4809 + btrfs_qgroup_free_data(inode, NULL, start, end - start + 1);
4810 +
4811 + clear_extent_bit(io_tree, start, end,
4812 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
4813 +index a507c0d25354..9333e4cda68d 100644
4814 +--- a/fs/btrfs/ioctl.c
4815 ++++ b/fs/btrfs/ioctl.c
4816 +@@ -352,7 +352,6 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
4817 + struct fstrim_range range;
4818 + u64 minlen = ULLONG_MAX;
4819 + u64 num_devices = 0;
4820 +- u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
4821 + int ret;
4822 +
4823 + if (!capable(CAP_SYS_ADMIN))
4824 +@@ -376,11 +375,15 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
4825 + return -EOPNOTSUPP;
4826 + if (copy_from_user(&range, arg, sizeof(range)))
4827 + return -EFAULT;
4828 +- if (range.start > total_bytes ||
4829 +- range.len < fs_info->sb->s_blocksize)
4830 ++
4831 ++ /*
4832 ++ * NOTE: Don't truncate the range using super->total_bytes. Bytenr of
4833 ++ * block group is in the logical address space, which can be any
4834 ++ * sectorsize aligned bytenr in the range [0, U64_MAX].
4835 ++ */
4836 ++ if (range.len < fs_info->sb->s_blocksize)
4837 + return -EINVAL;
4838 +
4839 +- range.len = min(range.len, total_bytes - range.start);
4840 + range.minlen = max(range.minlen, minlen);
4841 + ret = btrfs_trim_fs(fs_info, &range);
4842 + if (ret < 0)
4843 +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
4844 +index 47dec283628d..d6d6e9593e39 100644
4845 +--- a/fs/btrfs/qgroup.c
4846 ++++ b/fs/btrfs/qgroup.c
4847 +@@ -2763,6 +2763,7 @@ qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
4848 + qgroup->rfer_cmpr = 0;
4849 + qgroup->excl = 0;
4850 + qgroup->excl_cmpr = 0;
4851 ++ qgroup_dirty(fs_info, qgroup);
4852 + }
4853 + spin_unlock(&fs_info->qgroup_lock);
4854 + }
4855 +@@ -2972,6 +2973,10 @@ static int __btrfs_qgroup_release_data(struct inode *inode,
4856 + int trace_op = QGROUP_RELEASE;
4857 + int ret;
4858 +
4859 ++ if (!test_bit(BTRFS_FS_QUOTA_ENABLED,
4860 ++ &BTRFS_I(inode)->root->fs_info->flags))
4861 ++ return 0;
4862 ++
4863 + /* In release case, we shouldn't have @reserved */
4864 + WARN_ON(!free && reserved);
4865 + if (free && reserved)
4866 +diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
4867 +index d9984e87cddf..83483ade3b19 100644
4868 +--- a/fs/btrfs/qgroup.h
4869 ++++ b/fs/btrfs/qgroup.h
4870 +@@ -232,6 +232,8 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
4871 + static inline void btrfs_qgroup_free_delayed_ref(struct btrfs_fs_info *fs_info,
4872 + u64 ref_root, u64 num_bytes)
4873 + {
4874 ++ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
4875 ++ return;
4876 + trace_btrfs_qgroup_free_delayed_ref(fs_info, ref_root, num_bytes);
4877 + btrfs_qgroup_free_refroot(fs_info, ref_root, num_bytes);
4878 + }
4879 +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
4880 +index b80b03e0c5d3..eeae2c3ab17e 100644
4881 +--- a/fs/btrfs/relocation.c
4882 ++++ b/fs/btrfs/relocation.c
4883 +@@ -1334,7 +1334,7 @@ static void __del_reloc_root(struct btrfs_root *root)
4884 + struct mapping_node *node = NULL;
4885 + struct reloc_control *rc = fs_info->reloc_ctl;
4886 +
4887 +- if (rc) {
4888 ++ if (rc && root->node) {
4889 + spin_lock(&rc->reloc_root_tree.lock);
4890 + rb_node = tree_search(&rc->reloc_root_tree.rb_root,
4891 + root->node->start);
4892 +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
4893 +index 27638b96079d..f74005ca8f08 100644
4894 +--- a/fs/btrfs/transaction.c
4895 ++++ b/fs/btrfs/transaction.c
4896 +@@ -2307,15 +2307,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
4897 +
4898 + kmem_cache_free(btrfs_trans_handle_cachep, trans);
4899 +
4900 +- /*
4901 +- * If fs has been frozen, we can not handle delayed iputs, otherwise
4902 +- * it'll result in deadlock about SB_FREEZE_FS.
4903 +- */
4904 +- if (current != fs_info->transaction_kthread &&
4905 +- current != fs_info->cleaner_kthread &&
4906 +- !test_bit(BTRFS_FS_FROZEN, &fs_info->flags))
4907 +- btrfs_run_delayed_iputs(fs_info);
4908 +-
4909 + return ret;
4910 +
4911 + scrub_continue:
4912 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
4913 +index e1b4a59485df..2109db196449 100644
4914 +--- a/fs/btrfs/tree-log.c
4915 ++++ b/fs/btrfs/tree-log.c
4916 +@@ -273,6 +273,13 @@ struct walk_control {
4917 + /* what stage of the replay code we're currently in */
4918 + int stage;
4919 +
4920 ++ /*
4921 ++ * Ignore any items from the inode currently being processed. Needs
4922 ++ * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
4923 ++ * the LOG_WALK_REPLAY_INODES stage.
4924 ++ */
4925 ++ bool ignore_cur_inode;
4926 ++
4927 + /* the root we are currently replaying */
4928 + struct btrfs_root *replay_dest;
4929 +
4930 +@@ -2363,6 +2370,20 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
4931 +
4932 + inode_item = btrfs_item_ptr(eb, i,
4933 + struct btrfs_inode_item);
4934 ++ /*
4935 ++ * If we have a tmpfile (O_TMPFILE) that got fsync'ed
4936 ++ * and never got linked before the fsync, skip it, as
4937 ++ * replaying it is pointless since it would be deleted
4938 ++ * later. We skip logging tmpfiles, but it's always
4939 ++ * possible we are replaying a log created with a kernel
4940 ++ * that used to log tmpfiles.
4941 ++ */
4942 ++ if (btrfs_inode_nlink(eb, inode_item) == 0) {
4943 ++ wc->ignore_cur_inode = true;
4944 ++ continue;
4945 ++ } else {
4946 ++ wc->ignore_cur_inode = false;
4947 ++ }
4948 + ret = replay_xattr_deletes(wc->trans, root, log,
4949 + path, key.objectid);
4950 + if (ret)
4951 +@@ -2400,16 +2421,8 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
4952 + root->fs_info->sectorsize);
4953 + ret = btrfs_drop_extents(wc->trans, root, inode,
4954 + from, (u64)-1, 1);
4955 +- /*
4956 +- * If the nlink count is zero here, the iput
4957 +- * will free the inode. We bump it to make
4958 +- * sure it doesn't get freed until the link
4959 +- * count fixup is done.
4960 +- */
4961 + if (!ret) {
4962 +- if (inode->i_nlink == 0)
4963 +- inc_nlink(inode);
4964 +- /* Update link count and nbytes. */
4965 ++ /* Update the inode's nbytes. */
4966 + ret = btrfs_update_inode(wc->trans,
4967 + root, inode);
4968 + }
4969 +@@ -2424,6 +2437,9 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
4970 + break;
4971 + }
4972 +
4973 ++ if (wc->ignore_cur_inode)
4974 ++ continue;
4975 ++
4976 + if (key.type == BTRFS_DIR_INDEX_KEY &&
4977 + wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
4978 + ret = replay_one_dir_item(wc->trans, root, path,
4979 +@@ -3078,9 +3094,12 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
4980 + };
4981 +
4982 + ret = walk_log_tree(trans, log, &wc);
4983 +- /* I don't think this can happen but just in case */
4984 +- if (ret)
4985 +- btrfs_abort_transaction(trans, ret);
4986 ++ if (ret) {
4987 ++ if (trans)
4988 ++ btrfs_abort_transaction(trans, ret);
4989 ++ else
4990 ++ btrfs_handle_fs_error(log->fs_info, ret, NULL);
4991 ++ }
4992 +
4993 + while (1) {
4994 + ret = find_first_extent_bit(&log->dirty_log_pages,
4995 +@@ -3959,6 +3978,36 @@ fill_holes:
4996 + break;
4997 + *last_extent = extent_end;
4998 + }
4999 ++
5000 ++ /*
5001 ++ * Check if there is a hole between the last extent found in our leaf
5002 ++ * and the first extent in the next leaf. If there is one, we need to
5003 ++ * log an explicit hole so that at replay time we can punch the hole.
5004 ++ */
5005 ++ if (ret == 0 &&
5006 ++ key.objectid == btrfs_ino(inode) &&
5007 ++ key.type == BTRFS_EXTENT_DATA_KEY &&
5008 ++ i == btrfs_header_nritems(src_path->nodes[0])) {
5009 ++ ret = btrfs_next_leaf(inode->root, src_path);
5010 ++ need_find_last_extent = true;
5011 ++ if (ret > 0) {
5012 ++ ret = 0;
5013 ++ } else if (ret == 0) {
5014 ++ btrfs_item_key_to_cpu(src_path->nodes[0], &key,
5015 ++ src_path->slots[0]);
5016 ++ if (key.objectid == btrfs_ino(inode) &&
5017 ++ key.type == BTRFS_EXTENT_DATA_KEY &&
5018 ++ *last_extent < key.offset) {
5019 ++ const u64 len = key.offset - *last_extent;
5020 ++
5021 ++ ret = btrfs_insert_file_extent(trans, log,
5022 ++ btrfs_ino(inode),
5023 ++ *last_extent, 0,
5024 ++ 0, len, 0, len,
5025 ++ 0, 0, 0);
5026 ++ }
5027 ++ }
5028 ++ }
5029 + /*
5030 + * Need to let the callers know we dropped the path so they should
5031 + * re-search.
5032 +@@ -4343,7 +4392,6 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
5033 +
5034 + INIT_LIST_HEAD(&extents);
5035 +
5036 +- down_write(&inode->dio_sem);
5037 + write_lock(&tree->lock);
5038 + test_gen = root->fs_info->last_trans_committed;
5039 + logged_start = start;
5040 +@@ -4424,7 +4472,6 @@ process:
5041 + }
5042 + WARN_ON(!list_empty(&extents));
5043 + write_unlock(&tree->lock);
5044 +- up_write(&inode->dio_sem);
5045 +
5046 + btrfs_release_path(path);
5047 + if (!ret)
5048 +@@ -4622,7 +4669,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
5049 + ASSERT(len == i_size ||
5050 + (len == fs_info->sectorsize &&
5051 + btrfs_file_extent_compression(leaf, extent) !=
5052 +- BTRFS_COMPRESS_NONE));
5053 ++ BTRFS_COMPRESS_NONE) ||
5054 ++ (len < i_size && i_size < fs_info->sectorsize));
5055 + return 0;
5056 + }
5057 +
5058 +@@ -5564,9 +5612,33 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
5059 +
5060 + dir_inode = btrfs_iget(fs_info->sb, &inode_key,
5061 + root, NULL);
5062 +- /* If parent inode was deleted, skip it. */
5063 +- if (IS_ERR(dir_inode))
5064 +- continue;
5065 ++ /*
5066 ++ * If the parent inode was deleted, return an error to
5067 ++ * fallback to a transaction commit. This is to prevent
5068 ++ * getting an inode that was moved from one parent A to
5069 ++ * a parent B, got its former parent A deleted and then
5070 ++ * it got fsync'ed, from existing at both parents after
5071 ++ * a log replay (and the old parent still existing).
5072 ++ * Example:
5073 ++ *
5074 ++ * mkdir /mnt/A
5075 ++ * mkdir /mnt/B
5076 ++ * touch /mnt/B/bar
5077 ++ * sync
5078 ++ * mv /mnt/B/bar /mnt/A/bar
5079 ++ * mv -T /mnt/A /mnt/B
5080 ++ * fsync /mnt/B/bar
5081 ++ * <power fail>
5082 ++ *
5083 ++ * If we ignore the old parent B which got deleted,
5084 ++ * after a log replay we would have file bar linked
5085 ++ * at both parents and the old parent B would still
5086 ++ * exist.
5087 ++ */
5088 ++ if (IS_ERR(dir_inode)) {
5089 ++ ret = PTR_ERR(dir_inode);
5090 ++ goto out;
5091 ++ }
5092 +
5093 + if (ctx)
5094 + ctx->log_new_dentries = false;
5095 +@@ -5641,7 +5713,13 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
5096 + if (ret)
5097 + goto end_no_trans;
5098 +
5099 +- if (btrfs_inode_in_log(inode, trans->transid)) {
5100 ++ /*
5101 ++ * Skip already logged inodes or inodes corresponding to tmpfiles
5102 ++ * (since logging them is pointless, a link count of 0 means they
5103 ++ * will never be accessible).
5104 ++ */
5105 ++ if (btrfs_inode_in_log(inode, trans->transid) ||
5106 ++ inode->vfs_inode.i_nlink == 0) {
5107 + ret = BTRFS_NO_LOG_SYNC;
5108 + goto end_no_trans;
5109 + }
5110 +diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
5111 +index 2565cee702e4..106a715101f9 100644
5112 +--- a/fs/cifs/cifs_debug.c
5113 ++++ b/fs/cifs/cifs_debug.c
5114 +@@ -289,6 +289,9 @@ static ssize_t cifs_stats_proc_write(struct file *file,
5115 + atomic_set(&totBufAllocCount, 0);
5116 + atomic_set(&totSmBufAllocCount, 0);
5117 + #endif /* CONFIG_CIFS_STATS2 */
5118 ++ atomic_set(&tcpSesReconnectCount, 0);
5119 ++ atomic_set(&tconInfoReconnectCount, 0);
5120 ++
5121 + spin_lock(&GlobalMid_Lock);
5122 + GlobalMaxActiveXid = 0;
5123 + GlobalCurrentXid = 0;
5124 +diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
5125 +index b611fc2e8984..7f01c6e60791 100644
5126 +--- a/fs/cifs/cifs_spnego.c
5127 ++++ b/fs/cifs/cifs_spnego.c
5128 +@@ -147,8 +147,10 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo)
5129 + sprintf(dp, ";sec=krb5");
5130 + else if (server->sec_mskerberos)
5131 + sprintf(dp, ";sec=mskrb5");
5132 +- else
5133 +- goto out;
5134 ++ else {
5135 ++ cifs_dbg(VFS, "unknown or missing server auth type, use krb5\n");
5136 ++ sprintf(dp, ";sec=krb5");
5137 ++ }
5138 +
5139 + dp = description + strlen(description);
5140 + sprintf(dp, ";uid=0x%x",
5141 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
5142 +index d01cbca84701..a90a637ae79a 100644
5143 +--- a/fs/cifs/inode.c
5144 ++++ b/fs/cifs/inode.c
5145 +@@ -776,7 +776,15 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
5146 + } else if (rc == -EREMOTE) {
5147 + cifs_create_dfs_fattr(&fattr, sb);
5148 + rc = 0;
5149 +- } else if (rc == -EACCES && backup_cred(cifs_sb)) {
5150 ++ } else if ((rc == -EACCES) && backup_cred(cifs_sb) &&
5151 ++ (strcmp(server->vals->version_string, SMB1_VERSION_STRING)
5152 ++ == 0)) {
5153 ++ /*
5154 ++ * For SMB2 and later the backup intent flag is already
5155 ++ * sent if needed on open and there is no path based
5156 ++ * FindFirst operation to use to retry with
5157 ++ */
5158 ++
5159 + srchinf = kzalloc(sizeof(struct cifs_search_info),
5160 + GFP_KERNEL);
5161 + if (srchinf == NULL) {
5162 +diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
5163 +index 7919967488cb..011c6f53dcda 100644
5164 +--- a/fs/cramfs/inode.c
5165 ++++ b/fs/cramfs/inode.c
5166 +@@ -186,7 +186,8 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
5167 + continue;
5168 + blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_SHIFT;
5169 + blk_offset += offset;
5170 +- if (blk_offset + len > BUFFER_SIZE)
5171 ++ if (blk_offset > BUFFER_SIZE ||
5172 ++ blk_offset + len > BUFFER_SIZE)
5173 + continue;
5174 + return read_buffers[i] + blk_offset;
5175 + }
5176 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
5177 +index c96778c39885..c0c6562b3c44 100644
5178 +--- a/fs/ext4/ext4.h
5179 ++++ b/fs/ext4/ext4.h
5180 +@@ -1421,7 +1421,8 @@ struct ext4_sb_info {
5181 + u32 s_min_batch_time;
5182 + struct block_device *journal_bdev;
5183 + #ifdef CONFIG_QUOTA
5184 +- char *s_qf_names[EXT4_MAXQUOTAS]; /* Names of quota files with journalled quota */
5185 ++ /* Names of quota files with journalled quota */
5186 ++ char __rcu *s_qf_names[EXT4_MAXQUOTAS];
5187 + int s_jquota_fmt; /* Format of quota to use */
5188 + #endif
5189 + unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
5190 +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
5191 +index 4e1d62ba0703..ac2e0516c16f 100644
5192 +--- a/fs/ext4/inline.c
5193 ++++ b/fs/ext4/inline.c
5194 +@@ -869,7 +869,7 @@ int ext4_da_write_inline_data_begin(struct address_space *mapping,
5195 + handle_t *handle;
5196 + struct page *page;
5197 + struct ext4_iloc iloc;
5198 +- int retries;
5199 ++ int retries = 0;
5200 +
5201 + ret = ext4_get_inode_loc(inode, &iloc);
5202 + if (ret)
5203 +diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
5204 +index 1eb68e626931..b2a47058e04c 100644
5205 +--- a/fs/ext4/ioctl.c
5206 ++++ b/fs/ext4/ioctl.c
5207 +@@ -344,19 +344,14 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
5208 + if (projid_eq(kprojid, EXT4_I(inode)->i_projid))
5209 + return 0;
5210 +
5211 +- err = mnt_want_write_file(filp);
5212 +- if (err)
5213 +- return err;
5214 +-
5215 + err = -EPERM;
5216 +- inode_lock(inode);
5217 + /* Is it quota file? Do not allow user to mess with it */
5218 + if (ext4_is_quota_file(inode))
5219 +- goto out_unlock;
5220 ++ return err;
5221 +
5222 + err = ext4_get_inode_loc(inode, &iloc);
5223 + if (err)
5224 +- goto out_unlock;
5225 ++ return err;
5226 +
5227 + raw_inode = ext4_raw_inode(&iloc);
5228 + if (!EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) {
5229 +@@ -364,20 +359,20 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
5230 + EXT4_SB(sb)->s_want_extra_isize,
5231 + &iloc);
5232 + if (err)
5233 +- goto out_unlock;
5234 ++ return err;
5235 + } else {
5236 + brelse(iloc.bh);
5237 + }
5238 +
5239 +- dquot_initialize(inode);
5240 ++ err = dquot_initialize(inode);
5241 ++ if (err)
5242 ++ return err;
5243 +
5244 + handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
5245 + EXT4_QUOTA_INIT_BLOCKS(sb) +
5246 + EXT4_QUOTA_DEL_BLOCKS(sb) + 3);
5247 +- if (IS_ERR(handle)) {
5248 +- err = PTR_ERR(handle);
5249 +- goto out_unlock;
5250 +- }
5251 ++ if (IS_ERR(handle))
5252 ++ return PTR_ERR(handle);
5253 +
5254 + err = ext4_reserve_inode_write(handle, inode, &iloc);
5255 + if (err)
5256 +@@ -405,9 +400,6 @@ out_dirty:
5257 + err = rc;
5258 + out_stop:
5259 + ext4_journal_stop(handle);
5260 +-out_unlock:
5261 +- inode_unlock(inode);
5262 +- mnt_drop_write_file(filp);
5263 + return err;
5264 + }
5265 + #else
5266 +@@ -592,6 +584,30 @@ static int ext4_ioc_getfsmap(struct super_block *sb,
5267 + return 0;
5268 + }
5269 +
5270 ++static int ext4_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
5271 ++{
5272 ++ /*
5273 ++ * Project Quota ID state is only allowed to change from within the init
5274 ++ * namespace. Enforce that restriction only if we are trying to change
5275 ++ * the quota ID state. Everything else is allowed in user namespaces.
5276 ++ */
5277 ++ if (current_user_ns() == &init_user_ns)
5278 ++ return 0;
5279 ++
5280 ++ if (__kprojid_val(EXT4_I(inode)->i_projid) != fa->fsx_projid)
5281 ++ return -EINVAL;
5282 ++
5283 ++ if (ext4_test_inode_flag(inode, EXT4_INODE_PROJINHERIT)) {
5284 ++ if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
5285 ++ return -EINVAL;
5286 ++ } else {
5287 ++ if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
5288 ++ return -EINVAL;
5289 ++ }
5290 ++
5291 ++ return 0;
5292 ++}
5293 ++
5294 + long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5295 + {
5296 + struct inode *inode = file_inode(filp);
5297 +@@ -1029,19 +1045,19 @@ resizefs_out:
5298 + return err;
5299 +
5300 + inode_lock(inode);
5301 ++ err = ext4_ioctl_check_project(inode, &fa);
5302 ++ if (err)
5303 ++ goto out;
5304 + flags = (ei->i_flags & ~EXT4_FL_XFLAG_VISIBLE) |
5305 + (flags & EXT4_FL_XFLAG_VISIBLE);
5306 + err = ext4_ioctl_setflags(inode, flags);
5307 +- inode_unlock(inode);
5308 +- mnt_drop_write_file(filp);
5309 + if (err)
5310 +- return err;
5311 +-
5312 ++ goto out;
5313 + err = ext4_ioctl_setproject(filp, fa.fsx_projid);
5314 +- if (err)
5315 +- return err;
5316 +-
5317 +- return 0;
5318 ++out:
5319 ++ inode_unlock(inode);
5320 ++ mnt_drop_write_file(filp);
5321 ++ return err;
5322 + }
5323 + case EXT4_IOC_SHUTDOWN:
5324 + return ext4_shutdown(sb, arg);
5325 +diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
5326 +index 9bb36909ec92..cd8d481e0c48 100644
5327 +--- a/fs/ext4/move_extent.c
5328 ++++ b/fs/ext4/move_extent.c
5329 +@@ -526,9 +526,13 @@ mext_check_arguments(struct inode *orig_inode,
5330 + orig_inode->i_ino, donor_inode->i_ino);
5331 + return -EINVAL;
5332 + }
5333 +- if (orig_eof < orig_start + *len - 1)
5334 ++ if (orig_eof <= orig_start)
5335 ++ *len = 0;
5336 ++ else if (orig_eof < orig_start + *len - 1)
5337 + *len = orig_eof - orig_start;
5338 +- if (donor_eof < donor_start + *len - 1)
5339 ++ if (donor_eof <= donor_start)
5340 ++ *len = 0;
5341 ++ else if (donor_eof < donor_start + *len - 1)
5342 + *len = donor_eof - donor_start;
5343 + if (!*len) {
5344 + ext4_debug("ext4 move extent: len should not be 0 "
5345 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
5346 +index 9dbd27f7b778..46ad267ef6d6 100644
5347 +--- a/fs/ext4/super.c
5348 ++++ b/fs/ext4/super.c
5349 +@@ -855,6 +855,18 @@ static inline void ext4_quota_off_umount(struct super_block *sb)
5350 + for (type = 0; type < EXT4_MAXQUOTAS; type++)
5351 + ext4_quota_off(sb, type);
5352 + }
5353 ++
5354 ++/*
5355 ++ * This is a helper function which is used in the mount/remount
5356 ++ * codepaths (which holds s_umount) to fetch the quota file name.
5357 ++ */
5358 ++static inline char *get_qf_name(struct super_block *sb,
5359 ++ struct ext4_sb_info *sbi,
5360 ++ int type)
5361 ++{
5362 ++ return rcu_dereference_protected(sbi->s_qf_names[type],
5363 ++ lockdep_is_held(&sb->s_umount));
5364 ++}
5365 + #else
5366 + static inline void ext4_quota_off_umount(struct super_block *sb)
5367 + {
5368 +@@ -907,7 +919,7 @@ static void ext4_put_super(struct super_block *sb)
5369 + percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
5370 + #ifdef CONFIG_QUOTA
5371 + for (i = 0; i < EXT4_MAXQUOTAS; i++)
5372 +- kfree(sbi->s_qf_names[i]);
5373 ++ kfree(get_qf_name(sb, sbi, i));
5374 + #endif
5375 +
5376 + /* Debugging code just in case the in-memory inode orphan list
5377 +@@ -1473,11 +1485,10 @@ static const char deprecated_msg[] =
5378 + static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
5379 + {
5380 + struct ext4_sb_info *sbi = EXT4_SB(sb);
5381 +- char *qname;
5382 ++ char *qname, *old_qname = get_qf_name(sb, sbi, qtype);
5383 + int ret = -1;
5384 +
5385 +- if (sb_any_quota_loaded(sb) &&
5386 +- !sbi->s_qf_names[qtype]) {
5387 ++ if (sb_any_quota_loaded(sb) && !old_qname) {
5388 + ext4_msg(sb, KERN_ERR,
5389 + "Cannot change journaled "
5390 + "quota options when quota turned on");
5391 +@@ -1494,8 +1505,8 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
5392 + "Not enough memory for storing quotafile name");
5393 + return -1;
5394 + }
5395 +- if (sbi->s_qf_names[qtype]) {
5396 +- if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
5397 ++ if (old_qname) {
5398 ++ if (strcmp(old_qname, qname) == 0)
5399 + ret = 1;
5400 + else
5401 + ext4_msg(sb, KERN_ERR,
5402 +@@ -1508,7 +1519,7 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
5403 + "quotafile must be on filesystem root");
5404 + goto errout;
5405 + }
5406 +- sbi->s_qf_names[qtype] = qname;
5407 ++ rcu_assign_pointer(sbi->s_qf_names[qtype], qname);
5408 + set_opt(sb, QUOTA);
5409 + return 1;
5410 + errout:
5411 +@@ -1520,15 +1531,16 @@ static int clear_qf_name(struct super_block *sb, int qtype)
5412 + {
5413 +
5414 + struct ext4_sb_info *sbi = EXT4_SB(sb);
5415 ++ char *old_qname = get_qf_name(sb, sbi, qtype);
5416 +
5417 +- if (sb_any_quota_loaded(sb) &&
5418 +- sbi->s_qf_names[qtype]) {
5419 ++ if (sb_any_quota_loaded(sb) && old_qname) {
5420 + ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
5421 + " when quota turned on");
5422 + return -1;
5423 + }
5424 +- kfree(sbi->s_qf_names[qtype]);
5425 +- sbi->s_qf_names[qtype] = NULL;
5426 ++ rcu_assign_pointer(sbi->s_qf_names[qtype], NULL);
5427 ++ synchronize_rcu();
5428 ++ kfree(old_qname);
5429 + return 1;
5430 + }
5431 + #endif
5432 +@@ -1901,7 +1913,7 @@ static int parse_options(char *options, struct super_block *sb,
5433 + int is_remount)
5434 + {
5435 + struct ext4_sb_info *sbi = EXT4_SB(sb);
5436 +- char *p;
5437 ++ char *p, __maybe_unused *usr_qf_name, __maybe_unused *grp_qf_name;
5438 + substring_t args[MAX_OPT_ARGS];
5439 + int token;
5440 +
5441 +@@ -1932,11 +1944,13 @@ static int parse_options(char *options, struct super_block *sb,
5442 + "Cannot enable project quota enforcement.");
5443 + return 0;
5444 + }
5445 +- if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
5446 +- if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
5447 ++ usr_qf_name = get_qf_name(sb, sbi, USRQUOTA);
5448 ++ grp_qf_name = get_qf_name(sb, sbi, GRPQUOTA);
5449 ++ if (usr_qf_name || grp_qf_name) {
5450 ++ if (test_opt(sb, USRQUOTA) && usr_qf_name)
5451 + clear_opt(sb, USRQUOTA);
5452 +
5453 +- if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
5454 ++ if (test_opt(sb, GRPQUOTA) && grp_qf_name)
5455 + clear_opt(sb, GRPQUOTA);
5456 +
5457 + if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
5458 +@@ -1970,6 +1984,7 @@ static inline void ext4_show_quota_options(struct seq_file *seq,
5459 + {
5460 + #if defined(CONFIG_QUOTA)
5461 + struct ext4_sb_info *sbi = EXT4_SB(sb);
5462 ++ char *usr_qf_name, *grp_qf_name;
5463 +
5464 + if (sbi->s_jquota_fmt) {
5465 + char *fmtname = "";
5466 +@@ -1988,11 +2003,14 @@ static inline void ext4_show_quota_options(struct seq_file *seq,
5467 + seq_printf(seq, ",jqfmt=%s", fmtname);
5468 + }
5469 +
5470 +- if (sbi->s_qf_names[USRQUOTA])
5471 +- seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);
5472 +-
5473 +- if (sbi->s_qf_names[GRPQUOTA])
5474 +- seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
5475 ++ rcu_read_lock();
5476 ++ usr_qf_name = rcu_dereference(sbi->s_qf_names[USRQUOTA]);
5477 ++ grp_qf_name = rcu_dereference(sbi->s_qf_names[GRPQUOTA]);
5478 ++ if (usr_qf_name)
5479 ++ seq_show_option(seq, "usrjquota", usr_qf_name);
5480 ++ if (grp_qf_name)
5481 ++ seq_show_option(seq, "grpjquota", grp_qf_name);
5482 ++ rcu_read_unlock();
5483 + #endif
5484 + }
5485 +
5486 +@@ -5038,6 +5056,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
5487 + int err = 0;
5488 + #ifdef CONFIG_QUOTA
5489 + int i, j;
5490 ++ char *to_free[EXT4_MAXQUOTAS];
5491 + #endif
5492 + char *orig_data = kstrdup(data, GFP_KERNEL);
5493 +
5494 +@@ -5054,8 +5073,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
5495 + old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
5496 + for (i = 0; i < EXT4_MAXQUOTAS; i++)
5497 + if (sbi->s_qf_names[i]) {
5498 +- old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
5499 +- GFP_KERNEL);
5500 ++ char *qf_name = get_qf_name(sb, sbi, i);
5501 ++
5502 ++ old_opts.s_qf_names[i] = kstrdup(qf_name, GFP_KERNEL);
5503 + if (!old_opts.s_qf_names[i]) {
5504 + for (j = 0; j < i; j++)
5505 + kfree(old_opts.s_qf_names[j]);
5506 +@@ -5277,9 +5297,12 @@ restore_opts:
5507 + #ifdef CONFIG_QUOTA
5508 + sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
5509 + for (i = 0; i < EXT4_MAXQUOTAS; i++) {
5510 +- kfree(sbi->s_qf_names[i]);
5511 +- sbi->s_qf_names[i] = old_opts.s_qf_names[i];
5512 ++ to_free[i] = get_qf_name(sb, sbi, i);
5513 ++ rcu_assign_pointer(sbi->s_qf_names[i], old_opts.s_qf_names[i]);
5514 + }
5515 ++ synchronize_rcu();
5516 ++ for (i = 0; i < EXT4_MAXQUOTAS; i++)
5517 ++ kfree(to_free[i]);
5518 + #endif
5519 + kfree(orig_data);
5520 + return err;
5521 +@@ -5469,7 +5492,7 @@ static int ext4_write_info(struct super_block *sb, int type)
5522 + */
5523 + static int ext4_quota_on_mount(struct super_block *sb, int type)
5524 + {
5525 +- return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
5526 ++ return dquot_quota_on_mount(sb, get_qf_name(sb, EXT4_SB(sb), type),
5527 + EXT4_SB(sb)->s_jquota_fmt, type);
5528 + }
5529 +
5530 +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
5531 +index e10bd73f0723..6fbb6d75318a 100644
5532 +--- a/fs/f2fs/data.c
5533 ++++ b/fs/f2fs/data.c
5534 +@@ -381,10 +381,10 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
5535 + }
5536 + bio_set_op_attrs(bio, fio->op, fio->op_flags);
5537 +
5538 +- __submit_bio(fio->sbi, bio, fio->type);
5539 +-
5540 + if (!is_read_io(fio->op))
5541 + inc_page_count(fio->sbi, WB_DATA_TYPE(fio->page));
5542 ++
5543 ++ __submit_bio(fio->sbi, bio, fio->type);
5544 + return 0;
5545 + }
5546 +
5547 +@@ -2190,10 +2190,6 @@ static int f2fs_set_data_page_dirty(struct page *page)
5548 + if (!PageUptodate(page))
5549 + SetPageUptodate(page);
5550 +
5551 +- /* don't remain PG_checked flag which was set during GC */
5552 +- if (is_cold_data(page))
5553 +- clear_cold_data(page);
5554 +-
5555 + if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
5556 + if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
5557 + register_inmem_page(inode, page);
5558 +diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
5559 +index 9626758bc762..765fadf954af 100644
5560 +--- a/fs/f2fs/recovery.c
5561 ++++ b/fs/f2fs/recovery.c
5562 +@@ -210,6 +210,7 @@ static void recover_inode(struct inode *inode, struct page *page)
5563 + inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
5564 +
5565 + F2FS_I(inode)->i_advise = raw->i_advise;
5566 ++ F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
5567 +
5568 + if (file_enc_name(inode))
5569 + name = "<encrypted>";
5570 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
5571 +index eae35909fa51..7cda685296b2 100644
5572 +--- a/fs/f2fs/super.c
5573 ++++ b/fs/f2fs/super.c
5574 +@@ -1488,7 +1488,9 @@ static int f2fs_quota_off(struct super_block *sb, int type)
5575 + if (!inode || !igrab(inode))
5576 + return dquot_quota_off(sb, type);
5577 +
5578 +- f2fs_quota_sync(sb, type);
5579 ++ err = f2fs_quota_sync(sb, type);
5580 ++ if (err)
5581 ++ goto out_put;
5582 +
5583 + err = dquot_quota_off(sb, type);
5584 + if (err)
5585 +@@ -1507,9 +1509,20 @@ out_put:
5586 + void f2fs_quota_off_umount(struct super_block *sb)
5587 + {
5588 + int type;
5589 ++ int err;
5590 +
5591 +- for (type = 0; type < MAXQUOTAS; type++)
5592 +- f2fs_quota_off(sb, type);
5593 ++ for (type = 0; type < MAXQUOTAS; type++) {
5594 ++ err = f2fs_quota_off(sb, type);
5595 ++ if (err) {
5596 ++ int ret = dquot_quota_off(sb, type);
5597 ++
5598 ++ f2fs_msg(sb, KERN_ERR,
5599 ++ "Fail to turn off disk quota "
5600 ++ "(type: %d, err: %d, ret:%d), Please "
5601 ++ "run fsck to fix it.", type, err, ret);
5602 ++ set_sbi_flag(F2FS_SB(sb), SBI_NEED_FSCK);
5603 ++ }
5604 ++ }
5605 + }
5606 +
5607 + int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
5608 +diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
5609 +index a3711f543405..28d6c65c8bb3 100644
5610 +--- a/fs/gfs2/ops_fstype.c
5611 ++++ b/fs/gfs2/ops_fstype.c
5612 +@@ -1352,6 +1352,9 @@ static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type,
5613 + struct path path;
5614 + int error;
5615 +
5616 ++ if (!dev_name || !*dev_name)
5617 ++ return ERR_PTR(-EINVAL);
5618 ++
5619 + error = kern_path(dev_name, LOOKUP_FOLLOW, &path);
5620 + if (error) {
5621 + pr_warn("path_lookup on %s returned error %d\n",
5622 +diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
5623 +index 4055f51617ef..fe4fe155b7fb 100644
5624 +--- a/fs/jbd2/checkpoint.c
5625 ++++ b/fs/jbd2/checkpoint.c
5626 +@@ -254,8 +254,8 @@ restart:
5627 + bh = jh2bh(jh);
5628 +
5629 + if (buffer_locked(bh)) {
5630 +- spin_unlock(&journal->j_list_lock);
5631 + get_bh(bh);
5632 ++ spin_unlock(&journal->j_list_lock);
5633 + wait_on_buffer(bh);
5634 + /* the journal_head may have gone by now */
5635 + BUFFER_TRACE(bh, "brelse");
5636 +@@ -336,8 +336,8 @@ restart2:
5637 + jh = transaction->t_checkpoint_io_list;
5638 + bh = jh2bh(jh);
5639 + if (buffer_locked(bh)) {
5640 +- spin_unlock(&journal->j_list_lock);
5641 + get_bh(bh);
5642 ++ spin_unlock(&journal->j_list_lock);
5643 + wait_on_buffer(bh);
5644 + /* the journal_head may have gone by now */
5645 + BUFFER_TRACE(bh, "brelse");
5646 +diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
5647 +index 33e01de576d2..bc00cc385b77 100644
5648 +--- a/fs/jffs2/super.c
5649 ++++ b/fs/jffs2/super.c
5650 +@@ -285,10 +285,8 @@ static int jffs2_fill_super(struct super_block *sb, void *data, int silent)
5651 + sb->s_fs_info = c;
5652 +
5653 + ret = jffs2_parse_options(c, data);
5654 +- if (ret) {
5655 +- kfree(c);
5656 ++ if (ret)
5657 + return -EINVAL;
5658 +- }
5659 +
5660 + /* Initialize JFFS2 superblock locks, the further initialization will
5661 + * be done later */
5662 +diff --git a/fs/lockd/host.c b/fs/lockd/host.c
5663 +index 0d4e590e0549..c4504ed9f680 100644
5664 +--- a/fs/lockd/host.c
5665 ++++ b/fs/lockd/host.c
5666 +@@ -341,7 +341,7 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
5667 + };
5668 + struct lockd_net *ln = net_generic(net, lockd_net_id);
5669 +
5670 +- dprintk("lockd: %s(host='%*s', vers=%u, proto=%s)\n", __func__,
5671 ++ dprintk("lockd: %s(host='%.*s', vers=%u, proto=%s)\n", __func__,
5672 + (int)hostname_len, hostname, rqstp->rq_vers,
5673 + (rqstp->rq_prot == IPPROTO_UDP ? "udp" : "tcp"));
5674 +
5675 +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
5676 +index fb85d04fdc4c..fed9c8005c17 100644
5677 +--- a/fs/nfs/nfs4client.c
5678 ++++ b/fs/nfs/nfs4client.c
5679 +@@ -925,10 +925,10 @@ EXPORT_SYMBOL_GPL(nfs4_set_ds_client);
5680 +
5681 + /*
5682 + * Session has been established, and the client marked ready.
5683 +- * Set the mount rsize and wsize with negotiated fore channel
5684 +- * attributes which will be bound checked in nfs_server_set_fsinfo.
5685 ++ * Limit the mount rsize, wsize and dtsize using negotiated fore
5686 ++ * channel attributes.
5687 + */
5688 +-static void nfs4_session_set_rwsize(struct nfs_server *server)
5689 ++static void nfs4_session_limit_rwsize(struct nfs_server *server)
5690 + {
5691 + #ifdef CONFIG_NFS_V4_1
5692 + struct nfs4_session *sess;
5693 +@@ -941,9 +941,11 @@ static void nfs4_session_set_rwsize(struct nfs_server *server)
5694 + server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
5695 + server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
5696 +
5697 +- if (!server->rsize || server->rsize > server_resp_sz)
5698 ++ if (server->dtsize > server_resp_sz)
5699 ++ server->dtsize = server_resp_sz;
5700 ++ if (server->rsize > server_resp_sz)
5701 + server->rsize = server_resp_sz;
5702 +- if (!server->wsize || server->wsize > server_rqst_sz)
5703 ++ if (server->wsize > server_rqst_sz)
5704 + server->wsize = server_rqst_sz;
5705 + #endif /* CONFIG_NFS_V4_1 */
5706 + }
5707 +@@ -990,12 +992,12 @@ static int nfs4_server_common_setup(struct nfs_server *server,
5708 + (unsigned long long) server->fsid.minor);
5709 + nfs_display_fhandle(mntfh, "Pseudo-fs root FH");
5710 +
5711 +- nfs4_session_set_rwsize(server);
5712 +-
5713 + error = nfs_probe_fsinfo(server, mntfh, fattr);
5714 + if (error < 0)
5715 + goto out;
5716 +
5717 ++ nfs4_session_limit_rwsize(server);
5718 ++
5719 + if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
5720 + server->namelen = NFS4_MAXNAMLEN;
5721 +
5722 +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
5723 +index d0543e19098a..37f20d7a26ed 100644
5724 +--- a/fs/nfs/pagelist.c
5725 ++++ b/fs/nfs/pagelist.c
5726 +@@ -1110,6 +1110,20 @@ static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
5727 + return ret;
5728 + }
5729 +
5730 ++static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
5731 ++{
5732 ++ u32 midx;
5733 ++ struct nfs_pgio_mirror *mirror;
5734 ++
5735 ++ if (!desc->pg_error)
5736 ++ return;
5737 ++
5738 ++ for (midx = 0; midx < desc->pg_mirror_count; midx++) {
5739 ++ mirror = &desc->pg_mirrors[midx];
5740 ++ desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
5741 ++ }
5742 ++}
5743 ++
5744 + int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
5745 + struct nfs_page *req)
5746 + {
5747 +@@ -1160,25 +1174,11 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
5748 + return 1;
5749 +
5750 + out_failed:
5751 +- /*
5752 +- * We might have failed before sending any reqs over wire.
5753 +- * Clean up rest of the reqs in mirror pg_list.
5754 +- */
5755 +- if (desc->pg_error) {
5756 +- struct nfs_pgio_mirror *mirror;
5757 +- void (*func)(struct list_head *);
5758 +-
5759 +- /* remember fatal errors */
5760 +- if (nfs_error_is_fatal(desc->pg_error))
5761 +- nfs_context_set_write_error(req->wb_context,
5762 +- desc->pg_error);
5763 +-
5764 +- func = desc->pg_completion_ops->error_cleanup;
5765 +- for (midx = 0; midx < desc->pg_mirror_count; midx++) {
5766 +- mirror = &desc->pg_mirrors[midx];
5767 +- func(&mirror->pg_list);
5768 +- }
5769 +- }
5770 ++ /* remember fatal errors */
5771 ++ if (nfs_error_is_fatal(desc->pg_error))
5772 ++ nfs_context_set_write_error(req->wb_context,
5773 ++ desc->pg_error);
5774 ++ nfs_pageio_error_cleanup(desc);
5775 + return 0;
5776 + }
5777 +
5778 +@@ -1250,6 +1250,8 @@ void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
5779 + for (midx = 0; midx < desc->pg_mirror_count; midx++)
5780 + nfs_pageio_complete_mirror(desc, midx);
5781 +
5782 ++ if (desc->pg_error < 0)
5783 ++ nfs_pageio_error_cleanup(desc);
5784 + if (desc->pg_ops->pg_cleanup)
5785 + desc->pg_ops->pg_cleanup(desc);
5786 + nfs_pageio_cleanup_mirroring(desc);
5787 +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
5788 +index 519522d39bde..2b47757c9c68 100644
5789 +--- a/fs/proc/task_mmu.c
5790 ++++ b/fs/proc/task_mmu.c
5791 +@@ -768,6 +768,8 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
5792 + smaps_walk.private = mss;
5793 +
5794 + #ifdef CONFIG_SHMEM
5795 ++ /* In case of smaps_rollup, reset the value from previous vma */
5796 ++ mss->check_shmem_swap = false;
5797 + if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
5798 + /*
5799 + * For shared or readonly shmem mappings we know that all
5800 +@@ -783,7 +785,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
5801 +
5802 + if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
5803 + !(vma->vm_flags & VM_WRITE)) {
5804 +- mss->swap = shmem_swapped;
5805 ++ mss->swap += shmem_swapped;
5806 + } else {
5807 + mss->check_shmem_swap = true;
5808 + smaps_walk.pte_hole = smaps_pte_hole;
5809 +diff --git a/include/linux/compat.h b/include/linux/compat.h
5810 +index 3e838a828459..23909d12f729 100644
5811 +--- a/include/linux/compat.h
5812 ++++ b/include/linux/compat.h
5813 +@@ -68,6 +68,9 @@ typedef struct compat_sigaltstack {
5814 + compat_size_t ss_size;
5815 + } compat_stack_t;
5816 + #endif
5817 ++#ifndef COMPAT_MINSIGSTKSZ
5818 ++#define COMPAT_MINSIGSTKSZ MINSIGSTKSZ
5819 ++#endif
5820 +
5821 + #define compat_jiffies_to_clock_t(x) \
5822 + (((unsigned long)(x) * COMPAT_USER_HZ) / HZ)
5823 +diff --git a/include/linux/signal.h b/include/linux/signal.h
5824 +index 042968dd98f0..843bd62b1ead 100644
5825 +--- a/include/linux/signal.h
5826 ++++ b/include/linux/signal.h
5827 +@@ -34,7 +34,7 @@ enum siginfo_layout {
5828 + #endif
5829 + };
5830 +
5831 +-enum siginfo_layout siginfo_layout(int sig, int si_code);
5832 ++enum siginfo_layout siginfo_layout(unsigned sig, int si_code);
5833 +
5834 + /*
5835 + * Define some primitives to manipulate sigset_t.
5836 +diff --git a/include/linux/tc.h b/include/linux/tc.h
5837 +index f92511e57cdb..a60639f37963 100644
5838 +--- a/include/linux/tc.h
5839 ++++ b/include/linux/tc.h
5840 +@@ -84,6 +84,7 @@ struct tc_dev {
5841 + device. */
5842 + struct device dev; /* Generic device interface. */
5843 + struct resource resource; /* Address space of this device. */
5844 ++ u64 dma_mask; /* DMA addressable range. */
5845 + char vendor[9];
5846 + char name[9];
5847 + char firmware[9];
5848 +diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
5849 +index 3f03567631cb..145f242c7c90 100644
5850 +--- a/include/uapi/linux/ndctl.h
5851 ++++ b/include/uapi/linux/ndctl.h
5852 +@@ -176,37 +176,31 @@ enum {
5853 +
5854 + static inline const char *nvdimm_bus_cmd_name(unsigned cmd)
5855 + {
5856 +- static const char * const names[] = {
5857 +- [ND_CMD_ARS_CAP] = "ars_cap",
5858 +- [ND_CMD_ARS_START] = "ars_start",
5859 +- [ND_CMD_ARS_STATUS] = "ars_status",
5860 +- [ND_CMD_CLEAR_ERROR] = "clear_error",
5861 +- [ND_CMD_CALL] = "cmd_call",
5862 +- };
5863 +-
5864 +- if (cmd < ARRAY_SIZE(names) && names[cmd])
5865 +- return names[cmd];
5866 +- return "unknown";
5867 ++ switch (cmd) {
5868 ++ case ND_CMD_ARS_CAP: return "ars_cap";
5869 ++ case ND_CMD_ARS_START: return "ars_start";
5870 ++ case ND_CMD_ARS_STATUS: return "ars_status";
5871 ++ case ND_CMD_CLEAR_ERROR: return "clear_error";
5872 ++ case ND_CMD_CALL: return "cmd_call";
5873 ++ default: return "unknown";
5874 ++ }
5875 + }
5876 +
5877 + static inline const char *nvdimm_cmd_name(unsigned cmd)
5878 + {
5879 +- static const char * const names[] = {
5880 +- [ND_CMD_SMART] = "smart",
5881 +- [ND_CMD_SMART_THRESHOLD] = "smart_thresh",
5882 +- [ND_CMD_DIMM_FLAGS] = "flags",
5883 +- [ND_CMD_GET_CONFIG_SIZE] = "get_size",
5884 +- [ND_CMD_GET_CONFIG_DATA] = "get_data",
5885 +- [ND_CMD_SET_CONFIG_DATA] = "set_data",
5886 +- [ND_CMD_VENDOR_EFFECT_LOG_SIZE] = "effect_size",
5887 +- [ND_CMD_VENDOR_EFFECT_LOG] = "effect_log",
5888 +- [ND_CMD_VENDOR] = "vendor",
5889 +- [ND_CMD_CALL] = "cmd_call",
5890 +- };
5891 +-
5892 +- if (cmd < ARRAY_SIZE(names) && names[cmd])
5893 +- return names[cmd];
5894 +- return "unknown";
5895 ++ switch (cmd) {
5896 ++ case ND_CMD_SMART: return "smart";
5897 ++ case ND_CMD_SMART_THRESHOLD: return "smart_thresh";
5898 ++ case ND_CMD_DIMM_FLAGS: return "flags";
5899 ++ case ND_CMD_GET_CONFIG_SIZE: return "get_size";
5900 ++ case ND_CMD_GET_CONFIG_DATA: return "get_data";
5901 ++ case ND_CMD_SET_CONFIG_DATA: return "set_data";
5902 ++ case ND_CMD_VENDOR_EFFECT_LOG_SIZE: return "effect_size";
5903 ++ case ND_CMD_VENDOR_EFFECT_LOG: return "effect_log";
5904 ++ case ND_CMD_VENDOR: return "vendor";
5905 ++ case ND_CMD_CALL: return "cmd_call";
5906 ++ default: return "unknown";
5907 ++ }
5908 + }
5909 +
5910 + #define ND_IOCTL 'N'
5911 +diff --git a/kernel/bounds.c b/kernel/bounds.c
5912 +index c373e887c066..9795d75b09b2 100644
5913 +--- a/kernel/bounds.c
5914 ++++ b/kernel/bounds.c
5915 +@@ -13,7 +13,7 @@
5916 + #include <linux/log2.h>
5917 + #include <linux/spinlock_types.h>
5918 +
5919 +-void foo(void)
5920 ++int main(void)
5921 + {
5922 + /* The enum constants to put into include/generated/bounds.h */
5923 + DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
5924 +@@ -23,4 +23,6 @@ void foo(void)
5925 + #endif
5926 + DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
5927 + /* End of constants */
5928 ++
5929 ++ return 0;
5930 + }
5931 +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
5932 +index ea22d0b6a9f0..5c9deed4524e 100644
5933 +--- a/kernel/bpf/syscall.c
5934 ++++ b/kernel/bpf/syscall.c
5935 +@@ -519,6 +519,17 @@ err_put:
5936 + return err;
5937 + }
5938 +
5939 ++static void maybe_wait_bpf_programs(struct bpf_map *map)
5940 ++{
5941 ++ /* Wait for any running BPF programs to complete so that
5942 ++ * userspace, when we return to it, knows that all programs
5943 ++ * that could be running use the new map value.
5944 ++ */
5945 ++ if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
5946 ++ map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
5947 ++ synchronize_rcu();
5948 ++}
5949 ++
5950 + #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
5951 +
5952 + static int map_update_elem(union bpf_attr *attr)
5953 +@@ -592,6 +603,7 @@ static int map_update_elem(union bpf_attr *attr)
5954 + }
5955 + __this_cpu_dec(bpf_prog_active);
5956 + preempt_enable();
5957 ++ maybe_wait_bpf_programs(map);
5958 +
5959 + if (!err)
5960 + trace_bpf_map_update_elem(map, ufd, key, value);
5961 +@@ -636,6 +648,7 @@ static int map_delete_elem(union bpf_attr *attr)
5962 + rcu_read_unlock();
5963 + __this_cpu_dec(bpf_prog_active);
5964 + preempt_enable();
5965 ++ maybe_wait_bpf_programs(map);
5966 +
5967 + if (!err)
5968 + trace_bpf_map_delete_elem(map, ufd, key);
5969 +diff --git a/kernel/cpu.c b/kernel/cpu.c
5970 +index f3f389e33343..90cf6a04e08a 100644
5971 +--- a/kernel/cpu.c
5972 ++++ b/kernel/cpu.c
5973 +@@ -2045,6 +2045,12 @@ static void cpuhp_online_cpu_device(unsigned int cpu)
5974 + kobject_uevent(&dev->kobj, KOBJ_ONLINE);
5975 + }
5976 +
5977 ++/*
5978 ++ * Architectures that need SMT-specific errata handling during SMT hotplug
5979 ++ * should override this.
5980 ++ */
5981 ++void __weak arch_smt_update(void) { };
5982 ++
5983 + static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
5984 + {
5985 + int cpu, ret = 0;
5986 +@@ -2071,8 +2077,10 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
5987 + */
5988 + cpuhp_offline_cpu_device(cpu);
5989 + }
5990 +- if (!ret)
5991 ++ if (!ret) {
5992 + cpu_smt_control = ctrlval;
5993 ++ arch_smt_update();
5994 ++ }
5995 + cpu_maps_update_done();
5996 + return ret;
5997 + }
5998 +@@ -2083,6 +2091,7 @@ static int cpuhp_smt_enable(void)
5999 +
6000 + cpu_maps_update_begin();
6001 + cpu_smt_control = CPU_SMT_ENABLED;
6002 ++ arch_smt_update();
6003 + for_each_present_cpu(cpu) {
6004 + /* Skip online CPUs and CPUs on offline nodes */
6005 + if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
6006 +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
6007 +index 069311541577..4cd85870f00e 100644
6008 +--- a/kernel/irq/manage.c
6009 ++++ b/kernel/irq/manage.c
6010 +@@ -882,6 +882,9 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
6011 +
6012 + local_bh_disable();
6013 + ret = action->thread_fn(action->irq, action->dev_id);
6014 ++ if (ret == IRQ_HANDLED)
6015 ++ atomic_inc(&desc->threads_handled);
6016 ++
6017 + irq_finalize_oneshot(desc, action);
6018 + local_bh_enable();
6019 + return ret;
6020 +@@ -898,6 +901,9 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
6021 + irqreturn_t ret;
6022 +
6023 + ret = action->thread_fn(action->irq, action->dev_id);
6024 ++ if (ret == IRQ_HANDLED)
6025 ++ atomic_inc(&desc->threads_handled);
6026 ++
6027 + irq_finalize_oneshot(desc, action);
6028 + return ret;
6029 + }
6030 +@@ -975,8 +981,6 @@ static int irq_thread(void *data)
6031 + irq_thread_check_affinity(desc, action);
6032 +
6033 + action_ret = handler_fn(desc, action);
6034 +- if (action_ret == IRQ_HANDLED)
6035 +- atomic_inc(&desc->threads_handled);
6036 + if (action_ret == IRQ_WAKE_THREAD)
6037 + irq_wake_secondary(desc, action);
6038 +
6039 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
6040 +index 5c90765d37e7..5cbad4fb9107 100644
6041 +--- a/kernel/kprobes.c
6042 ++++ b/kernel/kprobes.c
6043 +@@ -700,9 +700,10 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
6044 + }
6045 +
6046 + /* Cancel unoptimizing for reusing */
6047 +-static void reuse_unused_kprobe(struct kprobe *ap)
6048 ++static int reuse_unused_kprobe(struct kprobe *ap)
6049 + {
6050 + struct optimized_kprobe *op;
6051 ++ int ret;
6052 +
6053 + BUG_ON(!kprobe_unused(ap));
6054 + /*
6055 +@@ -716,8 +717,12 @@ static void reuse_unused_kprobe(struct kprobe *ap)
6056 + /* Enable the probe again */
6057 + ap->flags &= ~KPROBE_FLAG_DISABLED;
6058 + /* Optimize it again (remove from op->list) */
6059 +- BUG_ON(!kprobe_optready(ap));
6060 ++ ret = kprobe_optready(ap);
6061 ++ if (ret)
6062 ++ return ret;
6063 ++
6064 + optimize_kprobe(ap);
6065 ++ return 0;
6066 + }
6067 +
6068 + /* Remove optimized instructions */
6069 +@@ -942,11 +947,16 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
6070 + #define kprobe_disarmed(p) kprobe_disabled(p)
6071 + #define wait_for_kprobe_optimizer() do {} while (0)
6072 +
6073 +-/* There should be no unused kprobes can be reused without optimization */
6074 +-static void reuse_unused_kprobe(struct kprobe *ap)
6075 ++static int reuse_unused_kprobe(struct kprobe *ap)
6076 + {
6077 ++ /*
6078 ++ * If the optimized kprobe is NOT supported, the aggr kprobe is
6079 ++ * released at the same time that the last aggregated kprobe is
6080 ++ * unregistered.
6081 ++ * Thus there should be no chance to reuse unused kprobe.
6082 ++ */
6083 + printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
6084 +- BUG_ON(kprobe_unused(ap));
6085 ++ return -EINVAL;
6086 + }
6087 +
6088 + static void free_aggr_kprobe(struct kprobe *p)
6089 +@@ -1320,9 +1330,12 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
6090 + goto out;
6091 + }
6092 + init_aggr_kprobe(ap, orig_p);
6093 +- } else if (kprobe_unused(ap))
6094 ++ } else if (kprobe_unused(ap)) {
6095 + /* This probe is going to die. Rescue it */
6096 +- reuse_unused_kprobe(ap);
6097 ++ ret = reuse_unused_kprobe(ap);
6098 ++ if (ret)
6099 ++ goto out;
6100 ++ }
6101 +
6102 + if (kprobe_gone(ap)) {
6103 + /*
6104 +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
6105 +index d7c155048ea9..bf694c709b96 100644
6106 +--- a/kernel/locking/lockdep.c
6107 ++++ b/kernel/locking/lockdep.c
6108 +@@ -4215,7 +4215,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
6109 + {
6110 + unsigned long flags;
6111 +
6112 +- if (unlikely(!lock_stat))
6113 ++ if (unlikely(!lock_stat || !debug_locks))
6114 + return;
6115 +
6116 + if (unlikely(current->lockdep_recursion))
6117 +@@ -4235,7 +4235,7 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
6118 + {
6119 + unsigned long flags;
6120 +
6121 +- if (unlikely(!lock_stat))
6122 ++ if (unlikely(!lock_stat || !debug_locks))
6123 + return;
6124 +
6125 + if (unlikely(current->lockdep_recursion))
6126 +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
6127 +index f0223a7d9ed1..7161312593dd 100644
6128 +--- a/kernel/printk/printk.c
6129 ++++ b/kernel/printk/printk.c
6130 +@@ -1043,7 +1043,12 @@ static void __init log_buf_len_update(unsigned size)
6131 + /* save requested log_buf_len since it's too early to process it */
6132 + static int __init log_buf_len_setup(char *str)
6133 + {
6134 +- unsigned size = memparse(str, &str);
6135 ++ unsigned int size;
6136 ++
6137 ++ if (!str)
6138 ++ return -EINVAL;
6139 ++
6140 ++ size = memparse(str, &str);
6141 +
6142 + log_buf_len_update(size);
6143 +
6144 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
6145 +index 19bfa21f7197..2d4d79420e36 100644
6146 +--- a/kernel/sched/fair.c
6147 ++++ b/kernel/sched/fair.c
6148 +@@ -3825,7 +3825,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
6149 + * put back on, and if we advance min_vruntime, we'll be placed back
6150 + * further than we started -- ie. we'll be penalized.
6151 + */
6152 +- if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
6153 ++ if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
6154 + update_min_vruntime(cfs_rq);
6155 + }
6156 +
6157 +diff --git a/kernel/signal.c b/kernel/signal.c
6158 +index 4439ba9dc5d9..164c36ef0825 100644
6159 +--- a/kernel/signal.c
6160 ++++ b/kernel/signal.c
6161 +@@ -1003,7 +1003,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
6162 +
6163 + result = TRACE_SIGNAL_IGNORED;
6164 + if (!prepare_signal(sig, t,
6165 +- from_ancestor_ns || (info == SEND_SIG_FORCED)))
6166 ++ from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
6167 + goto ret;
6168 +
6169 + pending = group ? &t->signal->shared_pending : &t->pending;
6170 +@@ -2700,7 +2700,7 @@ COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
6171 + }
6172 + #endif
6173 +
6174 +-enum siginfo_layout siginfo_layout(int sig, int si_code)
6175 ++enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
6176 + {
6177 + enum siginfo_layout layout = SIL_KILL;
6178 + if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
6179 +@@ -3215,7 +3215,8 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
6180 + }
6181 +
6182 + static int
6183 +-do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp)
6184 ++do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
6185 ++ size_t min_ss_size)
6186 + {
6187 + struct task_struct *t = current;
6188 +
6189 +@@ -3245,7 +3246,7 @@ do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp)
6190 + ss_size = 0;
6191 + ss_sp = NULL;
6192 + } else {
6193 +- if (unlikely(ss_size < MINSIGSTKSZ))
6194 ++ if (unlikely(ss_size < min_ss_size))
6195 + return -ENOMEM;
6196 + }
6197 +
6198 +@@ -3263,7 +3264,8 @@ SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
6199 + if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
6200 + return -EFAULT;
6201 + err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
6202 +- current_user_stack_pointer());
6203 ++ current_user_stack_pointer(),
6204 ++ MINSIGSTKSZ);
6205 + if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
6206 + err = -EFAULT;
6207 + return err;
6208 +@@ -3274,7 +3276,8 @@ int restore_altstack(const stack_t __user *uss)
6209 + stack_t new;
6210 + if (copy_from_user(&new, uss, sizeof(stack_t)))
6211 + return -EFAULT;
6212 +- (void)do_sigaltstack(&new, NULL, current_user_stack_pointer());
6213 ++ (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
6214 ++ MINSIGSTKSZ);
6215 + /* squash all but EFAULT for now */
6216 + return 0;
6217 + }
6218 +@@ -3309,7 +3312,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
6219 + uss.ss_size = uss32.ss_size;
6220 + }
6221 + ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
6222 +- compat_user_stack_pointer());
6223 ++ compat_user_stack_pointer(),
6224 ++ COMPAT_MINSIGSTKSZ);
6225 + if (ret >= 0 && uoss_ptr) {
6226 + compat_stack_t old;
6227 + memset(&old, 0, sizeof(old));
6228 +diff --git a/lib/debug_locks.c b/lib/debug_locks.c
6229 +index 96c4c633d95e..124fdf238b3d 100644
6230 +--- a/lib/debug_locks.c
6231 ++++ b/lib/debug_locks.c
6232 +@@ -37,7 +37,7 @@ EXPORT_SYMBOL_GPL(debug_locks_silent);
6233 + */
6234 + int debug_locks_off(void)
6235 + {
6236 +- if (__debug_locks_off()) {
6237 ++ if (debug_locks && __debug_locks_off()) {
6238 + if (!debug_locks_silent) {
6239 + console_verbose();
6240 + return 1;
6241 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
6242 +index 9801dc0250e2..e073099083ca 100644
6243 +--- a/mm/hugetlb.c
6244 ++++ b/mm/hugetlb.c
6245 +@@ -3644,6 +3644,12 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
6246 + return err;
6247 + ClearPagePrivate(page);
6248 +
6249 ++ /*
6250 ++ * set page dirty so that it will not be removed from cache/file
6251 ++ * by non-hugetlbfs specific code paths.
6252 ++ */
6253 ++ set_page_dirty(page);
6254 ++
6255 + spin_lock(&inode->i_lock);
6256 + inode->i_blocks += blocks_per_huge_page(h);
6257 + spin_unlock(&inode->i_lock);
6258 +diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
6259 +index 956015614395..e00d985a51c5 100644
6260 +--- a/mm/page_vma_mapped.c
6261 ++++ b/mm/page_vma_mapped.c
6262 +@@ -21,7 +21,29 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
6263 + if (!is_swap_pte(*pvmw->pte))
6264 + return false;
6265 + } else {
6266 +- if (!pte_present(*pvmw->pte))
6267 ++ /*
6268 ++ * We get here when we are trying to unmap a private
6269 ++ * device page from the process address space. Such
6270 ++ * page is not CPU accessible and thus is mapped as
6271 ++ * a special swap entry, nonetheless it still does
6272 ++ * count as a valid regular mapping for the page (and
6273 ++ * is accounted as such in page maps count).
6274 ++ *
6275 ++ * So handle this special case as if it was a normal
6276 ++ * page mapping ie lock CPU page table and returns
6277 ++ * true.
6278 ++ *
6279 ++ * For more details on device private memory see HMM
6280 ++ * (include/linux/hmm.h or mm/hmm.c).
6281 ++ */
6282 ++ if (is_swap_pte(*pvmw->pte)) {
6283 ++ swp_entry_t entry;
6284 ++
6285 ++ /* Handle un-addressable ZONE_DEVICE memory */
6286 ++ entry = pte_to_swp_entry(*pvmw->pte);
6287 ++ if (!is_device_private_entry(entry))
6288 ++ return false;
6289 ++ } else if (!pte_present(*pvmw->pte))
6290 + return false;
6291 + }
6292 + }
6293 +diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
6294 +index 5e4f04004a49..7bf833598615 100644
6295 +--- a/net/core/netclassid_cgroup.c
6296 ++++ b/net/core/netclassid_cgroup.c
6297 +@@ -106,6 +106,7 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
6298 + iterate_fd(p->files, 0, update_classid_sock,
6299 + (void *)(unsigned long)cs->classid);
6300 + task_unlock(p);
6301 ++ cond_resched();
6302 + }
6303 + css_task_iter_end(&it);
6304 +
6305 +diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
6306 +index 82178cc69c96..777fa3b7fb13 100644
6307 +--- a/net/ipv4/cipso_ipv4.c
6308 ++++ b/net/ipv4/cipso_ipv4.c
6309 +@@ -1512,7 +1512,7 @@ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def,
6310 + *
6311 + * Description:
6312 + * Parse the packet's IP header looking for a CIPSO option. Returns a pointer
6313 +- * to the start of the CIPSO option on success, NULL if one if not found.
6314 ++ * to the start of the CIPSO option on success, NULL if one is not found.
6315 + *
6316 + */
6317 + unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
6318 +@@ -1522,10 +1522,8 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
6319 + int optlen;
6320 + int taglen;
6321 +
6322 +- for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) {
6323 ++ for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 1; ) {
6324 + switch (optptr[0]) {
6325 +- case IPOPT_CIPSO:
6326 +- return optptr;
6327 + case IPOPT_END:
6328 + return NULL;
6329 + case IPOPT_NOOP:
6330 +@@ -1534,6 +1532,11 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
6331 + default:
6332 + taglen = optptr[1];
6333 + }
6334 ++ if (!taglen || taglen > optlen)
6335 ++ return NULL;
6336 ++ if (optptr[0] == IPOPT_CIPSO)
6337 ++ return optptr;
6338 ++
6339 + optlen -= taglen;
6340 + optptr += taglen;
6341 + }
6342 +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
6343 +index 691ca96f7460..7b4270987ac1 100644
6344 +--- a/net/sched/sch_api.c
6345 ++++ b/net/sched/sch_api.c
6346 +@@ -1218,7 +1218,6 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
6347 +
6348 + const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
6349 + [TCA_KIND] = { .type = NLA_STRING },
6350 +- [TCA_OPTIONS] = { .type = NLA_NESTED },
6351 + [TCA_RATE] = { .type = NLA_BINARY,
6352 + .len = sizeof(struct tc_estimator) },
6353 + [TCA_STAB] = { .type = NLA_NESTED },
6354 +diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
6355 +index d16a8b423c20..ea7b5a3a53f0 100644
6356 +--- a/net/sunrpc/svc_xprt.c
6357 ++++ b/net/sunrpc/svc_xprt.c
6358 +@@ -1040,7 +1040,7 @@ static void call_xpt_users(struct svc_xprt *xprt)
6359 + spin_lock(&xprt->xpt_lock);
6360 + while (!list_empty(&xprt->xpt_users)) {
6361 + u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
6362 +- list_del(&u->list);
6363 ++ list_del_init(&u->list);
6364 + u->callback(u);
6365 + }
6366 + spin_unlock(&xprt->xpt_lock);
6367 +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
6368 +index 37c32e73aaef..70ec57b887f6 100644
6369 +--- a/net/xfrm/xfrm_policy.c
6370 ++++ b/net/xfrm/xfrm_policy.c
6371 +@@ -626,9 +626,9 @@ static void xfrm_hash_rebuild(struct work_struct *work)
6372 + break;
6373 + }
6374 + if (newpos)
6375 +- hlist_add_behind(&policy->bydst, newpos);
6376 ++ hlist_add_behind_rcu(&policy->bydst, newpos);
6377 + else
6378 +- hlist_add_head(&policy->bydst, chain);
6379 ++ hlist_add_head_rcu(&policy->bydst, chain);
6380 + }
6381 +
6382 + spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
6383 +@@ -767,9 +767,9 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
6384 + break;
6385 + }
6386 + if (newpos)
6387 +- hlist_add_behind(&policy->bydst, newpos);
6388 ++ hlist_add_behind_rcu(&policy->bydst, newpos);
6389 + else
6390 +- hlist_add_head(&policy->bydst, chain);
6391 ++ hlist_add_head_rcu(&policy->bydst, chain);
6392 + __xfrm_policy_link(policy, dir);
6393 +
6394 + /* After previous checking, family can either be AF_INET or AF_INET6 */
6395 +diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
6396 +index ad491c51e833..2c4e83f6409e 100644
6397 +--- a/security/integrity/ima/ima_fs.c
6398 ++++ b/security/integrity/ima/ima_fs.c
6399 +@@ -39,14 +39,14 @@ static int __init default_canonical_fmt_setup(char *str)
6400 + __setup("ima_canonical_fmt", default_canonical_fmt_setup);
6401 +
6402 + static int valid_policy = 1;
6403 +-#define TMPBUFLEN 12
6404 ++
6405 + static ssize_t ima_show_htable_value(char __user *buf, size_t count,
6406 + loff_t *ppos, atomic_long_t *val)
6407 + {
6408 +- char tmpbuf[TMPBUFLEN];
6409 ++ char tmpbuf[32]; /* greater than largest 'long' string value */
6410 + ssize_t len;
6411 +
6412 +- len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
6413 ++ len = scnprintf(tmpbuf, sizeof(tmpbuf), "%li\n", atomic_long_read(val));
6414 + return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
6415 + }
6416 +
6417 +diff --git a/sound/pci/ca0106/ca0106.h b/sound/pci/ca0106/ca0106.h
6418 +index 04402c14cb23..9847b669cf3c 100644
6419 +--- a/sound/pci/ca0106/ca0106.h
6420 ++++ b/sound/pci/ca0106/ca0106.h
6421 +@@ -582,7 +582,7 @@
6422 + #define SPI_PL_BIT_R_R (2<<7) /* right channel = right */
6423 + #define SPI_PL_BIT_R_C (3<<7) /* right channel = (L+R)/2 */
6424 + #define SPI_IZD_REG 2
6425 +-#define SPI_IZD_BIT (1<<4) /* infinite zero detect */
6426 ++#define SPI_IZD_BIT (0<<4) /* infinite zero detect */
6427 +
6428 + #define SPI_FMT_REG 3
6429 + #define SPI_FMT_BIT_RJ (0<<0) /* right justified mode */
6430 +diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
6431 +index a68e75b00ea3..53c3cd28bc99 100644
6432 +--- a/sound/pci/hda/hda_controller.h
6433 ++++ b/sound/pci/hda/hda_controller.h
6434 +@@ -160,6 +160,7 @@ struct azx {
6435 + unsigned int msi:1;
6436 + unsigned int probing:1; /* codec probing phase */
6437 + unsigned int snoop:1;
6438 ++ unsigned int uc_buffer:1; /* non-cached pages for stream buffers */
6439 + unsigned int align_buffer_size:1;
6440 + unsigned int region_requested:1;
6441 + unsigned int disabled:1; /* disabled by vga_switcheroo */
6442 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
6443 +index 873d9824fbcf..4e38905bc47d 100644
6444 +--- a/sound/pci/hda/hda_intel.c
6445 ++++ b/sound/pci/hda/hda_intel.c
6446 +@@ -410,7 +410,7 @@ static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool
6447 + #ifdef CONFIG_SND_DMA_SGBUF
6448 + if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG) {
6449 + struct snd_sg_buf *sgbuf = dmab->private_data;
6450 +- if (chip->driver_type == AZX_DRIVER_CMEDIA)
6451 ++ if (!chip->uc_buffer)
6452 + return; /* deal with only CORB/RIRB buffers */
6453 + if (on)
6454 + set_pages_array_wc(sgbuf->page_table, sgbuf->pages);
6455 +@@ -1634,6 +1634,7 @@ static void azx_check_snoop_available(struct azx *chip)
6456 + dev_info(chip->card->dev, "Force to %s mode by module option\n",
6457 + snoop ? "snoop" : "non-snoop");
6458 + chip->snoop = snoop;
6459 ++ chip->uc_buffer = !snoop;
6460 + return;
6461 + }
6462 +
6463 +@@ -1654,8 +1655,12 @@ static void azx_check_snoop_available(struct azx *chip)
6464 + snoop = false;
6465 +
6466 + chip->snoop = snoop;
6467 +- if (!snoop)
6468 ++ if (!snoop) {
6469 + dev_info(chip->card->dev, "Force to non-snoop mode\n");
6470 ++ /* C-Media requires non-cached pages only for CORB/RIRB */
6471 ++ if (chip->driver_type != AZX_DRIVER_CMEDIA)
6472 ++ chip->uc_buffer = true;
6473 ++ }
6474 + }
6475 +
6476 + static void azx_probe_work(struct work_struct *work)
6477 +@@ -2094,7 +2099,7 @@ static void pcm_mmap_prepare(struct snd_pcm_substream *substream,
6478 + #ifdef CONFIG_X86
6479 + struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
6480 + struct azx *chip = apcm->chip;
6481 +- if (!azx_snoop(chip) && chip->driver_type != AZX_DRIVER_CMEDIA)
6482 ++ if (chip->uc_buffer)
6483 + area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
6484 + #endif
6485 + }
6486 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
6487 +index 16197ad4512a..0cc0ced1f2ed 100644
6488 +--- a/sound/pci/hda/patch_conexant.c
6489 ++++ b/sound/pci/hda/patch_conexant.c
6490 +@@ -981,6 +981,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
6491 + SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410),
6492 + SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410),
6493 + SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD),
6494 ++ SND_PCI_QUIRK(0x17aa, 0x3905, "Lenovo G50-30", CXT_FIXUP_STEREO_DMIC),
6495 + SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
6496 + SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
6497 + SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
6498 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
6499 +index fe5c741fcc6a..eb8807de3ebc 100644
6500 +--- a/sound/pci/hda/patch_realtek.c
6501 ++++ b/sound/pci/hda/patch_realtek.c
6502 +@@ -6629,6 +6629,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
6503 + {0x1a, 0x02a11040},
6504 + {0x1b, 0x01014020},
6505 + {0x21, 0x0221101f}),
6506 ++ SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
6507 ++ {0x14, 0x90170110},
6508 ++ {0x19, 0x02a11030},
6509 ++ {0x1a, 0x02a11040},
6510 ++ {0x1b, 0x01011020},
6511 ++ {0x21, 0x0221101f}),
6512 + SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
6513 + {0x14, 0x90170110},
6514 + {0x19, 0x02a11020},
6515 +@@ -7515,6 +7521,8 @@ enum {
6516 + ALC662_FIXUP_ASUS_Nx50,
6517 + ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE,
6518 + ALC668_FIXUP_ASUS_Nx51,
6519 ++ ALC668_FIXUP_MIC_COEF,
6520 ++ ALC668_FIXUP_ASUS_G751,
6521 + ALC891_FIXUP_HEADSET_MODE,
6522 + ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
6523 + ALC662_FIXUP_ACER_VERITON,
6524 +@@ -7784,6 +7792,23 @@ static const struct hda_fixup alc662_fixups[] = {
6525 + .chained = true,
6526 + .chain_id = ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE,
6527 + },
6528 ++ [ALC668_FIXUP_MIC_COEF] = {
6529 ++ .type = HDA_FIXUP_VERBS,
6530 ++ .v.verbs = (const struct hda_verb[]) {
6531 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0xc3 },
6532 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x4000 },
6533 ++ {}
6534 ++ },
6535 ++ },
6536 ++ [ALC668_FIXUP_ASUS_G751] = {
6537 ++ .type = HDA_FIXUP_PINS,
6538 ++ .v.pins = (const struct hda_pintbl[]) {
6539 ++ { 0x16, 0x0421101f }, /* HP */
6540 ++ {}
6541 ++ },
6542 ++ .chained = true,
6543 ++ .chain_id = ALC668_FIXUP_MIC_COEF
6544 ++ },
6545 + [ALC891_FIXUP_HEADSET_MODE] = {
6546 + .type = HDA_FIXUP_FUNC,
6547 + .v.func = alc_fixup_headset_mode,
6548 +@@ -7857,6 +7882,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
6549 + SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
6550 + SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
6551 + SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
6552 ++ SND_PCI_QUIRK(0x1043, 0x12ff, "ASUS G751", ALC668_FIXUP_ASUS_G751),
6553 + SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
6554 + SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
6555 + SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
6556 +diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
6557 +index 22f768ca3c73..b45c1ae60f94 100644
6558 +--- a/sound/soc/intel/skylake/skl-topology.c
6559 ++++ b/sound/soc/intel/skylake/skl-topology.c
6560 +@@ -2360,6 +2360,7 @@ static int skl_tplg_get_token(struct device *dev,
6561 +
6562 + case SKL_TKN_U8_CORE_ID:
6563 + mconfig->core_id = tkn_elem->value;
6564 ++ break;
6565 +
6566 + case SKL_TKN_U8_MOD_TYPE:
6567 + mconfig->m_type = tkn_elem->value;
6568 +diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
6569 +index 63f534a0902f..f362ee46506a 100644
6570 +--- a/tools/perf/Makefile.config
6571 ++++ b/tools/perf/Makefile.config
6572 +@@ -795,7 +795,7 @@ ifndef NO_JVMTI
6573 + JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
6574 + else
6575 + ifneq (,$(wildcard /usr/sbin/alternatives))
6576 +- JDIR=$(shell alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
6577 ++ JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
6578 + endif
6579 + endif
6580 + ifndef JDIR
6581 +diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
6582 +index d40498f2cb1e..635c09fda1d9 100644
6583 +--- a/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
6584 ++++ b/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
6585 +@@ -188,7 +188,7 @@
6586 + "Counter": "0,1,2,3",
6587 + "EventCode": "0xb",
6588 + "EventName": "UNC_P_FREQ_GE_1200MHZ_CYCLES",
6589 +- "Filter": "filter_band0=1200",
6590 ++ "Filter": "filter_band0=12",
6591 + "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
6592 + "MetricName": "freq_ge_1200mhz_cycles %",
6593 + "PerPkg": "1",
6594 +@@ -199,7 +199,7 @@
6595 + "Counter": "0,1,2,3",
6596 + "EventCode": "0xc",
6597 + "EventName": "UNC_P_FREQ_GE_2000MHZ_CYCLES",
6598 +- "Filter": "filter_band1=2000",
6599 ++ "Filter": "filter_band1=20",
6600 + "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
6601 + "MetricName": "freq_ge_2000mhz_cycles %",
6602 + "PerPkg": "1",
6603 +@@ -210,7 +210,7 @@
6604 + "Counter": "0,1,2,3",
6605 + "EventCode": "0xd",
6606 + "EventName": "UNC_P_FREQ_GE_3000MHZ_CYCLES",
6607 +- "Filter": "filter_band2=3000",
6608 ++ "Filter": "filter_band2=30",
6609 + "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
6610 + "MetricName": "freq_ge_3000mhz_cycles %",
6611 + "PerPkg": "1",
6612 +@@ -221,7 +221,7 @@
6613 + "Counter": "0,1,2,3",
6614 + "EventCode": "0xe",
6615 + "EventName": "UNC_P_FREQ_GE_4000MHZ_CYCLES",
6616 +- "Filter": "filter_band3=4000",
6617 ++ "Filter": "filter_band3=40",
6618 + "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
6619 + "MetricName": "freq_ge_4000mhz_cycles %",
6620 + "PerPkg": "1",
6621 +@@ -232,7 +232,7 @@
6622 + "Counter": "0,1,2,3",
6623 + "EventCode": "0xb",
6624 + "EventName": "UNC_P_FREQ_GE_1200MHZ_TRANSITIONS",
6625 +- "Filter": "edge=1,filter_band0=1200",
6626 ++ "Filter": "edge=1,filter_band0=12",
6627 + "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
6628 + "MetricName": "freq_ge_1200mhz_cycles %",
6629 + "PerPkg": "1",
6630 +@@ -243,7 +243,7 @@
6631 + "Counter": "0,1,2,3",
6632 + "EventCode": "0xc",
6633 + "EventName": "UNC_P_FREQ_GE_2000MHZ_TRANSITIONS",
6634 +- "Filter": "edge=1,filter_band1=2000",
6635 ++ "Filter": "edge=1,filter_band1=20",
6636 + "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
6637 + "MetricName": "freq_ge_2000mhz_cycles %",
6638 + "PerPkg": "1",
6639 +@@ -254,7 +254,7 @@
6640 + "Counter": "0,1,2,3",
6641 + "EventCode": "0xd",
6642 + "EventName": "UNC_P_FREQ_GE_3000MHZ_TRANSITIONS",
6643 +- "Filter": "edge=1,filter_band2=4000",
6644 ++ "Filter": "edge=1,filter_band2=30",
6645 + "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
6646 + "MetricName": "freq_ge_3000mhz_cycles %",
6647 + "PerPkg": "1",
6648 +@@ -265,7 +265,7 @@
6649 + "Counter": "0,1,2,3",
6650 + "EventCode": "0xe",
6651 + "EventName": "UNC_P_FREQ_GE_4000MHZ_TRANSITIONS",
6652 +- "Filter": "edge=1,filter_band3=4000",
6653 ++ "Filter": "edge=1,filter_band3=40",
6654 + "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
6655 + "MetricName": "freq_ge_4000mhz_cycles %",
6656 + "PerPkg": "1",
6657 +diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
6658 +index 16034bfd06dd..8755693d86c6 100644
6659 +--- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
6660 ++++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
6661 +@@ -187,7 +187,7 @@
6662 + "Counter": "0,1,2,3",
6663 + "EventCode": "0xb",
6664 + "EventName": "UNC_P_FREQ_GE_1200MHZ_CYCLES",
6665 +- "Filter": "filter_band0=1200",
6666 ++ "Filter": "filter_band0=12",
6667 + "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
6668 + "MetricName": "freq_ge_1200mhz_cycles %",
6669 + "PerPkg": "1",
6670 +@@ -198,7 +198,7 @@
6671 + "Counter": "0,1,2,3",
6672 + "EventCode": "0xc",
6673 + "EventName": "UNC_P_FREQ_GE_2000MHZ_CYCLES",
6674 +- "Filter": "filter_band1=2000",
6675 ++ "Filter": "filter_band1=20",
6676 + "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
6677 + "MetricName": "freq_ge_2000mhz_cycles %",
6678 + "PerPkg": "1",
6679 +@@ -209,7 +209,7 @@
6680 + "Counter": "0,1,2,3",
6681 + "EventCode": "0xd",
6682 + "EventName": "UNC_P_FREQ_GE_3000MHZ_CYCLES",
6683 +- "Filter": "filter_band2=3000",
6684 ++ "Filter": "filter_band2=30",
6685 + "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
6686 + "MetricName": "freq_ge_3000mhz_cycles %",
6687 + "PerPkg": "1",
6688 +@@ -220,7 +220,7 @@
6689 + "Counter": "0,1,2,3",
6690 + "EventCode": "0xe",
6691 + "EventName": "UNC_P_FREQ_GE_4000MHZ_CYCLES",
6692 +- "Filter": "filter_band3=4000",
6693 ++ "Filter": "filter_band3=40",
6694 + "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
6695 + "MetricName": "freq_ge_4000mhz_cycles %",
6696 + "PerPkg": "1",
6697 +@@ -231,7 +231,7 @@
6698 + "Counter": "0,1,2,3",
6699 + "EventCode": "0xb",
6700 + "EventName": "UNC_P_FREQ_GE_1200MHZ_TRANSITIONS",
6701 +- "Filter": "edge=1,filter_band0=1200",
6702 ++ "Filter": "edge=1,filter_band0=12",
6703 + "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
6704 + "MetricName": "freq_ge_1200mhz_cycles %",
6705 + "PerPkg": "1",
6706 +@@ -242,7 +242,7 @@
6707 + "Counter": "0,1,2,3",
6708 + "EventCode": "0xc",
6709 + "EventName": "UNC_P_FREQ_GE_2000MHZ_TRANSITIONS",
6710 +- "Filter": "edge=1,filter_band1=2000",
6711 ++ "Filter": "edge=1,filter_band1=20",
6712 + "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
6713 + "MetricName": "freq_ge_2000mhz_cycles %",
6714 + "PerPkg": "1",
6715 +@@ -253,7 +253,7 @@
6716 + "Counter": "0,1,2,3",
6717 + "EventCode": "0xd",
6718 + "EventName": "UNC_P_FREQ_GE_3000MHZ_TRANSITIONS",
6719 +- "Filter": "edge=1,filter_band2=4000",
6720 ++ "Filter": "edge=1,filter_band2=30",
6721 + "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
6722 + "MetricName": "freq_ge_3000mhz_cycles %",
6723 + "PerPkg": "1",
6724 +@@ -264,7 +264,7 @@
6725 + "Counter": "0,1,2,3",
6726 + "EventCode": "0xe",
6727 + "EventName": "UNC_P_FREQ_GE_4000MHZ_TRANSITIONS",
6728 +- "Filter": "edge=1,filter_band3=4000",
6729 ++ "Filter": "edge=1,filter_band3=40",
6730 + "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
6731 + "MetricName": "freq_ge_4000mhz_cycles %",
6732 + "PerPkg": "1",
6733 +diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
6734 +index fc690fecbfd6..a19e840db54a 100644
6735 +--- a/tools/perf/util/event.c
6736 ++++ b/tools/perf/util/event.c
6737 +@@ -951,6 +951,7 @@ void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max
6738 + }
6739 +
6740 + *size += sizeof(struct cpu_map_data);
6741 ++ *size = PERF_ALIGN(*size, sizeof(u64));
6742 + return zalloc(*size);
6743 + }
6744 +
6745 +diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
6746 +index d87d458996b7..dceef4725d33 100644
6747 +--- a/tools/perf/util/pmu.c
6748 ++++ b/tools/perf/util/pmu.c
6749 +@@ -754,13 +754,14 @@ static void pmu_format_value(unsigned long *format, __u64 value, __u64 *v,
6750 +
6751 + static __u64 pmu_format_max_value(const unsigned long *format)
6752 + {
6753 +- __u64 w = 0;
6754 +- int fbit;
6755 +-
6756 +- for_each_set_bit(fbit, format, PERF_PMU_FORMAT_BITS)
6757 +- w |= (1ULL << fbit);
6758 ++ int w;
6759 +
6760 +- return w;
6761 ++ w = bitmap_weight(format, PERF_PMU_FORMAT_BITS);
6762 ++ if (!w)
6763 ++ return 0;
6764 ++ if (w < 64)
6765 ++ return (1ULL << w) - 1;
6766 ++ return -1;
6767 + }
6768 +
6769 + /*
6770 +diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c
6771 +index 3d1cf5bf7f18..9005fbe0780e 100644
6772 +--- a/tools/perf/util/strbuf.c
6773 ++++ b/tools/perf/util/strbuf.c
6774 +@@ -98,19 +98,25 @@ static int strbuf_addv(struct strbuf *sb, const char *fmt, va_list ap)
6775 +
6776 + va_copy(ap_saved, ap);
6777 + len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
6778 +- if (len < 0)
6779 ++ if (len < 0) {
6780 ++ va_end(ap_saved);
6781 + return len;
6782 ++ }
6783 + if (len > strbuf_avail(sb)) {
6784 + ret = strbuf_grow(sb, len);
6785 +- if (ret)
6786 ++ if (ret) {
6787 ++ va_end(ap_saved);
6788 + return ret;
6789 ++ }
6790 + len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap_saved);
6791 + va_end(ap_saved);
6792 + if (len > strbuf_avail(sb)) {
6793 + pr_debug("this should not happen, your vsnprintf is broken");
6794 ++ va_end(ap_saved);
6795 + return -EINVAL;
6796 + }
6797 + }
6798 ++ va_end(ap_saved);
6799 + return strbuf_setlen(sb, sb->len + len);
6800 + }
6801 +
6802 +diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
6803 +index 8f3b7ef221f2..71a5b4863707 100644
6804 +--- a/tools/perf/util/trace-event-info.c
6805 ++++ b/tools/perf/util/trace-event-info.c
6806 +@@ -533,12 +533,14 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs,
6807 + "/tmp/perf-XXXXXX");
6808 + if (!mkstemp(tdata->temp_file)) {
6809 + pr_debug("Can't make temp file");
6810 ++ free(tdata);
6811 + return NULL;
6812 + }
6813 +
6814 + temp_fd = open(tdata->temp_file, O_RDWR);
6815 + if (temp_fd < 0) {
6816 + pr_debug("Can't read '%s'", tdata->temp_file);
6817 ++ free(tdata);
6818 + return NULL;
6819 + }
6820 +
6821 +diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
6822 +index 8a9a677f7576..6bfd690d63d9 100644
6823 +--- a/tools/perf/util/trace-event-read.c
6824 ++++ b/tools/perf/util/trace-event-read.c
6825 +@@ -350,9 +350,12 @@ static int read_event_files(struct pevent *pevent)
6826 + for (x=0; x < count; x++) {
6827 + size = read8(pevent);
6828 + ret = read_event_file(pevent, sys, size);
6829 +- if (ret)
6830 ++ if (ret) {
6831 ++ free(sys);
6832 + return ret;
6833 ++ }
6834 + }
6835 ++ free(sys);
6836 + }
6837 + return 0;
6838 + }
6839 +diff --git a/tools/power/cpupower/utils/cpufreq-info.c b/tools/power/cpupower/utils/cpufreq-info.c
6840 +index 3e701f0e9c14..5853faa9daf3 100644
6841 +--- a/tools/power/cpupower/utils/cpufreq-info.c
6842 ++++ b/tools/power/cpupower/utils/cpufreq-info.c
6843 +@@ -202,6 +202,8 @@ static int get_boost_mode(unsigned int cpu)
6844 + printf(_(" Boost States: %d\n"), b_states);
6845 + printf(_(" Total States: %d\n"), pstate_no);
6846 + for (i = 0; i < pstate_no; i++) {
6847 ++ if (!pstates[i])
6848 ++ continue;
6849 + if (i < b_states)
6850 + printf(_(" Pstate-Pb%d: %luMHz (boost state)"
6851 + "\n"), i, pstates[i]);
6852 +diff --git a/tools/power/cpupower/utils/helpers/amd.c b/tools/power/cpupower/utils/helpers/amd.c
6853 +index bb41cdd0df6b..9607ada5b29a 100644
6854 +--- a/tools/power/cpupower/utils/helpers/amd.c
6855 ++++ b/tools/power/cpupower/utils/helpers/amd.c
6856 +@@ -33,7 +33,7 @@ union msr_pstate {
6857 + unsigned vid:8;
6858 + unsigned iddval:8;
6859 + unsigned idddiv:2;
6860 +- unsigned res1:30;
6861 ++ unsigned res1:31;
6862 + unsigned en:1;
6863 + } fam17h_bits;
6864 + unsigned long long val;
6865 +@@ -119,6 +119,11 @@ int decode_pstates(unsigned int cpu, unsigned int cpu_family,
6866 + }
6867 + if (read_msr(cpu, MSR_AMD_PSTATE + i, &pstate.val))
6868 + return -1;
6869 ++ if ((cpu_family == 0x17) && (!pstate.fam17h_bits.en))
6870 ++ continue;
6871 ++ else if (!pstate.bits.en)
6872 ++ continue;
6873 ++
6874 + pstates[i] = get_cof(cpu_family, pstate);
6875 + }
6876 + *no = i;
6877 +diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc
6878 +new file mode 100644
6879 +index 000000000000..88e6c3f43006
6880 +--- /dev/null
6881 ++++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc
6882 +@@ -0,0 +1,80 @@
6883 ++#!/bin/sh
6884 ++# SPDX-License-Identifier: GPL-2.0
6885 ++# description: event trigger - test synthetic_events syntax parser
6886 ++
6887 ++do_reset() {
6888 ++ reset_trigger
6889 ++ echo > set_event
6890 ++ clear_trace
6891 ++}
6892 ++
6893 ++fail() { #msg
6894 ++ do_reset
6895 ++ echo $1
6896 ++ exit_fail
6897 ++}
6898 ++
6899 ++if [ ! -f set_event ]; then
6900 ++ echo "event tracing is not supported"
6901 ++ exit_unsupported
6902 ++fi
6903 ++
6904 ++if [ ! -f synthetic_events ]; then
6905 ++ echo "synthetic event is not supported"
6906 ++ exit_unsupported
6907 ++fi
6908 ++
6909 ++reset_tracer
6910 ++do_reset
6911 ++
6912 ++echo "Test synthetic_events syntax parser"
6913 ++
6914 ++echo > synthetic_events
6915 ++
6916 ++# synthetic event must have a field
6917 ++! echo "myevent" >> synthetic_events
6918 ++echo "myevent u64 var1" >> synthetic_events
6919 ++
6920 ++# synthetic event must be found in synthetic_events
6921 ++grep "myevent[[:space:]]u64 var1" synthetic_events
6922 ++
6923 ++# it is not possible to add same name event
6924 ++! echo "myevent u64 var2" >> synthetic_events
6925 ++
6926 ++# Non-append open will cleanup all events and add new one
6927 ++echo "myevent u64 var2" > synthetic_events
6928 ++
6929 ++# multiple fields with different spaces
6930 ++echo "myevent u64 var1; u64 var2;" > synthetic_events
6931 ++grep "myevent[[:space:]]u64 var1; u64 var2" synthetic_events
6932 ++echo "myevent u64 var1 ; u64 var2 ;" > synthetic_events
6933 ++grep "myevent[[:space:]]u64 var1; u64 var2" synthetic_events
6934 ++echo "myevent u64 var1 ;u64 var2" > synthetic_events
6935 ++grep "myevent[[:space:]]u64 var1; u64 var2" synthetic_events
6936 ++
6937 ++# test field types
6938 ++echo "myevent u32 var" > synthetic_events
6939 ++echo "myevent u16 var" > synthetic_events
6940 ++echo "myevent u8 var" > synthetic_events
6941 ++echo "myevent s64 var" > synthetic_events
6942 ++echo "myevent s32 var" > synthetic_events
6943 ++echo "myevent s16 var" > synthetic_events
6944 ++echo "myevent s8 var" > synthetic_events
6945 ++
6946 ++echo "myevent char var" > synthetic_events
6947 ++echo "myevent int var" > synthetic_events
6948 ++echo "myevent long var" > synthetic_events
6949 ++echo "myevent pid_t var" > synthetic_events
6950 ++
6951 ++echo "myevent unsigned char var" > synthetic_events
6952 ++echo "myevent unsigned int var" > synthetic_events
6953 ++echo "myevent unsigned long var" > synthetic_events
6954 ++grep "myevent[[:space:]]unsigned long var" synthetic_events
6955 ++
6956 ++# test string type
6957 ++echo "myevent char var[10]" > synthetic_events
6958 ++grep "myevent[[:space:]]char\[10\] var" synthetic_events
6959 ++
6960 ++do_reset
6961 ++
6962 ++exit 0
6963 +diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c
6964 +index cad14cd0ea92..b5277106df1f 100644
6965 +--- a/tools/testing/selftests/net/reuseport_bpf.c
6966 ++++ b/tools/testing/selftests/net/reuseport_bpf.c
6967 +@@ -437,14 +437,19 @@ void enable_fastopen(void)
6968 + }
6969 + }
6970 +
6971 +-static struct rlimit rlim_old, rlim_new;
6972 ++static struct rlimit rlim_old;
6973 +
6974 + static __attribute__((constructor)) void main_ctor(void)
6975 + {
6976 + getrlimit(RLIMIT_MEMLOCK, &rlim_old);
6977 +- rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
6978 +- rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
6979 +- setrlimit(RLIMIT_MEMLOCK, &rlim_new);
6980 ++
6981 ++ if (rlim_old.rlim_cur != RLIM_INFINITY) {
6982 ++ struct rlimit rlim_new;
6983 ++
6984 ++ rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
6985 ++ rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
6986 ++ setrlimit(RLIMIT_MEMLOCK, &rlim_new);
6987 ++ }
6988 + }
6989 +
6990 + static __attribute__((destructor)) void main_dtor(void)
6991 +diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
6992 +index 327fa943c7f3..dbdffa2e2c82 100644
6993 +--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
6994 ++++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
6995 +@@ -67,8 +67,8 @@ trans:
6996 + "3: ;"
6997 + : [res] "=r" (result), [texasr] "=r" (texasr)
6998 + : [gpr_1]"i"(GPR_1), [gpr_2]"i"(GPR_2), [gpr_4]"i"(GPR_4),
6999 +- [sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "r" (&a),
7000 +- [flt_2] "r" (&b), [flt_4] "r" (&d)
7001 ++ [sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "b" (&a),
7002 ++ [flt_4] "b" (&d)
7003 + : "memory", "r5", "r6", "r7",
7004 + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
7005 + "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
7006 +diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
7007 +index d5f1d8364571..ed42b8cf6f5b 100644
7008 +--- a/virt/kvm/arm/arm.c
7009 ++++ b/virt/kvm/arm/arm.c
7010 +@@ -1148,8 +1148,6 @@ static void cpu_init_hyp_mode(void *dummy)
7011 +
7012 + __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
7013 + __cpu_init_stage2();
7014 +-
7015 +- kvm_arm_init_debug();
7016 + }
7017 +
7018 + static void cpu_hyp_reset(void)
7019 +@@ -1173,6 +1171,8 @@ static void cpu_hyp_reinit(void)
7020 + cpu_init_hyp_mode(NULL);
7021 + }
7022 +
7023 ++ kvm_arm_init_debug();
7024 ++
7025 + if (vgic_present)
7026 + kvm_vgic_init_cpu_hardware();
7027 + }