
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.2 commit in: /
Date: Sat, 21 Sep 2019 16:23:14
Message-Id: 1569082956.b7a9c69a82eb6aea9059b1ee493d8349aa02eb15.mpagano@gentoo
1 commit: b7a9c69a82eb6aea9059b1ee493d8349aa02eb15
2 Author: Mike Pagano <mpagano@gentoo.org>
3 AuthorDate: Sat Sep 21 16:22:36 2019 +0000
4 Commit: Mike Pagano <mpagano@gentoo.org>
5 CommitDate: Sat Sep 21 16:22:36 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b7a9c69a
7
8 Linux patch 5.2.17
9
10 Signed-off-by: Mike Pagano <mpagano@gentoo.org>
11
12 0000_README | 4 +
13 1016_linux-5.2.17.patch | 4122 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 4126 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index c046e8a..200ad40 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -107,6 +107,10 @@ Patch: 1015_linux-5.2.16.patch
21 From: https://www.kernel.org
22 Desc: Linux 5.2.16
23
24 +Patch: 1016_linux-5.2.17.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 5.2.17
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1016_linux-5.2.17.patch b/1016_linux-5.2.17.patch
33 new file mode 100644
34 index 0000000..8e36dc2
35 --- /dev/null
36 +++ b/1016_linux-5.2.17.patch
37 @@ -0,0 +1,4122 @@
38 +diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
39 +index 1da2f1668f08..845d689e0fd7 100644
40 +--- a/Documentation/filesystems/overlayfs.txt
41 ++++ b/Documentation/filesystems/overlayfs.txt
42 +@@ -302,7 +302,7 @@ beneath or above the path of another overlay lower layer path.
43 +
44 + Using an upper layer path and/or a workdir path that are already used by
45 + another overlay mount is not allowed and may fail with EBUSY. Using
46 +-partially overlapping paths is not allowed but will not fail with EBUSY.
47 ++partially overlapping paths is not allowed and may fail with EBUSY.
48 + If files are accessed from two overlayfs mounts which share or overlap the
49 + upper layer and/or workdir path the behavior of the overlay is undefined,
50 + though it will not result in a crash or deadlock.
51 +diff --git a/Makefile b/Makefile
52 +index 3cec03e93b40..32226d81fbb5 100644
53 +--- a/Makefile
54 ++++ b/Makefile
55 +@@ -1,7 +1,7 @@
56 + # SPDX-License-Identifier: GPL-2.0
57 + VERSION = 5
58 + PATCHLEVEL = 2
59 +-SUBLEVEL = 16
60 ++SUBLEVEL = 17
61 + EXTRAVERSION =
62 + NAME = Bobtail Squid
63 +
64 +diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
65 +index ced1a19d5f89..46849d6ecb3e 100644
66 +--- a/arch/arm/boot/dts/am33xx-l4.dtsi
67 ++++ b/arch/arm/boot/dts/am33xx-l4.dtsi
68 +@@ -185,7 +185,7 @@
69 + uart0: serial@0 {
70 + compatible = "ti,am3352-uart", "ti,omap3-uart";
71 + clock-frequency = <48000000>;
72 +- reg = <0x0 0x2000>;
73 ++ reg = <0x0 0x1000>;
74 + interrupts = <72>;
75 + status = "disabled";
76 + dmas = <&edma 26 0>, <&edma 27 0>;
77 +@@ -934,7 +934,7 @@
78 + uart1: serial@0 {
79 + compatible = "ti,am3352-uart", "ti,omap3-uart";
80 + clock-frequency = <48000000>;
81 +- reg = <0x0 0x2000>;
82 ++ reg = <0x0 0x1000>;
83 + interrupts = <73>;
84 + status = "disabled";
85 + dmas = <&edma 28 0>, <&edma 29 0>;
86 +@@ -966,7 +966,7 @@
87 + uart2: serial@0 {
88 + compatible = "ti,am3352-uart", "ti,omap3-uart";
89 + clock-frequency = <48000000>;
90 +- reg = <0x0 0x2000>;
91 ++ reg = <0x0 0x1000>;
92 + interrupts = <74>;
93 + status = "disabled";
94 + dmas = <&edma 30 0>, <&edma 31 0>;
95 +@@ -1614,7 +1614,7 @@
96 + uart3: serial@0 {
97 + compatible = "ti,am3352-uart", "ti,omap3-uart";
98 + clock-frequency = <48000000>;
99 +- reg = <0x0 0x2000>;
100 ++ reg = <0x0 0x1000>;
101 + interrupts = <44>;
102 + status = "disabled";
103 + };
104 +@@ -1644,7 +1644,7 @@
105 + uart4: serial@0 {
106 + compatible = "ti,am3352-uart", "ti,omap3-uart";
107 + clock-frequency = <48000000>;
108 +- reg = <0x0 0x2000>;
109 ++ reg = <0x0 0x1000>;
110 + interrupts = <45>;
111 + status = "disabled";
112 + };
113 +@@ -1674,7 +1674,7 @@
114 + uart5: serial@0 {
115 + compatible = "ti,am3352-uart", "ti,omap3-uart";
116 + clock-frequency = <48000000>;
117 +- reg = <0x0 0x2000>;
118 ++ reg = <0x0 0x1000>;
119 + interrupts = <46>;
120 + status = "disabled";
121 + };
122 +@@ -1758,6 +1758,8 @@
123 +
124 + target-module@cc000 { /* 0x481cc000, ap 60 46.0 */
125 + compatible = "ti,sysc-omap4", "ti,sysc";
126 ++ reg = <0xcc020 0x4>;
127 ++ reg-names = "rev";
128 + ti,hwmods = "d_can0";
129 + /* Domains (P, C): per_pwrdm, l4ls_clkdm */
130 + clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>,
131 +@@ -1780,6 +1782,8 @@
132 +
133 + target-module@d0000 { /* 0x481d0000, ap 62 42.0 */
134 + compatible = "ti,sysc-omap4", "ti,sysc";
135 ++ reg = <0xd0020 0x4>;
136 ++ reg-names = "rev";
137 + ti,hwmods = "d_can1";
138 + /* Domains (P, C): per_pwrdm, l4ls_clkdm */
139 + clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>,
140 +diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
141 +index e5c2f71a7c77..fb6b8aa12cc5 100644
142 +--- a/arch/arm/boot/dts/am33xx.dtsi
143 ++++ b/arch/arm/boot/dts/am33xx.dtsi
144 +@@ -234,13 +234,33 @@
145 + interrupt-names = "edma3_tcerrint";
146 + };
147 +
148 +- mmc3: mmc@47810000 {
149 +- compatible = "ti,omap4-hsmmc";
150 ++ target-module@47810000 {
151 ++ compatible = "ti,sysc-omap2", "ti,sysc";
152 + ti,hwmods = "mmc3";
153 +- ti,needs-special-reset;
154 +- interrupts = <29>;
155 +- reg = <0x47810000 0x1000>;
156 +- status = "disabled";
157 ++ reg = <0x478102fc 0x4>,
158 ++ <0x47810110 0x4>,
159 ++ <0x47810114 0x4>;
160 ++ reg-names = "rev", "sysc", "syss";
161 ++ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
162 ++ SYSC_OMAP2_ENAWAKEUP |
163 ++ SYSC_OMAP2_SOFTRESET |
164 ++ SYSC_OMAP2_AUTOIDLE)>;
165 ++ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
166 ++ <SYSC_IDLE_NO>,
167 ++ <SYSC_IDLE_SMART>;
168 ++ ti,syss-mask = <1>;
169 ++ clocks = <&l3s_clkctrl AM3_L3S_MMC3_CLKCTRL 0>;
170 ++ clock-names = "fck";
171 ++ #address-cells = <1>;
172 ++ #size-cells = <1>;
173 ++ ranges = <0x0 0x47810000 0x1000>;
174 ++
175 ++ mmc3: mmc@0 {
176 ++ compatible = "ti,omap4-hsmmc";
177 ++ ti,needs-special-reset;
178 ++ interrupts = <29>;
179 ++ reg = <0x0 0x1000>;
180 ++ };
181 + };
182 +
183 + usb: usb@47400000 {
184 +diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
185 +index 55aff4db9c7c..848e2a8884e2 100644
186 +--- a/arch/arm/boot/dts/am4372.dtsi
187 ++++ b/arch/arm/boot/dts/am4372.dtsi
188 +@@ -228,13 +228,33 @@
189 + interrupt-names = "edma3_tcerrint";
190 + };
191 +
192 +- mmc3: mmc@47810000 {
193 +- compatible = "ti,omap4-hsmmc";
194 +- reg = <0x47810000 0x1000>;
195 ++ target-module@47810000 {
196 ++ compatible = "ti,sysc-omap2", "ti,sysc";
197 + ti,hwmods = "mmc3";
198 +- ti,needs-special-reset;
199 +- interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
200 +- status = "disabled";
201 ++ reg = <0x478102fc 0x4>,
202 ++ <0x47810110 0x4>,
203 ++ <0x47810114 0x4>;
204 ++ reg-names = "rev", "sysc", "syss";
205 ++ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
206 ++ SYSC_OMAP2_ENAWAKEUP |
207 ++ SYSC_OMAP2_SOFTRESET |
208 ++ SYSC_OMAP2_AUTOIDLE)>;
209 ++ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
210 ++ <SYSC_IDLE_NO>,
211 ++ <SYSC_IDLE_SMART>;
212 ++ ti,syss-mask = <1>;
213 ++ clocks = <&l3s_clkctrl AM4_L3S_MMC3_CLKCTRL 0>;
214 ++ clock-names = "fck";
215 ++ #address-cells = <1>;
216 ++ #size-cells = <1>;
217 ++ ranges = <0x0 0x47810000 0x1000>;
218 ++
219 ++ mmc3: mmc@0 {
220 ++ compatible = "ti,omap4-hsmmc";
221 ++ ti,needs-special-reset;
222 ++ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
223 ++ reg = <0x0 0x1000>;
224 ++ };
225 + };
226 +
227 + sham: sham@53100000 {
228 +diff --git a/arch/arm/boot/dts/am437x-l4.dtsi b/arch/arm/boot/dts/am437x-l4.dtsi
229 +index 989cb60b9029..04bee4ff9dcb 100644
230 +--- a/arch/arm/boot/dts/am437x-l4.dtsi
231 ++++ b/arch/arm/boot/dts/am437x-l4.dtsi
232 +@@ -1574,6 +1574,8 @@
233 +
234 + target-module@cc000 { /* 0x481cc000, ap 50 46.0 */
235 + compatible = "ti,sysc-omap4", "ti,sysc";
236 ++ reg = <0xcc020 0x4>;
237 ++ reg-names = "rev";
238 + ti,hwmods = "d_can0";
239 + /* Domains (P, C): per_pwrdm, l4ls_clkdm */
240 + clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>;
241 +@@ -1593,6 +1595,8 @@
242 +
243 + target-module@d0000 { /* 0x481d0000, ap 52 3a.0 */
244 + compatible = "ti,sysc-omap4", "ti,sysc";
245 ++ reg = <0xd0020 0x4>;
246 ++ reg-names = "rev";
247 + ti,hwmods = "d_can1";
248 + /* Domains (P, C): per_pwrdm, l4ls_clkdm */
249 + clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>;
250 +diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts
251 +index 1d5e99964bbf..0aaacea1d887 100644
252 +--- a/arch/arm/boot/dts/am571x-idk.dts
253 ++++ b/arch/arm/boot/dts/am571x-idk.dts
254 +@@ -175,14 +175,9 @@
255 + };
256 +
257 + &mmc1 {
258 +- pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
259 ++ pinctrl-names = "default", "hs";
260 + pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
261 + pinctrl-1 = <&mmc1_pins_hs>;
262 +- pinctrl-2 = <&mmc1_pins_sdr12>;
263 +- pinctrl-3 = <&mmc1_pins_sdr25>;
264 +- pinctrl-4 = <&mmc1_pins_sdr50>;
265 +- pinctrl-5 = <&mmc1_pins_ddr50_rev20 &mmc1_iodelay_ddr50_conf>;
266 +- pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
267 + };
268 +
269 + &mmc2 {
270 +diff --git a/arch/arm/boot/dts/am572x-idk.dts b/arch/arm/boot/dts/am572x-idk.dts
271 +index c65d7f6d3b5a..ea1c119feaa5 100644
272 +--- a/arch/arm/boot/dts/am572x-idk.dts
273 ++++ b/arch/arm/boot/dts/am572x-idk.dts
274 +@@ -16,14 +16,9 @@
275 + };
276 +
277 + &mmc1 {
278 +- pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
279 ++ pinctrl-names = "default", "hs";
280 + pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
281 + pinctrl-1 = <&mmc1_pins_hs>;
282 +- pinctrl-2 = <&mmc1_pins_sdr12>;
283 +- pinctrl-3 = <&mmc1_pins_sdr25>;
284 +- pinctrl-4 = <&mmc1_pins_sdr50>;
285 +- pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>;
286 +- pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
287 + };
288 +
289 + &mmc2 {
290 +diff --git a/arch/arm/boot/dts/am574x-idk.dts b/arch/arm/boot/dts/am574x-idk.dts
291 +index dc5141c35610..7935d70874ce 100644
292 +--- a/arch/arm/boot/dts/am574x-idk.dts
293 ++++ b/arch/arm/boot/dts/am574x-idk.dts
294 +@@ -24,14 +24,9 @@
295 + };
296 +
297 + &mmc1 {
298 +- pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
299 ++ pinctrl-names = "default", "hs";
300 + pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
301 + pinctrl-1 = <&mmc1_pins_hs>;
302 +- pinctrl-2 = <&mmc1_pins_default>;
303 +- pinctrl-3 = <&mmc1_pins_hs>;
304 +- pinctrl-4 = <&mmc1_pins_sdr50>;
305 +- pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_conf>;
306 +- pinctrl-6 = <&mmc1_pins_ddr50 &mmc1_iodelay_sdr104_conf>;
307 + };
308 +
309 + &mmc2 {
310 +diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
311 +index d02f5fa61e5f..bc76f1705c0f 100644
312 +--- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
313 ++++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
314 +@@ -379,7 +379,7 @@
315 + };
316 + };
317 +
318 +-&gpio7 {
319 ++&gpio7_target {
320 + ti,no-reset-on-init;
321 + ti,no-idle-on-init;
322 + };
323 +@@ -430,6 +430,7 @@
324 +
325 + bus-width = <4>;
326 + cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */
327 ++ no-1-8-v;
328 + };
329 +
330 + &mmc2 {
331 +diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
332 +index a374b5cd6db0..7b113b52c3fb 100644
333 +--- a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
334 ++++ b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
335 +@@ -16,14 +16,9 @@
336 + };
337 +
338 + &mmc1 {
339 +- pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
340 ++ pinctrl-names = "default", "hs";
341 + pinctrl-0 = <&mmc1_pins_default>;
342 + pinctrl-1 = <&mmc1_pins_hs>;
343 +- pinctrl-2 = <&mmc1_pins_sdr12>;
344 +- pinctrl-3 = <&mmc1_pins_sdr25>;
345 +- pinctrl-4 = <&mmc1_pins_sdr50>;
346 +- pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev11_conf>;
347 +- pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev11_conf>;
348 + vmmc-supply = <&vdd_3v3>;
349 + vqmmc-supply = <&ldo1_reg>;
350 + };
351 +diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
352 +index 4badd2144db9..30c500b15b21 100644
353 +--- a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
354 ++++ b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
355 +@@ -16,14 +16,9 @@
356 + };
357 +
358 + &mmc1 {
359 +- pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
360 ++ pinctrl-names = "default", "hs";
361 + pinctrl-0 = <&mmc1_pins_default>;
362 + pinctrl-1 = <&mmc1_pins_hs>;
363 +- pinctrl-2 = <&mmc1_pins_sdr12>;
364 +- pinctrl-3 = <&mmc1_pins_sdr25>;
365 +- pinctrl-4 = <&mmc1_pins_sdr50>;
366 +- pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>;
367 +- pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
368 + vmmc-supply = <&vdd_3v3>;
369 + vqmmc-supply = <&ldo1_reg>;
370 + };
371 +diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
372 +index 714e971b912a..de7f85efaa51 100644
373 +--- a/arch/arm/boot/dts/dra7-evm.dts
374 ++++ b/arch/arm/boot/dts/dra7-evm.dts
375 +@@ -498,7 +498,7 @@
376 + phy-supply = <&ldousb_reg>;
377 + };
378 +
379 +-&gpio7 {
380 ++&gpio7_target {
381 + ti,no-reset-on-init;
382 + ti,no-idle-on-init;
383 + };
384 +diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
385 +index 23faedec08ab..21e5914fdd62 100644
386 +--- a/arch/arm/boot/dts/dra7-l4.dtsi
387 ++++ b/arch/arm/boot/dts/dra7-l4.dtsi
388 +@@ -1261,7 +1261,7 @@
389 + };
390 + };
391 +
392 +- target-module@51000 { /* 0x48051000, ap 45 2e.0 */
393 ++ gpio7_target: target-module@51000 { /* 0x48051000, ap 45 2e.0 */
394 + compatible = "ti,sysc-omap2", "ti,sysc";
395 + ti,hwmods = "gpio7";
396 + reg = <0x51000 0x4>,
397 +@@ -3025,7 +3025,7 @@
398 +
399 + target-module@80000 { /* 0x48480000, ap 31 16.0 */
400 + compatible = "ti,sysc-omap4", "ti,sysc";
401 +- reg = <0x80000 0x4>;
402 ++ reg = <0x80020 0x4>;
403 + reg-names = "rev";
404 + clocks = <&l4per2_clkctrl DRA7_L4PER2_DCAN2_CLKCTRL 0>;
405 + clock-names = "fck";
406 +@@ -4577,7 +4577,7 @@
407 +
408 + target-module@c000 { /* 0x4ae3c000, ap 30 04.0 */
409 + compatible = "ti,sysc-omap4", "ti,sysc";
410 +- reg = <0xc000 0x4>;
411 ++ reg = <0xc020 0x4>;
412 + reg-names = "rev";
413 + clocks = <&wkupaon_clkctrl DRA7_WKUPAON_DCAN1_CLKCTRL 0>;
414 + clock-names = "fck";
415 +diff --git a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
416 +index 28ebb4eb884a..214b9e6de2c3 100644
417 +--- a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
418 ++++ b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
419 +@@ -32,7 +32,7 @@
420 + *
421 + * Datamanual Revisions:
422 + *
423 +- * AM572x Silicon Revision 2.0: SPRS953B, Revised November 2016
424 ++ * AM572x Silicon Revision 2.0: SPRS953F, Revised May 2019
425 + * AM572x Silicon Revision 1.1: SPRS915R, Revised November 2016
426 + *
427 + */
428 +@@ -229,45 +229,45 @@
429 +
430 + mmc3_pins_default: mmc3_pins_default {
431 + pinctrl-single,pins = <
432 +- DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
433 +- DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
434 +- DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
435 +- DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
436 +- DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
437 +- DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
438 ++ DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
439 ++ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
440 ++ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
441 ++ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
442 ++ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
443 ++ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
444 + >;
445 + };
446 +
447 + mmc3_pins_hs: mmc3_pins_hs {
448 + pinctrl-single,pins = <
449 +- DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
450 +- DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
451 +- DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
452 +- DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
453 +- DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
454 +- DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
455 ++ DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
456 ++ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
457 ++ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
458 ++ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
459 ++ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
460 ++ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
461 + >;
462 + };
463 +
464 + mmc3_pins_sdr12: mmc3_pins_sdr12 {
465 + pinctrl-single,pins = <
466 +- DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
467 +- DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
468 +- DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
469 +- DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
470 +- DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
471 +- DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
472 ++ DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
473 ++ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
474 ++ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
475 ++ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
476 ++ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
477 ++ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
478 + >;
479 + };
480 +
481 + mmc3_pins_sdr25: mmc3_pins_sdr25 {
482 + pinctrl-single,pins = <
483 +- DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
484 +- DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
485 +- DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
486 +- DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
487 +- DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
488 +- DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
489 ++ DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
490 ++ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
491 ++ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
492 ++ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
493 ++ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
494 ++ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
495 + >;
496 + };
497 +
498 +diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
499 +index 81159af44862..14a6c3eb3298 100644
500 +--- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S
501 ++++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
502 +@@ -126,6 +126,8 @@ restart:
503 + orr r11, r11, r13 @ mask all requested interrupts
504 + str r11, [r12, #OMAP1510_GPIO_INT_MASK]
505 +
506 ++ str r13, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack all requested interrupts
507 ++
508 + ands r10, r13, #KEYBRD_CLK_MASK @ extract keyboard status - set?
509 + beq hksw @ no - try next source
510 +
511 +@@ -133,7 +135,6 @@ restart:
512 + @@@@@@@@@@@@@@@@@@@@@@
513 + @ Keyboard clock FIQ mode interrupt handler
514 + @ r10 now contains KEYBRD_CLK_MASK, use it
515 +- str r10, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack the interrupt
516 + bic r11, r11, r10 @ unmask it
517 + str r11, [r12, #OMAP1510_GPIO_INT_MASK]
518 +
519 +diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
520 +index 0af2bf6f9933..fd87382a3f18 100644
521 +--- a/arch/arm/mach-omap1/ams-delta-fiq.c
522 ++++ b/arch/arm/mach-omap1/ams-delta-fiq.c
523 +@@ -69,9 +69,7 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id)
524 + * interrupts default to since commit 80ac93c27441
525 + * requires interrupt already acked and unmasked.
526 + */
527 +- if (irq_chip->irq_ack)
528 +- irq_chip->irq_ack(d);
529 +- if (irq_chip->irq_unmask)
530 ++ if (!WARN_ON_ONCE(!irq_chip->irq_unmask))
531 + irq_chip->irq_unmask(d);
532 + }
533 + for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++)
534 +diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
535 +index f9c02f9f1c92..5c3845730dbf 100644
536 +--- a/arch/arm/mach-omap2/omap4-common.c
537 ++++ b/arch/arm/mach-omap2/omap4-common.c
538 +@@ -127,6 +127,9 @@ static int __init omap4_sram_init(void)
539 + struct device_node *np;
540 + struct gen_pool *sram_pool;
541 +
542 ++ if (!soc_is_omap44xx() && !soc_is_omap54xx())
543 ++ return 0;
544 ++
545 + np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
546 + if (!np)
547 + pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
548 +diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
549 +index 4a5b4aee6615..1ec21e9ba1e9 100644
550 +--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
551 ++++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
552 +@@ -379,7 +379,8 @@ static struct omap_hwmod dra7xx_dcan2_hwmod = {
553 + static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = {
554 + .rev_offs = 0x0,
555 + .sysc_offs = 0x4,
556 +- .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET,
557 ++ .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
558 ++ SYSC_HAS_RESET_STATUS,
559 + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
560 + .sysc_fields = &omap_hwmod_sysc_type2,
561 + };
562 +diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
563 +index 749a5a6f6143..98e17388a563 100644
564 +--- a/arch/arm/mm/init.c
565 ++++ b/arch/arm/mm/init.c
566 +@@ -174,6 +174,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
567 + #ifdef CONFIG_HAVE_ARCH_PFN_VALID
568 + int pfn_valid(unsigned long pfn)
569 + {
570 ++ phys_addr_t addr = __pfn_to_phys(pfn);
571 ++
572 ++ if (__phys_to_pfn(addr) != pfn)
573 ++ return 0;
574 ++
575 + return memblock_is_map_memory(__pfn_to_phys(pfn));
576 + }
577 + EXPORT_SYMBOL(pfn_valid);
578 +@@ -613,7 +618,8 @@ static void update_sections_early(struct section_perm perms[], int n)
579 + if (t->flags & PF_KTHREAD)
580 + continue;
581 + for_each_thread(t, s)
582 +- set_section_perms(perms, n, true, s->mm);
583 ++ if (s->mm)
584 ++ set_section_perms(perms, n, true, s->mm);
585 + }
586 + set_section_perms(perms, n, true, current->active_mm);
587 + set_section_perms(perms, n, true, &init_mm);
588 +diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
589 +index 9f72396ba710..4c92c197aeb8 100644
590 +--- a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
591 ++++ b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
592 +@@ -591,6 +591,7 @@
593 + clocks = <&clkc CLKID_USB1_DDR_BRIDGE>;
594 + clock-names = "ddr";
595 + phys = <&usb2_phy1>;
596 ++ phy-names = "usb2-phy";
597 + dr_mode = "peripheral";
598 + g-rx-fifo-size = <192>;
599 + g-np-tx-fifo-size = <128>;
600 +diff --git a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
601 +index a7dc11e36fd9..071f66d8719e 100644
602 +--- a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
603 ++++ b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
604 +@@ -97,7 +97,7 @@
605 + reg = <0x0 0x48000000 0x0 0x18000000>;
606 + };
607 +
608 +- reg_1p8v: regulator0 {
609 ++ reg_1p8v: regulator-1p8v {
610 + compatible = "regulator-fixed";
611 + regulator-name = "fixed-1.8V";
612 + regulator-min-microvolt = <1800000>;
613 +@@ -106,7 +106,7 @@
614 + regulator-always-on;
615 + };
616 +
617 +- reg_3p3v: regulator1 {
618 ++ reg_3p3v: regulator-3p3v {
619 + compatible = "regulator-fixed";
620 + regulator-name = "fixed-3.3V";
621 + regulator-min-microvolt = <3300000>;
622 +@@ -115,7 +115,7 @@
623 + regulator-always-on;
624 + };
625 +
626 +- reg_12p0v: regulator1 {
627 ++ reg_12p0v: regulator-12p0v {
628 + compatible = "regulator-fixed";
629 + regulator-name = "D12.0V";
630 + regulator-min-microvolt = <12000000>;
631 +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
632 +index b9574d850f14..4e07aa514f60 100644
633 +--- a/arch/arm64/include/asm/pgtable.h
634 ++++ b/arch/arm64/include/asm/pgtable.h
635 +@@ -214,8 +214,10 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
636 + * Only if the new pte is valid and kernel, otherwise TLB maintenance
637 + * or update_mmu_cache() have the necessary barriers.
638 + */
639 +- if (pte_valid_not_user(pte))
640 ++ if (pte_valid_not_user(pte)) {
641 + dsb(ishst);
642 ++ isb();
643 ++ }
644 + }
645 +
646 + extern void __sync_icache_dcache(pte_t pteval);
647 +@@ -453,8 +455,10 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
648 +
649 + WRITE_ONCE(*pmdp, pmd);
650 +
651 +- if (pmd_valid(pmd))
652 ++ if (pmd_valid(pmd)) {
653 + dsb(ishst);
654 ++ isb();
655 ++ }
656 + }
657 +
658 + static inline void pmd_clear(pmd_t *pmdp)
659 +@@ -512,8 +516,10 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
660 +
661 + WRITE_ONCE(*pudp, pud);
662 +
663 +- if (pud_valid(pud))
664 ++ if (pud_valid(pud)) {
665 + dsb(ishst);
666 ++ isb();
667 ++ }
668 + }
669 +
670 + static inline void pud_clear(pud_t *pudp)
671 +diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
672 +index 273ae66a9a45..8deb432c2975 100644
673 +--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
674 ++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
675 +@@ -515,14 +515,6 @@ void __init radix__early_init_devtree(void)
676 + mmu_psize_defs[MMU_PAGE_64K].shift = 16;
677 + mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
678 + found:
679 +-#ifdef CONFIG_SPARSEMEM_VMEMMAP
680 +- if (mmu_psize_defs[MMU_PAGE_2M].shift) {
681 +- /*
682 +- * map vmemmap using 2M if available
683 +- */
684 +- mmu_vmemmap_psize = MMU_PAGE_2M;
685 +- }
686 +-#endif /* CONFIG_SPARSEMEM_VMEMMAP */
687 + return;
688 + }
689 +
690 +@@ -587,7 +579,13 @@ void __init radix__early_init_mmu(void)
691 +
692 + #ifdef CONFIG_SPARSEMEM_VMEMMAP
693 + /* vmemmap mapping */
694 +- mmu_vmemmap_psize = mmu_virtual_psize;
695 ++ if (mmu_psize_defs[MMU_PAGE_2M].shift) {
696 ++ /*
697 ++ * map vmemmap using 2M if available
698 ++ */
699 ++ mmu_vmemmap_psize = MMU_PAGE_2M;
700 ++ } else
701 ++ mmu_vmemmap_psize = mmu_virtual_psize;
702 + #endif
703 + /*
704 + * initialize page table size
705 +diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
706 +index c207f6634b91..15b3edaabc28 100644
707 +--- a/arch/riscv/include/asm/fixmap.h
708 ++++ b/arch/riscv/include/asm/fixmap.h
709 +@@ -25,10 +25,6 @@ enum fixed_addresses {
710 + __end_of_fixed_addresses
711 + };
712 +
713 +-#define FIXADDR_SIZE (__end_of_fixed_addresses * PAGE_SIZE)
714 +-#define FIXADDR_TOP (VMALLOC_START)
715 +-#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
716 +-
717 + #define FIXMAP_PAGE_IO PAGE_KERNEL
718 +
719 + #define __early_set_fixmap __set_fixmap
720 +diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
721 +index f7c3f7de15f2..e6faa469c133 100644
722 +--- a/arch/riscv/include/asm/pgtable.h
723 ++++ b/arch/riscv/include/asm/pgtable.h
724 +@@ -408,14 +408,22 @@ static inline void pgtable_cache_init(void)
725 + #define VMALLOC_END (PAGE_OFFSET - 1)
726 + #define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
727 +
728 ++#define FIXADDR_TOP VMALLOC_START
729 ++#ifdef CONFIG_64BIT
730 ++#define FIXADDR_SIZE PMD_SIZE
731 ++#else
732 ++#define FIXADDR_SIZE PGDIR_SIZE
733 ++#endif
734 ++#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
735 ++
736 + /*
737 +- * Task size is 0x40000000000 for RV64 or 0xb800000 for RV32.
738 ++ * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
739 + * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
740 + */
741 + #ifdef CONFIG_64BIT
742 + #define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
743 + #else
744 +-#define TASK_SIZE VMALLOC_START
745 ++#define TASK_SIZE FIXADDR_START
746 + #endif
747 +
748 + #include <asm-generic/pgtable.h>
749 +diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
750 +index 5e7c63033159..fd9844f947f7 100644
751 +--- a/arch/s390/net/bpf_jit_comp.c
752 ++++ b/arch/s390/net/bpf_jit_comp.c
753 +@@ -853,7 +853,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
754 + break;
755 + case BPF_ALU64 | BPF_NEG: /* dst = -dst */
756 + /* lcgr %dst,%dst */
757 +- EMIT4(0xb9130000, dst_reg, dst_reg);
758 ++ EMIT4(0xb9030000, dst_reg, dst_reg);
759 + break;
760 + /*
761 + * BPF_FROM_BE/LE
762 +@@ -1027,8 +1027,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
763 + /* llgf %w1,map.max_entries(%b2) */
764 + EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
765 + offsetof(struct bpf_array, map.max_entries));
766 +- /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
767 +- EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
768 ++ /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
769 ++ EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
770 + REG_W1, 0, 0xa);
771 +
772 + /*
773 +@@ -1054,8 +1054,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
774 + * goto out;
775 + */
776 +
777 +- /* sllg %r1,%b3,3: %r1 = index * 8 */
778 +- EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
779 ++ /* llgfr %r1,%b3: %r1 = (u32) index */
780 ++ EMIT4(0xb9160000, REG_1, BPF_REG_3);
781 ++ /* sllg %r1,%r1,3: %r1 *= 8 */
782 ++ EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
783 + /* lg %r1,prog(%b2,%r1) */
784 + EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
785 + REG_1, offsetof(struct bpf_array, ptrs));
786 +diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
787 +index 62f317c9113a..5b35b7ea5d72 100644
788 +--- a/arch/x86/events/amd/ibs.c
789 ++++ b/arch/x86/events/amd/ibs.c
790 +@@ -661,10 +661,17 @@ fail:
791 +
792 + throttle = perf_event_overflow(event, &data, &regs);
793 + out:
794 +- if (throttle)
795 ++ if (throttle) {
796 + perf_ibs_stop(event, 0);
797 +- else
798 +- perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
799 ++ } else {
800 ++ period >>= 4;
801 ++
802 ++ if ((ibs_caps & IBS_CAPS_RDWROPCNT) &&
803 ++ (*config & IBS_OP_CNT_CTL))
804 ++ period |= *config & IBS_OP_CUR_CNT_RAND;
805 ++
806 ++ perf_ibs_enable_event(perf_ibs, hwc, period);
807 ++ }
808 +
809 + perf_event_update_userpage(event);
810 +
811 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
812 +index 6179be624f35..2369ea1a1db7 100644
813 +--- a/arch/x86/events/intel/core.c
814 ++++ b/arch/x86/events/intel/core.c
815 +@@ -3572,6 +3572,11 @@ static u64 bdw_limit_period(struct perf_event *event, u64 left)
816 + return left;
817 + }
818 +
819 ++static u64 nhm_limit_period(struct perf_event *event, u64 left)
820 ++{
821 ++ return max(left, 32ULL);
822 ++}
823 ++
824 + PMU_FORMAT_ATTR(event, "config:0-7" );
825 + PMU_FORMAT_ATTR(umask, "config:8-15" );
826 + PMU_FORMAT_ATTR(edge, "config:18" );
827 +@@ -4550,6 +4555,7 @@ __init int intel_pmu_init(void)
828 + x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
829 + x86_pmu.enable_all = intel_pmu_nhm_enable_all;
830 + x86_pmu.extra_regs = intel_nehalem_extra_regs;
831 ++ x86_pmu.limit_period = nhm_limit_period;
832 +
833 + mem_attr = nhm_mem_events_attrs;
834 +
835 +diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
836 +index e65d7fe6489f..5208ba49c89a 100644
837 +--- a/arch/x86/hyperv/mmu.c
838 ++++ b/arch/x86/hyperv/mmu.c
839 +@@ -37,12 +37,14 @@ static inline int fill_gva_list(u64 gva_list[], int offset,
840 + * Lower 12 bits encode the number of additional
841 + * pages to flush (in addition to the 'cur' page).
842 + */
843 +- if (diff >= HV_TLB_FLUSH_UNIT)
844 ++ if (diff >= HV_TLB_FLUSH_UNIT) {
845 + gva_list[gva_n] |= ~PAGE_MASK;
846 +- else if (diff)
847 ++ cur += HV_TLB_FLUSH_UNIT;
848 ++ } else if (diff) {
849 + gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
850 ++ cur = end;
851 ++ }
852 +
853 +- cur += HV_TLB_FLUSH_UNIT;
854 + gva_n++;
855 +
856 + } while (cur < end);
857 +diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
858 +index 1392d5e6e8d6..ee26e9215f18 100644
859 +--- a/arch/x86/include/asm/perf_event.h
860 ++++ b/arch/x86/include/asm/perf_event.h
861 +@@ -252,16 +252,20 @@ struct pebs_lbr {
862 + #define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
863 + #define IBSCTL_LVT_OFFSET_MASK 0x0F
864 +
865 +-/* ibs fetch bits/masks */
866 ++/* IBS fetch bits/masks */
867 + #define IBS_FETCH_RAND_EN (1ULL<<57)
868 + #define IBS_FETCH_VAL (1ULL<<49)
869 + #define IBS_FETCH_ENABLE (1ULL<<48)
870 + #define IBS_FETCH_CNT 0xFFFF0000ULL
871 + #define IBS_FETCH_MAX_CNT 0x0000FFFFULL
872 +
873 +-/* ibs op bits/masks */
874 +-/* lower 4 bits of the current count are ignored: */
875 +-#define IBS_OP_CUR_CNT (0xFFFF0ULL<<32)
876 ++/*
877 ++ * IBS op bits/masks
878 ++ * The lower 7 bits of the current count are random bits
879 ++ * preloaded by hardware and ignored in software
880 ++ */
881 ++#define IBS_OP_CUR_CNT (0xFFF80ULL<<32)
882 ++#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32)
883 + #define IBS_OP_CNT_CTL (1ULL<<19)
884 + #define IBS_OP_VAL (1ULL<<18)
885 + #define IBS_OP_ENABLE (1ULL<<17)
886 +diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
887 +index c82abd6e4ca3..869794bd0fd9 100644
888 +--- a/arch/x86/include/asm/uaccess.h
889 ++++ b/arch/x86/include/asm/uaccess.h
890 +@@ -442,8 +442,10 @@ __pu_label: \
891 + ({ \
892 + int __gu_err; \
893 + __inttype(*(ptr)) __gu_val; \
894 ++ __typeof__(ptr) __gu_ptr = (ptr); \
895 ++ __typeof__(size) __gu_size = (size); \
896 + __uaccess_begin_nospec(); \
897 +- __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
898 ++ __get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \
899 + __uaccess_end(); \
900 + (x) = (__force __typeof__(*(ptr)))__gu_val; \
901 + __builtin_expect(__gu_err, 0); \
902 +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
903 +index c9fec0657eea..e8c6466ef65e 100644
904 +--- a/arch/x86/kernel/apic/io_apic.c
905 ++++ b/arch/x86/kernel/apic/io_apic.c
906 +@@ -2434,7 +2434,13 @@ unsigned int arch_dynirq_lower_bound(unsigned int from)
907 + * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
908 + * gsi_top if ioapic_dynirq_base hasn't been initialized yet.
909 + */
910 +- return ioapic_initialized ? ioapic_dynirq_base : gsi_top;
911 ++ if (!ioapic_initialized)
912 ++ return gsi_top;
913 ++ /*
914 ++ * For DT enabled machines ioapic_dynirq_base is irrelevant and not
915 ++ * updated. So simply return @from if ioapic_dynirq_base == 0.
916 ++ */
917 ++ return ioapic_dynirq_base ? : from;
918 + }
919 +
920 + #ifdef CONFIG_X86_32
921 +diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
922 +index 2e2efa577437..8c37294f1d1e 100644
923 +--- a/drivers/atm/Kconfig
924 ++++ b/drivers/atm/Kconfig
925 +@@ -200,7 +200,7 @@ config ATM_NICSTAR_USE_SUNI
926 + make the card work).
927 +
928 + config ATM_NICSTAR_USE_IDT77105
929 +- bool "Use IDT77015 PHY driver (25Mbps)"
930 ++ bool "Use IDT77105 PHY driver (25Mbps)"
931 + depends on ATM_NICSTAR
932 + help
933 + Support for the PHYsical layer chip in ForeRunner LE25 cards. In
934 +diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
935 +index fee57f7f3821..81ac7805397d 100644
936 +--- a/drivers/block/floppy.c
937 ++++ b/drivers/block/floppy.c
938 +@@ -3780,7 +3780,7 @@ static int compat_getdrvprm(int drive,
939 + v.native_format = UDP->native_format;
940 + mutex_unlock(&floppy_mutex);
941 +
942 +- if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
943 ++ if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
944 + return -EFAULT;
945 + return 0;
946 + }
947 +@@ -3816,7 +3816,7 @@ static int compat_getdrvstat(int drive, bool poll,
948 + v.bufblocks = UDRS->bufblocks;
949 + mutex_unlock(&floppy_mutex);
950 +
951 +- if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
952 ++ if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
953 + return -EFAULT;
954 + return 0;
955 + Eintr:
956 +diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
957 +index b72741668c92..0d122440d111 100644
958 +--- a/drivers/bus/ti-sysc.c
959 ++++ b/drivers/bus/ti-sysc.c
960 +@@ -853,7 +853,7 @@ static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
961 + *best_mode = SYSC_IDLE_SMART_WKUP;
962 + else if (idlemodes & BIT(SYSC_IDLE_SMART))
963 + *best_mode = SYSC_IDLE_SMART;
964 +- else if (idlemodes & SYSC_IDLE_FORCE)
965 ++ else if (idlemodes & BIT(SYSC_IDLE_FORCE))
966 + *best_mode = SYSC_IDLE_FORCE;
967 + else
968 + return -EINVAL;
969 +@@ -1127,7 +1127,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
970 + SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0),
971 + SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
972 + 0xffff00f0, 0),
973 +- SYSC_QUIRK("dcan", 0, 0, -1, -1, 0xffffffff, 0xffffffff, 0),
974 ++ SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0xa3170504, 0xffffffff, 0),
975 ++ SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0),
976 + SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0),
977 + SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
978 + SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
979 +@@ -1388,10 +1389,7 @@ static int sysc_init_sysc_mask(struct sysc *ddata)
980 + if (error)
981 + return 0;
982 +
983 +- if (val)
984 +- ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
985 +- else
986 +- ddata->cfg.sysc_val = ddata->cap->sysc_mask;
987 ++ ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
988 +
989 + return 0;
990 + }
991 +@@ -2081,27 +2079,27 @@ static int sysc_probe(struct platform_device *pdev)
992 +
993 + error = sysc_init_dts_quirks(ddata);
994 + if (error)
995 +- goto unprepare;
996 ++ return error;
997 +
998 + error = sysc_map_and_check_registers(ddata);
999 + if (error)
1000 +- goto unprepare;
1001 ++ return error;
1002 +
1003 + error = sysc_init_sysc_mask(ddata);
1004 + if (error)
1005 +- goto unprepare;
1006 ++ return error;
1007 +
1008 + error = sysc_init_idlemodes(ddata);
1009 + if (error)
1010 +- goto unprepare;
1011 ++ return error;
1012 +
1013 + error = sysc_init_syss_mask(ddata);
1014 + if (error)
1015 +- goto unprepare;
1016 ++ return error;
1017 +
1018 + error = sysc_init_pdata(ddata);
1019 + if (error)
1020 +- goto unprepare;
1021 ++ return error;
1022 +
1023 + sysc_init_early_quirks(ddata);
1024 +
1025 +@@ -2111,7 +2109,7 @@ static int sysc_probe(struct platform_device *pdev)
1026 +
1027 + error = sysc_init_resets(ddata);
1028 + if (error)
1029 +- return error;
1030 ++ goto unprepare;
1031 +
1032 + error = sysc_init_module(ddata);
1033 + if (error)
1034 +diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
1035 +index 54de669c38b8..f1d89bdebdda 100644
1036 +--- a/drivers/dma/sh/rcar-dmac.c
1037 ++++ b/drivers/dma/sh/rcar-dmac.c
1038 +@@ -192,6 +192,7 @@ struct rcar_dmac_chan {
1039 + * @iomem: remapped I/O memory base
1040 + * @n_channels: number of available channels
1041 + * @channels: array of DMAC channels
1042 ++ * @channels_mask: bitfield of which DMA channels are managed by this driver
1043 + * @modules: bitmask of client modules in use
1044 + */
1045 + struct rcar_dmac {
1046 +@@ -202,6 +203,7 @@ struct rcar_dmac {
1047 +
1048 + unsigned int n_channels;
1049 + struct rcar_dmac_chan *channels;
1050 ++ unsigned int channels_mask;
1051 +
1052 + DECLARE_BITMAP(modules, 256);
1053 + };
1054 +@@ -438,7 +440,7 @@ static int rcar_dmac_init(struct rcar_dmac *dmac)
1055 + u16 dmaor;
1056 +
1057 + /* Clear all channels and enable the DMAC globally. */
1058 +- rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0));
1059 ++ rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
1060 + rcar_dmac_write(dmac, RCAR_DMAOR,
1061 + RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
1062 +
1063 +@@ -814,6 +816,9 @@ static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
1064 + for (i = 0; i < dmac->n_channels; ++i) {
1065 + struct rcar_dmac_chan *chan = &dmac->channels[i];
1066 +
1067 ++ if (!(dmac->channels_mask & BIT(i)))
1068 ++ continue;
1069 ++
1070 + /* Stop and reinitialize the channel. */
1071 + spin_lock_irq(&chan->lock);
1072 + rcar_dmac_chan_halt(chan);
1073 +@@ -1776,6 +1781,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
1074 + return 0;
1075 + }
1076 +
1077 ++#define RCAR_DMAC_MAX_CHANNELS 32
1078 ++
1079 + static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
1080 + {
1081 + struct device_node *np = dev->of_node;
1082 +@@ -1787,12 +1794,16 @@ static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
1083 + return ret;
1084 + }
1085 +
1086 +- if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
1087 ++ /* The hardware and driver don't support more than 32 bits in CHCLR */
1088 ++ if (dmac->n_channels <= 0 ||
1089 ++ dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) {
1090 + dev_err(dev, "invalid number of channels %u\n",
1091 + dmac->n_channels);
1092 + return -EINVAL;
1093 + }
1094 +
1095 ++ dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0);
1096 ++
1097 + return 0;
1098 + }
1099 +
1100 +@@ -1802,7 +1813,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
1101 + DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
1102 + DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
1103 + DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
1104 +- unsigned int channels_offset = 0;
1105 + struct dma_device *engine;
1106 + struct rcar_dmac *dmac;
1107 + struct resource *mem;
1108 +@@ -1831,10 +1841,8 @@ static int rcar_dmac_probe(struct platform_device *pdev)
1109 + * level we can't disable it selectively, so ignore channel 0 for now if
1110 + * the device is part of an IOMMU group.
1111 + */
1112 +- if (device_iommu_mapped(&pdev->dev)) {
1113 +- dmac->n_channels--;
1114 +- channels_offset = 1;
1115 +- }
1116 ++ if (device_iommu_mapped(&pdev->dev))
1117 ++ dmac->channels_mask &= ~BIT(0);
1118 +
1119 + dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
1120 + sizeof(*dmac->channels), GFP_KERNEL);
1121 +@@ -1892,8 +1900,10 @@ static int rcar_dmac_probe(struct platform_device *pdev)
1122 + INIT_LIST_HEAD(&engine->channels);
1123 +
1124 + for (i = 0; i < dmac->n_channels; ++i) {
1125 +- ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
1126 +- i + channels_offset);
1127 ++ if (!(dmac->channels_mask & BIT(i)))
1128 ++ continue;
1129 ++
1130 ++ ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i);
1131 + if (ret < 0)
1132 + goto error;
1133 + }
1134 +diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
1135 +index baac476c8622..525dc7338fe3 100644
1136 +--- a/drivers/dma/sprd-dma.c
1137 ++++ b/drivers/dma/sprd-dma.c
1138 +@@ -908,6 +908,7 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1139 + struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
1140 + struct dma_slave_config *slave_cfg = &schan->slave_cfg;
1141 + dma_addr_t src = 0, dst = 0;
1142 ++ dma_addr_t start_src = 0, start_dst = 0;
1143 + struct sprd_dma_desc *sdesc;
1144 + struct scatterlist *sg;
1145 + u32 len = 0;
1146 +@@ -954,6 +955,11 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1147 + dst = sg_dma_address(sg);
1148 + }
1149 +
1150 ++ if (!i) {
1151 ++ start_src = src;
1152 ++ start_dst = dst;
1153 ++ }
1154 ++
1155 + /*
1156 + * The link-list mode needs at least 2 link-list
1157 + * configurations. If there is only one sg, it doesn't
1158 +@@ -970,8 +976,8 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1159 + }
1160 + }
1161 +
1162 +- ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
1163 +- dir, flags, slave_cfg);
1164 ++ ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, start_src,
1165 ++ start_dst, len, dir, flags, slave_cfg);
1166 + if (ret) {
1167 + kfree(sdesc);
1168 + return NULL;
1169 +diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c
1170 +index ad2f0a4cd6a4..f255056696ee 100644
1171 +--- a/drivers/dma/ti/dma-crossbar.c
1172 ++++ b/drivers/dma/ti/dma-crossbar.c
1173 +@@ -391,8 +391,10 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
1174 +
1175 + ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
1176 + nelm * 2);
1177 +- if (ret)
1178 ++ if (ret) {
1179 ++ kfree(rsv_events);
1180 + return ret;
1181 ++ }
1182 +
1183 + for (i = 0; i < nelm; i++) {
1184 + ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
1185 +diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
1186 +index ba27802efcd0..d07c0d5de7a2 100644
1187 +--- a/drivers/dma/ti/omap-dma.c
1188 ++++ b/drivers/dma/ti/omap-dma.c
1189 +@@ -1540,8 +1540,10 @@ static int omap_dma_probe(struct platform_device *pdev)
1190 +
1191 + rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
1192 + IRQF_SHARED, "omap-dma-engine", od);
1193 +- if (rc)
1194 ++ if (rc) {
1195 ++ omap_dma_free(od);
1196 + return rc;
1197 ++ }
1198 + }
1199 +
1200 + if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
1201 +diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
1202 +index fd5212c395c0..34d48618f3fc 100644
1203 +--- a/drivers/firmware/google/vpd.c
1204 ++++ b/drivers/firmware/google/vpd.c
1205 +@@ -92,8 +92,8 @@ static int vpd_section_check_key_name(const u8 *key, s32 key_len)
1206 + return VPD_OK;
1207 + }
1208 +
1209 +-static int vpd_section_attrib_add(const u8 *key, s32 key_len,
1210 +- const u8 *value, s32 value_len,
1211 ++static int vpd_section_attrib_add(const u8 *key, u32 key_len,
1212 ++ const u8 *value, u32 value_len,
1213 + void *arg)
1214 + {
1215 + int ret;
1216 +diff --git a/drivers/firmware/google/vpd_decode.c b/drivers/firmware/google/vpd_decode.c
1217 +index c62fa7063a7c..584d0d56491f 100644
1218 +--- a/drivers/firmware/google/vpd_decode.c
1219 ++++ b/drivers/firmware/google/vpd_decode.c
1220 +@@ -11,8 +11,8 @@
1221 +
1222 + #include "vpd_decode.h"
1223 +
1224 +-static int vpd_decode_len(const s32 max_len, const u8 *in,
1225 +- s32 *length, s32 *decoded_len)
1226 ++static int vpd_decode_len(const u32 max_len, const u8 *in,
1227 ++ u32 *length, u32 *decoded_len)
1228 + {
1229 + u8 more;
1230 + int i = 0;
1231 +@@ -32,18 +32,39 @@ static int vpd_decode_len(const s32 max_len, const u8 *in,
1232 + } while (more);
1233 +
1234 + *decoded_len = i;
1235 ++ return VPD_OK;
1236 ++}
1237 ++
1238 ++static int vpd_decode_entry(const u32 max_len, const u8 *input_buf,
1239 ++ u32 *_consumed, const u8 **entry, u32 *entry_len)
1240 ++{
1241 ++ u32 decoded_len;
1242 ++ u32 consumed = *_consumed;
1243 ++
1244 ++ if (vpd_decode_len(max_len - consumed, &input_buf[consumed],
1245 ++ entry_len, &decoded_len) != VPD_OK)
1246 ++ return VPD_FAIL;
1247 ++ if (max_len - consumed < decoded_len)
1248 ++ return VPD_FAIL;
1249 ++
1250 ++ consumed += decoded_len;
1251 ++ *entry = input_buf + consumed;
1252 ++
1253 ++ /* entry_len is untrusted data and must be checked again. */
1254 ++ if (max_len - consumed < *entry_len)
1255 ++ return VPD_FAIL;
1256 +
1257 ++ consumed += decoded_len;
1258 ++ *_consumed = consumed;
1259 + return VPD_OK;
1260 + }
1261 +
1262 +-int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
1263 ++int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
1264 + vpd_decode_callback callback, void *callback_arg)
1265 + {
1266 + int type;
1267 +- int res;
1268 +- s32 key_len;
1269 +- s32 value_len;
1270 +- s32 decoded_len;
1271 ++ u32 key_len;
1272 ++ u32 value_len;
1273 + const u8 *key;
1274 + const u8 *value;
1275 +
1276 +@@ -58,26 +79,14 @@ int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
1277 + case VPD_TYPE_STRING:
1278 + (*consumed)++;
1279 +
1280 +- /* key */
1281 +- res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
1282 +- &key_len, &decoded_len);
1283 +- if (res != VPD_OK || *consumed + decoded_len >= max_len)
1284 ++ if (vpd_decode_entry(max_len, input_buf, consumed, &key,
1285 ++ &key_len) != VPD_OK)
1286 + return VPD_FAIL;
1287 +
1288 +- *consumed += decoded_len;
1289 +- key = &input_buf[*consumed];
1290 +- *consumed += key_len;
1291 +-
1292 +- /* value */
1293 +- res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
1294 +- &value_len, &decoded_len);
1295 +- if (res != VPD_OK || *consumed + decoded_len > max_len)
1296 ++ if (vpd_decode_entry(max_len, input_buf, consumed, &value,
1297 ++ &value_len) != VPD_OK)
1298 + return VPD_FAIL;
1299 +
1300 +- *consumed += decoded_len;
1301 +- value = &input_buf[*consumed];
1302 +- *consumed += value_len;
1303 +-
1304 + if (type == VPD_TYPE_STRING)
1305 + return callback(key, key_len, value, value_len,
1306 + callback_arg);
1307 +diff --git a/drivers/firmware/google/vpd_decode.h b/drivers/firmware/google/vpd_decode.h
1308 +index cf8c2ace155a..8dbe41cac599 100644
1309 +--- a/drivers/firmware/google/vpd_decode.h
1310 ++++ b/drivers/firmware/google/vpd_decode.h
1311 +@@ -25,8 +25,8 @@ enum {
1312 + };
1313 +
1314 + /* Callback for vpd_decode_string to invoke. */
1315 +-typedef int vpd_decode_callback(const u8 *key, s32 key_len,
1316 +- const u8 *value, s32 value_len,
1317 ++typedef int vpd_decode_callback(const u8 *key, u32 key_len,
1318 ++ const u8 *value, u32 value_len,
1319 + void *arg);
1320 +
1321 + /*
1322 +@@ -44,7 +44,7 @@ typedef int vpd_decode_callback(const u8 *key, s32 key_len,
1323 + * If one entry is successfully decoded, sends it to callback and returns the
1324 + * result.
1325 + */
1326 +-int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
1327 ++int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
1328 + vpd_decode_callback callback, void *callback_arg);
1329 +
1330 + #endif /* __VPD_DECODE_H */
1331 +diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
1332 +index a13f224303c6..0221dee8dd4c 100644
1333 +--- a/drivers/fpga/altera-ps-spi.c
1334 ++++ b/drivers/fpga/altera-ps-spi.c
1335 +@@ -210,7 +210,7 @@ static int altera_ps_write_complete(struct fpga_manager *mgr,
1336 + return -EIO;
1337 + }
1338 +
1339 +- if (!IS_ERR(conf->confd)) {
1340 ++ if (conf->confd) {
1341 + if (!gpiod_get_raw_value_cansleep(conf->confd)) {
1342 + dev_err(&mgr->dev, "CONF_DONE is inactive!\n");
1343 + return -EIO;
1344 +@@ -289,10 +289,13 @@ static int altera_ps_probe(struct spi_device *spi)
1345 + return PTR_ERR(conf->status);
1346 + }
1347 +
1348 +- conf->confd = devm_gpiod_get(&spi->dev, "confd", GPIOD_IN);
1349 ++ conf->confd = devm_gpiod_get_optional(&spi->dev, "confd", GPIOD_IN);
1350 + if (IS_ERR(conf->confd)) {
1351 +- dev_warn(&spi->dev, "Not using confd gpio: %ld\n",
1352 +- PTR_ERR(conf->confd));
1353 ++ dev_err(&spi->dev, "Failed to get confd gpio: %ld\n",
1354 ++ PTR_ERR(conf->confd));
1355 ++ return PTR_ERR(conf->confd);
1356 ++ } else if (!conf->confd) {
1357 ++ dev_warn(&spi->dev, "Not using confd gpio");
1358 + }
1359 +
1360 + /* Register manager with unique name */
1361 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
1362 +index a28a3d722ba2..62298ae5c81c 100644
1363 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
1364 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
1365 +@@ -535,21 +535,24 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
1366 + struct drm_sched_entity *entity)
1367 + {
1368 + struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
1369 +- unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1);
1370 +- struct dma_fence *other = centity->fences[idx];
1371 ++ struct dma_fence *other;
1372 ++ unsigned idx;
1373 ++ long r;
1374 +
1375 +- if (other) {
1376 +- signed long r;
1377 +- r = dma_fence_wait(other, true);
1378 +- if (r < 0) {
1379 +- if (r != -ERESTARTSYS)
1380 +- DRM_ERROR("Error (%ld) waiting for fence!\n", r);
1381 ++ spin_lock(&ctx->ring_lock);
1382 ++ idx = centity->sequence & (amdgpu_sched_jobs - 1);
1383 ++ other = dma_fence_get(centity->fences[idx]);
1384 ++ spin_unlock(&ctx->ring_lock);
1385 +
1386 +- return r;
1387 +- }
1388 +- }
1389 ++ if (!other)
1390 ++ return 0;
1391 +
1392 +- return 0;
1393 ++ r = dma_fence_wait(other, true);
1394 ++ if (r < 0 && r != -ERESTARTSYS)
1395 ++ DRM_ERROR("Error (%ld) waiting for fence!\n", r);
1396 ++
1397 ++ dma_fence_put(other);
1398 ++ return r;
1399 + }
1400 +
1401 + void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
1402 +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
1403 +index 9b9f87b84910..d98fe481cd36 100644
1404 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
1405 ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
1406 +@@ -2288,12 +2288,16 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
1407 + data->dpm_table.soc_table.dpm_state.soft_max_level =
1408 + data->dpm_table.soc_table.dpm_levels[soft_level].value;
1409 +
1410 +- ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
1411 ++ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
1412 ++ FEATURE_DPM_UCLK_MASK |
1413 ++ FEATURE_DPM_SOCCLK_MASK);
1414 + PP_ASSERT_WITH_CODE(!ret,
1415 + "Failed to upload boot level to highest!",
1416 + return ret);
1417 +
1418 +- ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
1419 ++ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
1420 ++ FEATURE_DPM_UCLK_MASK |
1421 ++ FEATURE_DPM_SOCCLK_MASK);
1422 + PP_ASSERT_WITH_CODE(!ret,
1423 + "Failed to upload dpm max level to highest!",
1424 + return ret);
1425 +@@ -2326,12 +2330,16 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
1426 + data->dpm_table.soc_table.dpm_state.soft_max_level =
1427 + data->dpm_table.soc_table.dpm_levels[soft_level].value;
1428 +
1429 +- ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
1430 ++ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
1431 ++ FEATURE_DPM_UCLK_MASK |
1432 ++ FEATURE_DPM_SOCCLK_MASK);
1433 + PP_ASSERT_WITH_CODE(!ret,
1434 + "Failed to upload boot level to highest!",
1435 + return ret);
1436 +
1437 +- ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
1438 ++ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
1439 ++ FEATURE_DPM_UCLK_MASK |
1440 ++ FEATURE_DPM_SOCCLK_MASK);
1441 + PP_ASSERT_WITH_CODE(!ret,
1442 + "Failed to upload dpm max level to highest!",
1443 + return ret);
1444 +@@ -2342,14 +2350,54 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
1445 +
1446 + static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
1447 + {
1448 ++ struct vega20_hwmgr *data =
1449 ++ (struct vega20_hwmgr *)(hwmgr->backend);
1450 ++ uint32_t soft_min_level, soft_max_level;
1451 + int ret = 0;
1452 +
1453 +- ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
1454 ++ /* gfxclk soft min/max settings */
1455 ++ soft_min_level =
1456 ++ vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
1457 ++ soft_max_level =
1458 ++ vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
1459 ++
1460 ++ data->dpm_table.gfx_table.dpm_state.soft_min_level =
1461 ++ data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
1462 ++ data->dpm_table.gfx_table.dpm_state.soft_max_level =
1463 ++ data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
1464 ++
1465 ++ /* uclk soft min/max settings */
1466 ++ soft_min_level =
1467 ++ vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
1468 ++ soft_max_level =
1469 ++ vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
1470 ++
1471 ++ data->dpm_table.mem_table.dpm_state.soft_min_level =
1472 ++ data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
1473 ++ data->dpm_table.mem_table.dpm_state.soft_max_level =
1474 ++ data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
1475 ++
1476 ++ /* socclk soft min/max settings */
1477 ++ soft_min_level =
1478 ++ vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
1479 ++ soft_max_level =
1480 ++ vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
1481 ++
1482 ++ data->dpm_table.soc_table.dpm_state.soft_min_level =
1483 ++ data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
1484 ++ data->dpm_table.soc_table.dpm_state.soft_max_level =
1485 ++ data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
1486 ++
1487 ++ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
1488 ++ FEATURE_DPM_UCLK_MASK |
1489 ++ FEATURE_DPM_SOCCLK_MASK);
1490 + PP_ASSERT_WITH_CODE(!ret,
1491 + "Failed to upload DPM Bootup Levels!",
1492 + return ret);
1493 +
1494 +- ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
1495 ++ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
1496 ++ FEATURE_DPM_UCLK_MASK |
1497 ++ FEATURE_DPM_SOCCLK_MASK);
1498 + PP_ASSERT_WITH_CODE(!ret,
1499 + "Failed to upload DPM Max Levels!",
1500 + return ret);
1501 +diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
1502 +index de0f882f0f7b..14b41de44ebc 100644
1503 +--- a/drivers/gpu/drm/omapdrm/dss/output.c
1504 ++++ b/drivers/gpu/drm/omapdrm/dss/output.c
1505 +@@ -4,6 +4,7 @@
1506 + * Author: Archit Taneja <archit@××.com>
1507 + */
1508 +
1509 ++#include <linux/bitops.h>
1510 + #include <linux/kernel.h>
1511 + #include <linux/module.h>
1512 + #include <linux/platform_device.h>
1513 +@@ -20,7 +21,8 @@ int omapdss_device_init_output(struct omap_dss_device *out)
1514 + {
1515 + struct device_node *remote_node;
1516 +
1517 +- remote_node = of_graph_get_remote_node(out->dev->of_node, 0, 0);
1518 ++ remote_node = of_graph_get_remote_node(out->dev->of_node,
1519 ++ ffs(out->of_ports) - 1, 0);
1520 + if (!remote_node) {
1521 + dev_dbg(out->dev, "failed to find video sink\n");
1522 + return 0;
1523 +diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
1524 +index b2da31310d24..09b526518f5a 100644
1525 +--- a/drivers/gpu/drm/virtio/virtgpu_object.c
1526 ++++ b/drivers/gpu/drm/virtio/virtgpu_object.c
1527 +@@ -204,6 +204,7 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
1528 + .interruptible = false,
1529 + .no_wait_gpu = false
1530 + };
1531 ++ size_t max_segment;
1532 +
1533 + /* wtf swapping */
1534 + if (bo->pages)
1535 +@@ -215,8 +216,13 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
1536 + if (!bo->pages)
1537 + goto out;
1538 +
1539 +- ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
1540 +- nr_pages << PAGE_SHIFT, GFP_KERNEL);
1541 ++ max_segment = virtio_max_dma_size(qdev->vdev);
1542 ++ max_segment &= PAGE_MASK;
1543 ++ if (max_segment > SCATTERLIST_MAX_SEGMENT)
1544 ++ max_segment = SCATTERLIST_MAX_SEGMENT;
1545 ++ ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
1546 ++ nr_pages << PAGE_SHIFT,
1547 ++ max_segment, GFP_KERNEL);
1548 + if (ret)
1549 + goto out;
1550 + return 0;
1551 +diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
1552 +index 3299b1474d1b..53bddb50aeba 100644
1553 +--- a/drivers/hid/wacom_sys.c
1554 ++++ b/drivers/hid/wacom_sys.c
1555 +@@ -311,14 +311,16 @@ static void wacom_feature_mapping(struct hid_device *hdev,
1556 + /* leave touch_max as is if predefined */
1557 + if (!features->touch_max) {
1558 + /* read manually */
1559 +- data = kzalloc(2, GFP_KERNEL);
1560 ++ n = hid_report_len(field->report);
1561 ++ data = hid_alloc_report_buf(field->report, GFP_KERNEL);
1562 + if (!data)
1563 + break;
1564 + data[0] = field->report->id;
1565 + ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
1566 +- data, 2, WAC_CMD_RETRIES);
1567 +- if (ret == 2) {
1568 +- features->touch_max = data[1];
1569 ++ data, n, WAC_CMD_RETRIES);
1570 ++ if (ret == n) {
1571 ++ ret = hid_report_raw_event(hdev,
1572 ++ HID_FEATURE_REPORT, data, n, 0);
1573 + } else {
1574 + features->touch_max = 16;
1575 + hid_warn(hdev, "wacom_feature_mapping: "
1576 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
1577 +index 53ed51adb8ac..58719461850d 100644
1578 +--- a/drivers/hid/wacom_wac.c
1579 ++++ b/drivers/hid/wacom_wac.c
1580 +@@ -2510,6 +2510,7 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
1581 + struct wacom *wacom = hid_get_drvdata(hdev);
1582 + struct wacom_wac *wacom_wac = &wacom->wacom_wac;
1583 + unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
1584 ++ struct wacom_features *features = &wacom->wacom_wac.features;
1585 +
1586 + switch (equivalent_usage) {
1587 + case HID_GD_X:
1588 +@@ -2530,6 +2531,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
1589 + case HID_DG_TIPSWITCH:
1590 + wacom_wac->hid_data.tipswitch = value;
1591 + break;
1592 ++ case HID_DG_CONTACTMAX:
1593 ++ features->touch_max = value;
1594 ++ return;
1595 + }
1596 +
1597 +
1598 +diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
1599 +index ad1681872e39..b99322d83f48 100644
1600 +--- a/drivers/i2c/busses/i2c-bcm-iproc.c
1601 ++++ b/drivers/i2c/busses/i2c-bcm-iproc.c
1602 +@@ -801,7 +801,10 @@ static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
1603 +
1604 + static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
1605 + {
1606 +- u32 val = I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1607 ++ u32 val;
1608 ++
1609 ++ /* We do not support the SMBUS Quick command */
1610 ++ val = I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
1611 +
1612 + if (adap->algo->reg_slave)
1613 + val |= I2C_FUNC_SLAVE;
1614 +diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
1615 +index e7f9305b2dd9..f5f001738df5 100644
1616 +--- a/drivers/i2c/busses/i2c-designware-slave.c
1617 ++++ b/drivers/i2c/busses/i2c-designware-slave.c
1618 +@@ -94,6 +94,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
1619 +
1620 + dev->disable_int(dev);
1621 + dev->disable(dev);
1622 ++ synchronize_irq(dev->irq);
1623 + dev->slave = NULL;
1624 + pm_runtime_put(dev->dev);
1625 +
1626 +diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
1627 +index 252edb433fdf..29eae1bf4f86 100644
1628 +--- a/drivers/i2c/busses/i2c-mt65xx.c
1629 ++++ b/drivers/i2c/busses/i2c-mt65xx.c
1630 +@@ -234,6 +234,10 @@ static const struct i2c_adapter_quirks mt7622_i2c_quirks = {
1631 + .max_num_msgs = 255,
1632 + };
1633 +
1634 ++static const struct i2c_adapter_quirks mt8183_i2c_quirks = {
1635 ++ .flags = I2C_AQ_NO_ZERO_LEN,
1636 ++};
1637 ++
1638 + static const struct mtk_i2c_compatible mt2712_compat = {
1639 + .regs = mt_i2c_regs_v1,
1640 + .pmic_i2c = 0,
1641 +@@ -298,6 +302,7 @@ static const struct mtk_i2c_compatible mt8173_compat = {
1642 + };
1643 +
1644 + static const struct mtk_i2c_compatible mt8183_compat = {
1645 ++ .quirks = &mt8183_i2c_quirks,
1646 + .regs = mt_i2c_regs_v2,
1647 + .pmic_i2c = 0,
1648 + .dcm = 0,
1649 +@@ -870,7 +875,11 @@ static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id)
1650 +
1651 + static u32 mtk_i2c_functionality(struct i2c_adapter *adap)
1652 + {
1653 +- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1654 ++ if (adap->quirks->flags & I2C_AQ_NO_ZERO_LEN)
1655 ++ return I2C_FUNC_I2C |
1656 ++ (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
1657 ++ else
1658 ++ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1659 + }
1660 +
1661 + static const struct i2c_algorithm mtk_i2c_algorithm = {
1662 +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
1663 +index 420efaab3860..e78c20d7df41 100644
1664 +--- a/drivers/input/mouse/elan_i2c_core.c
1665 ++++ b/drivers/input/mouse/elan_i2c_core.c
1666 +@@ -1357,7 +1357,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
1667 + { "ELAN0618", 0 },
1668 + { "ELAN0619", 0 },
1669 + { "ELAN061A", 0 },
1670 +- { "ELAN061B", 0 },
1671 ++/* { "ELAN061B", 0 }, not working on the Lenovo Legion Y7000 */
1672 + { "ELAN061C", 0 },
1673 + { "ELAN061D", 0 },
1674 + { "ELAN061E", 0 },
1675 +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
1676 +index dce1d8d2e8a4..3e687f18b203 100644
1677 +--- a/drivers/iommu/amd_iommu.c
1678 ++++ b/drivers/iommu/amd_iommu.c
1679 +@@ -1143,6 +1143,17 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
1680 + iommu_completion_wait(iommu);
1681 + }
1682 +
1683 ++static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
1684 ++{
1685 ++ struct iommu_cmd cmd;
1686 ++
1687 ++ build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1688 ++ dom_id, 1);
1689 ++ iommu_queue_command(iommu, &cmd);
1690 ++
1691 ++ iommu_completion_wait(iommu);
1692 ++}
1693 ++
1694 + static void amd_iommu_flush_all(struct amd_iommu *iommu)
1695 + {
1696 + struct iommu_cmd cmd;
1697 +@@ -1414,18 +1425,21 @@ static void free_pagetable(struct protection_domain *domain)
1698 + * another level increases the size of the address space by 9 bits to a size up
1699 + * to 64 bits.
1700 + */
1701 +-static bool increase_address_space(struct protection_domain *domain,
1702 ++static void increase_address_space(struct protection_domain *domain,
1703 + gfp_t gfp)
1704 + {
1705 ++ unsigned long flags;
1706 + u64 *pte;
1707 +
1708 +- if (domain->mode == PAGE_MODE_6_LEVEL)
1709 ++ spin_lock_irqsave(&domain->lock, flags);
1710 ++
1711 ++ if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
1712 + /* address space already 64 bit large */
1713 +- return false;
1714 ++ goto out;
1715 +
1716 + pte = (void *)get_zeroed_page(gfp);
1717 + if (!pte)
1718 +- return false;
1719 ++ goto out;
1720 +
1721 + *pte = PM_LEVEL_PDE(domain->mode,
1722 + iommu_virt_to_phys(domain->pt_root));
1723 +@@ -1433,7 +1447,10 @@ static bool increase_address_space(struct protection_domain *domain,
1724 + domain->mode += 1;
1725 + domain->updated = true;
1726 +
1727 +- return true;
1728 ++out:
1729 ++ spin_unlock_irqrestore(&domain->lock, flags);
1730 ++
1731 ++ return;
1732 + }
1733 +
1734 + static u64 *alloc_pte(struct protection_domain *domain,
1735 +@@ -1863,6 +1880,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
1736 + {
1737 + u64 pte_root = 0;
1738 + u64 flags = 0;
1739 ++ u32 old_domid;
1740 +
1741 + if (domain->mode != PAGE_MODE_NONE)
1742 + pte_root = iommu_virt_to_phys(domain->pt_root);
1743 +@@ -1912,8 +1930,20 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
1744 + flags &= ~DEV_DOMID_MASK;
1745 + flags |= domain->id;
1746 +
1747 ++ old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
1748 + amd_iommu_dev_table[devid].data[1] = flags;
1749 + amd_iommu_dev_table[devid].data[0] = pte_root;
1750 ++
1751 ++ /*
1752 ++ * A kdump kernel might be replacing a domain ID that was copied from
1753 ++ * the previous kernel--if so, it needs to flush the translation cache
1754 ++ * entries for the old domain ID that is being overwritten
1755 ++ */
1756 ++ if (old_domid) {
1757 ++ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
1758 ++
1759 ++ amd_iommu_flush_tlb_domid(iommu, old_domid);
1760 ++ }
1761 + }
1762 +
1763 + static void clear_dte_entry(u16 devid)
1764 +diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
1765 +index eceaa7e968ae..641dc223c97b 100644
1766 +--- a/drivers/iommu/intel-svm.c
1767 ++++ b/drivers/iommu/intel-svm.c
1768 +@@ -100,24 +100,19 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
1769 + }
1770 +
1771 + static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
1772 +- unsigned long address, unsigned long pages, int ih, int gl)
1773 ++ unsigned long address, unsigned long pages, int ih)
1774 + {
1775 + struct qi_desc desc;
1776 +
1777 +- if (pages == -1) {
1778 +- /* For global kernel pages we have to flush them in *all* PASIDs
1779 +- * because that's the only option the hardware gives us. Despite
1780 +- * the fact that they are actually only accessible through one. */
1781 +- if (gl)
1782 +- desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
1783 +- QI_EIOTLB_DID(sdev->did) |
1784 +- QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) |
1785 +- QI_EIOTLB_TYPE;
1786 +- else
1787 +- desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
1788 +- QI_EIOTLB_DID(sdev->did) |
1789 +- QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
1790 +- QI_EIOTLB_TYPE;
1791 ++ /*
1792 ++ * Do PASID granu IOTLB invalidation if page selective capability is
1793 ++ * not available.
1794 ++ */
1795 ++ if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) {
1796 ++ desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
1797 ++ QI_EIOTLB_DID(sdev->did) |
1798 ++ QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
1799 ++ QI_EIOTLB_TYPE;
1800 + desc.qw1 = 0;
1801 + } else {
1802 + int mask = ilog2(__roundup_pow_of_two(pages));
1803 +@@ -127,7 +122,6 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
1804 + QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
1805 + QI_EIOTLB_TYPE;
1806 + desc.qw1 = QI_EIOTLB_ADDR(address) |
1807 +- QI_EIOTLB_GL(gl) |
1808 + QI_EIOTLB_IH(ih) |
1809 + QI_EIOTLB_AM(mask);
1810 + }
1811 +@@ -162,13 +156,13 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
1812 + }
1813 +
1814 + static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
1815 +- unsigned long pages, int ih, int gl)
1816 ++ unsigned long pages, int ih)
1817 + {
1818 + struct intel_svm_dev *sdev;
1819 +
1820 + rcu_read_lock();
1821 + list_for_each_entry_rcu(sdev, &svm->devs, list)
1822 +- intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl);
1823 ++ intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
1824 + rcu_read_unlock();
1825 + }
1826 +
1827 +@@ -180,7 +174,7 @@ static void intel_invalidate_range(struct mmu_notifier *mn,
1828 + struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
1829 +
1830 + intel_flush_svm_range(svm, start,
1831 +- (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0);
1832 ++ (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
1833 + }
1834 +
1835 + static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
1836 +@@ -203,7 +197,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
1837 + rcu_read_lock();
1838 + list_for_each_entry_rcu(sdev, &svm->devs, list) {
1839 + intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
1840 +- intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
1841 ++ intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
1842 + }
1843 + rcu_read_unlock();
1844 +
1845 +@@ -410,7 +404,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
1846 + * large and has to be physically contiguous. So it's
1847 + * hard to be as defensive as we might like. */
1848 + intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
1849 +- intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
1850 ++ intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
1851 + kfree_rcu(sdev, rcu);
1852 +
1853 + if (list_empty(&svm->devs)) {
1854 +diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
1855 +index b9dad0accd1b..d855e9c09c08 100644
1856 +--- a/drivers/media/platform/stm32/stm32-dcmi.c
1857 ++++ b/drivers/media/platform/stm32/stm32-dcmi.c
1858 +@@ -1702,7 +1702,7 @@ static int dcmi_probe(struct platform_device *pdev)
1859 + if (irq <= 0) {
1860 + if (irq != -EPROBE_DEFER)
1861 + dev_err(&pdev->dev, "Could not get irq\n");
1862 +- return irq;
1863 ++ return irq ? irq : -ENXIO;
1864 + }
1865 +
1866 + dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1867 +diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
1868 +index c659e18b358b..676d233d46d5 100644
1869 +--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
1870 ++++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
1871 +@@ -608,10 +608,9 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
1872 + static int technisat_usb2_get_ir(struct dvb_usb_device *d)
1873 + {
1874 + struct technisat_usb2_state *state = d->priv;
1875 +- u8 *buf = state->buf;
1876 +- u8 *b;
1877 +- int ret;
1878 + struct ir_raw_event ev;
1879 ++ u8 *buf = state->buf;
1880 ++ int i, ret;
1881 +
1882 + buf[0] = GET_IR_DATA_VENDOR_REQUEST;
1883 + buf[1] = 0x08;
1884 +@@ -647,26 +646,25 @@ unlock:
1885 + return 0; /* no key pressed */
1886 +
1887 + /* decoding */
1888 +- b = buf+1;
1889 +
1890 + #if 0
1891 + deb_rc("RC: %d ", ret);
1892 +- debug_dump(b, ret, deb_rc);
1893 ++ debug_dump(buf + 1, ret, deb_rc);
1894 + #endif
1895 +
1896 + ev.pulse = 0;
1897 +- while (1) {
1898 +- ev.pulse = !ev.pulse;
1899 +- ev.duration = (*b * FIRMWARE_CLOCK_DIVISOR * FIRMWARE_CLOCK_TICK) / 1000;
1900 +- ir_raw_event_store(d->rc_dev, &ev);
1901 +-
1902 +- b++;
1903 +- if (*b == 0xff) {
1904 ++ for (i = 1; i < ARRAY_SIZE(state->buf); i++) {
1905 ++ if (buf[i] == 0xff) {
1906 + ev.pulse = 0;
1907 + ev.duration = 888888*2;
1908 + ir_raw_event_store(d->rc_dev, &ev);
1909 + break;
1910 + }
1911 ++
1912 ++ ev.pulse = !ev.pulse;
1913 ++ ev.duration = (buf[i] * FIRMWARE_CLOCK_DIVISOR *
1914 ++ FIRMWARE_CLOCK_TICK) / 1000;
1915 ++ ir_raw_event_store(d->rc_dev, &ev);
1916 + }
1917 +
1918 + ir_raw_event_handle(d->rc_dev);
1919 +diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
1920 +index e4d2dcd5cc0f..19c90fa9e443 100644
1921 +--- a/drivers/media/usb/tm6000/tm6000-dvb.c
1922 ++++ b/drivers/media/usb/tm6000/tm6000-dvb.c
1923 +@@ -97,6 +97,7 @@ static void tm6000_urb_received(struct urb *urb)
1924 + printk(KERN_ERR "tm6000: error %s\n", __func__);
1925 + kfree(urb->transfer_buffer);
1926 + usb_free_urb(urb);
1927 ++ dev->dvb->bulk_urb = NULL;
1928 + }
1929 + }
1930 + }
1931 +@@ -127,6 +128,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
1932 + dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
1933 + if (!dvb->bulk_urb->transfer_buffer) {
1934 + usb_free_urb(dvb->bulk_urb);
1935 ++ dvb->bulk_urb = NULL;
1936 + return -ENOMEM;
1937 + }
1938 +
1939 +@@ -153,6 +155,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
1940 +
1941 + kfree(dvb->bulk_urb->transfer_buffer);
1942 + usb_free_urb(dvb->bulk_urb);
1943 ++ dvb->bulk_urb = NULL;
1944 + return ret;
1945 + }
1946 +
1947 +diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
1948 +index 75178624d3f5..fb15f255a1db 100644
1949 +--- a/drivers/net/dsa/microchip/ksz9477_spi.c
1950 ++++ b/drivers/net/dsa/microchip/ksz9477_spi.c
1951 +@@ -157,6 +157,7 @@ static const struct of_device_id ksz9477_dt_ids[] = {
1952 + { .compatible = "microchip,ksz9897" },
1953 + { .compatible = "microchip,ksz9893" },
1954 + { .compatible = "microchip,ksz9563" },
1955 ++ { .compatible = "microchip,ksz8563" },
1956 + {},
1957 + };
1958 + MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
1959 +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
1960 +index b41f23679a08..7ce9c69e9c44 100644
1961 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
1962 ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
1963 +@@ -469,13 +469,19 @@ static int __init xgbe_mod_init(void)
1964 +
1965 + ret = xgbe_platform_init();
1966 + if (ret)
1967 +- return ret;
1968 ++ goto err_platform_init;
1969 +
1970 + ret = xgbe_pci_init();
1971 + if (ret)
1972 +- return ret;
1973 ++ goto err_pci_init;
1974 +
1975 + return 0;
1976 ++
1977 ++err_pci_init:
1978 ++ xgbe_platform_exit();
1979 ++err_platform_init:
1980 ++ unregister_netdevice_notifier(&xgbe_netdev_notifier);
1981 ++ return ret;
1982 + }
1983 +
1984 + static void __exit xgbe_mod_exit(void)
1985 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
1986 +index 1fff462a4175..3dbf3ff1c450 100644
1987 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
1988 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
1989 +@@ -431,7 +431,8 @@ int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
1990 + if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
1991 + break;
1992 + }
1993 +- if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
1994 ++ if (rule && rule->type == aq_rx_filter_vlan &&
1995 ++ be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
1996 + struct ethtool_rxnfc cmd;
1997 +
1998 + cmd.fs.location = rule->aq_fsp.location;
1999 +@@ -843,7 +844,7 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
2000 + return err;
2001 +
2002 + if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
2003 +- if (hweight < AQ_VLAN_MAX_FILTERS && hweight > 0) {
2004 ++ if (hweight <= AQ_VLAN_MAX_FILTERS && hweight > 0) {
2005 + err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw,
2006 + !(aq_nic->packet_filter & IFF_PROMISC));
2007 + aq_nic->aq_nic_cfg.is_vlan_force_promisc = false;
2008 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
2009 +index 5315df5ff6f8..4ebf083c51c5 100644
2010 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
2011 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
2012 +@@ -61,6 +61,10 @@ static int aq_ndev_open(struct net_device *ndev)
2013 + if (err < 0)
2014 + goto err_exit;
2015 +
2016 ++ err = aq_filters_vlans_update(aq_nic);
2017 ++ if (err < 0)
2018 ++ goto err_exit;
2019 ++
2020 + err = aq_nic_start(aq_nic);
2021 + if (err < 0)
2022 + goto err_exit;
2023 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
2024 +index 41172fbebddd..1a2b09065293 100644
2025 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
2026 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
2027 +@@ -390,7 +390,7 @@ int aq_nic_start(struct aq_nic_s *self)
2028 + self->aq_nic_cfg.link_irq_vec);
2029 + err = request_threaded_irq(irqvec, NULL,
2030 + aq_linkstate_threaded_isr,
2031 +- IRQF_SHARED,
2032 ++ IRQF_SHARED | IRQF_ONESHOT,
2033 + self->ndev->name, self);
2034 + if (err < 0)
2035 + goto err_exit;
2036 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
2037 +index 715685aa48c3..28892b8acd0e 100644
2038 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
2039 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
2040 +@@ -86,6 +86,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
2041 + }
2042 + }
2043 +
2044 ++err_exit:
2045 + if (!was_tx_cleaned)
2046 + work_done = budget;
2047 +
2048 +@@ -95,7 +96,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
2049 + 1U << self->aq_ring_param.vec_idx);
2050 + }
2051 + }
2052 +-err_exit:
2053 ++
2054 + return work_done;
2055 + }
2056 +
2057 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
2058 +index 8c1497e7d9c5..aa31948eac64 100644
2059 +--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
2060 ++++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
2061 +@@ -79,7 +79,7 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
2062 + n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
2063 + if (n != 1) {
2064 + err = -EPERM;
2065 +- goto err_irq;
2066 ++ goto err_irq_vectors;
2067 + }
2068 +
2069 + ptp_qoriq->irq = pci_irq_vector(pdev, 0);
2070 +@@ -103,6 +103,8 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
2071 + err_no_clock:
2072 + free_irq(ptp_qoriq->irq, ptp_qoriq);
2073 + err_irq:
2074 ++ pci_free_irq_vectors(pdev);
2075 ++err_irq_vectors:
2076 + iounmap(base);
2077 + err_ioremap:
2078 + kfree(ptp_qoriq);
2079 +@@ -120,6 +122,7 @@ static void enetc_ptp_remove(struct pci_dev *pdev)
2080 + struct ptp_qoriq *ptp_qoriq = pci_get_drvdata(pdev);
2081 +
2082 + ptp_qoriq_free(ptp_qoriq);
2083 ++ pci_free_irq_vectors(pdev);
2084 + kfree(ptp_qoriq);
2085 +
2086 + pci_release_mem_regions(pdev);
2087 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
2088 +index fe879c07ae3c..fc5ea87bd387 100644
2089 +--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
2090 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
2091 +@@ -11,6 +11,7 @@
2092 + #include <linux/io.h>
2093 + #include <linux/ip.h>
2094 + #include <linux/ipv6.h>
2095 ++#include <linux/marvell_phy.h>
2096 + #include <linux/module.h>
2097 + #include <linux/phy.h>
2098 + #include <linux/platform_device.h>
2099 +@@ -1149,6 +1150,13 @@ static void hns_nic_adjust_link(struct net_device *ndev)
2100 + }
2101 + }
2102 +
2103 ++static int hns_phy_marvell_fixup(struct phy_device *phydev)
2104 ++{
2105 ++ phydev->dev_flags |= MARVELL_PHY_LED0_LINK_LED1_ACTIVE;
2106 ++
2107 ++ return 0;
2108 ++}
2109 ++
2110 + /**
2111 + *hns_nic_init_phy - init phy
2112 + *@ndev: net device
2113 +@@ -1174,6 +1182,16 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
2114 + if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
2115 + phy_dev->dev_flags = 0;
2116 +
2117 ++ /* register the PHY fixup (for Marvell 88E1510) */
2118 ++ ret = phy_register_fixup_for_uid(MARVELL_PHY_ID_88E1510,
2119 ++ MARVELL_PHY_ID_MASK,
2120 ++ hns_phy_marvell_fixup);
2121 ++ /* we can live without it, so just issue a warning */
2122 ++ if (ret)
2123 ++ netdev_warn(ndev,
2124 ++ "Cannot register PHY fixup, ret=%d\n",
2125 ++ ret);
2126 ++
2127 + ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
2128 + h->phy_if);
2129 + } else {
2130 +@@ -2429,8 +2447,11 @@ static int hns_nic_dev_remove(struct platform_device *pdev)
2131 + hns_nic_uninit_ring_data(priv);
2132 + priv->ring_data = NULL;
2133 +
2134 +- if (ndev->phydev)
2135 ++ if (ndev->phydev) {
2136 ++ phy_unregister_fixup_for_uid(MARVELL_PHY_ID_88E1510,
2137 ++ MARVELL_PHY_ID_MASK);
2138 + phy_disconnect(ndev->phydev);
2139 ++ }
2140 +
2141 + if (!IS_ERR_OR_NULL(priv->ae_handle))
2142 + hnae_put_handle(priv->ae_handle);
2143 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2144 +index 66b691b7221f..f1e0c16263a4 100644
2145 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2146 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2147 +@@ -3896,6 +3896,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
2148 +
2149 + hns3_client_stop(handle);
2150 +
2151 ++ hns3_uninit_phy(netdev);
2152 ++
2153 + if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
2154 + netdev_warn(netdev, "already uninitialized\n");
2155 + goto out_netdev_free;
2156 +@@ -3905,8 +3907,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
2157 +
2158 + hns3_clear_all_ring(handle, true);
2159 +
2160 +- hns3_uninit_phy(netdev);
2161 +-
2162 + hns3_nic_uninit_vector_data(priv);
2163 +
2164 + ret = hns3_nic_dealloc_vector_data(priv);
2165 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
2166 +index cebd20f3128d..fa4bb940665c 100644
2167 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
2168 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
2169 +@@ -1983,6 +1983,10 @@ static void __ibmvnic_reset(struct work_struct *work)
2170 +
2171 + rwi = get_next_rwi(adapter);
2172 + while (rwi) {
2173 ++ if (adapter->state == VNIC_REMOVING ||
2174 ++ adapter->state == VNIC_REMOVED)
2175 ++ goto out;
2176 ++
2177 + if (adapter->force_reset_recovery) {
2178 + adapter->force_reset_recovery = false;
2179 + rc = do_hard_reset(adapter, rwi, reset_state);
2180 +@@ -2007,7 +2011,7 @@ static void __ibmvnic_reset(struct work_struct *work)
2181 + netdev_dbg(adapter->netdev, "Reset failed\n");
2182 + free_all_rwi(adapter);
2183 + }
2184 +-
2185 ++out:
2186 + adapter->resetting = false;
2187 + if (we_lock_rtnl)
2188 + rtnl_unlock();
2189 +diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
2190 +index c93a6f9b735b..7e88446ac97a 100644
2191 +--- a/drivers/net/ethernet/marvell/sky2.c
2192 ++++ b/drivers/net/ethernet/marvell/sky2.c
2193 +@@ -4924,6 +4924,13 @@ static const struct dmi_system_id msi_blacklist[] = {
2194 + DMI_MATCH(DMI_BOARD_NAME, "P6T"),
2195 + },
2196 + },
2197 ++ {
2198 ++ .ident = "ASUS P6X",
2199 ++ .matches = {
2200 ++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
2201 ++ DMI_MATCH(DMI_BOARD_NAME, "P6X"),
2202 ++ },
2203 ++ },
2204 + {}
2205 + };
2206 +
2207 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
2208 +index 6de23b56b294..c875a2fa7596 100644
2209 +--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
2210 ++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
2211 +@@ -1215,7 +1215,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
2212 + &drv_version);
2213 + if (rc) {
2214 + DP_NOTICE(cdev, "Failed sending drv version command\n");
2215 +- return rc;
2216 ++ goto err4;
2217 + }
2218 + }
2219 +
2220 +@@ -1223,6 +1223,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
2221 +
2222 + return 0;
2223 +
2224 ++err4:
2225 ++ qed_ll2_dealloc_if(cdev);
2226 + err3:
2227 + qed_hw_stop(cdev);
2228 + err2:
2229 +diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
2230 +index 7a5e6c5abb57..276c7cae7cee 100644
2231 +--- a/drivers/net/ethernet/seeq/sgiseeq.c
2232 ++++ b/drivers/net/ethernet/seeq/sgiseeq.c
2233 +@@ -794,15 +794,16 @@ static int sgiseeq_probe(struct platform_device *pdev)
2234 + printk(KERN_ERR "Sgiseeq: Cannot register net device, "
2235 + "aborting.\n");
2236 + err = -ENODEV;
2237 +- goto err_out_free_page;
2238 ++ goto err_out_free_attrs;
2239 + }
2240 +
2241 + printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);
2242 +
2243 + return 0;
2244 +
2245 +-err_out_free_page:
2246 +- free_page((unsigned long) sp->srings);
2247 ++err_out_free_attrs:
2248 ++ dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
2249 ++ sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
2250 + err_out_free_dev:
2251 + free_netdev(dev);
2252 +
2253 +diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
2254 +index b41696e16bdc..c20e7ef18bc9 100644
2255 +--- a/drivers/net/ieee802154/mac802154_hwsim.c
2256 ++++ b/drivers/net/ieee802154/mac802154_hwsim.c
2257 +@@ -802,7 +802,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
2258 + err = hwsim_subscribe_all_others(phy);
2259 + if (err < 0) {
2260 + mutex_unlock(&hwsim_phys_lock);
2261 +- goto err_reg;
2262 ++ goto err_subscribe;
2263 + }
2264 + }
2265 + list_add_tail(&phy->list, &hwsim_phys);
2266 +@@ -812,6 +812,8 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
2267 +
2268 + return idx;
2269 +
2270 ++err_subscribe:
2271 ++ ieee802154_unregister_hw(phy->hw);
2272 + err_reg:
2273 + kfree(pib);
2274 + err_pib:
2275 +@@ -901,9 +903,9 @@ static __init int hwsim_init_module(void)
2276 + return 0;
2277 +
2278 + platform_drv:
2279 +- genl_unregister_family(&hwsim_genl_family);
2280 +-platform_dev:
2281 + platform_device_unregister(mac802154hwsim_dev);
2282 ++platform_dev:
2283 ++ genl_unregister_family(&hwsim_genl_family);
2284 + return rc;
2285 + }
2286 +
2287 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
2288 +index 1a7b7bd412f9..f2553dff5b17 100644
2289 +--- a/drivers/net/usb/r8152.c
2290 ++++ b/drivers/net/usb/r8152.c
2291 +@@ -787,8 +787,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
2292 + ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
2293 + RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
2294 + value, index, tmp, size, 500);
2295 ++ if (ret < 0)
2296 ++ memset(data, 0xff, size);
2297 ++ else
2298 ++ memcpy(data, tmp, size);
2299 +
2300 +- memcpy(data, tmp, size);
2301 + kfree(tmp);
2302 +
2303 + return ret;
2304 +diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
2305 +index 653d347a9a19..580387f9f12a 100644
2306 +--- a/drivers/net/wireless/marvell/mwifiex/ie.c
2307 ++++ b/drivers/net/wireless/marvell/mwifiex/ie.c
2308 +@@ -241,6 +241,9 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
2309 + }
2310 +
2311 + vs_ie = (struct ieee_types_header *)vendor_ie;
2312 ++ if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 >
2313 ++ IEEE_MAX_IE_SIZE)
2314 ++ return -EINVAL;
2315 + memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
2316 + vs_ie, vs_ie->len + 2);
2317 + le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2);
2318 +diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
2319 +index 18f7d9bf30b2..0939a8c8f3ab 100644
2320 +--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
2321 ++++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
2322 +@@ -265,6 +265,8 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
2323 +
2324 + rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
2325 + if (rate_ie) {
2326 ++ if (rate_ie->len > MWIFIEX_SUPPORTED_RATES)
2327 ++ return;
2328 + memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
2329 + rate_len = rate_ie->len;
2330 + }
2331 +@@ -272,8 +274,11 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
2332 + rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
2333 + params->beacon.tail,
2334 + params->beacon.tail_len);
2335 +- if (rate_ie)
2336 ++ if (rate_ie) {
2337 ++ if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len)
2338 ++ return;
2339 + memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
2340 ++ }
2341 +
2342 + return;
2343 + }
2344 +@@ -391,6 +396,8 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
2345 + params->beacon.tail_len);
2346 + if (vendor_ie) {
2347 + wmm_ie = vendor_ie;
2348 ++ if (*(wmm_ie + 1) > sizeof(struct mwifiex_types_wmm_info))
2349 ++ return;
2350 + memcpy(&bss_cfg->wmm_info, wmm_ie +
2351 + sizeof(struct ieee_types_header), *(wmm_ie + 1));
2352 + priv->wmm_enabled = 1;
2353 +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
2354 +index 8d33970a2950..5f5722bf6762 100644
2355 +--- a/drivers/net/xen-netfront.c
2356 ++++ b/drivers/net/xen-netfront.c
2357 +@@ -906,7 +906,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
2358 + __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
2359 + }
2360 + if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
2361 +- queue->rx.rsp_cons = ++cons;
2362 ++ queue->rx.rsp_cons = ++cons + skb_queue_len(list);
2363 + kfree_skb(nskb);
2364 + return ~0U;
2365 + }
2366 +diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
2367 +index 8ffba67568ec..b7f6b1324395 100644
2368 +--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
2369 ++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
2370 +@@ -61,6 +61,7 @@
2371 + USB2_OBINT_IDDIGCHG)
2372 +
2373 + /* VBCTRL */
2374 ++#define USB2_VBCTRL_OCCLREN BIT(16)
2375 + #define USB2_VBCTRL_DRVVBUSSEL BIT(8)
2376 +
2377 + /* LINECTRL1 */
2378 +@@ -374,6 +375,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
2379 + writel(val, usb2_base + USB2_LINECTRL1);
2380 +
2381 + val = readl(usb2_base + USB2_VBCTRL);
2382 ++ val &= ~USB2_VBCTRL_OCCLREN;
2383 + writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL);
2384 + val = readl(usb2_base + USB2_ADPCTRL);
2385 + writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL);
2386 +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
2387 +index 0b4f36905321..8e667967928a 100644
2388 +--- a/drivers/tty/serial/atmel_serial.c
2389 ++++ b/drivers/tty/serial/atmel_serial.c
2390 +@@ -1400,7 +1400,6 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
2391 +
2392 + atmel_port->hd_start_rx = false;
2393 + atmel_start_rx(port);
2394 +- return;
2395 + }
2396 +
2397 + atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
2398 +diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
2399 +index 73d71a4e6c0c..f49b7d6fbc88 100644
2400 +--- a/drivers/tty/serial/sprd_serial.c
2401 ++++ b/drivers/tty/serial/sprd_serial.c
2402 +@@ -609,7 +609,7 @@ static inline void sprd_rx(struct uart_port *port)
2403 +
2404 + if (lsr & (SPRD_LSR_BI | SPRD_LSR_PE |
2405 + SPRD_LSR_FE | SPRD_LSR_OE))
2406 +- if (handle_lsr_errors(port, &lsr, &flag))
2407 ++ if (handle_lsr_errors(port, &flag, &lsr))
2408 + continue;
2409 + if (uart_handle_sysrq_char(port, ch))
2410 + continue;
2411 +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
2412 +index 9d6cb709ca7b..151a74a54386 100644
2413 +--- a/drivers/usb/core/config.c
2414 ++++ b/drivers/usb/core/config.c
2415 +@@ -921,7 +921,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
2416 + struct usb_bos_descriptor *bos;
2417 + struct usb_dev_cap_header *cap;
2418 + struct usb_ssp_cap_descriptor *ssp_cap;
2419 +- unsigned char *buffer;
2420 ++ unsigned char *buffer, *buffer0;
2421 + int length, total_len, num, i, ssac;
2422 + __u8 cap_type;
2423 + int ret;
2424 +@@ -966,10 +966,12 @@ int usb_get_bos_descriptor(struct usb_device *dev)
2425 + ret = -ENOMSG;
2426 + goto err;
2427 + }
2428 ++
2429 ++ buffer0 = buffer;
2430 + total_len -= length;
2431 ++ buffer += length;
2432 +
2433 + for (i = 0; i < num; i++) {
2434 +- buffer += length;
2435 + cap = (struct usb_dev_cap_header *)buffer;
2436 +
2437 + if (total_len < sizeof(*cap) || total_len < cap->bLength) {
2438 +@@ -983,8 +985,6 @@ int usb_get_bos_descriptor(struct usb_device *dev)
2439 + break;
2440 + }
2441 +
2442 +- total_len -= length;
2443 +-
2444 + if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
2445 + dev_warn(ddev, "descriptor type invalid, skip\n");
2446 + continue;
2447 +@@ -1019,7 +1019,11 @@ int usb_get_bos_descriptor(struct usb_device *dev)
2448 + default:
2449 + break;
2450 + }
2451 ++
2452 ++ total_len -= length;
2453 ++ buffer += length;
2454 + }
2455 ++ dev->bos->desc->wTotalLength = cpu_to_le16(buffer - buffer0);
2456 +
2457 + return 0;
2458 +
2459 +diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
2460 +index 294158113d62..77142f9bf26a 100644
2461 +--- a/drivers/usb/host/xhci-tegra.c
2462 ++++ b/drivers/usb/host/xhci-tegra.c
2463 +@@ -1217,6 +1217,16 @@ static int tegra_xusb_probe(struct platform_device *pdev)
2464 +
2465 + tegra_xusb_config(tegra, regs);
2466 +
2467 ++ /*
2468 ++ * The XUSB Falcon microcontroller can only address 40 bits, so set
2469 ++ * the DMA mask accordingly.
2470 ++ */
2471 ++ err = dma_set_mask_and_coherent(tegra->dev, DMA_BIT_MASK(40));
2472 ++ if (err < 0) {
2473 ++ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
2474 ++ goto put_rpm;
2475 ++ }
2476 ++
2477 + err = tegra_xusb_load_firmware(tegra);
2478 + if (err < 0) {
2479 + dev_err(&pdev->dev, "failed to load firmware: %d\n", err);
2480 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
2481 +index 18c7c6b2fe08..85b2107e8a3d 100644
2482 +--- a/fs/cifs/connect.c
2483 ++++ b/fs/cifs/connect.c
2484 +@@ -2961,6 +2961,7 @@ static int
2485 + cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
2486 + {
2487 + int rc = 0;
2488 ++ int is_domain = 0;
2489 + const char *delim, *payload;
2490 + char *desc;
2491 + ssize_t len;
2492 +@@ -3008,6 +3009,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
2493 + rc = PTR_ERR(key);
2494 + goto out_err;
2495 + }
2496 ++ is_domain = 1;
2497 + }
2498 +
2499 + down_read(&key->sem);
2500 +@@ -3065,6 +3067,26 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
2501 + goto out_key_put;
2502 + }
2503 +
2504 ++ /*
2505 ++ * If we have a domain key then we must set the domainName in the
2506 ++ * for the request.
2507 ++ */
2508 ++ if (is_domain && ses->domainName) {
2509 ++ vol->domainname = kstrndup(ses->domainName,
2510 ++ strlen(ses->domainName),
2511 ++ GFP_KERNEL);
2512 ++ if (!vol->domainname) {
2513 ++ cifs_dbg(FYI, "Unable to allocate %zd bytes for "
2514 ++ "domain\n", len);
2515 ++ rc = -ENOMEM;
2516 ++ kfree(vol->username);
2517 ++ vol->username = NULL;
2518 ++ kzfree(vol->password);
2519 ++ vol->password = NULL;
2520 ++ goto out_key_put;
2521 ++ }
2522 ++ }
2523 ++
2524 + out_key_put:
2525 + up_read(&key->sem);
2526 + key_put(key);
2527 +diff --git a/fs/fs_parser.c b/fs/fs_parser.c
2528 +index 0d388faa25d1..460ea4206fa2 100644
2529 +--- a/fs/fs_parser.c
2530 ++++ b/fs/fs_parser.c
2531 +@@ -264,6 +264,7 @@ int fs_lookup_param(struct fs_context *fc,
2532 + return invalf(fc, "%s: not usable as path", param->key);
2533 + }
2534 +
2535 ++ f->refcnt++; /* filename_lookup() drops our ref. */
2536 + ret = filename_lookup(param->dirfd, f, flags, _path, NULL);
2537 + if (ret < 0) {
2538 + errorf(fc, "%s: Lookup failure for '%s'", param->key, f->name);
2539 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
2540 +index 9f44ddc34c7b..3321cc7a7ead 100644
2541 +--- a/fs/nfs/dir.c
2542 ++++ b/fs/nfs/dir.c
2543 +@@ -1483,7 +1483,7 @@ static int nfs_finish_open(struct nfs_open_context *ctx,
2544 + if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
2545 + nfs_file_set_open_context(file, ctx);
2546 + else
2547 +- err = -ESTALE;
2548 ++ err = -EOPENSTALE;
2549 + out:
2550 + return err;
2551 + }
2552 +diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
2553 +index c67cdbb36ce7..38d915814221 100644
2554 +--- a/fs/nfs/flexfilelayout/flexfilelayout.c
2555 ++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
2556 +@@ -8,6 +8,7 @@
2557 + */
2558 +
2559 + #include <linux/nfs_fs.h>
2560 ++#include <linux/nfs_mount.h>
2561 + #include <linux/nfs_page.h>
2562 + #include <linux/module.h>
2563 + #include <linux/sched/mm.h>
2564 +@@ -928,7 +929,9 @@ retry:
2565 + pgm = &pgio->pg_mirrors[0];
2566 + pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
2567 +
2568 +- pgio->pg_maxretrans = io_maxretrans;
2569 ++ if (NFS_SERVER(pgio->pg_inode)->flags &
2570 ++ (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
2571 ++ pgio->pg_maxretrans = io_maxretrans;
2572 + return;
2573 + out_nolseg:
2574 + if (pgio->pg_error < 0)
2575 +@@ -936,6 +939,7 @@ out_nolseg:
2576 + out_mds:
2577 + pnfs_put_lseg(pgio->pg_lseg);
2578 + pgio->pg_lseg = NULL;
2579 ++ pgio->pg_maxretrans = 0;
2580 + nfs_pageio_reset_read_mds(pgio);
2581 + }
2582 +
2583 +@@ -996,12 +1000,15 @@ retry:
2584 + pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
2585 + }
2586 +
2587 +- pgio->pg_maxretrans = io_maxretrans;
2588 ++ if (NFS_SERVER(pgio->pg_inode)->flags &
2589 ++ (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
2590 ++ pgio->pg_maxretrans = io_maxretrans;
2591 + return;
2592 +
2593 + out_mds:
2594 + pnfs_put_lseg(pgio->pg_lseg);
2595 + pgio->pg_lseg = NULL;
2596 ++ pgio->pg_maxretrans = 0;
2597 + nfs_pageio_reset_write_mds(pgio);
2598 + }
2599 +
2600 +diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
2601 +index 81e2fdff227e..9ab9427405f3 100644
2602 +--- a/fs/nfs/internal.h
2603 ++++ b/fs/nfs/internal.h
2604 +@@ -773,3 +773,13 @@ static inline bool nfs_error_is_fatal(int err)
2605 + }
2606 + }
2607 +
2608 ++static inline bool nfs_error_is_fatal_on_server(int err)
2609 ++{
2610 ++ switch (err) {
2611 ++ case 0:
2612 ++ case -ERESTARTSYS:
2613 ++ case -EINTR:
2614 ++ return false;
2615 ++ }
2616 ++ return nfs_error_is_fatal(err);
2617 ++}
2618 +diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
2619 +index 3a507c42c1ca..336643b82188 100644
2620 +--- a/fs/nfs/nfs4file.c
2621 ++++ b/fs/nfs/nfs4file.c
2622 +@@ -73,13 +73,13 @@ nfs4_file_open(struct inode *inode, struct file *filp)
2623 + if (IS_ERR(inode)) {
2624 + err = PTR_ERR(inode);
2625 + switch (err) {
2626 +- case -EPERM:
2627 +- case -EACCES:
2628 +- case -EDQUOT:
2629 +- case -ENOSPC:
2630 +- case -EROFS:
2631 +- goto out_put_ctx;
2632 + default:
2633 ++ goto out_put_ctx;
2634 ++ case -ENOENT:
2635 ++ case -ESTALE:
2636 ++ case -EISDIR:
2637 ++ case -ENOTDIR:
2638 ++ case -ELOOP:
2639 + goto out_drop;
2640 + }
2641 + }
2642 +@@ -187,7 +187,11 @@ static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off,
2643 + bool same_inode = false;
2644 + int ret;
2645 +
2646 +- if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
2647 ++ /* NFS does not support deduplication. */
2648 ++ if (remap_flags & REMAP_FILE_DEDUP)
2649 ++ return -EOPNOTSUPP;
2650 ++
2651 ++ if (remap_flags & ~REMAP_FILE_ADVISORY)
2652 + return -EINVAL;
2653 +
2654 + /* check alignment w.r.t. clone_blksize */
2655 +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
2656 +index 8b6211753228..eae584dbfa08 100644
2657 +--- a/fs/nfs/pagelist.c
2658 ++++ b/fs/nfs/pagelist.c
2659 +@@ -590,7 +590,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
2660 + }
2661 +
2662 + hdr->res.fattr = &hdr->fattr;
2663 +- hdr->res.count = count;
2664 ++ hdr->res.count = 0;
2665 + hdr->res.eof = 0;
2666 + hdr->res.verf = &hdr->verf;
2667 + nfs_fattr_init(&hdr->fattr);
2668 +diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
2669 +index 5552fa8b6e12..0f7288b94633 100644
2670 +--- a/fs/nfs/proc.c
2671 ++++ b/fs/nfs/proc.c
2672 +@@ -594,7 +594,8 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
2673 + /* Emulate the eof flag, which isn't normally needed in NFSv2
2674 + * as it is guaranteed to always return the file attributes
2675 + */
2676 +- if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
2677 ++ if ((hdr->res.count == 0 && hdr->args.count > 0) ||
2678 ++ hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
2679 + hdr->res.eof = 1;
2680 + }
2681 + return 0;
2682 +@@ -615,8 +616,10 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task,
2683 +
2684 + static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
2685 + {
2686 +- if (task->tk_status >= 0)
2687 ++ if (task->tk_status >= 0) {
2688 ++ hdr->res.count = hdr->args.count;
2689 + nfs_writeback_update_inode(hdr);
2690 ++ }
2691 + return 0;
2692 + }
2693 +
2694 +diff --git a/fs/nfs/read.c b/fs/nfs/read.c
2695 +index c19841c82b6a..cfe0b586eadd 100644
2696 +--- a/fs/nfs/read.c
2697 ++++ b/fs/nfs/read.c
2698 +@@ -91,19 +91,25 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
2699 + }
2700 + EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
2701 +
2702 +-static void nfs_readpage_release(struct nfs_page *req)
2703 ++static void nfs_readpage_release(struct nfs_page *req, int error)
2704 + {
2705 + struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
2706 ++ struct page *page = req->wb_page;
2707 +
2708 + dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
2709 + (unsigned long long)NFS_FILEID(inode), req->wb_bytes,
2710 + (long long)req_offset(req));
2711 +
2712 ++ if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
2713 ++ SetPageError(page);
2714 + if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
2715 +- if (PageUptodate(req->wb_page))
2716 +- nfs_readpage_to_fscache(inode, req->wb_page, 0);
2717 ++ struct address_space *mapping = page_file_mapping(page);
2718 +
2719 +- unlock_page(req->wb_page);
2720 ++ if (PageUptodate(page))
2721 ++ nfs_readpage_to_fscache(inode, page, 0);
2722 ++ else if (!PageError(page) && !PagePrivate(page))
2723 ++ generic_error_remove_page(mapping, page);
2724 ++ unlock_page(page);
2725 + }
2726 + nfs_release_request(req);
2727 + }
2728 +@@ -131,7 +137,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
2729 + &nfs_async_read_completion_ops);
2730 + if (!nfs_pageio_add_request(&pgio, new)) {
2731 + nfs_list_remove_request(new);
2732 +- nfs_readpage_release(new);
2733 ++ nfs_readpage_release(new, pgio.pg_error);
2734 + }
2735 + nfs_pageio_complete(&pgio);
2736 +
2737 +@@ -153,6 +159,7 @@ static void nfs_page_group_set_uptodate(struct nfs_page *req)
2738 + static void nfs_read_completion(struct nfs_pgio_header *hdr)
2739 + {
2740 + unsigned long bytes = 0;
2741 ++ int error;
2742 +
2743 + if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
2744 + goto out;
2745 +@@ -179,14 +186,19 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr)
2746 + zero_user_segment(page, start, end);
2747 + }
2748 + }
2749 ++ error = 0;
2750 + bytes += req->wb_bytes;
2751 + if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
2752 + if (bytes <= hdr->good_bytes)
2753 + nfs_page_group_set_uptodate(req);
2754 ++ else {
2755 ++ error = hdr->error;
2756 ++ xchg(&nfs_req_openctx(req)->error, error);
2757 ++ }
2758 + } else
2759 + nfs_page_group_set_uptodate(req);
2760 + nfs_list_remove_request(req);
2761 +- nfs_readpage_release(req);
2762 ++ nfs_readpage_release(req, error);
2763 + }
2764 + out:
2765 + hdr->release(hdr);
2766 +@@ -213,7 +225,7 @@ nfs_async_read_error(struct list_head *head, int error)
2767 + while (!list_empty(head)) {
2768 + req = nfs_list_entry(head->next);
2769 + nfs_list_remove_request(req);
2770 +- nfs_readpage_release(req);
2771 ++ nfs_readpage_release(req, error);
2772 + }
2773 + }
2774 +
2775 +@@ -337,8 +349,13 @@ int nfs_readpage(struct file *file, struct page *page)
2776 + goto out;
2777 + }
2778 +
2779 ++ xchg(&ctx->error, 0);
2780 + error = nfs_readpage_async(ctx, inode, page);
2781 +-
2782 ++ if (!error) {
2783 ++ error = wait_on_page_locked_killable(page);
2784 ++ if (!PageUptodate(page) && !error)
2785 ++ error = xchg(&ctx->error, 0);
2786 ++ }
2787 + out:
2788 + put_nfs_open_context(ctx);
2789 + return error;
2790 +@@ -372,8 +389,8 @@ readpage_async_filler(void *data, struct page *page)
2791 + zero_user_segment(page, len, PAGE_SIZE);
2792 + if (!nfs_pageio_add_request(desc->pgio, new)) {
2793 + nfs_list_remove_request(new);
2794 +- nfs_readpage_release(new);
2795 + error = desc->pgio->pg_error;
2796 ++ nfs_readpage_release(new, error);
2797 + goto out;
2798 + }
2799 + return 0;
2800 +diff --git a/fs/nfs/write.c b/fs/nfs/write.c
2801 +index 059a7c38bc4f..ee6932c9819e 100644
2802 +--- a/fs/nfs/write.c
2803 ++++ b/fs/nfs/write.c
2804 +@@ -57,6 +57,7 @@ static const struct rpc_call_ops nfs_commit_ops;
2805 + static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
2806 + static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
2807 + static const struct nfs_rw_ops nfs_rw_write_ops;
2808 ++static void nfs_inode_remove_request(struct nfs_page *req);
2809 + static void nfs_clear_request_commit(struct nfs_page *req);
2810 + static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
2811 + struct inode *inode);
2812 +@@ -591,23 +592,13 @@ release_request:
2813 +
2814 + static void nfs_write_error(struct nfs_page *req, int error)
2815 + {
2816 ++ nfs_set_pageerror(page_file_mapping(req->wb_page));
2817 + nfs_mapping_set_error(req->wb_page, error);
2818 ++ nfs_inode_remove_request(req);
2819 + nfs_end_page_writeback(req);
2820 + nfs_release_request(req);
2821 + }
2822 +
2823 +-static bool
2824 +-nfs_error_is_fatal_on_server(int err)
2825 +-{
2826 +- switch (err) {
2827 +- case 0:
2828 +- case -ERESTARTSYS:
2829 +- case -EINTR:
2830 +- return false;
2831 +- }
2832 +- return nfs_error_is_fatal(err);
2833 +-}
2834 +-
2835 + /*
2836 + * Find an associated nfs write request, and prepare to flush it out
2837 + * May return an error if the user signalled nfs_wait_on_request().
2838 +@@ -615,7 +606,6 @@ nfs_error_is_fatal_on_server(int err)
2839 + static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
2840 + struct page *page)
2841 + {
2842 +- struct address_space *mapping;
2843 + struct nfs_page *req;
2844 + int ret = 0;
2845 +
2846 +@@ -630,12 +620,11 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
2847 + WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
2848 +
2849 + /* If there is a fatal error that covers this write, just exit */
2850 +- ret = 0;
2851 +- mapping = page_file_mapping(page);
2852 +- if (test_bit(AS_ENOSPC, &mapping->flags) ||
2853 +- test_bit(AS_EIO, &mapping->flags))
2854 ++ ret = pgio->pg_error;
2855 ++ if (nfs_error_is_fatal_on_server(ret))
2856 + goto out_launder;
2857 +
2858 ++ ret = 0;
2859 + if (!nfs_pageio_add_request(pgio, req)) {
2860 + ret = pgio->pg_error;
2861 + /*
2862 +@@ -647,6 +636,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
2863 + } else
2864 + ret = -EAGAIN;
2865 + nfs_redirty_request(req);
2866 ++ pgio->pg_error = 0;
2867 + } else
2868 + nfs_add_stats(page_file_mapping(page)->host,
2869 + NFSIOS_WRITEPAGES, 1);
2870 +@@ -666,7 +656,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
2871 + ret = nfs_page_async_flush(pgio, page);
2872 + if (ret == -EAGAIN) {
2873 + redirty_page_for_writepage(wbc, page);
2874 +- ret = 0;
2875 ++ ret = AOP_WRITEPAGE_ACTIVATE;
2876 + }
2877 + return ret;
2878 + }
2879 +@@ -685,10 +675,11 @@ static int nfs_writepage_locked(struct page *page,
2880 + nfs_pageio_init_write(&pgio, inode, 0,
2881 + false, &nfs_async_write_completion_ops);
2882 + err = nfs_do_writepage(page, wbc, &pgio);
2883 ++ pgio.pg_error = 0;
2884 + nfs_pageio_complete(&pgio);
2885 + if (err < 0)
2886 + return err;
2887 +- if (pgio.pg_error < 0)
2888 ++ if (nfs_error_is_fatal(pgio.pg_error))
2889 + return pgio.pg_error;
2890 + return 0;
2891 + }
2892 +@@ -698,7 +689,8 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
2893 + int ret;
2894 +
2895 + ret = nfs_writepage_locked(page, wbc);
2896 +- unlock_page(page);
2897 ++ if (ret != AOP_WRITEPAGE_ACTIVATE)
2898 ++ unlock_page(page);
2899 + return ret;
2900 + }
2901 +
2902 +@@ -707,7 +699,8 @@ static int nfs_writepages_callback(struct page *page, struct writeback_control *
2903 + int ret;
2904 +
2905 + ret = nfs_do_writepage(page, wbc, data);
2906 +- unlock_page(page);
2907 ++ if (ret != AOP_WRITEPAGE_ACTIVATE)
2908 ++ unlock_page(page);
2909 + return ret;
2910 + }
2911 +
2912 +@@ -734,6 +727,7 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
2913 + &nfs_async_write_completion_ops);
2914 + pgio.pg_io_completion = ioc;
2915 + err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
2916 ++ pgio.pg_error = 0;
2917 + nfs_pageio_complete(&pgio);
2918 + nfs_io_completion_put(ioc);
2919 +
2920 +@@ -742,7 +736,7 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
2921 + if (err < 0)
2922 + goto out_err;
2923 + err = pgio.pg_error;
2924 +- if (err < 0)
2925 ++ if (nfs_error_is_fatal(err))
2926 + goto out_err;
2927 + return 0;
2928 + out_err:
2929 +diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
2930 +index 28a2d12a1029..a8279280e88d 100644
2931 +--- a/fs/overlayfs/ovl_entry.h
2932 ++++ b/fs/overlayfs/ovl_entry.h
2933 +@@ -66,6 +66,7 @@ struct ovl_fs {
2934 + bool workdir_locked;
2935 + /* Traps in ovl inode cache */
2936 + struct inode *upperdir_trap;
2937 ++ struct inode *workbasedir_trap;
2938 + struct inode *workdir_trap;
2939 + struct inode *indexdir_trap;
2940 + /* Inode numbers in all layers do not use the high xino_bits */
2941 +diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
2942 +index b368e2e102fa..afbcb116a7f1 100644
2943 +--- a/fs/overlayfs/super.c
2944 ++++ b/fs/overlayfs/super.c
2945 +@@ -212,6 +212,7 @@ static void ovl_free_fs(struct ovl_fs *ofs)
2946 + {
2947 + unsigned i;
2948 +
2949 ++ iput(ofs->workbasedir_trap);
2950 + iput(ofs->indexdir_trap);
2951 + iput(ofs->workdir_trap);
2952 + iput(ofs->upperdir_trap);
2953 +@@ -1003,6 +1004,25 @@ static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
2954 + return 0;
2955 + }
2956 +
2957 ++/*
2958 ++ * Determine how we treat concurrent use of upperdir/workdir based on the
2959 ++ * index feature. This is papering over mount leaks of container runtimes,
2960 ++ * for example, an old overlay mount is leaked and now its upperdir is
2961 ++ * attempted to be used as a lower layer in a new overlay mount.
2962 ++ */
2963 ++static int ovl_report_in_use(struct ovl_fs *ofs, const char *name)
2964 ++{
2965 ++ if (ofs->config.index) {
2966 ++ pr_err("overlayfs: %s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n",
2967 ++ name);
2968 ++ return -EBUSY;
2969 ++ } else {
2970 ++ pr_warn("overlayfs: %s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n",
2971 ++ name);
2972 ++ return 0;
2973 ++ }
2974 ++}
2975 ++
2976 + static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
2977 + struct path *upperpath)
2978 + {
2979 +@@ -1040,14 +1060,12 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
2980 + upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME);
2981 + ofs->upper_mnt = upper_mnt;
2982 +
2983 +- err = -EBUSY;
2984 + if (ovl_inuse_trylock(ofs->upper_mnt->mnt_root)) {
2985 + ofs->upperdir_locked = true;
2986 +- } else if (ofs->config.index) {
2987 +- pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
2988 +- goto out;
2989 + } else {
2990 +- pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
2991 ++ err = ovl_report_in_use(ofs, "upperdir");
2992 ++ if (err)
2993 ++ goto out;
2994 + }
2995 +
2996 + err = 0;
2997 +@@ -1157,16 +1175,19 @@ static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs,
2998 +
2999 + ofs->workbasedir = dget(workpath.dentry);
3000 +
3001 +- err = -EBUSY;
3002 + if (ovl_inuse_trylock(ofs->workbasedir)) {
3003 + ofs->workdir_locked = true;
3004 +- } else if (ofs->config.index) {
3005 +- pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n");
3006 +- goto out;
3007 + } else {
3008 +- pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
3009 ++ err = ovl_report_in_use(ofs, "workdir");
3010 ++ if (err)
3011 ++ goto out;
3012 + }
3013 +
3014 ++ err = ovl_setup_trap(sb, ofs->workbasedir, &ofs->workbasedir_trap,
3015 ++ "workdir");
3016 ++ if (err)
3017 ++ goto out;
3018 ++
3019 + err = ovl_make_workdir(sb, ofs, &workpath);
3020 +
3021 + out:
3022 +@@ -1313,16 +1334,16 @@ static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
3023 + if (err < 0)
3024 + goto out;
3025 +
3026 +- err = -EBUSY;
3027 +- if (ovl_is_inuse(stack[i].dentry)) {
3028 +- pr_err("overlayfs: lowerdir is in-use as upperdir/workdir\n");
3029 +- goto out;
3030 +- }
3031 +-
3032 + err = ovl_setup_trap(sb, stack[i].dentry, &trap, "lowerdir");
3033 + if (err)
3034 + goto out;
3035 +
3036 ++ if (ovl_is_inuse(stack[i].dentry)) {
3037 ++ err = ovl_report_in_use(ofs, "lowerdir");
3038 ++ if (err)
3039 ++ goto out;
3040 ++ }
3041 ++
3042 + mnt = clone_private_mount(&stack[i]);
3043 + err = PTR_ERR(mnt);
3044 + if (IS_ERR(mnt)) {
3045 +@@ -1469,8 +1490,8 @@ out_err:
3046 + * - another layer of this overlayfs instance
3047 + * - upper/work dir of any overlayfs instance
3048 + */
3049 +-static int ovl_check_layer(struct super_block *sb, struct dentry *dentry,
3050 +- const char *name)
3051 ++static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
3052 ++ struct dentry *dentry, const char *name)
3053 + {
3054 + struct dentry *next = dentry, *parent;
3055 + int err = 0;
3056 +@@ -1482,13 +1503,11 @@ static int ovl_check_layer(struct super_block *sb, struct dentry *dentry,
3057 +
3058 + /* Walk back ancestors to root (inclusive) looking for traps */
3059 + while (!err && parent != next) {
3060 +- if (ovl_is_inuse(parent)) {
3061 +- err = -EBUSY;
3062 +- pr_err("overlayfs: %s path overlapping in-use upperdir/workdir\n",
3063 +- name);
3064 +- } else if (ovl_lookup_trap_inode(sb, parent)) {
3065 ++ if (ovl_lookup_trap_inode(sb, parent)) {
3066 + err = -ELOOP;
3067 + pr_err("overlayfs: overlapping %s path\n", name);
3068 ++ } else if (ovl_is_inuse(parent)) {
3069 ++ err = ovl_report_in_use(ofs, name);
3070 + }
3071 + next = parent;
3072 + parent = dget_parent(next);
3073 +@@ -1509,7 +1528,8 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
3074 + int i, err;
3075 +
3076 + if (ofs->upper_mnt) {
3077 +- err = ovl_check_layer(sb, ofs->upper_mnt->mnt_root, "upperdir");
3078 ++ err = ovl_check_layer(sb, ofs, ofs->upper_mnt->mnt_root,
3079 ++ "upperdir");
3080 + if (err)
3081 + return err;
3082 +
3083 +@@ -1520,13 +1540,14 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
3084 + * workbasedir. In that case, we already have their traps in
3085 + * inode cache and we will catch that case on lookup.
3086 + */
3087 +- err = ovl_check_layer(sb, ofs->workbasedir, "workdir");
3088 ++ err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir");
3089 + if (err)
3090 + return err;
3091 + }
3092 +
3093 + for (i = 0; i < ofs->numlower; i++) {
3094 +- err = ovl_check_layer(sb, ofs->lower_layers[i].mnt->mnt_root,
3095 ++ err = ovl_check_layer(sb, ofs,
3096 ++ ofs->lower_layers[i].mnt->mnt_root,
3097 + "lowerdir");
3098 + if (err)
3099 + return err;
3100 +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
3101 +index 6a8dd4af0147..ba8dc520cc79 100644
3102 +--- a/include/linux/intel-iommu.h
3103 ++++ b/include/linux/intel-iommu.h
3104 +@@ -346,7 +346,6 @@ enum {
3105 + #define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1))
3106 +
3107 + #define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
3108 +-#define QI_EIOTLB_GL(gl) (((u64)gl) << 7)
3109 + #define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
3110 + #define QI_EIOTLB_AM(am) (((u64)am))
3111 + #define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
3112 +@@ -378,8 +377,6 @@ enum {
3113 + #define QI_RESP_INVALID 0x1
3114 + #define QI_RESP_FAILURE 0xf
3115 +
3116 +-#define QI_GRAN_ALL_ALL 0
3117 +-#define QI_GRAN_NONG_ALL 1
3118 + #define QI_GRAN_NONG_PASID 2
3119 + #define QI_GRAN_PSI_PASID 3
3120 +
3121 +diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
3122 +index a16fbe9a2a67..aa99c73c3fbd 100644
3123 +--- a/include/net/pkt_sched.h
3124 ++++ b/include/net/pkt_sched.h
3125 +@@ -118,7 +118,12 @@ void __qdisc_run(struct Qdisc *q);
3126 + static inline void qdisc_run(struct Qdisc *q)
3127 + {
3128 + if (qdisc_run_begin(q)) {
3129 +- __qdisc_run(q);
3130 ++ /* NOLOCK qdisc must check 'state' under the qdisc seqlock
3131 ++ * to avoid racing with dev_qdisc_reset()
3132 ++ */
3133 ++ if (!(q->flags & TCQ_F_NOLOCK) ||
3134 ++ likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3135 ++ __qdisc_run(q);
3136 + qdisc_run_end(q);
3137 + }
3138 + }
3139 +diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
3140 +index 8a5f70c7cdf2..5e69fba181bc 100644
3141 +--- a/include/net/sock_reuseport.h
3142 ++++ b/include/net/sock_reuseport.h
3143 +@@ -21,7 +21,8 @@ struct sock_reuseport {
3144 + unsigned int synq_overflow_ts;
3145 + /* ID stays the same even after the size of socks[] grows. */
3146 + unsigned int reuseport_id;
3147 +- bool bind_inany;
3148 ++ unsigned int bind_inany:1;
3149 ++ unsigned int has_conns:1;
3150 + struct bpf_prog __rcu *prog; /* optional BPF sock selector */
3151 + struct sock *socks[0]; /* array of sock pointers */
3152 + };
3153 +@@ -35,6 +36,24 @@ extern struct sock *reuseport_select_sock(struct sock *sk,
3154 + struct sk_buff *skb,
3155 + int hdr_len);
3156 + extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
3157 ++
3158 ++static inline bool reuseport_has_conns(struct sock *sk, bool set)
3159 ++{
3160 ++ struct sock_reuseport *reuse;
3161 ++ bool ret = false;
3162 ++
3163 ++ rcu_read_lock();
3164 ++ reuse = rcu_dereference(sk->sk_reuseport_cb);
3165 ++ if (reuse) {
3166 ++ if (set)
3167 ++ reuse->has_conns = 1;
3168 ++ ret = reuse->has_conns;
3169 ++ }
3170 ++ rcu_read_unlock();
3171 ++
3172 ++ return ret;
3173 ++}
3174 ++
3175 + int reuseport_get_id(struct sock_reuseport *reuse);
3176 +
3177 + #endif /* _SOCK_REUSEPORT_H */
3178 +diff --git a/include/uapi/linux/netfilter/xt_nfacct.h b/include/uapi/linux/netfilter/xt_nfacct.h
3179 +index 5c8a4d760ee3..b5123ab8d54a 100644
3180 +--- a/include/uapi/linux/netfilter/xt_nfacct.h
3181 ++++ b/include/uapi/linux/netfilter/xt_nfacct.h
3182 +@@ -11,4 +11,9 @@ struct xt_nfacct_match_info {
3183 + struct nf_acct *nfacct;
3184 + };
3185 +
3186 ++struct xt_nfacct_match_info_v1 {
3187 ++ char name[NFACCT_NAME_MAX];
3188 ++ struct nf_acct *nfacct __attribute__((aligned(8)));
3189 ++};
3190 ++
3191 + #endif /* _XT_NFACCT_MATCH_H */
3192 +diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
3193 +index 95a260f9214b..136ce049c4ad 100644
3194 +--- a/kernel/kallsyms.c
3195 ++++ b/kernel/kallsyms.c
3196 +@@ -263,8 +263,10 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
3197 + {
3198 + char namebuf[KSYM_NAME_LEN];
3199 +
3200 +- if (is_ksym_addr(addr))
3201 +- return !!get_symbol_pos(addr, symbolsize, offset);
3202 ++ if (is_ksym_addr(addr)) {
3203 ++ get_symbol_pos(addr, symbolsize, offset);
3204 ++ return 1;
3205 ++ }
3206 + return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) ||
3207 + !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
3208 + }
3209 +diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
3210 +index fad95ef64e01..bc06e3cdfa84 100644
3211 +--- a/net/batman-adv/bat_v_ogm.c
3212 ++++ b/net/batman-adv/bat_v_ogm.c
3213 +@@ -631,17 +631,23 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
3214 + * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated
3215 + * @buff_pos: current position in the skb
3216 + * @packet_len: total length of the skb
3217 +- * @tvlv_len: tvlv length of the previously considered OGM
3218 ++ * @ogm2_packet: potential OGM2 in buffer
3219 + *
3220 + * Return: true if there is enough space for another OGM, false otherwise.
3221 + */
3222 +-static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
3223 +- __be16 tvlv_len)
3224 ++static bool
3225 ++batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
3226 ++ const struct batadv_ogm2_packet *ogm2_packet)
3227 + {
3228 + int next_buff_pos = 0;
3229 +
3230 +- next_buff_pos += buff_pos + BATADV_OGM2_HLEN;
3231 +- next_buff_pos += ntohs(tvlv_len);
3232 ++ /* check if there is enough space for the header */
3233 ++ next_buff_pos += buff_pos + sizeof(*ogm2_packet);
3234 ++ if (next_buff_pos > packet_len)
3235 ++ return false;
3236 ++
3237 ++ /* check if there is enough space for the optional TVLV */
3238 ++ next_buff_pos += ntohs(ogm2_packet->tvlv_len);
3239 +
3240 + return (next_buff_pos <= packet_len) &&
3241 + (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
3242 +@@ -818,7 +824,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
3243 + ogm_packet = (struct batadv_ogm2_packet *)skb->data;
3244 +
3245 + while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
3246 +- ogm_packet->tvlv_len)) {
3247 ++ ogm_packet)) {
3248 + batadv_v_ogm_process(skb, ogm_offset, if_incoming);
3249 +
3250 + ogm_offset += BATADV_OGM2_HLEN;
3251 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
3252 +index c8177a89f52c..4096d8a74a2b 100644
3253 +--- a/net/bridge/netfilter/ebtables.c
3254 ++++ b/net/bridge/netfilter/ebtables.c
3255 +@@ -221,7 +221,7 @@ unsigned int ebt_do_table(struct sk_buff *skb,
3256 + return NF_DROP;
3257 + }
3258 +
3259 +- ADD_COUNTER(*(counter_base + i), 1, skb->len);
3260 ++ ADD_COUNTER(*(counter_base + i), skb->len, 1);
3261 +
3262 + /* these should only watch: not modify, nor tell us
3263 + * what to do with the packet
3264 +@@ -959,8 +959,8 @@ static void get_counters(const struct ebt_counter *oldcounters,
3265 + continue;
3266 + counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
3267 + for (i = 0; i < nentries; i++)
3268 +- ADD_COUNTER(counters[i], counter_base[i].pcnt,
3269 +- counter_base[i].bcnt);
3270 ++ ADD_COUNTER(counters[i], counter_base[i].bcnt,
3271 ++ counter_base[i].pcnt);
3272 + }
3273 + }
3274 +
3275 +@@ -1280,7 +1280,7 @@ static int do_update_counters(struct net *net, const char *name,
3276 +
3277 + /* we add to the counters of the first cpu */
3278 + for (i = 0; i < num_counters; i++)
3279 +- ADD_COUNTER(t->private->counters[i], tmp[i].pcnt, tmp[i].bcnt);
3280 ++ ADD_COUNTER(t->private->counters[i], tmp[i].bcnt, tmp[i].pcnt);
3281 +
3282 + write_unlock_bh(&t->lock);
3283 + ret = 0;
3284 +diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
3285 +index 5d6724cee38f..4f75df40fb12 100644
3286 +--- a/net/ceph/crypto.c
3287 ++++ b/net/ceph/crypto.c
3288 +@@ -136,8 +136,10 @@ void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
3289 + if (key) {
3290 + kfree(key->key);
3291 + key->key = NULL;
3292 +- crypto_free_sync_skcipher(key->tfm);
3293 +- key->tfm = NULL;
3294 ++ if (key->tfm) {
3295 ++ crypto_free_sync_skcipher(key->tfm);
3296 ++ key->tfm = NULL;
3297 ++ }
3298 + }
3299 + }
3300 +
3301 +diff --git a/net/core/dev.c b/net/core/dev.c
3302 +index 2ff556906b5d..828ecca03c07 100644
3303 +--- a/net/core/dev.c
3304 ++++ b/net/core/dev.c
3305 +@@ -3475,18 +3475,22 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3306 + qdisc_calculate_pkt_len(skb, q);
3307 +
3308 + if (q->flags & TCQ_F_NOLOCK) {
3309 +- if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3310 +- __qdisc_drop(skb, &to_free);
3311 +- rc = NET_XMIT_DROP;
3312 +- } else if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
3313 +- qdisc_run_begin(q)) {
3314 ++ if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
3315 ++ qdisc_run_begin(q)) {
3316 ++ if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
3317 ++ &q->state))) {
3318 ++ __qdisc_drop(skb, &to_free);
3319 ++ rc = NET_XMIT_DROP;
3320 ++ goto end_run;
3321 ++ }
3322 + qdisc_bstats_cpu_update(q, skb);
3323 +
3324 ++ rc = NET_XMIT_SUCCESS;
3325 + if (sch_direct_xmit(skb, q, dev, txq, NULL, true))
3326 + __qdisc_run(q);
3327 +
3328 ++end_run:
3329 + qdisc_run_end(q);
3330 +- rc = NET_XMIT_SUCCESS;
3331 + } else {
3332 + rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
3333 + qdisc_run(q);
3334 +diff --git a/net/core/filter.c b/net/core/filter.c
3335 +index 534c310bb089..7aee6f368754 100644
3336 +--- a/net/core/filter.c
3337 ++++ b/net/core/filter.c
3338 +@@ -8553,13 +8553,13 @@ sk_reuseport_is_valid_access(int off, int size,
3339 + return size == size_default;
3340 +
3341 + /* Fields that allow narrowing */
3342 +- case offsetof(struct sk_reuseport_md, eth_protocol):
3343 ++ case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
3344 + if (size < FIELD_SIZEOF(struct sk_buff, protocol))
3345 + return false;
3346 + /* fall through */
3347 +- case offsetof(struct sk_reuseport_md, ip_protocol):
3348 +- case offsetof(struct sk_reuseport_md, bind_inany):
3349 +- case offsetof(struct sk_reuseport_md, len):
3350 ++ case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
3351 ++ case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
3352 ++ case bpf_ctx_range(struct sk_reuseport_md, len):
3353 + bpf_ctx_record_field_size(info, size_default);
3354 + return bpf_ctx_narrow_access_ok(off, size, size_default);
3355 +
3356 +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
3357 +index edd622956083..b15c0c0f6e55 100644
3358 +--- a/net/core/flow_dissector.c
3359 ++++ b/net/core/flow_dissector.c
3360 +@@ -138,8 +138,8 @@ int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
3361 + mutex_unlock(&flow_dissector_mutex);
3362 + return -ENOENT;
3363 + }
3364 +- bpf_prog_put(attached);
3365 + RCU_INIT_POINTER(net->flow_dissector_prog, NULL);
3366 ++ bpf_prog_put(attached);
3367 + mutex_unlock(&flow_dissector_mutex);
3368 + return 0;
3369 + }
3370 +diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
3371 +index dc4aefdf2a08..2f89777763ad 100644
3372 +--- a/net/core/sock_reuseport.c
3373 ++++ b/net/core/sock_reuseport.c
3374 +@@ -295,8 +295,19 @@ struct sock *reuseport_select_sock(struct sock *sk,
3375 +
3376 + select_by_hash:
3377 + /* no bpf or invalid bpf result: fall back to hash usage */
3378 +- if (!sk2)
3379 +- sk2 = reuse->socks[reciprocal_scale(hash, socks)];
3380 ++ if (!sk2) {
3381 ++ int i, j;
3382 ++
3383 ++ i = j = reciprocal_scale(hash, socks);
3384 ++ while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
3385 ++ i++;
3386 ++ if (i >= reuse->num_socks)
3387 ++ i = 0;
3388 ++ if (i == j)
3389 ++ goto out;
3390 ++ }
3391 ++ sk2 = reuse->socks[i];
3392 ++ }
3393 + }
3394 +
3395 + out:
3396 +diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
3397 +index 820dd8da57fc..1739b98a8f4b 100644
3398 +--- a/net/dsa/dsa2.c
3399 ++++ b/net/dsa/dsa2.c
3400 +@@ -577,6 +577,8 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master)
3401 + tag_protocol = ds->ops->get_tag_protocol(ds, dp->index);
3402 + tag_ops = dsa_tag_driver_get(tag_protocol);
3403 + if (IS_ERR(tag_ops)) {
3404 ++ if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
3405 ++ return -EPROBE_DEFER;
3406 + dev_warn(ds->dev, "No tagger for this switch\n");
3407 + return PTR_ERR(tag_ops);
3408 + }
3409 +diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
3410 +index 7bd29e694603..9a0fe0c2fa02 100644
3411 +--- a/net/ipv4/datagram.c
3412 ++++ b/net/ipv4/datagram.c
3413 +@@ -15,6 +15,7 @@
3414 + #include <net/sock.h>
3415 + #include <net/route.h>
3416 + #include <net/tcp_states.h>
3417 ++#include <net/sock_reuseport.h>
3418 +
3419 + int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
3420 + {
3421 +@@ -69,6 +70,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
3422 + }
3423 + inet->inet_daddr = fl4->daddr;
3424 + inet->inet_dport = usin->sin_port;
3425 ++ reuseport_has_conns(sk, true);
3426 + sk->sk_state = TCP_ESTABLISHED;
3427 + sk_set_txhash(sk);
3428 + inet->inet_id = jiffies;
3429 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
3430 +index eed59c847722..acab7738f733 100644
3431 +--- a/net/ipv4/udp.c
3432 ++++ b/net/ipv4/udp.c
3433 +@@ -434,12 +434,13 @@ static struct sock *udp4_lib_lookup2(struct net *net,
3434 + score = compute_score(sk, net, saddr, sport,
3435 + daddr, hnum, dif, sdif, exact_dif);
3436 + if (score > badness) {
3437 +- if (sk->sk_reuseport) {
3438 ++ if (sk->sk_reuseport &&
3439 ++ sk->sk_state != TCP_ESTABLISHED) {
3440 + hash = udp_ehashfn(net, daddr, hnum,
3441 + saddr, sport);
3442 + result = reuseport_select_sock(sk, hash, skb,
3443 + sizeof(struct udphdr));
3444 +- if (result)
3445 ++ if (result && !reuseport_has_conns(sk, false))
3446 + return result;
3447 + }
3448 + badness = score;
3449 +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
3450 +index 9d78c907b918..694168e2302e 100644
3451 +--- a/net/ipv6/datagram.c
3452 ++++ b/net/ipv6/datagram.c
3453 +@@ -27,6 +27,7 @@
3454 + #include <net/ip6_route.h>
3455 + #include <net/tcp_states.h>
3456 + #include <net/dsfield.h>
3457 ++#include <net/sock_reuseport.h>
3458 +
3459 + #include <linux/errqueue.h>
3460 + #include <linux/uaccess.h>
3461 +@@ -254,6 +255,7 @@ ipv4_connected:
3462 + goto out;
3463 + }
3464 +
3465 ++ reuseport_has_conns(sk, true);
3466 + sk->sk_state = TCP_ESTABLISHED;
3467 + sk_set_txhash(sk);
3468 + out:
3469 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
3470 +index dd2d0b963260..d5779d6a6065 100644
3471 +--- a/net/ipv6/ip6_gre.c
3472 ++++ b/net/ipv6/ip6_gre.c
3473 +@@ -968,7 +968,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
3474 + if (unlikely(!tun_info ||
3475 + !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
3476 + ip_tunnel_info_af(tun_info) != AF_INET6))
3477 +- return -EINVAL;
3478 ++ goto tx_err;
3479 +
3480 + key = &tun_info->key;
3481 + memset(&fl6, 0, sizeof(fl6));
3482 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
3483 +index 70b01bd95022..1258be19e186 100644
3484 +--- a/net/ipv6/udp.c
3485 ++++ b/net/ipv6/udp.c
3486 +@@ -168,13 +168,14 @@ static struct sock *udp6_lib_lookup2(struct net *net,
3487 + score = compute_score(sk, net, saddr, sport,
3488 + daddr, hnum, dif, sdif, exact_dif);
3489 + if (score > badness) {
3490 +- if (sk->sk_reuseport) {
3491 ++ if (sk->sk_reuseport &&
3492 ++ sk->sk_state != TCP_ESTABLISHED) {
3493 + hash = udp6_ehashfn(net, daddr, hnum,
3494 + saddr, sport);
3495 +
3496 + result = reuseport_select_sock(sk, hash, skb,
3497 + sizeof(struct udphdr));
3498 +- if (result)
3499 ++ if (result && !reuseport_has_conns(sk, false))
3500 + return result;
3501 + }
3502 + result = sk;
3503 +diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
3504 +index 8c6c11bab5b6..b5df6c4c159c 100644
3505 +--- a/net/netfilter/nf_conntrack_ftp.c
3506 ++++ b/net/netfilter/nf_conntrack_ftp.c
3507 +@@ -322,7 +322,7 @@ static int find_pattern(const char *data, size_t dlen,
3508 + i++;
3509 + }
3510 +
3511 +- pr_debug("Skipped up to `%c'!\n", skip);
3512 ++ pr_debug("Skipped up to 0x%hhx delimiter!\n", skip);
3513 +
3514 + *numoff = i;
3515 + *numlen = getnum(data + i, dlen - i, cmd, term, numoff);
3516 +diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
3517 +index e0d392cb3075..0006503d2da9 100644
3518 +--- a/net/netfilter/nf_conntrack_standalone.c
3519 ++++ b/net/netfilter/nf_conntrack_standalone.c
3520 +@@ -1037,8 +1037,13 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
3521 + table[NF_SYSCTL_CT_COUNT].data = &net->ct.count;
3522 + table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum;
3523 + table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid;
3524 ++ table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct;
3525 ++ table[NF_SYSCTL_CT_HELPER].data = &net->ct.sysctl_auto_assign_helper;
3526 + #ifdef CONFIG_NF_CONNTRACK_EVENTS
3527 + table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events;
3528 ++#endif
3529 ++#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
3530 ++ table[NF_SYSCTL_CT_TIMESTAMP].data = &net->ct.sysctl_tstamp;
3531 + #endif
3532 + table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout;
3533 + table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout;
3534 +diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
3535 +index 49248fe5847a..55106bebf2b5 100644
3536 +--- a/net/netfilter/nf_flow_table_core.c
3537 ++++ b/net/netfilter/nf_flow_table_core.c
3538 +@@ -218,7 +218,7 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
3539 + return err;
3540 + }
3541 +
3542 +- flow->timeout = (u32)jiffies;
3543 ++ flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
3544 + return 0;
3545 + }
3546 + EXPORT_SYMBOL_GPL(flow_offload_add);
3547 +diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
3548 +index d68c801dd614..b9e7dd6e60ce 100644
3549 +--- a/net/netfilter/nf_flow_table_ip.c
3550 ++++ b/net/netfilter/nf_flow_table_ip.c
3551 +@@ -228,7 +228,6 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
3552 + {
3553 + skb_orphan(skb);
3554 + skb_dst_set_noref(skb, dst);
3555 +- skb->tstamp = 0;
3556 + dst_output(state->net, state->sk, skb);
3557 + return NF_STOLEN;
3558 + }
3559 +@@ -284,6 +283,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
3560 + flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
3561 + iph = ip_hdr(skb);
3562 + ip_decrease_ttl(iph);
3563 ++ skb->tstamp = 0;
3564 +
3565 + if (unlikely(dst_xfrm(&rt->dst))) {
3566 + memset(skb->cb, 0, sizeof(struct inet_skb_parm));
3567 +@@ -512,6 +512,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
3568 + flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
3569 + ip6h = ipv6_hdr(skb);
3570 + ip6h->hop_limit--;
3571 ++ skb->tstamp = 0;
3572 +
3573 + if (unlikely(dst_xfrm(&rt->dst))) {
3574 + memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
3575 +diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
3576 +index 060a4ed46d5e..01705ad74a9a 100644
3577 +--- a/net/netfilter/nft_flow_offload.c
3578 ++++ b/net/netfilter/nft_flow_offload.c
3579 +@@ -149,6 +149,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx,
3580 + return nft_chain_validate_hooks(ctx->chain, hook_mask);
3581 + }
3582 +
3583 ++static const struct nla_policy nft_flow_offload_policy[NFTA_FLOW_MAX + 1] = {
3584 ++ [NFTA_FLOW_TABLE_NAME] = { .type = NLA_STRING,
3585 ++ .len = NFT_NAME_MAXLEN - 1 },
3586 ++};
3587 ++
3588 + static int nft_flow_offload_init(const struct nft_ctx *ctx,
3589 + const struct nft_expr *expr,
3590 + const struct nlattr * const tb[])
3591 +@@ -207,6 +212,7 @@ static const struct nft_expr_ops nft_flow_offload_ops = {
3592 + static struct nft_expr_type nft_flow_offload_type __read_mostly = {
3593 + .name = "flow_offload",
3594 + .ops = &nft_flow_offload_ops,
3595 ++ .policy = nft_flow_offload_policy,
3596 + .maxattr = NFTA_FLOW_MAX,
3597 + .owner = THIS_MODULE,
3598 + };
3599 +diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
3600 +index d0ab1adf5bff..5aab6df74e0f 100644
3601 +--- a/net/netfilter/xt_nfacct.c
3602 ++++ b/net/netfilter/xt_nfacct.c
3603 +@@ -54,25 +54,39 @@ nfacct_mt_destroy(const struct xt_mtdtor_param *par)
3604 + nfnl_acct_put(info->nfacct);
3605 + }
3606 +
3607 +-static struct xt_match nfacct_mt_reg __read_mostly = {
3608 +- .name = "nfacct",
3609 +- .family = NFPROTO_UNSPEC,
3610 +- .checkentry = nfacct_mt_checkentry,
3611 +- .match = nfacct_mt,
3612 +- .destroy = nfacct_mt_destroy,
3613 +- .matchsize = sizeof(struct xt_nfacct_match_info),
3614 +- .usersize = offsetof(struct xt_nfacct_match_info, nfacct),
3615 +- .me = THIS_MODULE,
3616 ++static struct xt_match nfacct_mt_reg[] __read_mostly = {
3617 ++ {
3618 ++ .name = "nfacct",
3619 ++ .revision = 0,
3620 ++ .family = NFPROTO_UNSPEC,
3621 ++ .checkentry = nfacct_mt_checkentry,
3622 ++ .match = nfacct_mt,
3623 ++ .destroy = nfacct_mt_destroy,
3624 ++ .matchsize = sizeof(struct xt_nfacct_match_info),
3625 ++ .usersize = offsetof(struct xt_nfacct_match_info, nfacct),
3626 ++ .me = THIS_MODULE,
3627 ++ },
3628 ++ {
3629 ++ .name = "nfacct",
3630 ++ .revision = 1,
3631 ++ .family = NFPROTO_UNSPEC,
3632 ++ .checkentry = nfacct_mt_checkentry,
3633 ++ .match = nfacct_mt,
3634 ++ .destroy = nfacct_mt_destroy,
3635 ++ .matchsize = sizeof(struct xt_nfacct_match_info_v1),
3636 ++ .usersize = offsetof(struct xt_nfacct_match_info_v1, nfacct),
3637 ++ .me = THIS_MODULE,
3638 ++ },
3639 + };
3640 +
3641 + static int __init nfacct_mt_init(void)
3642 + {
3643 +- return xt_register_match(&nfacct_mt_reg);
3644 ++ return xt_register_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
3645 + }
3646 +
3647 + static void __exit nfacct_mt_exit(void)
3648 + {
3649 +- xt_unregister_match(&nfacct_mt_reg);
3650 ++ xt_unregister_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
3651 + }
3652 +
3653 + module_init(nfacct_mt_init);
3654 +diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
3655 +index ead7c6022208..b92b22ce8abd 100644
3656 +--- a/net/netfilter/xt_physdev.c
3657 ++++ b/net/netfilter/xt_physdev.c
3658 +@@ -101,11 +101,9 @@ static int physdev_mt_check(const struct xt_mtchk_param *par)
3659 + if (info->bitmask & (XT_PHYSDEV_OP_OUT | XT_PHYSDEV_OP_ISOUT) &&
3660 + (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
3661 + info->invert & XT_PHYSDEV_OP_BRIDGED) &&
3662 +- par->hook_mask & ((1 << NF_INET_LOCAL_OUT) |
3663 +- (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) {
3664 ++ par->hook_mask & (1 << NF_INET_LOCAL_OUT)) {
3665 + pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n");
3666 +- if (par->hook_mask & (1 << NF_INET_LOCAL_OUT))
3667 +- return -EINVAL;
3668 ++ return -EINVAL;
3669 + }
3670 +
3671 + if (!brnf_probed) {
3672 +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
3673 +index ac28f6a5d70e..17bd8f539bc7 100644
3674 +--- a/net/sched/sch_generic.c
3675 ++++ b/net/sched/sch_generic.c
3676 +@@ -985,6 +985,9 @@ static void qdisc_destroy(struct Qdisc *qdisc)
3677 +
3678 + void qdisc_put(struct Qdisc *qdisc)
3679 + {
3680 ++ if (!qdisc)
3681 ++ return;
3682 ++
3683 + if (qdisc->flags & TCQ_F_BUILTIN ||
3684 + !refcount_dec_and_test(&qdisc->refcnt))
3685 + return;
3686 +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
3687 +index a680d28c231e..fbb85ea24ea0 100644
3688 +--- a/net/sunrpc/clnt.c
3689 ++++ b/net/sunrpc/clnt.c
3690 +@@ -2301,7 +2301,7 @@ call_status(struct rpc_task *task)
3691 + case -ECONNABORTED:
3692 + case -ENOTCONN:
3693 + rpc_force_rebind(clnt);
3694 +- /* fall through */
3695 ++ break;
3696 + case -EADDRINUSE:
3697 + rpc_delay(task, 3*HZ);
3698 + /* fall through */
3699 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
3700 +index 88a1de9def11..b28aaddbe08e 100644
3701 +--- a/net/wireless/nl80211.c
3702 ++++ b/net/wireless/nl80211.c
3703 +@@ -10640,9 +10640,11 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
3704 + hyst = wdev->cqm_config->rssi_hyst;
3705 + n = wdev->cqm_config->n_rssi_thresholds;
3706 +
3707 +- for (i = 0; i < n; i++)
3708 ++ for (i = 0; i < n; i++) {
3709 ++ i = array_index_nospec(i, n);
3710 + if (last < wdev->cqm_config->rssi_thresholds[i])
3711 + break;
3712 ++ }
3713 +
3714 + low_index = i - 1;
3715 + if (low_index >= 0) {
3716 +diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
3717 +index 9c6de4f114f8..9bd7b96027c1 100644
3718 +--- a/net/xdp/xdp_umem.c
3719 ++++ b/net/xdp/xdp_umem.c
3720 +@@ -368,7 +368,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
3721 + umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
3722 + if (!umem->pages) {
3723 + err = -ENOMEM;
3724 +- goto out_account;
3725 ++ goto out_pin;
3726 + }
3727 +
3728 + for (i = 0; i < umem->npgs; i++)
3729 +@@ -376,6 +376,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
3730 +
3731 + return 0;
3732 +
3733 ++out_pin:
3734 ++ xdp_umem_unpin_pages(umem);
3735 + out_account:
3736 + xdp_umem_unaccount_pages(umem);
3737 + return err;
3738 +diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
3739 +index a7a36209a193..6c2c05a75b54 100755
3740 +--- a/scripts/decode_stacktrace.sh
3741 ++++ b/scripts/decode_stacktrace.sh
3742 +@@ -85,7 +85,7 @@ parse_symbol() {
3743 + fi
3744 +
3745 + # Strip out the base of the path
3746 +- code=${code//^$basepath/""}
3747 ++ code=${code#$basepath/}
3748 +
3749 + # In the case of inlines, move everything to same line
3750 + code=${code//$'\n'/' '}
3751 +diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
3752 +index e45b5cf3b97f..8491becb5727 100644
3753 +--- a/security/keys/request_key_auth.c
3754 ++++ b/security/keys/request_key_auth.c
3755 +@@ -66,6 +66,9 @@ static void request_key_auth_describe(const struct key *key,
3756 + {
3757 + struct request_key_auth *rka = get_request_key_auth(key);
3758 +
3759 ++ if (!rka)
3760 ++ return;
3761 ++
3762 + seq_puts(m, "key:");
3763 + seq_puts(m, key->description);
3764 + if (key_is_positive(key))
3765 +@@ -83,6 +86,9 @@ static long request_key_auth_read(const struct key *key,
3766 + size_t datalen;
3767 + long ret;
3768 +
3769 ++ if (!rka)
3770 ++ return -EKEYREVOKED;
3771 ++
3772 + datalen = rka->callout_len;
3773 + ret = datalen;
3774 +
3775 +diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
3776 +index 7a4e21a31523..d41651afe5f6 100644
3777 +--- a/tools/bpf/bpftool/prog.c
3778 ++++ b/tools/bpf/bpftool/prog.c
3779 +@@ -362,7 +362,9 @@ static int do_show(int argc, char **argv)
3780 + if (fd < 0)
3781 + return -1;
3782 +
3783 +- return show_prog(fd);
3784 ++ err = show_prog(fd);
3785 ++ close(fd);
3786 ++ return err;
3787 + }
3788 +
3789 + if (argc)
3790 +diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
3791 +index 1cd28ebf8443..5c0154cf190c 100644
3792 +--- a/tools/power/x86/turbostat/turbostat.c
3793 ++++ b/tools/power/x86/turbostat/turbostat.c
3794 +@@ -506,6 +506,7 @@ unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAU
3795 + unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC;
3796 +
3797 + #define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME)
3798 ++#define DO_BIC_READ(COUNTER_NAME) (bic_present & COUNTER_NAME)
3799 + #define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME)
3800 + #define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT)
3801 + #define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT)
3802 +@@ -1287,6 +1288,14 @@ delta_core(struct core_data *new, struct core_data *old)
3803 + }
3804 + }
3805 +
3806 ++int soft_c1_residency_display(int bic)
3807 ++{
3808 ++ if (!DO_BIC(BIC_CPU_c1) || use_c1_residency_msr)
3809 ++ return 0;
3810 ++
3811 ++ return DO_BIC_READ(bic);
3812 ++}
3813 ++
3814 + /*
3815 + * old = new - old
3816 + */
3817 +@@ -1322,7 +1331,8 @@ delta_thread(struct thread_data *new, struct thread_data *old,
3818 +
3819 + old->c1 = new->c1 - old->c1;
3820 +
3821 +- if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) {
3822 ++ if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) ||
3823 ++ soft_c1_residency_display(BIC_Avg_MHz)) {
3824 + if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
3825 + old->aperf = new->aperf - old->aperf;
3826 + old->mperf = new->mperf - old->mperf;
3827 +@@ -1774,7 +1784,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3828 + retry:
3829 + t->tsc = rdtsc(); /* we are running on local CPU of interest */
3830 +
3831 +- if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) {
3832 ++ if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) ||
3833 ++ soft_c1_residency_display(BIC_Avg_MHz)) {
3834 + unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time;
3835 +
3836 + /*
3837 +@@ -1851,20 +1862,20 @@ retry:
3838 + if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
3839 + goto done;
3840 +
3841 +- if (DO_BIC(BIC_CPU_c3)) {
3842 ++ if (DO_BIC(BIC_CPU_c3) || soft_c1_residency_display(BIC_CPU_c3)) {
3843 + if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
3844 + return -6;
3845 + }
3846 +
3847 +- if (DO_BIC(BIC_CPU_c6) && !do_knl_cstates) {
3848 ++ if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !do_knl_cstates) {
3849 + if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
3850 + return -7;
3851 +- } else if (do_knl_cstates) {
3852 ++ } else if (do_knl_cstates || soft_c1_residency_display(BIC_CPU_c6)) {
3853 + if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
3854 + return -7;
3855 + }
3856 +
3857 +- if (DO_BIC(BIC_CPU_c7))
3858 ++ if (DO_BIC(BIC_CPU_c7) || soft_c1_residency_display(BIC_CPU_c7))
3859 + if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
3860 + return -8;
3861 +
3862 +@@ -2912,6 +2923,7 @@ int snapshot_cpu_lpi_us(void)
3863 + if (retval != 1) {
3864 + fprintf(stderr, "Disabling Low Power Idle CPU output\n");
3865 + BIC_NOT_PRESENT(BIC_CPU_LPI);
3866 ++ fclose(fp);
3867 + return -1;
3868 + }
3869 +
3870 +@@ -2938,6 +2950,7 @@ int snapshot_sys_lpi_us(void)
3871 + if (retval != 1) {
3872 + fprintf(stderr, "Disabling Low Power Idle System output\n");
3873 + BIC_NOT_PRESENT(BIC_SYS_LPI);
3874 ++ fclose(fp);
3875 + return -1;
3876 + }
3877 + fclose(fp);
3878 +@@ -3209,6 +3222,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
3879 + break;
3880 + case INTEL_FAM6_HASWELL_CORE: /* HSW */
3881 + case INTEL_FAM6_HASWELL_X: /* HSX */
3882 ++ case INTEL_FAM6_HASWELL_ULT: /* HSW */
3883 + case INTEL_FAM6_HASWELL_GT3E: /* HSW */
3884 + case INTEL_FAM6_BROADWELL_CORE: /* BDW */
3885 + case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
3886 +@@ -3405,6 +3419,7 @@ int has_config_tdp(unsigned int family, unsigned int model)
3887 + case INTEL_FAM6_IVYBRIDGE: /* IVB */
3888 + case INTEL_FAM6_HASWELL_CORE: /* HSW */
3889 + case INTEL_FAM6_HASWELL_X: /* HSX */
3890 ++ case INTEL_FAM6_HASWELL_ULT: /* HSW */
3891 + case INTEL_FAM6_HASWELL_GT3E: /* HSW */
3892 + case INTEL_FAM6_BROADWELL_CORE: /* BDW */
3893 + case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
3894 +@@ -3841,6 +3856,7 @@ void rapl_probe_intel(unsigned int family, unsigned int model)
3895 + case INTEL_FAM6_SANDYBRIDGE:
3896 + case INTEL_FAM6_IVYBRIDGE:
3897 + case INTEL_FAM6_HASWELL_CORE: /* HSW */
3898 ++ case INTEL_FAM6_HASWELL_ULT: /* HSW */
3899 + case INTEL_FAM6_HASWELL_GT3E: /* HSW */
3900 + case INTEL_FAM6_BROADWELL_CORE: /* BDW */
3901 + case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
3902 +@@ -4032,6 +4048,7 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model)
3903 +
3904 + switch (model) {
3905 + case INTEL_FAM6_HASWELL_CORE: /* HSW */
3906 ++ case INTEL_FAM6_HASWELL_ULT: /* HSW */
3907 + case INTEL_FAM6_HASWELL_GT3E: /* HSW */
3908 + do_gfx_perf_limit_reasons = 1;
3909 + case INTEL_FAM6_HASWELL_X: /* HSX */
3910 +@@ -4251,6 +4268,7 @@ int has_snb_msrs(unsigned int family, unsigned int model)
3911 + case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */
3912 + case INTEL_FAM6_HASWELL_CORE: /* HSW */
3913 + case INTEL_FAM6_HASWELL_X: /* HSW */
3914 ++ case INTEL_FAM6_HASWELL_ULT: /* HSW */
3915 + case INTEL_FAM6_HASWELL_GT3E: /* HSW */
3916 + case INTEL_FAM6_BROADWELL_CORE: /* BDW */
3917 + case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
3918 +@@ -4284,7 +4302,7 @@ int has_hsw_msrs(unsigned int family, unsigned int model)
3919 + return 0;
3920 +
3921 + switch (model) {
3922 +- case INTEL_FAM6_HASWELL_CORE:
3923 ++ case INTEL_FAM6_HASWELL_ULT: /* HSW */
3924 + case INTEL_FAM6_BROADWELL_CORE: /* BDW */
3925 + case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
3926 + case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */
3927 +@@ -4568,9 +4586,6 @@ unsigned int intel_model_duplicates(unsigned int model)
3928 + case INTEL_FAM6_XEON_PHI_KNM:
3929 + return INTEL_FAM6_XEON_PHI_KNL;
3930 +
3931 +- case INTEL_FAM6_HASWELL_ULT:
3932 +- return INTEL_FAM6_HASWELL_CORE;
3933 +-
3934 + case INTEL_FAM6_BROADWELL_X:
3935 + case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
3936 + return INTEL_FAM6_BROADWELL_X;
3937 +@@ -4582,6 +4597,7 @@ unsigned int intel_model_duplicates(unsigned int model)
3938 + return INTEL_FAM6_SKYLAKE_MOBILE;
3939 +
3940 + case INTEL_FAM6_ICELAKE_MOBILE:
3941 ++ case INTEL_FAM6_ICELAKE_NNPI:
3942 + return INTEL_FAM6_CANNONLAKE_MOBILE;
3943 + }
3944 + return model;
3945 +@@ -5123,7 +5139,7 @@ int initialize_counters(int cpu_id)
3946 +
3947 + void allocate_output_buffer()
3948 + {
3949 +- output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
3950 ++ output_buffer = calloc(1, (1 + topo.num_cpus) * 2048);
3951 + outp = output_buffer;
3952 + if (outp == NULL)
3953 + err(-1, "calloc output buffer");
3954 +diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
3955 +index 34a796b303fe..3fe1eed900d4 100644
3956 +--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
3957 ++++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
3958 +@@ -545,7 +545,7 @@ void cmdline(int argc, char **argv)
3959 +
3960 + progname = argv[0];
3961 +
3962 +- while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw",
3963 ++ while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw:",
3964 + long_options, &option_index)) != -1) {
3965 + switch (opt) {
3966 + case 'a':
3967 +@@ -1259,6 +1259,15 @@ void probe_dev_msr(void)
3968 + if (system("/sbin/modprobe msr > /dev/null 2>&1"))
3969 + err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
3970 + }
3971 ++
3972 ++static void get_cpuid_or_exit(unsigned int leaf,
3973 ++ unsigned int *eax, unsigned int *ebx,
3974 ++ unsigned int *ecx, unsigned int *edx)
3975 ++{
3976 ++ if (!__get_cpuid(leaf, eax, ebx, ecx, edx))
3977 ++ errx(1, "Processor not supported\n");
3978 ++}
3979 ++
3980 + /*
3981 + * early_cpuid()
3982 + * initialize turbo_is_enabled, has_hwp, has_epb
3983 +@@ -1266,15 +1275,10 @@ void probe_dev_msr(void)
3984 + */
3985 + void early_cpuid(void)
3986 + {
3987 +- unsigned int eax, ebx, ecx, edx, max_level;
3988 ++ unsigned int eax, ebx, ecx, edx;
3989 + unsigned int fms, family, model;
3990 +
3991 +- __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
3992 +-
3993 +- if (max_level < 6)
3994 +- errx(1, "Processor not supported\n");
3995 +-
3996 +- __get_cpuid(1, &fms, &ebx, &ecx, &edx);
3997 ++ get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
3998 + family = (fms >> 8) & 0xf;
3999 + model = (fms >> 4) & 0xf;
4000 + if (family == 6 || family == 0xf)
4001 +@@ -1288,7 +1292,7 @@ void early_cpuid(void)
4002 + bdx_highest_ratio = msr & 0xFF;
4003 + }
4004 +
4005 +- __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
4006 ++ get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
4007 + turbo_is_enabled = (eax >> 1) & 1;
4008 + has_hwp = (eax >> 7) & 1;
4009 + has_epb = (ecx >> 3) & 1;
4010 +@@ -1306,7 +1310,7 @@ void parse_cpuid(void)
4011 +
4012 + eax = ebx = ecx = edx = 0;
4013 +
4014 +- __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
4015 ++ get_cpuid_or_exit(0, &max_level, &ebx, &ecx, &edx);
4016 +
4017 + if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
4018 + genuine_intel = 1;
4019 +@@ -1315,7 +1319,7 @@ void parse_cpuid(void)
4020 + fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
4021 + (char *)&ebx, (char *)&edx, (char *)&ecx);
4022 +
4023 +- __get_cpuid(1, &fms, &ebx, &ecx, &edx);
4024 ++ get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
4025 + family = (fms >> 8) & 0xf;
4026 + model = (fms >> 4) & 0xf;
4027 + stepping = fms & 0xf;
4028 +@@ -1340,7 +1344,7 @@ void parse_cpuid(void)
4029 + errx(1, "CPUID: no MSR");
4030 +
4031 +
4032 +- __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
4033 ++ get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
4034 + /* turbo_is_enabled already set */
4035 + /* has_hwp already set */
4036 + has_hwp_notify = eax & (1 << 8);
4037 +diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
4038 +index f7a0744db31e..5dc109f4c097 100644
4039 +--- a/tools/testing/selftests/bpf/config
4040 ++++ b/tools/testing/selftests/bpf/config
4041 +@@ -34,3 +34,4 @@ CONFIG_NET_MPLS_GSO=m
4042 + CONFIG_MPLS_ROUTING=m
4043 + CONFIG_MPLS_IPTUNNEL=m
4044 + CONFIG_IPV6_SIT=m
4045 ++CONFIG_BPF_JIT=y
4046 +diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c
4047 +index 2fc4625c1a15..655729004391 100644
4048 +--- a/tools/testing/selftests/bpf/test_cgroup_storage.c
4049 ++++ b/tools/testing/selftests/bpf/test_cgroup_storage.c
4050 +@@ -20,9 +20,9 @@ int main(int argc, char **argv)
4051 + BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
4052 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4053 + BPF_FUNC_get_local_storage),
4054 +- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4055 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
4056 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
4057 +- BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
4058 ++ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
4059 +
4060 + BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */
4061 + BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
4062 +@@ -30,7 +30,7 @@ int main(int argc, char **argv)
4063 + BPF_FUNC_get_local_storage),
4064 + BPF_MOV64_IMM(BPF_REG_1, 1),
4065 + BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
4066 +- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4067 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4068 + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1),
4069 + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4070 + BPF_EXIT_INSN(),
4071 +diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
4072 +index fb679ac3d4b0..0e6652733462 100644
4073 +--- a/tools/testing/selftests/bpf/test_sock.c
4074 ++++ b/tools/testing/selftests/bpf/test_sock.c
4075 +@@ -13,6 +13,7 @@
4076 + #include <bpf/bpf.h>
4077 +
4078 + #include "cgroup_helpers.h"
4079 ++#include "bpf_endian.h"
4080 + #include "bpf_rlimit.h"
4081 + #include "bpf_util.h"
4082 +
4083 +@@ -232,7 +233,8 @@ static struct sock_test tests[] = {
4084 + /* if (ip == expected && port == expected) */
4085 + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
4086 + offsetof(struct bpf_sock, src_ip6[3])),
4087 +- BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x01000000, 4),
4088 ++ BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
4089 ++ __bpf_constant_ntohl(0x00000001), 4),
4090 + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
4091 + offsetof(struct bpf_sock, src_port)),
4092 + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2),
4093 +@@ -261,7 +263,8 @@ static struct sock_test tests[] = {
4094 + /* if (ip == expected && port == expected) */
4095 + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
4096 + offsetof(struct bpf_sock, src_ip4)),
4097 +- BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x0100007F, 4),
4098 ++ BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
4099 ++ __bpf_constant_ntohl(0x7F000001), 4),
4100 + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
4101 + offsetof(struct bpf_sock, src_port)),
4102 + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
4103 +diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
4104 +index 5294abb3f178..8ffd07e2a160 100644
4105 +--- a/virt/kvm/coalesced_mmio.c
4106 ++++ b/virt/kvm/coalesced_mmio.c
4107 +@@ -40,7 +40,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
4108 + return 1;
4109 + }
4110 +
4111 +-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
4112 ++static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
4113 + {
4114 + struct kvm_coalesced_mmio_ring *ring;
4115 + unsigned avail;
4116 +@@ -52,7 +52,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
4117 + * there is always one unused entry in the buffer
4118 + */
4119 + ring = dev->kvm->coalesced_mmio_ring;
4120 +- avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
4121 ++ avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
4122 + if (avail == 0) {
4123 + /* full */
4124 + return 0;
4125 +@@ -67,25 +67,28 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
4126 + {
4127 + struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
4128 + struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
4129 ++ __u32 insert;
4130 +
4131 + if (!coalesced_mmio_in_range(dev, addr, len))
4132 + return -EOPNOTSUPP;
4133 +
4134 + spin_lock(&dev->kvm->ring_lock);
4135 +
4136 +- if (!coalesced_mmio_has_room(dev)) {
4137 ++ insert = READ_ONCE(ring->last);
4138 ++ if (!coalesced_mmio_has_room(dev, insert) ||
4139 ++ insert >= KVM_COALESCED_MMIO_MAX) {
4140 + spin_unlock(&dev->kvm->ring_lock);
4141 + return -EOPNOTSUPP;
4142 + }
4143 +
4144 + /* copy data in first free entry of the ring */
4145 +
4146 +- ring->coalesced_mmio[ring->last].phys_addr = addr;
4147 +- ring->coalesced_mmio[ring->last].len = len;
4148 +- memcpy(ring->coalesced_mmio[ring->last].data, val, len);
4149 +- ring->coalesced_mmio[ring->last].pio = dev->zone.pio;
4150 ++ ring->coalesced_mmio[insert].phys_addr = addr;
4151 ++ ring->coalesced_mmio[insert].len = len;
4152 ++ memcpy(ring->coalesced_mmio[insert].data, val, len);
4153 ++ ring->coalesced_mmio[insert].pio = dev->zone.pio;
4154 + smp_wmb();
4155 +- ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
4156 ++ ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
4157 + spin_unlock(&dev->kvm->ring_lock);
4158 + return 0;
4159 + }