
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.5 commit in: /
Date: Fri, 14 Feb 2020 23:56:54
Message-Id: 1581724592.fc793d41bdad23c353174f56a79f97eb170ea76e.mpagano@gentoo
commit: fc793d41bdad23c353174f56a79f97eb170ea76e
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Feb 14 23:56:32 2020 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Feb 14 23:56:32 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=fc793d41

Linux patch 5.5.4

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1003_linux-5.5.4.patch | 4385 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4389 insertions(+)

16 diff --git a/0000_README b/0000_README
17 index d7385d1..567c784 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -55,6 +55,10 @@ Patch: 1002_linux-5.5.3.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.5.3
23
24 +Patch: 1003_linux-5.5.4.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.5.4
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1003_linux-5.5.4.patch b/1003_linux-5.5.4.patch
33 new file mode 100644
34 index 0000000..1390447
35 --- /dev/null
36 +++ b/1003_linux-5.5.4.patch
37 @@ -0,0 +1,4385 @@
38 +diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
39 +index 6eb33207a167..5117ad68a584 100644
40 +--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
41 ++++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
42 +@@ -82,7 +82,7 @@ properties:
43 + Must be the device tree identifier of the over-sampling
44 + mode pins. As the line is active high, it should be marked
45 + GPIO_ACTIVE_HIGH.
46 +- maxItems: 1
47 ++ maxItems: 3
48 +
49 + adi,sw-mode:
50 + description:
51 +@@ -125,9 +125,9 @@ examples:
52 + adi,conversion-start-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
53 + reset-gpios = <&gpio 27 GPIO_ACTIVE_HIGH>;
54 + adi,first-data-gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
55 +- adi,oversampling-ratio-gpios = <&gpio 18 GPIO_ACTIVE_HIGH
56 +- &gpio 23 GPIO_ACTIVE_HIGH
57 +- &gpio 26 GPIO_ACTIVE_HIGH>;
58 ++ adi,oversampling-ratio-gpios = <&gpio 18 GPIO_ACTIVE_HIGH>,
59 ++ <&gpio 23 GPIO_ACTIVE_HIGH>,
60 ++ <&gpio 26 GPIO_ACTIVE_HIGH>;
61 + standby-gpios = <&gpio 24 GPIO_ACTIVE_LOW>;
62 + adi,sw-mode;
63 + };
64 +diff --git a/Makefile b/Makefile
65 +index fdaa1e262320..62f956e9c81d 100644
66 +--- a/Makefile
67 ++++ b/Makefile
68 +@@ -1,7 +1,7 @@
69 + # SPDX-License-Identifier: GPL-2.0
70 + VERSION = 5
71 + PATCHLEVEL = 5
72 +-SUBLEVEL = 3
73 ++SUBLEVEL = 4
74 + EXTRAVERSION =
75 + NAME = Kleptomaniac Octopus
76 +
77 +diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
78 +index f9a5c9ddcae7..1d109b06e7d8 100644
79 +--- a/arch/arc/boot/dts/axs10x_mb.dtsi
80 ++++ b/arch/arc/boot/dts/axs10x_mb.dtsi
81 +@@ -78,6 +78,7 @@
82 + interrupt-names = "macirq";
83 + phy-mode = "rgmii";
84 + snps,pbl = < 32 >;
85 ++ snps,multicast-filter-bins = <256>;
86 + clocks = <&apbclk>;
87 + clock-names = "stmmaceth";
88 + max-speed = <100>;
89 +diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
90 +index 091356f2a8c1..c726cd8dbdf1 100644
91 +--- a/arch/arm/boot/dts/am43xx-clocks.dtsi
92 ++++ b/arch/arm/boot/dts/am43xx-clocks.dtsi
93 +@@ -704,6 +704,60 @@
94 + ti,bit-shift = <8>;
95 + reg = <0x2a48>;
96 + };
97 ++
98 ++ clkout1_osc_div_ck: clkout1-osc-div-ck {
99 ++ #clock-cells = <0>;
100 ++ compatible = "ti,divider-clock";
101 ++ clocks = <&sys_clkin_ck>;
102 ++ ti,bit-shift = <20>;
103 ++ ti,max-div = <4>;
104 ++ reg = <0x4100>;
105 ++ };
106 ++
107 ++ clkout1_src2_mux_ck: clkout1-src2-mux-ck {
108 ++ #clock-cells = <0>;
109 ++ compatible = "ti,mux-clock";
110 ++ clocks = <&clk_rc32k_ck>, <&sysclk_div>, <&dpll_ddr_m2_ck>,
111 ++ <&dpll_per_m2_ck>, <&dpll_disp_m2_ck>,
112 ++ <&dpll_mpu_m2_ck>;
113 ++ reg = <0x4100>;
114 ++ };
115 ++
116 ++ clkout1_src2_pre_div_ck: clkout1-src2-pre-div-ck {
117 ++ #clock-cells = <0>;
118 ++ compatible = "ti,divider-clock";
119 ++ clocks = <&clkout1_src2_mux_ck>;
120 ++ ti,bit-shift = <4>;
121 ++ ti,max-div = <8>;
122 ++ reg = <0x4100>;
123 ++ };
124 ++
125 ++ clkout1_src2_post_div_ck: clkout1-src2-post-div-ck {
126 ++ #clock-cells = <0>;
127 ++ compatible = "ti,divider-clock";
128 ++ clocks = <&clkout1_src2_pre_div_ck>;
129 ++ ti,bit-shift = <8>;
130 ++ ti,max-div = <32>;
131 ++ ti,index-power-of-two;
132 ++ reg = <0x4100>;
133 ++ };
134 ++
135 ++ clkout1_mux_ck: clkout1-mux-ck {
136 ++ #clock-cells = <0>;
137 ++ compatible = "ti,mux-clock";
138 ++ clocks = <&clkout1_osc_div_ck>, <&clk_rc32k_ck>,
139 ++ <&clkout1_src2_post_div_ck>, <&dpll_extdev_m2_ck>;
140 ++ ti,bit-shift = <16>;
141 ++ reg = <0x4100>;
142 ++ };
143 ++
144 ++ clkout1_ck: clkout1-ck {
145 ++ #clock-cells = <0>;
146 ++ compatible = "ti,gate-clock";
147 ++ clocks = <&clkout1_mux_ck>;
148 ++ ti,bit-shift = <23>;
149 ++ reg = <0x4100>;
150 ++ };
151 + };
152 +
153 + &prcm {
154 +diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
155 +index dee9c0c8a096..16c6fd3c4246 100644
156 +--- a/arch/arm/boot/dts/at91sam9260.dtsi
157 ++++ b/arch/arm/boot/dts/at91sam9260.dtsi
158 +@@ -187,7 +187,7 @@
159 + usart0 {
160 + pinctrl_usart0: usart0-0 {
161 + atmel,pins =
162 +- <AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_NONE
163 ++ <AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
164 + AT91_PIOB 5 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
165 + };
166 +
167 +@@ -221,7 +221,7 @@
168 + usart1 {
169 + pinctrl_usart1: usart1-0 {
170 + atmel,pins =
171 +- <AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_NONE
172 ++ <AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
173 + AT91_PIOB 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
174 + };
175 +
176 +@@ -239,7 +239,7 @@
177 + usart2 {
178 + pinctrl_usart2: usart2-0 {
179 + atmel,pins =
180 +- <AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_NONE
181 ++ <AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
182 + AT91_PIOB 9 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
183 + };
184 +
185 +@@ -257,7 +257,7 @@
186 + usart3 {
187 + pinctrl_usart3: usart3-0 {
188 + atmel,pins =
189 +- <AT91_PIOB 10 AT91_PERIPH_A AT91_PINCTRL_NONE
190 ++ <AT91_PIOB 10 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
191 + AT91_PIOB 11 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
192 + };
193 +
194 +@@ -275,7 +275,7 @@
195 + uart0 {
196 + pinctrl_uart0: uart0-0 {
197 + atmel,pins =
198 +- <AT91_PIOA 31 AT91_PERIPH_B AT91_PINCTRL_NONE
199 ++ <AT91_PIOA 31 AT91_PERIPH_B AT91_PINCTRL_PULL_UP
200 + AT91_PIOA 30 AT91_PERIPH_B AT91_PINCTRL_PULL_UP>;
201 + };
202 + };
203 +@@ -283,7 +283,7 @@
204 + uart1 {
205 + pinctrl_uart1: uart1-0 {
206 + atmel,pins =
207 +- <AT91_PIOB 12 AT91_PERIPH_A AT91_PINCTRL_NONE
208 ++ <AT91_PIOB 12 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
209 + AT91_PIOB 13 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
210 + };
211 + };
212 +diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi
213 +index dba025a98527..5ed3d745ac86 100644
214 +--- a/arch/arm/boot/dts/at91sam9261.dtsi
215 ++++ b/arch/arm/boot/dts/at91sam9261.dtsi
216 +@@ -329,7 +329,7 @@
217 + usart0 {
218 + pinctrl_usart0: usart0-0 {
219 + atmel,pins =
220 +- <AT91_PIOC 8 AT91_PERIPH_A AT91_PINCTRL_NONE>,
221 ++ <AT91_PIOC 8 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
222 + <AT91_PIOC 9 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
223 + };
224 +
225 +@@ -347,7 +347,7 @@
226 + usart1 {
227 + pinctrl_usart1: usart1-0 {
228 + atmel,pins =
229 +- <AT91_PIOC 12 AT91_PERIPH_A AT91_PINCTRL_NONE>,
230 ++ <AT91_PIOC 12 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
231 + <AT91_PIOC 13 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
232 + };
233 +
234 +@@ -365,7 +365,7 @@
235 + usart2 {
236 + pinctrl_usart2: usart2-0 {
237 + atmel,pins =
238 +- <AT91_PIOC 14 AT91_PERIPH_A AT91_PINCTRL_NONE>,
239 ++ <AT91_PIOC 14 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
240 + <AT91_PIOC 15 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
241 + };
242 +
243 +diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
244 +index 99678abdda93..5c990cfae254 100644
245 +--- a/arch/arm/boot/dts/at91sam9263.dtsi
246 ++++ b/arch/arm/boot/dts/at91sam9263.dtsi
247 +@@ -183,7 +183,7 @@
248 + usart0 {
249 + pinctrl_usart0: usart0-0 {
250 + atmel,pins =
251 +- <AT91_PIOA 26 AT91_PERIPH_A AT91_PINCTRL_NONE
252 ++ <AT91_PIOA 26 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
253 + AT91_PIOA 27 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
254 + };
255 +
256 +@@ -201,7 +201,7 @@
257 + usart1 {
258 + pinctrl_usart1: usart1-0 {
259 + atmel,pins =
260 +- <AT91_PIOD 0 AT91_PERIPH_A AT91_PINCTRL_NONE
261 ++ <AT91_PIOD 0 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
262 + AT91_PIOD 1 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
263 + };
264 +
265 +@@ -219,7 +219,7 @@
266 + usart2 {
267 + pinctrl_usart2: usart2-0 {
268 + atmel,pins =
269 +- <AT91_PIOD 2 AT91_PERIPH_A AT91_PINCTRL_NONE
270 ++ <AT91_PIOD 2 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
271 + AT91_PIOD 3 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
272 + };
273 +
274 +diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
275 +index 691c95ea6175..fd179097a4bf 100644
276 +--- a/arch/arm/boot/dts/at91sam9g45.dtsi
277 ++++ b/arch/arm/boot/dts/at91sam9g45.dtsi
278 +@@ -556,7 +556,7 @@
279 + usart0 {
280 + pinctrl_usart0: usart0-0 {
281 + atmel,pins =
282 +- <AT91_PIOB 19 AT91_PERIPH_A AT91_PINCTRL_NONE
283 ++ <AT91_PIOB 19 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
284 + AT91_PIOB 18 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
285 + };
286 +
287 +@@ -574,7 +574,7 @@
288 + usart1 {
289 + pinctrl_usart1: usart1-0 {
290 + atmel,pins =
291 +- <AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_NONE
292 ++ <AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
293 + AT91_PIOB 5 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
294 + };
295 +
296 +@@ -592,7 +592,7 @@
297 + usart2 {
298 + pinctrl_usart2: usart2-0 {
299 + atmel,pins =
300 +- <AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_NONE
301 ++ <AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
302 + AT91_PIOB 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
303 + };
304 +
305 +@@ -610,7 +610,7 @@
306 + usart3 {
307 + pinctrl_usart3: usart3-0 {
308 + atmel,pins =
309 +- <AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_NONE
310 ++ <AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
311 + AT91_PIOB 9 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
312 + };
313 +
314 +diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi
315 +index 8643b7151565..ea024e4b6e09 100644
316 +--- a/arch/arm/boot/dts/at91sam9rl.dtsi
317 ++++ b/arch/arm/boot/dts/at91sam9rl.dtsi
318 +@@ -682,7 +682,7 @@
319 + usart0 {
320 + pinctrl_usart0: usart0-0 {
321 + atmel,pins =
322 +- <AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_NONE>,
323 ++ <AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
324 + <AT91_PIOA 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
325 + };
326 +
327 +@@ -721,7 +721,7 @@
328 + usart1 {
329 + pinctrl_usart1: usart1-0 {
330 + atmel,pins =
331 +- <AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_NONE>,
332 ++ <AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
333 + <AT91_PIOA 12 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
334 + };
335 +
336 +@@ -744,7 +744,7 @@
337 + usart2 {
338 + pinctrl_usart2: usart2-0 {
339 + atmel,pins =
340 +- <AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_NONE>,
341 ++ <AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
342 + <AT91_PIOA 14 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
343 + };
344 +
345 +@@ -767,7 +767,7 @@
346 + usart3 {
347 + pinctrl_usart3: usart3-0 {
348 + atmel,pins =
349 +- <AT91_PIOB 0 AT91_PERIPH_A AT91_PINCTRL_NONE>,
350 ++ <AT91_PIOB 0 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
351 + <AT91_PIOB 1 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
352 + };
353 +
354 +diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
355 +index 3c534cd50ee3..db2033f674c6 100644
356 +--- a/arch/arm/boot/dts/meson8.dtsi
357 ++++ b/arch/arm/boot/dts/meson8.dtsi
358 +@@ -129,8 +129,8 @@
359 + gpu_opp_table: gpu-opp-table {
360 + compatible = "operating-points-v2";
361 +
362 +- opp-182150000 {
363 +- opp-hz = /bits/ 64 <182150000>;
364 ++ opp-182142857 {
365 ++ opp-hz = /bits/ 64 <182142857>;
366 + opp-microvolt = <1150000>;
367 + };
368 + opp-318750000 {
369 +diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
370 +index 099bf8e711c9..1e8c5d7bc824 100644
371 +--- a/arch/arm/boot/dts/meson8b.dtsi
372 ++++ b/arch/arm/boot/dts/meson8b.dtsi
373 +@@ -125,8 +125,8 @@
374 + opp-hz = /bits/ 64 <255000000>;
375 + opp-microvolt = <1100000>;
376 + };
377 +- opp-364300000 {
378 +- opp-hz = /bits/ 64 <364300000>;
379 ++ opp-364285714 {
380 ++ opp-hz = /bits/ 64 <364285714>;
381 + opp-microvolt = <1100000>;
382 + };
383 + opp-425000000 {
384 +diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
385 +index f770aace0efd..203d40be70a5 100644
386 +--- a/arch/arm/boot/dts/sama5d3.dtsi
387 ++++ b/arch/arm/boot/dts/sama5d3.dtsi
388 +@@ -1188,49 +1188,49 @@
389 + usart0_clk: usart0_clk {
390 + #clock-cells = <0>;
391 + reg = <12>;
392 +- atmel,clk-output-range = <0 66000000>;
393 ++ atmel,clk-output-range = <0 83000000>;
394 + };
395 +
396 + usart1_clk: usart1_clk {
397 + #clock-cells = <0>;
398 + reg = <13>;
399 +- atmel,clk-output-range = <0 66000000>;
400 ++ atmel,clk-output-range = <0 83000000>;
401 + };
402 +
403 + usart2_clk: usart2_clk {
404 + #clock-cells = <0>;
405 + reg = <14>;
406 +- atmel,clk-output-range = <0 66000000>;
407 ++ atmel,clk-output-range = <0 83000000>;
408 + };
409 +
410 + usart3_clk: usart3_clk {
411 + #clock-cells = <0>;
412 + reg = <15>;
413 +- atmel,clk-output-range = <0 66000000>;
414 ++ atmel,clk-output-range = <0 83000000>;
415 + };
416 +
417 + uart0_clk: uart0_clk {
418 + #clock-cells = <0>;
419 + reg = <16>;
420 +- atmel,clk-output-range = <0 66000000>;
421 ++ atmel,clk-output-range = <0 83000000>;
422 + };
423 +
424 + twi0_clk: twi0_clk {
425 + reg = <18>;
426 + #clock-cells = <0>;
427 +- atmel,clk-output-range = <0 16625000>;
428 ++ atmel,clk-output-range = <0 41500000>;
429 + };
430 +
431 + twi1_clk: twi1_clk {
432 + #clock-cells = <0>;
433 + reg = <19>;
434 +- atmel,clk-output-range = <0 16625000>;
435 ++ atmel,clk-output-range = <0 41500000>;
436 + };
437 +
438 + twi2_clk: twi2_clk {
439 + #clock-cells = <0>;
440 + reg = <20>;
441 +- atmel,clk-output-range = <0 16625000>;
442 ++ atmel,clk-output-range = <0 41500000>;
443 + };
444 +
445 + mci0_clk: mci0_clk {
446 +@@ -1246,19 +1246,19 @@
447 + spi0_clk: spi0_clk {
448 + #clock-cells = <0>;
449 + reg = <24>;
450 +- atmel,clk-output-range = <0 133000000>;
451 ++ atmel,clk-output-range = <0 166000000>;
452 + };
453 +
454 + spi1_clk: spi1_clk {
455 + #clock-cells = <0>;
456 + reg = <25>;
457 +- atmel,clk-output-range = <0 133000000>;
458 ++ atmel,clk-output-range = <0 166000000>;
459 + };
460 +
461 + tcb0_clk: tcb0_clk {
462 + #clock-cells = <0>;
463 + reg = <26>;
464 +- atmel,clk-output-range = <0 133000000>;
465 ++ atmel,clk-output-range = <0 166000000>;
466 + };
467 +
468 + pwm_clk: pwm_clk {
469 +@@ -1269,7 +1269,7 @@
470 + adc_clk: adc_clk {
471 + #clock-cells = <0>;
472 + reg = <29>;
473 +- atmel,clk-output-range = <0 66000000>;
474 ++ atmel,clk-output-range = <0 83000000>;
475 + };
476 +
477 + dma0_clk: dma0_clk {
478 +@@ -1300,13 +1300,13 @@
479 + ssc0_clk: ssc0_clk {
480 + #clock-cells = <0>;
481 + reg = <38>;
482 +- atmel,clk-output-range = <0 66000000>;
483 ++ atmel,clk-output-range = <0 83000000>;
484 + };
485 +
486 + ssc1_clk: ssc1_clk {
487 + #clock-cells = <0>;
488 + reg = <39>;
489 +- atmel,clk-output-range = <0 66000000>;
490 ++ atmel,clk-output-range = <0 83000000>;
491 + };
492 +
493 + sha_clk: sha_clk {
494 +diff --git a/arch/arm/boot/dts/sama5d3_can.dtsi b/arch/arm/boot/dts/sama5d3_can.dtsi
495 +index cf06a018ed0f..2470dd3fff25 100644
496 +--- a/arch/arm/boot/dts/sama5d3_can.dtsi
497 ++++ b/arch/arm/boot/dts/sama5d3_can.dtsi
498 +@@ -36,13 +36,13 @@
499 + can0_clk: can0_clk {
500 + #clock-cells = <0>;
501 + reg = <40>;
502 +- atmel,clk-output-range = <0 66000000>;
503 ++ atmel,clk-output-range = <0 83000000>;
504 + };
505 +
506 + can1_clk: can1_clk {
507 + #clock-cells = <0>;
508 + reg = <41>;
509 +- atmel,clk-output-range = <0 66000000>;
510 ++ atmel,clk-output-range = <0 83000000>;
511 + };
512 + };
513 + };
514 +diff --git a/arch/arm/boot/dts/sama5d3_tcb1.dtsi b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
515 +index 1584035daf51..215802b8db30 100644
516 +--- a/arch/arm/boot/dts/sama5d3_tcb1.dtsi
517 ++++ b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
518 +@@ -22,6 +22,7 @@
519 + tcb1_clk: tcb1_clk {
520 + #clock-cells = <0>;
521 + reg = <27>;
522 ++ atmel,clk-output-range = <0 166000000>;
523 + };
524 + };
525 + };
526 +diff --git a/arch/arm/boot/dts/sama5d3_uart.dtsi b/arch/arm/boot/dts/sama5d3_uart.dtsi
527 +index 4316bdbdc25d..cb62adbd28ed 100644
528 +--- a/arch/arm/boot/dts/sama5d3_uart.dtsi
529 ++++ b/arch/arm/boot/dts/sama5d3_uart.dtsi
530 +@@ -41,13 +41,13 @@
531 + uart0_clk: uart0_clk {
532 + #clock-cells = <0>;
533 + reg = <16>;
534 +- atmel,clk-output-range = <0 66000000>;
535 ++ atmel,clk-output-range = <0 83000000>;
536 + };
537 +
538 + uart1_clk: uart1_clk {
539 + #clock-cells = <0>;
540 + reg = <17>;
541 +- atmel,clk-output-range = <0 66000000>;
542 ++ atmel,clk-output-range = <0 83000000>;
543 + };
544 + };
545 + };
546 +diff --git a/arch/arm/crypto/chacha-glue.c b/arch/arm/crypto/chacha-glue.c
547 +index 6ebbb2b241d2..6fdb0ac62b3d 100644
548 +--- a/arch/arm/crypto/chacha-glue.c
549 ++++ b/arch/arm/crypto/chacha-glue.c
550 +@@ -115,7 +115,7 @@ static int chacha_stream_xor(struct skcipher_request *req,
551 + if (nbytes < walk.total)
552 + nbytes = round_down(nbytes, walk.stride);
553 +
554 +- if (!neon) {
555 ++ if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) {
556 + chacha_doarm(walk.dst.virt.addr, walk.src.virt.addr,
557 + nbytes, state, ctx->nrounds);
558 + state[12] += DIV_ROUND_UP(nbytes, CHACHA_BLOCK_SIZE);
559 +@@ -159,7 +159,7 @@ static int do_xchacha(struct skcipher_request *req, bool neon)
560 +
561 + chacha_init_generic(state, ctx->key, req->iv);
562 +
563 +- if (!neon) {
564 ++ if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) {
565 + hchacha_block_arm(state, subctx.key, ctx->nrounds);
566 + } else {
567 + kernel_neon_begin();
568 +diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
569 +index d5af6aedc02c..52665f30d236 100644
570 +--- a/arch/arm/mach-at91/pm.c
571 ++++ b/arch/arm/mach-at91/pm.c
572 +@@ -691,6 +691,12 @@ static void __init at91_pm_use_default_mode(int pm_mode)
573 + soc_pm.data.suspend_mode = AT91_PM_ULP0;
574 + }
575 +
576 ++static const struct of_device_id atmel_shdwc_ids[] = {
577 ++ { .compatible = "atmel,sama5d2-shdwc" },
578 ++ { .compatible = "microchip,sam9x60-shdwc" },
579 ++ { /* sentinel. */ }
580 ++};
581 ++
582 + static void __init at91_pm_modes_init(void)
583 + {
584 + struct device_node *np;
585 +@@ -700,7 +706,7 @@ static void __init at91_pm_modes_init(void)
586 + !at91_is_pm_mode_active(AT91_PM_ULP1))
587 + return;
588 +
589 +- np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-shdwc");
590 ++ np = of_find_matching_node(NULL, atmel_shdwc_ids);
591 + if (!np) {
592 + pr_warn("%s: failed to find shdwc!\n", __func__);
593 + goto ulp1_default;
594 +@@ -751,6 +757,7 @@ static const struct of_device_id atmel_pmc_ids[] __initconst = {
595 + { .compatible = "atmel,sama5d3-pmc", .data = &pmc_infos[1] },
596 + { .compatible = "atmel,sama5d4-pmc", .data = &pmc_infos[1] },
597 + { .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] },
598 ++ { .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[1] },
599 + { /* sentinel */ },
600 + };
601 +
602 +diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
603 +index 3ef204137e73..054be44d1cdb 100644
604 +--- a/arch/arm/mm/init.c
605 ++++ b/arch/arm/mm/init.c
606 +@@ -324,7 +324,7 @@ static inline void poison_init_mem(void *s, size_t count)
607 + *p++ = 0xe7fddef0;
608 + }
609 +
610 +-static inline void
611 ++static inline void __init
612 + free_memmap(unsigned long start_pfn, unsigned long end_pfn)
613 + {
614 + struct page *start_pg, *end_pg;
615 +diff --git a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
616 +index bd4aab6092e0..e31813a4f972 100644
617 +--- a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
618 ++++ b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
619 +@@ -143,6 +143,7 @@
620 + phy-mode = "sgmii";
621 + status = "okay";
622 + managed = "in-band-status";
623 ++ phys = <&comphy1 0>;
624 + sfp = <&sfp_eth0>;
625 + };
626 +
627 +@@ -150,11 +151,14 @@
628 + phy-mode = "sgmii";
629 + status = "okay";
630 + managed = "in-band-status";
631 ++ phys = <&comphy0 1>;
632 + sfp = <&sfp_eth1>;
633 + };
634 +
635 + &usb3 {
636 + status = "okay";
637 ++ phys = <&usb2_utmi_otg_phy>;
638 ++ phy-names = "usb2-utmi-otg-phy";
639 + };
640 +
641 + &uart0 {
642 +diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
643 +index bd881497b872..a211a046b2f2 100644
644 +--- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
645 ++++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
646 +@@ -408,6 +408,8 @@
647 + reg = <5>;
648 + label = "cpu";
649 + ethernet = <&cp1_eth2>;
650 ++ phy-mode = "2500base-x";
651 ++ managed = "in-band-status";
652 + };
653 + };
654 +
655 +diff --git a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
656 +index 5f101a20a20a..e08fcb426bbf 100644
657 +--- a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
658 ++++ b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
659 +@@ -9,6 +9,7 @@
660 + / {
661 + aliases {
662 + serial0 = &blsp2_uart1;
663 ++ serial1 = &blsp1_uart3;
664 + };
665 +
666 + chosen {
667 +diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
668 +index fc7838ea9a01..385b46686194 100644
669 +--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
670 ++++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
671 +@@ -987,7 +987,7 @@
672 +
673 + tcsr_mutex_regs: syscon@1f40000 {
674 + compatible = "syscon";
675 +- reg = <0x01f40000 0x20000>;
676 ++ reg = <0x01f40000 0x40000>;
677 + };
678 +
679 + tlmm: pinctrl@3400000 {
680 +diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
681 +index b38f9d442fc0..e6d700f8c194 100644
682 +--- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
683 ++++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
684 +@@ -636,7 +636,6 @@
685 + /* audio_clkout0/1/2/3 */
686 + #clock-cells = <1>;
687 + clock-frequency = <12288000 11289600>;
688 +- clkout-lr-synchronous;
689 +
690 + status = "okay";
691 +
692 +diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
693 +index 04cf64e9f0c9..32eff833a43c 100644
694 +--- a/arch/arm64/kernel/cpufeature.c
695 ++++ b/arch/arm64/kernel/cpufeature.c
696 +@@ -32,9 +32,7 @@ static unsigned long elf_hwcap __read_mostly;
697 + #define COMPAT_ELF_HWCAP_DEFAULT \
698 + (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
699 + COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
700 +- COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
701 +- COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
702 +- COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
703 ++ COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
704 + COMPAT_HWCAP_LPAE)
705 + unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
706 + unsigned int compat_elf_hwcap2 __read_mostly;
707 +@@ -1368,7 +1366,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
708 + {
709 + /* FP/SIMD is not implemented */
710 + .capability = ARM64_HAS_NO_FPSIMD,
711 +- .type = ARM64_CPUCAP_SYSTEM_FEATURE,
712 ++ .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
713 + .min_field_value = 0,
714 + .matches = has_no_fpsimd,
715 + },
716 +@@ -1596,6 +1594,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
717 + .match_list = list, \
718 + }
719 +
720 ++#define HWCAP_CAP_MATCH(match, cap_type, cap) \
721 ++ { \
722 ++ __HWCAP_CAP(#cap, cap_type, cap) \
723 ++ .matches = match, \
724 ++ }
725 ++
726 + #ifdef CONFIG_ARM64_PTR_AUTH
727 + static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
728 + {
729 +@@ -1669,8 +1673,35 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
730 + {},
731 + };
732 +
733 ++#ifdef CONFIG_COMPAT
734 ++static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
735 ++{
736 ++ /*
737 ++ * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
738 ++ * in line with that of arm32 as in vfp_init(). We make sure that the
739 ++ * check is future proof, by making sure value is non-zero.
740 ++ */
741 ++ u32 mvfr1;
742 ++
743 ++ WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
744 ++ if (scope == SCOPE_SYSTEM)
745 ++ mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
746 ++ else
747 ++ mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
748 ++
749 ++ return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
750 ++ cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
751 ++ cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
752 ++}
753 ++#endif
754 ++
755 + static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
756 + #ifdef CONFIG_COMPAT
757 ++ HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
758 ++ HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
759 ++ /* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */
760 ++ HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
761 ++ HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
762 + HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
763 + HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
764 + HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
765 +diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
766 +index 7c6a0a41676f..d54d165b286a 100644
767 +--- a/arch/arm64/kernel/entry.S
768 ++++ b/arch/arm64/kernel/entry.S
769 +@@ -653,6 +653,7 @@ el0_sync:
770 + mov x0, sp
771 + bl el0_sync_handler
772 + b ret_to_user
773 ++ENDPROC(el0_sync)
774 +
775 + #ifdef CONFIG_COMPAT
776 + .align 6
777 +@@ -661,16 +662,18 @@ el0_sync_compat:
778 + mov x0, sp
779 + bl el0_sync_compat_handler
780 + b ret_to_user
781 +-ENDPROC(el0_sync)
782 ++ENDPROC(el0_sync_compat)
783 +
784 + .align 6
785 + el0_irq_compat:
786 + kernel_entry 0, 32
787 + b el0_irq_naked
788 ++ENDPROC(el0_irq_compat)
789 +
790 + el0_error_compat:
791 + kernel_entry 0, 32
792 + b el0_error_naked
793 ++ENDPROC(el0_error_compat)
794 + #endif
795 +
796 + .align 6
797 +diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
798 +index 3eb338f14386..94289d126993 100644
799 +--- a/arch/arm64/kernel/fpsimd.c
800 ++++ b/arch/arm64/kernel/fpsimd.c
801 +@@ -269,6 +269,7 @@ static void sve_free(struct task_struct *task)
802 + */
803 + static void task_fpsimd_load(void)
804 + {
805 ++ WARN_ON(!system_supports_fpsimd());
806 + WARN_ON(!have_cpu_fpsimd_context());
807 +
808 + if (system_supports_sve() && test_thread_flag(TIF_SVE))
809 +@@ -289,6 +290,7 @@ static void fpsimd_save(void)
810 + this_cpu_ptr(&fpsimd_last_state);
811 + /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
812 +
813 ++ WARN_ON(!system_supports_fpsimd());
814 + WARN_ON(!have_cpu_fpsimd_context());
815 +
816 + if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
817 +@@ -1092,6 +1094,7 @@ void fpsimd_bind_task_to_cpu(void)
818 + struct fpsimd_last_state_struct *last =
819 + this_cpu_ptr(&fpsimd_last_state);
820 +
821 ++ WARN_ON(!system_supports_fpsimd());
822 + last->st = &current->thread.uw.fpsimd_state;
823 + last->sve_state = current->thread.sve_state;
824 + last->sve_vl = current->thread.sve_vl;
825 +@@ -1114,6 +1117,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
826 + struct fpsimd_last_state_struct *last =
827 + this_cpu_ptr(&fpsimd_last_state);
828 +
829 ++ WARN_ON(!system_supports_fpsimd());
830 + WARN_ON(!in_softirq() && !irqs_disabled());
831 +
832 + last->st = st;
833 +@@ -1128,8 +1132,19 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
834 + */
835 + void fpsimd_restore_current_state(void)
836 + {
837 +- if (!system_supports_fpsimd())
838 ++ /*
839 ++ * For the tasks that were created before we detected the absence of
840 ++ * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(),
841 ++ * e.g, init. This could be then inherited by the children processes.
842 ++ * If we later detect that the system doesn't support FP/SIMD,
843 ++ * we must clear the flag for all the tasks to indicate that the
844 ++ * FPSTATE is clean (as we can't have one) to avoid looping for ever in
845 ++ * do_notify_resume().
846 ++ */
847 ++ if (!system_supports_fpsimd()) {
848 ++ clear_thread_flag(TIF_FOREIGN_FPSTATE);
849 + return;
850 ++ }
851 +
852 + get_cpu_fpsimd_context();
853 +
854 +@@ -1148,7 +1163,7 @@ void fpsimd_restore_current_state(void)
855 + */
856 + void fpsimd_update_current_state(struct user_fpsimd_state const *state)
857 + {
858 +- if (!system_supports_fpsimd())
859 ++ if (WARN_ON(!system_supports_fpsimd()))
860 + return;
861 +
862 + get_cpu_fpsimd_context();
863 +@@ -1179,7 +1194,13 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
864 + void fpsimd_flush_task_state(struct task_struct *t)
865 + {
866 + t->thread.fpsimd_cpu = NR_CPUS;
867 +-
868 ++ /*
869 ++ * If we don't support fpsimd, bail out after we have
870 ++ * reset the fpsimd_cpu for this task and clear the
871 ++ * FPSTATE.
872 ++ */
873 ++ if (!system_supports_fpsimd())
874 ++ return;
875 + barrier();
876 + set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
877 +
878 +@@ -1193,6 +1214,7 @@ void fpsimd_flush_task_state(struct task_struct *t)
879 + */
880 + static void fpsimd_flush_cpu_state(void)
881 + {
882 ++ WARN_ON(!system_supports_fpsimd());
883 + __this_cpu_write(fpsimd_last_state.st, NULL);
884 + set_thread_flag(TIF_FOREIGN_FPSTATE);
885 + }
886 +@@ -1203,6 +1225,8 @@ static void fpsimd_flush_cpu_state(void)
887 + */
888 + void fpsimd_save_and_flush_cpu_state(void)
889 + {
890 ++ if (!system_supports_fpsimd())
891 ++ return;
892 + WARN_ON(preemptible());
893 + __get_cpu_fpsimd_context();
894 + fpsimd_save();
895 +diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
896 +index 6771c399d40c..cd6e5fa48b9c 100644
897 +--- a/arch/arm64/kernel/ptrace.c
898 ++++ b/arch/arm64/kernel/ptrace.c
899 +@@ -615,6 +615,13 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
900 + return 0;
901 + }
902 +
903 ++static int fpr_active(struct task_struct *target, const struct user_regset *regset)
904 ++{
905 ++ if (!system_supports_fpsimd())
906 ++ return -ENODEV;
907 ++ return regset->n;
908 ++}
909 ++
910 + /*
911 + * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
912 + */
913 +@@ -637,6 +644,9 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
914 + unsigned int pos, unsigned int count,
915 + void *kbuf, void __user *ubuf)
916 + {
917 ++ if (!system_supports_fpsimd())
918 ++ return -EINVAL;
919 ++
920 + if (target == current)
921 + fpsimd_preserve_current_state();
922 +
923 +@@ -676,6 +686,9 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
924 + {
925 + int ret;
926 +
927 ++ if (!system_supports_fpsimd())
928 ++ return -EINVAL;
929 ++
930 + ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
931 + if (ret)
932 + return ret;
933 +@@ -1134,6 +1147,7 @@ static const struct user_regset aarch64_regsets[] = {
934 + */
935 + .size = sizeof(u32),
936 + .align = sizeof(u32),
937 ++ .active = fpr_active,
938 + .get = fpr_get,
939 + .set = fpr_set
940 + },
941 +@@ -1348,6 +1362,9 @@ static int compat_vfp_get(struct task_struct *target,
942 + compat_ulong_t fpscr;
943 + int ret, vregs_end_pos;
944 +
945 ++ if (!system_supports_fpsimd())
946 ++ return -EINVAL;
947 ++
948 + uregs = &target->thread.uw.fpsimd_state;
949 +
950 + if (target == current)
951 +@@ -1381,6 +1398,9 @@ static int compat_vfp_set(struct task_struct *target,
952 + compat_ulong_t fpscr;
953 + int ret, vregs_end_pos;
954 +
955 ++ if (!system_supports_fpsimd())
956 ++ return -EINVAL;
957 ++
958 + uregs = &target->thread.uw.fpsimd_state;
959 +
960 + vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
961 +@@ -1438,6 +1458,7 @@ static const struct user_regset aarch32_regsets[] = {
962 + .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
963 + .size = sizeof(compat_ulong_t),
964 + .align = sizeof(compat_ulong_t),
965 ++ .active = fpr_active,
966 + .get = compat_vfp_get,
967 + .set = compat_vfp_set
968 + },
969 +diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
970 +index 72fbbd86eb5e..e5816d885761 100644
971 +--- a/arch/arm64/kvm/hyp/switch.c
972 ++++ b/arch/arm64/kvm/hyp/switch.c
973 +@@ -28,7 +28,15 @@
974 + /* Check whether the FP regs were dirtied while in the host-side run loop: */
975 + static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
976 + {
977 +- if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
978 ++ /*
979 ++ * When the system doesn't support FP/SIMD, we cannot rely on
980 ++ * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
981 ++ * abort on the very first access to FP and thus we should never
982 ++ * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
983 ++ * trap the accesses.
984 ++ */
985 ++ if (!system_supports_fpsimd() ||
986 ++ vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
987 + vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
988 + KVM_ARM64_FP_HOST);
989 +
990 +diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
991 +index dab1fea4752a..a4f48c1ac28c 100644
992 +--- a/arch/arm64/kvm/va_layout.c
993 ++++ b/arch/arm64/kvm/va_layout.c
994 +@@ -13,52 +13,46 @@
995 + #include <asm/kvm_mmu.h>
996 +
997 + /*
998 +- * The LSB of the random hyp VA tag or 0 if no randomization is used.
999 ++ * The LSB of the HYP VA tag
1000 + */
1001 + static u8 tag_lsb;
1002 + /*
1003 +- * The random hyp VA tag value with the region bit if hyp randomization is used
1004 ++ * The HYP VA tag value with the region bit
1005 + */
1006 + static u64 tag_val;
1007 + static u64 va_mask;
1008 +
1009 ++/*
1010 ++ * We want to generate a hyp VA with the following format (with V ==
1011 ++ * vabits_actual):
1012 ++ *
1013 ++ * 63 ... V | V-1 | V-2 .. tag_lsb | tag_lsb - 1 .. 0
1014 ++ * ---------------------------------------------------------
1015 ++ * | 0000000 | hyp_va_msb | random tag | kern linear VA |
1016 ++ * |--------- tag_val -----------|----- va_mask ---|
1017 ++ *
1018 ++ * which does not conflict with the idmap regions.
1019 ++ */
1020 + __init void kvm_compute_layout(void)
1021 + {
1022 + phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
1023 + u64 hyp_va_msb;
1024 +- int kva_msb;
1025 +
1026 + /* Where is my RAM region? */
1027 + hyp_va_msb = idmap_addr & BIT(vabits_actual - 1);
1028 + hyp_va_msb ^= BIT(vabits_actual - 1);
1029 +
1030 +- kva_msb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
1031 ++ tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
1032 + (u64)(high_memory - 1));
1033 +
1034 +- if (kva_msb == (vabits_actual - 1)) {
1035 +- /*
1036 +- * No space in the address, let's compute the mask so
1037 +- * that it covers (vabits_actual - 1) bits, and the region
1038 +- * bit. The tag stays set to zero.
1039 +- */
1040 +- va_mask = BIT(vabits_actual - 1) - 1;
1041 +- va_mask |= hyp_va_msb;
1042 +- } else {
1043 +- /*
1044 +- * We do have some free bits to insert a random tag.
1045 +- * Hyp VAs are now created from kernel linear map VAs
1046 +- * using the following formula (with V == vabits_actual):
1047 +- *
1048 +- * 63 ... V | V-1 | V-2 .. tag_lsb | tag_lsb - 1 .. 0
1049 +- * ---------------------------------------------------------
1050 +- * | 0000000 | hyp_va_msb | random tag | kern linear VA |
1051 +- */
1052 +- tag_lsb = kva_msb;
1053 +- va_mask = GENMASK_ULL(tag_lsb - 1, 0);
1054 +- tag_val = get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
1055 +- tag_val |= hyp_va_msb;
1056 +- tag_val >>= tag_lsb;
1057 ++ va_mask = GENMASK_ULL(tag_lsb - 1, 0);
1058 ++ tag_val = hyp_va_msb;
1059 ++
1060 ++ if (tag_lsb != (vabits_actual - 1)) {
1061 ++ /* We have some free bits to insert a random tag. */
1062 ++ tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
1063 + }
1064 ++ tag_val >>= tag_lsb;
1065 + }
1066 +
1067 + static u32 compute_instruction(int n, u32 rd, u32 rn)
1068 +@@ -117,11 +111,11 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
1069 + * VHE doesn't need any address translation, let's NOP
1070 + * everything.
1071 + *
1072 +- * Alternatively, if we don't have any spare bits in
1073 +- * the address, NOP everything after masking that
1074 +- * kernel VA.
1075 ++ * Alternatively, if the tag is zero (because the layout
1076 ++ * dictates it and we don't have any spare bits in the
1077 ++ * address), NOP everything after masking the kernel VA.
1078 + */
1079 +- if (has_vhe() || (!tag_lsb && i > 0)) {
1080 ++ if (has_vhe() || (!tag_val && i > 0)) {
1081 + updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
1082 + continue;
1083 + }
1084 +diff --git a/arch/mips/loongson64/platform.c b/arch/mips/loongson64/platform.c
1085 +index 13f3404f0030..9674ae1361a8 100644
1086 +--- a/arch/mips/loongson64/platform.c
1087 ++++ b/arch/mips/loongson64/platform.c
1088 +@@ -27,6 +27,9 @@ static int __init loongson3_platform_init(void)
1089 + continue;
1090 +
1091 + pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
1092 ++ if (!pdev)
1093 ++ return -ENOMEM;
1094 ++
1095 + pdev->name = loongson_sysconf.sensors[i].name;
1096 + pdev->id = loongson_sysconf.sensors[i].id;
1097 + pdev->dev.platform_data = &loongson_sysconf.sensors[i];
1098 +diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
1099 +index 4e1d39847462..0b063830eea8 100644
1100 +--- a/arch/powerpc/Kconfig.debug
1101 ++++ b/arch/powerpc/Kconfig.debug
1102 +@@ -371,7 +371,7 @@ config PPC_PTDUMP
1103 +
1104 + config PPC_DEBUG_WX
1105 + bool "Warn on W+X mappings at boot"
1106 +- depends on PPC_PTDUMP
1107 ++ depends on PPC_PTDUMP && STRICT_KERNEL_RWX
1108 + help
1109 + Generate a warning if any W+X mappings are found at boot.
1110 +
1111 +diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
1112 +index 73b84166d06a..5fb90edd865e 100644
1113 +--- a/arch/powerpc/mm/pgtable_32.c
1114 ++++ b/arch/powerpc/mm/pgtable_32.c
1115 +@@ -218,6 +218,7 @@ void mark_rodata_ro(void)
1116 +
1117 + if (v_block_mapped((unsigned long)_sinittext)) {
1118 + mmu_mark_rodata_ro();
1119 ++ ptdump_check_wx();
1120 + return;
1121 + }
1122 +
1123 +diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
1124 +index 6ba081dd61c9..b4ce9d472dfe 100644
1125 +--- a/arch/powerpc/platforms/pseries/iommu.c
1126 ++++ b/arch/powerpc/platforms/pseries/iommu.c
1127 +@@ -36,7 +36,6 @@
1128 + #include <asm/udbg.h>
1129 + #include <asm/mmzone.h>
1130 + #include <asm/plpar_wrappers.h>
1131 +-#include <asm/svm.h>
1132 +
1133 + #include "pseries.h"
1134 +
1135 +@@ -133,10 +132,10 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
1136 + return be64_to_cpu(*tcep);
1137 + }
1138 +
1139 +-static void tce_free_pSeriesLP(struct iommu_table*, long, long);
1140 ++static void tce_free_pSeriesLP(unsigned long liobn, long, long);
1141 + static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
1142 +
1143 +-static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
1144 ++static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
1145 + long npages, unsigned long uaddr,
1146 + enum dma_data_direction direction,
1147 + unsigned long attrs)
1148 +@@ -147,25 +146,25 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
1149 + int ret = 0;
1150 + long tcenum_start = tcenum, npages_start = npages;
1151 +
1152 +- rpn = __pa(uaddr) >> TCE_SHIFT;
1153 ++ rpn = __pa(uaddr) >> tceshift;
1154 + proto_tce = TCE_PCI_READ;
1155 + if (direction != DMA_TO_DEVICE)
1156 + proto_tce |= TCE_PCI_WRITE;
1157 +
1158 + while (npages--) {
1159 +- tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
1160 +- rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);
1161 ++ tce = proto_tce | (rpn & TCE_RPN_MASK) << tceshift;
1162 ++ rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce);
1163 +
1164 + if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
1165 + ret = (int)rc;
1166 +- tce_free_pSeriesLP(tbl, tcenum_start,
1167 ++ tce_free_pSeriesLP(liobn, tcenum_start,
1168 + (npages_start - (npages + 1)));
1169 + break;
1170 + }
1171 +
1172 + if (rc && printk_ratelimit()) {
1173 + printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
1174 +- printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
1175 ++ printk("\tindex = 0x%llx\n", (u64)liobn);
1176 + printk("\ttcenum = 0x%llx\n", (u64)tcenum);
1177 + printk("\ttce val = 0x%llx\n", tce );
1178 + dump_stack();
1179 +@@ -194,7 +193,8 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
1180 + unsigned long flags;
1181 +
1182 + if ((npages == 1) || !firmware_has_feature(FW_FEATURE_MULTITCE)) {
1183 +- return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
1184 ++ return tce_build_pSeriesLP(tbl->it_index, tcenum,
1185 ++ tbl->it_page_shift, npages, uaddr,
1186 + direction, attrs);
1187 + }
1188 +
1189 +@@ -210,8 +210,9 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
1190 + /* If allocation fails, fall back to the loop implementation */
1191 + if (!tcep) {
1192 + local_irq_restore(flags);
1193 +- return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
1194 +- direction, attrs);
1195 ++ return tce_build_pSeriesLP(tbl->it_index, tcenum,
1196 ++ tbl->it_page_shift,
1197 ++ npages, uaddr, direction, attrs);
1198 + }
1199 + __this_cpu_write(tce_page, tcep);
1200 + }
1201 +@@ -262,16 +263,16 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
1202 + return ret;
1203 + }
1204 +
1205 +-static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
1206 ++static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long npages)
1207 + {
1208 + u64 rc;
1209 +
1210 + while (npages--) {
1211 +- rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);
1212 ++ rc = plpar_tce_put((u64)liobn, (u64)tcenum << 12, 0);
1213 +
1214 + if (rc && printk_ratelimit()) {
1215 + printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
1216 +- printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
1217 ++ printk("\tindex = 0x%llx\n", (u64)liobn);
1218 + printk("\ttcenum = 0x%llx\n", (u64)tcenum);
1219 + dump_stack();
1220 + }
1221 +@@ -286,7 +287,7 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
1222 + u64 rc;
1223 +
1224 + if (!firmware_has_feature(FW_FEATURE_MULTITCE))
1225 +- return tce_free_pSeriesLP(tbl, tcenum, npages);
1226 ++ return tce_free_pSeriesLP(tbl->it_index, tcenum, npages);
1227 +
1228 + rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);
1229 +
1230 +@@ -401,6 +402,19 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
1231 + u64 rc = 0;
1232 + long l, limit;
1233 +
1234 ++ if (!firmware_has_feature(FW_FEATURE_MULTITCE)) {
1235 ++ unsigned long tceshift = be32_to_cpu(maprange->tce_shift);
1236 ++ unsigned long dmastart = (start_pfn << PAGE_SHIFT) +
1237 ++ be64_to_cpu(maprange->dma_base);
1238 ++ unsigned long tcenum = dmastart >> tceshift;
1239 ++ unsigned long npages = num_pfn << PAGE_SHIFT >> tceshift;
1240 ++ void *uaddr = __va(start_pfn << PAGE_SHIFT);
1241 ++
1242 ++ return tce_build_pSeriesLP(be32_to_cpu(maprange->liobn),
1243 ++ tcenum, tceshift, npages, (unsigned long) uaddr,
1244 ++ DMA_BIDIRECTIONAL, 0);
1245 ++ }
1246 ++
1247 + local_irq_disable(); /* to protect tcep and the page behind it */
1248 + tcep = __this_cpu_read(tce_page);
1249 +
1250 +@@ -1320,15 +1334,7 @@ void iommu_init_early_pSeries(void)
1251 + of_reconfig_notifier_register(&iommu_reconfig_nb);
1252 + register_memory_notifier(&iommu_mem_nb);
1253 +
1254 +- /*
1255 +- * Secure guest memory is inacessible to devices so regular DMA isn't
1256 +- * possible.
1257 +- *
1258 +- * In that case keep devices' dma_map_ops as NULL so that the generic
1259 +- * DMA code path will use SWIOTLB to bounce buffers for DMA.
1260 +- */
1261 +- if (!is_secure_guest())
1262 +- set_pci_dma_ops(&dma_iommu_ops);
1263 ++ set_pci_dma_ops(&dma_iommu_ops);
1264 + }
1265 +
1266 + static int __init disable_multitce(char *str)
1267 +diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
1268 +index c2ef320ba1bf..eb420655ed0b 100644
1269 +--- a/arch/powerpc/platforms/pseries/papr_scm.c
1270 ++++ b/arch/powerpc/platforms/pseries/papr_scm.c
1271 +@@ -322,6 +322,7 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
1272 + p->bus = nvdimm_bus_register(NULL, &p->bus_desc);
1273 + if (!p->bus) {
1274 + dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn);
1275 ++ kfree(p->bus_desc.provider_name);
1276 + return -ENXIO;
1277 + }
1278 +
1279 +@@ -477,6 +478,7 @@ static int papr_scm_remove(struct platform_device *pdev)
1280 +
1281 + nvdimm_bus_unregister(p->bus);
1282 + drc_pmem_unbind(p);
1283 ++ kfree(p->bus_desc.provider_name);
1284 + kfree(p);
1285 +
1286 + return 0;
1287 +diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
1288 +index 79e2287991db..f682b7babc09 100644
1289 +--- a/arch/powerpc/platforms/pseries/vio.c
1290 ++++ b/arch/powerpc/platforms/pseries/vio.c
1291 +@@ -1176,6 +1176,8 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
1292 + if (tbl == NULL)
1293 + return NULL;
1294 +
1295 ++ kref_init(&tbl->it_kref);
1296 ++
1297 + of_parse_dma_window(dev->dev.of_node, dma_window,
1298 + &tbl->it_index, &offset, &size);
1299 +
1300 +diff --git a/arch/x86/boot/compressed/acpi.c b/arch/x86/boot/compressed/acpi.c
1301 +index 25019d42ae93..ef2ad7253cd5 100644
1302 +--- a/arch/x86/boot/compressed/acpi.c
1303 ++++ b/arch/x86/boot/compressed/acpi.c
1304 +@@ -393,7 +393,13 @@ int count_immovable_mem_regions(void)
1305 + table = table_addr + sizeof(struct acpi_table_srat);
1306 +
1307 + while (table + sizeof(struct acpi_subtable_header) < table_end) {
1308 ++
1309 + sub_table = (struct acpi_subtable_header *)table;
1310 ++ if (!sub_table->length) {
1311 ++ debug_putstr("Invalid zero length SRAT subtable.\n");
1312 ++ return 0;
1313 ++ }
1314 ++
1315 + if (sub_table->type == ACPI_SRAT_TYPE_MEMORY_AFFINITY) {
1316 + struct acpi_srat_mem_affinity *ma;
1317 +
1318 +diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
1319 +index 9ec463fe96f2..2f1e2333bd0a 100644
1320 +--- a/arch/x86/kernel/alternative.c
1321 ++++ b/arch/x86/kernel/alternative.c
1322 +@@ -23,6 +23,7 @@
1323 + #include <asm/nmi.h>
1324 + #include <asm/cacheflush.h>
1325 + #include <asm/tlbflush.h>
1326 ++#include <asm/insn.h>
1327 + #include <asm/io.h>
1328 + #include <asm/fixmap.h>
1329 +
1330 +diff --git a/crypto/testmgr.c b/crypto/testmgr.c
1331 +index 82513b6b0abd..2c96963b2e51 100644
1332 +--- a/crypto/testmgr.c
1333 ++++ b/crypto/testmgr.c
1334 +@@ -2102,6 +2102,7 @@ static void generate_random_aead_testvec(struct aead_request *req,
1335 + * If the key or authentication tag size couldn't be set, no need to
1336 + * continue to encrypt.
1337 + */
1338 ++ vec->crypt_error = 0;
1339 + if (vec->setkey_error || vec->setauthsize_error)
1340 + goto done;
1341 +
1342 +@@ -2245,10 +2246,12 @@ static int test_aead_vs_generic_impl(const char *driver,
1343 + req, tsgls);
1344 + if (err)
1345 + goto out;
1346 +- err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, cfg,
1347 +- req, tsgls);
1348 +- if (err)
1349 +- goto out;
1350 ++ if (vec.crypt_error == 0) {
1351 ++ err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name,
1352 ++ cfg, req, tsgls);
1353 ++ if (err)
1354 ++ goto out;
1355 ++ }
1356 + cond_resched();
1357 + }
1358 + err = 0;
1359 +@@ -2678,6 +2681,15 @@ static void generate_random_cipher_testvec(struct skcipher_request *req,
1360 + skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
1361 + skcipher_request_set_crypt(req, &src, &dst, vec->len, iv);
1362 + vec->crypt_error = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
1363 ++ if (vec->crypt_error != 0) {
1364 ++ /*
1365 ++ * The only acceptable error here is for an invalid length, so
1366 ++ * skcipher decryption should fail with the same error too.
1367 ++ * We'll test for this. But to keep the API usage well-defined,
1368 ++ * explicitly initialize the ciphertext buffer too.
1369 ++ */
1370 ++ memset((u8 *)vec->ctext, 0, vec->len);
1371 ++ }
1372 + done:
1373 + snprintf(name, max_namelen, "\"random: len=%u klen=%u\"",
1374 + vec->len, vec->klen);
1375 +diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
1376 +index 19f57ccfbe1d..59f911e57719 100644
1377 +--- a/drivers/base/regmap/regmap.c
1378 ++++ b/drivers/base/regmap/regmap.c
1379 +@@ -1488,11 +1488,18 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
1380 +
1381 + WARN_ON(!map->bus);
1382 +
1383 +- /* Check for unwritable registers before we start */
1384 +- for (i = 0; i < val_len / map->format.val_bytes; i++)
1385 +- if (!regmap_writeable(map,
1386 +- reg + regmap_get_offset(map, i)))
1387 +- return -EINVAL;
1388 ++ /* Check for unwritable or noinc registers in range
1389 ++ * before we start
1390 ++ */
1391 ++ if (!regmap_writeable_noinc(map, reg)) {
1392 ++ for (i = 0; i < val_len / map->format.val_bytes; i++) {
1393 ++ unsigned int element =
1394 ++ reg + regmap_get_offset(map, i);
1395 ++ if (!regmap_writeable(map, element) ||
1396 ++ regmap_writeable_noinc(map, element))
1397 ++ return -EINVAL;
1398 ++ }
1399 ++ }
1400 +
1401 + if (!map->cache_bypass && map->format.parse_val) {
1402 + unsigned int ival;
1403 +diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
1404 +index b3af61cc6fb9..d2760a021301 100644
1405 +--- a/drivers/clk/meson/g12a.c
1406 ++++ b/drivers/clk/meson/g12a.c
1407 +@@ -4692,6 +4692,7 @@ static struct clk_regmap *const g12a_clk_regmaps[] = {
1408 + &g12a_bt656,
1409 + &g12a_usb1_to_ddr,
1410 + &g12a_mmc_pclk,
1411 ++ &g12a_uart2,
1412 + &g12a_vpu_intr,
1413 + &g12a_gic,
1414 + &g12a_sd_emmc_a_clk0,
1415 +diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
1416 +index 8ea0e4bcde0d..7394671f815b 100644
1417 +--- a/drivers/crypto/atmel-sha.c
1418 ++++ b/drivers/crypto/atmel-sha.c
1419 +@@ -1918,12 +1918,7 @@ static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
1420 + {
1421 + struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1422 +
1423 +- if (atmel_sha_hmac_key_set(&hmac->hkey, key, keylen)) {
1424 +- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1425 +- return -EINVAL;
1426 +- }
1427 +-
1428 +- return 0;
1429 ++ return atmel_sha_hmac_key_set(&hmac->hkey, key, keylen);
1430 + }
1431 +
1432 + static int atmel_sha_hmac_init(struct ahash_request *req)
1433 +diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
1434 +index 4b20606983a4..22ebe40f09f5 100644
1435 +--- a/drivers/crypto/axis/artpec6_crypto.c
1436 ++++ b/drivers/crypto/axis/artpec6_crypto.c
1437 +@@ -1251,7 +1251,7 @@ static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
1438 +
1439 + if (len != 16 && len != 24 && len != 32) {
1440 + crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1441 +- return -1;
1442 ++ return -EINVAL;
1443 + }
1444 +
1445 + ctx->key_length = len;
1446 +diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
1447 +index 3443f6d6dd83..6863d7097674 100644
1448 +--- a/drivers/crypto/caam/caamalg_qi2.c
1449 ++++ b/drivers/crypto/caam/caamalg_qi2.c
1450 +@@ -2481,7 +2481,7 @@ static struct caam_aead_alg driver_aeads[] = {
1451 + .cra_name = "echainiv(authenc(hmac(sha256),"
1452 + "cbc(des)))",
1453 + .cra_driver_name = "echainiv-authenc-"
1454 +- "hmac-sha256-cbc-desi-"
1455 ++ "hmac-sha256-cbc-des-"
1456 + "caam-qi2",
1457 + .cra_blocksize = DES_BLOCK_SIZE,
1458 + },
1459 +diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
1460 +index a0ee404b736e..f1d149e32839 100644
1461 +--- a/drivers/dma/dma-axi-dmac.c
1462 ++++ b/drivers/dma/dma-axi-dmac.c
1463 +@@ -830,6 +830,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
1464 + struct dma_device *dma_dev;
1465 + struct axi_dmac *dmac;
1466 + struct resource *res;
1467 ++ struct regmap *regmap;
1468 + int ret;
1469 +
1470 + dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
1471 +@@ -921,10 +922,17 @@ static int axi_dmac_probe(struct platform_device *pdev)
1472 +
1473 + platform_set_drvdata(pdev, dmac);
1474 +
1475 +- devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config);
1476 ++ regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
1477 ++ &axi_dmac_regmap_config);
1478 ++ if (IS_ERR(regmap)) {
1479 ++ ret = PTR_ERR(regmap);
1480 ++ goto err_free_irq;
1481 ++ }
1482 +
1483 + return 0;
1484 +
1485 ++err_free_irq:
1486 ++ free_irq(dmac->irq, dmac);
1487 + err_unregister_of:
1488 + of_dma_controller_free(pdev->dev.of_node);
1489 + err_unregister_device:
1490 +diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
1491 +index 958161c71985..790ea3fda693 100644
1492 +--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
1493 ++++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
1494 +@@ -273,6 +273,7 @@ static int ec_i2c_probe(struct platform_device *pdev)
1495 + bus->adap.dev.parent = &pdev->dev;
1496 + bus->adap.dev.of_node = pdev->dev.of_node;
1497 + bus->adap.retries = I2C_MAX_RETRIES;
1498 ++ ACPI_COMPANION_SET(&bus->adap.dev, ACPI_COMPANION(&pdev->dev));
1499 +
1500 + err = i2c_add_adapter(&bus->adap);
1501 + if (err)
1502 +@@ -298,7 +299,7 @@ static const struct of_device_id cros_ec_i2c_of_match[] = {
1503 + MODULE_DEVICE_TABLE(of, cros_ec_i2c_of_match);
1504 +
1505 + static const struct acpi_device_id cros_ec_i2c_tunnel_acpi_id[] = {
1506 +- { "GOOG001A", 0 },
1507 ++ { "GOOG0012", 0 },
1508 + { }
1509 + };
1510 + MODULE_DEVICE_TABLE(acpi, cros_ec_i2c_tunnel_acpi_id);
1511 +diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
1512 +index 606fa6d86685..1753a9801b70 100644
1513 +--- a/drivers/infiniband/core/addr.c
1514 ++++ b/drivers/infiniband/core/addr.c
1515 +@@ -139,7 +139,7 @@ int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
1516 + if (ib_nl_is_good_ip_resp(nlh))
1517 + ib_nl_process_good_ip_rsep(nlh);
1518 +
1519 +- return skb->len;
1520 ++ return 0;
1521 + }
1522 +
1523 + static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
1524 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
1525 +index 43a6f07e0afe..af1afc17b8bd 100644
1526 +--- a/drivers/infiniband/core/cma.c
1527 ++++ b/drivers/infiniband/core/cma.c
1528 +@@ -3118,6 +3118,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
1529 + rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1530 + rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
1531 +
1532 ++ atomic_inc(&id_priv->refcount);
1533 + cma_init_resolve_addr_work(work, id_priv);
1534 + queue_work(cma_wq, &work->work);
1535 + return 0;
1536 +@@ -3144,6 +3145,7 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
1537 + rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
1538 + &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
1539 +
1540 ++ atomic_inc(&id_priv->refcount);
1541 + cma_init_resolve_addr_work(work, id_priv);
1542 + queue_work(cma_wq, &work->work);
1543 + return 0;
1544 +diff --git a/drivers/infiniband/core/ib_core_uverbs.c b/drivers/infiniband/core/ib_core_uverbs.c
1545 +index b7cb59844ece..b51bd7087a88 100644
1546 +--- a/drivers/infiniband/core/ib_core_uverbs.c
1547 ++++ b/drivers/infiniband/core/ib_core_uverbs.c
1548 +@@ -232,7 +232,9 @@ void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry)
1549 + if (!entry)
1550 + return;
1551 +
1552 ++ xa_lock(&entry->ucontext->mmap_xa);
1553 + entry->driver_removed = true;
1554 ++ xa_unlock(&entry->ucontext->mmap_xa);
1555 + kref_put(&entry->ref, rdma_user_mmap_entry_free);
1556 + }
1557 + EXPORT_SYMBOL(rdma_user_mmap_entry_remove);
1558 +diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
1559 +index 8917125ea16d..30d4c126a2db 100644
1560 +--- a/drivers/infiniband/core/sa_query.c
1561 ++++ b/drivers/infiniband/core/sa_query.c
1562 +@@ -1068,7 +1068,7 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
1563 + }
1564 +
1565 + settimeout_out:
1566 +- return skb->len;
1567 ++ return 0;
1568 + }
1569 +
1570 + static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
1571 +@@ -1139,7 +1139,7 @@ int ib_nl_handle_resolve_resp(struct sk_buff *skb,
1572 + }
1573 +
1574 + resp_out:
1575 +- return skb->len;
1576 ++ return 0;
1577 + }
1578 +
1579 + static void free_sm_ah(struct kref *kref)
1580 +diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
1581 +index 7a3b99597ead..40cadb889114 100644
1582 +--- a/drivers/infiniband/core/umem.c
1583 ++++ b/drivers/infiniband/core/umem.c
1584 +@@ -166,10 +166,13 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
1585 + * for any address.
1586 + */
1587 + mask |= (sg_dma_address(sg) + pgoff) ^ va;
1588 +- if (i && i != (umem->nmap - 1))
1589 +- /* restrict by length as well for interior SGEs */
1590 +- mask |= sg_dma_len(sg);
1591 + va += sg_dma_len(sg) - pgoff;
1592 ++ /* Except for the last entry, the ending iova alignment sets
1593 ++ * the maximum possible page size as the low bits of the iova
1594 ++ * must be zero when starting the next chunk.
1595 ++ */
1596 ++ if (i != (umem->nmap - 1))
1597 ++ mask |= va;
1598 + pgoff = 0;
1599 + }
1600 + best_pg_bit = rdma_find_pg_bit(mask, pgsz_bitmap);
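[Illustrative aside, not part of the patch] The ib_umem_find_best_pgsz() change above folds each interior chunk's ending IOVA into the accumulated mask, so the end alignment rather than the raw length constrains the page size. The arithmetic can be sketched in plain C: OR together every address bit that must stay zero inside a page, then the lowest set bit of the result caps the largest usable page size. This is only an illustration of that idea, not the kernel helper itself.

#include <stdio.h>
#include <stdint.h>

/* Largest power-of-two page size compatible with an accumulated
 * "misalignment" mask: the lowest set bit caps the page size. */
static uint64_t best_page_size(uint64_t mask)
{
	if (!mask)
		return 1ULL << 63;	/* nothing constrains the size */
	return 1ULL << __builtin_ctzll(mask);
}

int main(void)
{
	uint64_t mask = 0;

	/* One boundary is 2 MiB aligned, another only 4 KiB aligned:
	 * the 4 KiB bit dominates. */
	mask |= 0x200000;
	mask |= 0x1000;
	printf("best page size: %llu bytes\n",
	       (unsigned long long)best_page_size(mask));
	return 0;
}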
1601 +diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
1602 +index 970d8e31dd65..8f5de4dcad97 100644
1603 +--- a/drivers/infiniband/core/uverbs_main.c
1604 ++++ b/drivers/infiniband/core/uverbs_main.c
1605 +@@ -220,7 +220,6 @@ void ib_uverbs_release_file(struct kref *ref)
1606 + }
1607 +
1608 + static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
1609 +- struct ib_uverbs_file *uverbs_file,
1610 + struct file *filp, char __user *buf,
1611 + size_t count, loff_t *pos,
1612 + size_t eventsz)
1613 +@@ -238,19 +237,16 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
1614 +
1615 + if (wait_event_interruptible(ev_queue->poll_wait,
1616 + (!list_empty(&ev_queue->event_list) ||
1617 +- /* The barriers built into wait_event_interruptible()
1618 +- * and wake_up() guarentee this will see the null set
1619 +- * without using RCU
1620 +- */
1621 +- !uverbs_file->device->ib_dev)))
1622 ++ ev_queue->is_closed)))
1623 + return -ERESTARTSYS;
1624 +
1625 ++ spin_lock_irq(&ev_queue->lock);
1626 ++
1627 + /* If device was disassociated and no event exists set an error */
1628 +- if (list_empty(&ev_queue->event_list) &&
1629 +- !uverbs_file->device->ib_dev)
1630 ++ if (list_empty(&ev_queue->event_list) && ev_queue->is_closed) {
1631 ++ spin_unlock_irq(&ev_queue->lock);
1632 + return -EIO;
1633 +-
1634 +- spin_lock_irq(&ev_queue->lock);
1635 ++ }
1636 + }
1637 +
1638 + event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);
1639 +@@ -285,8 +281,7 @@ static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
1640 + {
1641 + struct ib_uverbs_async_event_file *file = filp->private_data;
1642 +
1643 +- return ib_uverbs_event_read(&file->ev_queue, file->uverbs_file, filp,
1644 +- buf, count, pos,
1645 ++ return ib_uverbs_event_read(&file->ev_queue, filp, buf, count, pos,
1646 + sizeof(struct ib_uverbs_async_event_desc));
1647 + }
1648 +
1649 +@@ -296,9 +291,8 @@ static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
1650 + struct ib_uverbs_completion_event_file *comp_ev_file =
1651 + filp->private_data;
1652 +
1653 +- return ib_uverbs_event_read(&comp_ev_file->ev_queue,
1654 +- comp_ev_file->uobj.ufile, filp,
1655 +- buf, count, pos,
1656 ++ return ib_uverbs_event_read(&comp_ev_file->ev_queue, filp, buf, count,
1657 ++ pos,
1658 + sizeof(struct ib_uverbs_comp_event_desc));
1659 + }
1660 +
1661 +@@ -321,7 +315,9 @@ static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
1662 + static __poll_t ib_uverbs_async_event_poll(struct file *filp,
1663 + struct poll_table_struct *wait)
1664 + {
1665 +- return ib_uverbs_event_poll(filp->private_data, filp, wait);
1666 ++ struct ib_uverbs_async_event_file *file = filp->private_data;
1667 ++
1668 ++ return ib_uverbs_event_poll(&file->ev_queue, filp, wait);
1669 + }
1670 +
1671 + static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
1672 +@@ -335,9 +331,9 @@ static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
1673 +
1674 + static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
1675 + {
1676 +- struct ib_uverbs_event_queue *ev_queue = filp->private_data;
1677 ++ struct ib_uverbs_async_event_file *file = filp->private_data;
1678 +
1679 +- return fasync_helper(fd, filp, on, &ev_queue->async_queue);
1680 ++ return fasync_helper(fd, filp, on, &file->ev_queue.async_queue);
1681 + }
1682 +
1683 + static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
1684 +diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
1685 +index d44cf33df81a..238614370927 100644
1686 +--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
1687 ++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
1688 +@@ -1225,6 +1225,8 @@ static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
1689 + const struct in_ifaddr *ifa;
1690 +
1691 + idev = in_dev_get(dev);
1692 ++ if (!idev)
1693 ++ continue;
1694 + in_dev_for_each_ifa_rtnl(ifa, idev) {
1695 + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
1696 + "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
1697 +diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
1698 +index ecd6cadd529a..b591861934b3 100644
1699 +--- a/drivers/infiniband/hw/mlx4/cm.c
1700 ++++ b/drivers/infiniband/hw/mlx4/cm.c
1701 +@@ -186,23 +186,6 @@ out:
1702 + kfree(ent);
1703 + }
1704 +
1705 +-static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
1706 +-{
1707 +- struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
1708 +- struct rb_root *sl_id_map = &sriov->sl_id_map;
1709 +- struct id_map_entry *ent, *found_ent;
1710 +-
1711 +- spin_lock(&sriov->id_map_lock);
1712 +- ent = xa_erase(&sriov->pv_id_table, pv_cm_id);
1713 +- if (!ent)
1714 +- goto out;
1715 +- found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
1716 +- if (found_ent && found_ent == ent)
1717 +- rb_erase(&found_ent->node, sl_id_map);
1718 +-out:
1719 +- spin_unlock(&sriov->id_map_lock);
1720 +-}
1721 +-
1722 + static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
1723 + {
1724 + struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
1725 +@@ -294,7 +277,7 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
1726 + spin_lock(&sriov->id_map_lock);
1727 + spin_lock_irqsave(&sriov->going_down_lock, flags);
1728 + /*make sure that there is no schedule inside the scheduled work.*/
1729 +- if (!sriov->is_going_down) {
1730 ++ if (!sriov->is_going_down && !id->scheduled_delete) {
1731 + id->scheduled_delete = 1;
1732 + schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
1733 + }
1734 +@@ -341,9 +324,6 @@ cont:
1735 +
1736 + if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
1737 + schedule_delayed(ibdev, id);
1738 +- else if (mad->mad_hdr.attr_id == CM_DREP_ATTR_ID)
1739 +- id_map_find_del(ibdev, pv_cm_id);
1740 +-
1741 + return 0;
1742 + }
1743 +
1744 +@@ -382,12 +362,9 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
1745 + *slave = id->slave_id;
1746 + set_remote_comm_id(mad, id->sl_cm_id);
1747 +
1748 +- if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
1749 ++ if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID ||
1750 ++ mad->mad_hdr.attr_id == CM_REJ_ATTR_ID)
1751 + schedule_delayed(ibdev, id);
1752 +- else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
1753 +- mad->mad_hdr.attr_id == CM_DREP_ATTR_ID) {
1754 +- id_map_find_del(ibdev, (int) pv_cm_id);
1755 +- }
1756 +
1757 + return 0;
1758 + }
1759 +diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
1760 +index 34055cbab38c..2f5d9b181848 100644
1761 +--- a/drivers/infiniband/hw/mlx4/main.c
1762 ++++ b/drivers/infiniband/hw/mlx4/main.c
1763 +@@ -246,6 +246,13 @@ static int mlx4_ib_update_gids(struct gid_entry *gids,
1764 + return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
1765 + }
1766 +
1767 ++static void free_gid_entry(struct gid_entry *entry)
1768 ++{
1769 ++ memset(&entry->gid, 0, sizeof(entry->gid));
1770 ++ kfree(entry->ctx);
1771 ++ entry->ctx = NULL;
1772 ++}
1773 ++
1774 + static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
1775 + {
1776 + struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
1777 +@@ -313,6 +320,8 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
1778 + GFP_ATOMIC);
1779 + if (!gids) {
1780 + ret = -ENOMEM;
1781 ++ *context = NULL;
1782 ++ free_gid_entry(&port_gid_table->gids[free]);
1783 + } else {
1784 + for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
1785 + memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
1786 +@@ -324,6 +333,12 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
1787 +
1788 + if (!ret && hw_update) {
1789 + ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
1790 ++ if (ret) {
1791 ++ spin_lock_bh(&iboe->lock);
1792 ++ *context = NULL;
1793 ++ free_gid_entry(&port_gid_table->gids[free]);
1794 ++ spin_unlock_bh(&iboe->lock);
1795 ++ }
1796 + kfree(gids);
1797 + }
1798 +
1799 +@@ -353,10 +368,7 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
1800 + if (!ctx->refcount) {
1801 + unsigned int real_index = ctx->real_index;
1802 +
1803 +- memset(&port_gid_table->gids[real_index].gid, 0,
1804 +- sizeof(port_gid_table->gids[real_index].gid));
1805 +- kfree(port_gid_table->gids[real_index].ctx);
1806 +- port_gid_table->gids[real_index].ctx = NULL;
1807 ++ free_gid_entry(&port_gid_table->gids[real_index]);
1808 + hw_update = 1;
1809 + }
1810 + }
1811 +diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
1812 +index 4f0edd4832bd..b61165359954 100644
1813 +--- a/drivers/infiniband/hw/mlx5/ib_virt.c
1814 ++++ b/drivers/infiniband/hw/mlx5/ib_virt.c
1815 +@@ -164,8 +164,10 @@ static int set_vf_node_guid(struct ib_device *device, int vf, u8 port, u64 guid)
1816 + in->field_select = MLX5_HCA_VPORT_SEL_NODE_GUID;
1817 + in->node_guid = guid;
1818 + err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
1819 +- if (!err)
1820 ++ if (!err) {
1821 + vfs_ctx[vf].node_guid = guid;
1822 ++ vfs_ctx[vf].node_guid_valid = 1;
1823 ++ }
1824 + kfree(in);
1825 + return err;
1826 + }
1827 +@@ -185,8 +187,10 @@ static int set_vf_port_guid(struct ib_device *device, int vf, u8 port, u64 guid)
1828 + in->field_select = MLX5_HCA_VPORT_SEL_PORT_GUID;
1829 + in->port_guid = guid;
1830 + err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
1831 +- if (!err)
1832 ++ if (!err) {
1833 + vfs_ctx[vf].port_guid = guid;
1834 ++ vfs_ctx[vf].port_guid_valid = 1;
1835 ++ }
1836 + kfree(in);
1837 + return err;
1838 + }
1839 +@@ -208,20 +212,12 @@ int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
1840 + {
1841 + struct mlx5_ib_dev *dev = to_mdev(device);
1842 + struct mlx5_core_dev *mdev = dev->mdev;
1843 +- struct mlx5_hca_vport_context *rep;
1844 +- int err;
1845 +-
1846 +- rep = kzalloc(sizeof(*rep), GFP_KERNEL);
1847 +- if (!rep)
1848 +- return -ENOMEM;
1849 ++ struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
1850 +
1851 +- err = mlx5_query_hca_vport_context(mdev, 1, 1, vf+1, rep);
1852 +- if (err)
1853 +- goto ex;
1854 ++ node_guid->guid =
1855 ++ vfs_ctx[vf].node_guid_valid ? vfs_ctx[vf].node_guid : 0;
1856 ++ port_guid->guid =
1857 ++ vfs_ctx[vf].port_guid_valid ? vfs_ctx[vf].port_guid : 0;
1858 +
1859 +- port_guid->guid = rep->port_guid;
1860 +- node_guid->guid = rep->node_guid;
1861 +-ex:
1862 +- kfree(rep);
1863 +- return err;
1864 ++ return 0;
1865 + }
1866 +diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
1867 +index ea8bfc3e2d8d..23c4529edf54 100644
1868 +--- a/drivers/infiniband/hw/mlx5/mr.c
1869 ++++ b/drivers/infiniband/hw/mlx5/mr.c
1870 +@@ -1247,6 +1247,8 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1871 +
1872 + if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start &&
1873 + length == U64_MAX) {
1874 ++ if (virt_addr != start)
1875 ++ return ERR_PTR(-EINVAL);
1876 + if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
1877 + !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
1878 + return ERR_PTR(-EINVAL);
1879 +diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
1880 +index f924250f80c2..8247c26a1ce9 100644
1881 +--- a/drivers/infiniband/hw/mlx5/odp.c
1882 ++++ b/drivers/infiniband/hw/mlx5/odp.c
1883 +@@ -624,11 +624,10 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
1884 + bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
1885 + unsigned long current_seq;
1886 + u64 access_mask;
1887 +- u64 start_idx, page_mask;
1888 ++ u64 start_idx;
1889 +
1890 + page_shift = odp->page_shift;
1891 +- page_mask = ~(BIT(page_shift) - 1);
1892 +- start_idx = (user_va - (mr->mmkey.iova & page_mask)) >> page_shift;
1893 ++ start_idx = (user_va - ib_umem_start(odp)) >> page_shift;
1894 + access_mask = ODP_READ_ALLOWED_BIT;
1895 +
1896 + if (odp->umem.writable && !downgrade)
1897 +@@ -767,11 +766,19 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
1898 + {
1899 + struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
1900 +
1901 ++ if (unlikely(io_virt < mr->mmkey.iova))
1902 ++ return -EFAULT;
1903 ++
1904 + if (!odp->is_implicit_odp) {
1905 +- if (unlikely(io_virt < ib_umem_start(odp) ||
1906 +- ib_umem_end(odp) - io_virt < bcnt))
1907 ++ u64 user_va;
1908 ++
1909 ++ if (check_add_overflow(io_virt - mr->mmkey.iova,
1910 ++ (u64)odp->umem.address, &user_va))
1911 ++ return -EFAULT;
1912 ++ if (unlikely(user_va >= ib_umem_end(odp) ||
1913 ++ ib_umem_end(odp) - user_va < bcnt))
1914 + return -EFAULT;
1915 +- return pagefault_real_mr(mr, odp, io_virt, bcnt, bytes_mapped,
1916 ++ return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped,
1917 + flags);
1918 + }
1919 + return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped,
1920 +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
1921 +index b7f7a5f7bd98..cd1181c39ed2 100644
1922 +--- a/drivers/infiniband/ulp/srp/ib_srp.c
1923 ++++ b/drivers/infiniband/ulp/srp/ib_srp.c
1924 +@@ -2546,7 +2546,8 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
1925 + if (lrsp->opcode == SRP_LOGIN_RSP) {
1926 + ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
1927 + ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
1928 +- ch->use_imm_data = lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP;
1929 ++ ch->use_imm_data = srp_use_imm_data &&
1930 ++ (lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
1931 + ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
1932 + ch->use_imm_data,
1933 + target->max_it_iu_size);
1934 +diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
1935 +index effe72eb89e7..2f7680faba49 100644
1936 +--- a/drivers/iommu/arm-smmu-v3.c
1937 ++++ b/drivers/iommu/arm-smmu-v3.c
1938 +@@ -856,6 +856,7 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
1939 + cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
1940 + break;
1941 + case CMDQ_OP_TLBI_NH_VA:
1942 ++ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
1943 + cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
1944 + cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
1945 + cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
1946 +diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
1947 +index be2a2a201603..33ddc5269e8d 100644
1948 +--- a/drivers/md/bcache/journal.c
1949 ++++ b/drivers/md/bcache/journal.c
1950 +@@ -417,10 +417,14 @@ err:
1951 +
1952 + /* Journalling */
1953 +
1954 ++#define nr_to_fifo_front(p, front_p, mask) (((p) - (front_p)) & (mask))
1955 ++
1956 + static void btree_flush_write(struct cache_set *c)
1957 + {
1958 + struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
1959 +- unsigned int i, n;
1960 ++ unsigned int i, nr, ref_nr;
1961 ++ atomic_t *fifo_front_p, *now_fifo_front_p;
1962 ++ size_t mask;
1963 +
1964 + if (c->journal.btree_flushing)
1965 + return;
1966 +@@ -433,12 +437,50 @@ static void btree_flush_write(struct cache_set *c)
1967 + c->journal.btree_flushing = true;
1968 + spin_unlock(&c->journal.flush_write_lock);
1969 +
1970 ++ /* get the oldest journal entry and check its refcount */
1971 ++ spin_lock(&c->journal.lock);
1972 ++ fifo_front_p = &fifo_front(&c->journal.pin);
1973 ++ ref_nr = atomic_read(fifo_front_p);
1974 ++ if (ref_nr <= 0) {
1975 ++ /*
1976 ++ * do nothing if no btree node references
1977 ++ * the oldest journal entry
1978 ++ */
1979 ++ spin_unlock(&c->journal.lock);
1980 ++ goto out;
1981 ++ }
1982 ++ spin_unlock(&c->journal.lock);
1983 ++
1984 ++ mask = c->journal.pin.mask;
1985 ++ nr = 0;
1986 + atomic_long_inc(&c->flush_write);
1987 + memset(btree_nodes, 0, sizeof(btree_nodes));
1988 +- n = 0;
1989 +
1990 + mutex_lock(&c->bucket_lock);
1991 + list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
1992 ++ /*
1993 ++ * It is safe to get now_fifo_front_p without holding
1994 ++ * c->journal.lock here, because we don't need to know
1995 ++ * the exact value, just check whether the
1996 ++ * front pointer of c->journal.pin is changed.
1997 ++ */
1998 ++ now_fifo_front_p = &fifo_front(&c->journal.pin);
1999 ++ /*
2000 ++ * If the oldest journal entry is reclaimed and front
2001 ++ * pointer of c->journal.pin changes, it is unnecessary
2002 ++ * to scan c->btree_cache anymore, just quit the loop and
2003 ++ * flush out what we have already.
2004 ++ */
2005 ++ if (now_fifo_front_p != fifo_front_p)
2006 ++ break;
2007 ++ /*
2008 ++ * quit this loop if all matching btree nodes are
2009 ++ * scanned and recorded in btree_nodes[] already.
2010 ++ */
2011 ++ ref_nr = atomic_read(fifo_front_p);
2012 ++ if (nr >= ref_nr)
2013 ++ break;
2014 ++
2015 + if (btree_node_journal_flush(b))
2016 + pr_err("BUG: flush_write bit should not be set here!");
2017 +
2018 +@@ -454,17 +496,44 @@ static void btree_flush_write(struct cache_set *c)
2019 + continue;
2020 + }
2021 +
2022 ++ /*
2023 ++ * Only select the btree node which exactly references
2024 ++ * the oldest journal entry.
2025 ++ *
2026 ++ * If the journal entry pointed to by fifo_front_p is
2027 ++ * reclaimed in parallel, don't worry:
2028 ++ * - the list_for_each_xxx loop will quit when checking
2029 ++ * next now_fifo_front_p.
2030 ++ * - If there are matched nodes recorded in btree_nodes[],
2031 ++ * they are clean now (this is why and how the oldest
2032 ++ * journal entry can be reclaimed). These selected nodes
2033 ++ * will be ignored and skipped in the following for-loop.
2034 ++ */
2035 ++ if (nr_to_fifo_front(btree_current_write(b)->journal,
2036 ++ fifo_front_p,
2037 ++ mask) != 0) {
2038 ++ mutex_unlock(&b->write_lock);
2039 ++ continue;
2040 ++ }
2041 ++
2042 + set_btree_node_journal_flush(b);
2043 +
2044 + mutex_unlock(&b->write_lock);
2045 +
2046 +- btree_nodes[n++] = b;
2047 +- if (n == BTREE_FLUSH_NR)
2048 ++ btree_nodes[nr++] = b;
2049 ++ /*
2050 ++ * To avoid holding c->bucket_lock for too long,
2051 ++ * only scan for BTREE_FLUSH_NR matched btree nodes
2052 ++ * at most. If there are more btree nodes referencing
2053 ++ * the oldest journal entry, try to flush them next
2054 ++ * time when btree_flush_write() is called.
2055 ++ */
2056 ++ if (nr == BTREE_FLUSH_NR)
2057 + break;
2058 + }
2059 + mutex_unlock(&c->bucket_lock);
2060 +
2061 +- for (i = 0; i < n; i++) {
2062 ++ for (i = 0; i < nr; i++) {
2063 + b = btree_nodes[i];
2064 + if (!b) {
2065 + pr_err("BUG: btree_nodes[%d] is NULL", i);
2066 +@@ -497,6 +566,7 @@ static void btree_flush_write(struct cache_set *c)
2067 + mutex_unlock(&b->write_lock);
2068 + }
2069 +
2070 ++out:
2071 + spin_lock(&c->journal.flush_write_lock);
2072 + c->journal.btree_flushing = false;
2073 + spin_unlock(&c->journal.flush_write_lock);
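[Illustrative aside, not part of the patch] The btree_flush_write() rework above only selects btree nodes whose journal reference is exactly the oldest entry, using nr_to_fifo_front() to measure how far a pin pointer sits from the FIFO front in a power-of-two ring. A small standalone sketch of that wrap-safe distance calculation follows; the ring layout here is a simplified assumption, not bcache's actual fifo implementation.

#include <stdio.h>
#include <stddef.h>

#define RING_SIZE 8			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

/* Distance from the front slot to an arbitrary slot; the subtraction
 * is reduced modulo the ring size, so wrap-around is handled. */
static size_t dist_to_front(size_t slot, size_t front)
{
	return (slot - front) & RING_MASK;
}

int main(void)
{
	/* Front at slot 6; a pin at slot 1 is 3 entries behind it. */
	printf("%zu\n", dist_to_front(1, 6));	/* prints 3 */
	/* A pin at the front itself has distance 0 - the "oldest" case. */
	printf("%zu\n", dist_to_front(6, 6));	/* prints 0 */
	return 0;
}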
2074 +diff --git a/drivers/media/i2c/adv748x/adv748x.h b/drivers/media/i2c/adv748x/adv748x.h
2075 +index 5042f9e94aee..fccb388ce179 100644
2076 +--- a/drivers/media/i2c/adv748x/adv748x.h
2077 ++++ b/drivers/media/i2c/adv748x/adv748x.h
2078 +@@ -394,10 +394,10 @@ int adv748x_write_block(struct adv748x_state *state, int client_page,
2079 +
2080 + #define io_read(s, r) adv748x_read(s, ADV748X_PAGE_IO, r)
2081 + #define io_write(s, r, v) adv748x_write(s, ADV748X_PAGE_IO, r, v)
2082 +-#define io_clrset(s, r, m, v) io_write(s, r, (io_read(s, r) & ~m) | v)
2083 ++#define io_clrset(s, r, m, v) io_write(s, r, (io_read(s, r) & ~(m)) | (v))
2084 +
2085 + #define hdmi_read(s, r) adv748x_read(s, ADV748X_PAGE_HDMI, r)
2086 +-#define hdmi_read16(s, r, m) (((hdmi_read(s, r) << 8) | hdmi_read(s, r+1)) & m)
2087 ++#define hdmi_read16(s, r, m) (((hdmi_read(s, r) << 8) | hdmi_read(s, (r)+1)) & (m))
2088 + #define hdmi_write(s, r, v) adv748x_write(s, ADV748X_PAGE_HDMI, r, v)
2089 +
2090 + #define repeater_read(s, r) adv748x_read(s, ADV748X_PAGE_REPEATER, r)
2091 +@@ -405,11 +405,11 @@ int adv748x_write_block(struct adv748x_state *state, int client_page,
2092 +
2093 + #define sdp_read(s, r) adv748x_read(s, ADV748X_PAGE_SDP, r)
2094 + #define sdp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_SDP, r, v)
2095 +-#define sdp_clrset(s, r, m, v) sdp_write(s, r, (sdp_read(s, r) & ~m) | v)
2096 ++#define sdp_clrset(s, r, m, v) sdp_write(s, r, (sdp_read(s, r) & ~(m)) | (v))
2097 +
2098 + #define cp_read(s, r) adv748x_read(s, ADV748X_PAGE_CP, r)
2099 + #define cp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_CP, r, v)
2100 +-#define cp_clrset(s, r, m, v) cp_write(s, r, (cp_read(s, r) & ~m) | v)
2101 ++#define cp_clrset(s, r, m, v) cp_write(s, r, (cp_read(s, r) & ~(m)) | (v))
2102 +
2103 + #define tx_read(t, r) adv748x_read(t->state, t->page, r)
2104 + #define tx_write(t, r, v) adv748x_write(t->state, t->page, r, v)
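[Illustrative aside, not part of the patch] The adv748x.h hunk above only adds parentheses around the macro parameters r, m and v. Without them, a caller that passes a compound expression - say a mask built from an OR - silently changes operator precedence inside the expansion. A tiny self-contained demonstration, with made-up macro names rather than the driver's, is:

#include <stdio.h>
#include <stdint.h>

#define CLRSET_UNSAFE(reg, m, v) (((reg) & ~m) | v)
#define CLRSET_SAFE(reg, m, v)   (((reg) & ~(m)) | (v))

int main(void)
{
	uint8_t reg = 0xff;

	/* The mask argument is a compound expression: without parentheses
	 * the ~ applies only to 0x0f, so 0x30 is OR-ed back in. */
	printf("unsafe: 0x%02x\n",
	       (unsigned)(uint8_t)CLRSET_UNSAFE(reg, 0x0f | 0x30, 0x01));
	printf("safe:   0x%02x\n",
	       (unsigned)(uint8_t)CLRSET_SAFE(reg, 0x0f | 0x30, 0x01));
	return 0;
}

The intended result is 0xc1; the unparenthesized form yields 0xf1 because ~ binds tighter than |, which is exactly the class of bug the extra parentheses prevent.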
2105 +diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
2106 +index 420900852166..c366503c466d 100644
2107 +--- a/drivers/mfd/Kconfig
2108 ++++ b/drivers/mfd/Kconfig
2109 +@@ -758,6 +758,7 @@ config MFD_MAX77650
2110 + depends on OF || COMPILE_TEST
2111 + select MFD_CORE
2112 + select REGMAP_I2C
2113 ++ select REGMAP_IRQ
2114 + help
2115 + Say Y here to add support for Maxim Semiconductor MAX77650 and
2116 + MAX77651 Power Management ICs. This is the core multifunction
2117 +diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
2118 +index 85640ee11c86..d5326d19b136 100644
2119 +--- a/drivers/mtd/nand/onenand/onenand_base.c
2120 ++++ b/drivers/mtd/nand/onenand/onenand_base.c
2121 +@@ -1248,44 +1248,44 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
2122 +
2123 + stats = mtd->ecc_stats;
2124 +
2125 +- /* Read-while-load method */
2126 ++ /* Read-while-load method */
2127 +
2128 +- /* Do first load to bufferRAM */
2129 +- if (read < len) {
2130 +- if (!onenand_check_bufferram(mtd, from)) {
2131 ++ /* Do first load to bufferRAM */
2132 ++ if (read < len) {
2133 ++ if (!onenand_check_bufferram(mtd, from)) {
2134 + this->command(mtd, ONENAND_CMD_READ, from, writesize);
2135 +- ret = this->wait(mtd, FL_READING);
2136 +- onenand_update_bufferram(mtd, from, !ret);
2137 ++ ret = this->wait(mtd, FL_READING);
2138 ++ onenand_update_bufferram(mtd, from, !ret);
2139 + if (mtd_is_eccerr(ret))
2140 + ret = 0;
2141 +- }
2142 +- }
2143 ++ }
2144 ++ }
2145 +
2146 + thislen = min_t(int, writesize, len - read);
2147 + column = from & (writesize - 1);
2148 + if (column + thislen > writesize)
2149 + thislen = writesize - column;
2150 +
2151 +- while (!ret) {
2152 +- /* If there is more to load then start next load */
2153 +- from += thislen;
2154 +- if (read + thislen < len) {
2155 ++ while (!ret) {
2156 ++ /* If there is more to load then start next load */
2157 ++ from += thislen;
2158 ++ if (read + thislen < len) {
2159 + this->command(mtd, ONENAND_CMD_READ, from, writesize);
2160 +- /*
2161 +- * Chip boundary handling in DDP
2162 +- * Now we issued chip 1 read and pointed chip 1
2163 ++ /*
2164 ++ * Chip boundary handling in DDP
2165 ++ * Now we issued chip 1 read and pointed chip 1
2166 + * bufferram so we have to point chip 0 bufferram.
2167 +- */
2168 +- if (ONENAND_IS_DDP(this) &&
2169 +- unlikely(from == (this->chipsize >> 1))) {
2170 +- this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
2171 +- boundary = 1;
2172 +- } else
2173 +- boundary = 0;
2174 +- ONENAND_SET_PREV_BUFFERRAM(this);
2175 +- }
2176 +- /* While load is going, read from last bufferRAM */
2177 +- this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
2178 ++ */
2179 ++ if (ONENAND_IS_DDP(this) &&
2180 ++ unlikely(from == (this->chipsize >> 1))) {
2181 ++ this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
2182 ++ boundary = 1;
2183 ++ } else
2184 ++ boundary = 0;
2185 ++ ONENAND_SET_PREV_BUFFERRAM(this);
2186 ++ }
2187 ++ /* While load is going, read from last bufferRAM */
2188 ++ this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
2189 +
2190 + /* Read oob area if needed */
2191 + if (oobbuf) {
2192 +@@ -1301,24 +1301,24 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
2193 + oobcolumn = 0;
2194 + }
2195 +
2196 +- /* See if we are done */
2197 +- read += thislen;
2198 +- if (read == len)
2199 +- break;
2200 +- /* Set up for next read from bufferRAM */
2201 +- if (unlikely(boundary))
2202 +- this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
2203 +- ONENAND_SET_NEXT_BUFFERRAM(this);
2204 +- buf += thislen;
2205 ++ /* See if we are done */
2206 ++ read += thislen;
2207 ++ if (read == len)
2208 ++ break;
2209 ++ /* Set up for next read from bufferRAM */
2210 ++ if (unlikely(boundary))
2211 ++ this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
2212 ++ ONENAND_SET_NEXT_BUFFERRAM(this);
2213 ++ buf += thislen;
2214 + thislen = min_t(int, writesize, len - read);
2215 +- column = 0;
2216 +- cond_resched();
2217 +- /* Now wait for load */
2218 +- ret = this->wait(mtd, FL_READING);
2219 +- onenand_update_bufferram(mtd, from, !ret);
2220 ++ column = 0;
2221 ++ cond_resched();
2222 ++ /* Now wait for load */
2223 ++ ret = this->wait(mtd, FL_READING);
2224 ++ onenand_update_bufferram(mtd, from, !ret);
2225 + if (mtd_is_eccerr(ret))
2226 + ret = 0;
2227 +- }
2228 ++ }
2229 +
2230 + /*
2231 + * Return success, if no ECC failures, else -EBADMSG
2232 +diff --git a/drivers/mtd/parsers/sharpslpart.c b/drivers/mtd/parsers/sharpslpart.c
2233 +index e5ea6127ab5a..671a61845bd5 100644
2234 +--- a/drivers/mtd/parsers/sharpslpart.c
2235 ++++ b/drivers/mtd/parsers/sharpslpart.c
2236 +@@ -165,10 +165,10 @@ static int sharpsl_nand_get_logical_num(u8 *oob)
2237 +
2238 + static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
2239 + {
2240 +- unsigned int block_num, log_num, phymax;
2241 ++ unsigned int block_num, phymax;
2242 ++ int i, ret, log_num;
2243 + loff_t block_adr;
2244 + u8 *oob;
2245 +- int i, ret;
2246 +
2247 + oob = kzalloc(mtd->oobsize, GFP_KERNEL);
2248 + if (!oob)
2249 +diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
2250 +index 6aeed0c600f8..7971dc4f54f1 100644
2251 +--- a/drivers/net/netdevsim/bus.c
2252 ++++ b/drivers/net/netdevsim/bus.c
2253 +@@ -17,6 +17,7 @@
2254 + static DEFINE_IDA(nsim_bus_dev_ids);
2255 + static LIST_HEAD(nsim_bus_dev_list);
2256 + static DEFINE_MUTEX(nsim_bus_dev_list_lock);
2257 ++static bool nsim_bus_enable;
2258 +
2259 + static struct nsim_bus_dev *to_nsim_bus_dev(struct device *dev)
2260 + {
2261 +@@ -28,7 +29,7 @@ static int nsim_bus_dev_vfs_enable(struct nsim_bus_dev *nsim_bus_dev,
2262 + {
2263 + nsim_bus_dev->vfconfigs = kcalloc(num_vfs,
2264 + sizeof(struct nsim_vf_config),
2265 +- GFP_KERNEL);
2266 ++ GFP_KERNEL | __GFP_NOWARN);
2267 + if (!nsim_bus_dev->vfconfigs)
2268 + return -ENOMEM;
2269 + nsim_bus_dev->num_vfs = num_vfs;
2270 +@@ -96,13 +97,25 @@ new_port_store(struct device *dev, struct device_attribute *attr,
2271 + const char *buf, size_t count)
2272 + {
2273 + struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
2274 ++ struct nsim_dev *nsim_dev = dev_get_drvdata(dev);
2275 ++ struct devlink *devlink;
2276 + unsigned int port_index;
2277 + int ret;
2278 +
2279 ++ /* Prevent use of nsim_bus_dev before initialization. */
2280 ++ if (!smp_load_acquire(&nsim_bus_dev->init))
2281 ++ return -EBUSY;
2282 + ret = kstrtouint(buf, 0, &port_index);
2283 + if (ret)
2284 + return ret;
2285 ++
2286 ++ devlink = priv_to_devlink(nsim_dev);
2287 ++
2288 ++ mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
2289 ++ devlink_reload_disable(devlink);
2290 + ret = nsim_dev_port_add(nsim_bus_dev, port_index);
2291 ++ devlink_reload_enable(devlink);
2292 ++ mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
2293 + return ret ? ret : count;
2294 + }
2295 +
2296 +@@ -113,13 +126,25 @@ del_port_store(struct device *dev, struct device_attribute *attr,
2297 + const char *buf, size_t count)
2298 + {
2299 + struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
2300 ++ struct nsim_dev *nsim_dev = dev_get_drvdata(dev);
2301 ++ struct devlink *devlink;
2302 + unsigned int port_index;
2303 + int ret;
2304 +
2305 ++ /* Prevent use of nsim_bus_dev before initialization. */
2306 ++ if (!smp_load_acquire(&nsim_bus_dev->init))
2307 ++ return -EBUSY;
2308 + ret = kstrtouint(buf, 0, &port_index);
2309 + if (ret)
2310 + return ret;
2311 ++
2312 ++ devlink = priv_to_devlink(nsim_dev);
2313 ++
2314 ++ mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
2315 ++ devlink_reload_disable(devlink);
2316 + ret = nsim_dev_port_del(nsim_bus_dev, port_index);
2317 ++ devlink_reload_enable(devlink);
2318 ++ mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
2319 + return ret ? ret : count;
2320 + }
2321 +
2322 +@@ -179,15 +204,30 @@ new_device_store(struct bus_type *bus, const char *buf, size_t count)
2323 + pr_err("Format for adding new device is \"id port_count\" (uint uint).\n");
2324 + return -EINVAL;
2325 + }
2326 +- nsim_bus_dev = nsim_bus_dev_new(id, port_count);
2327 +- if (IS_ERR(nsim_bus_dev))
2328 +- return PTR_ERR(nsim_bus_dev);
2329 +
2330 + mutex_lock(&nsim_bus_dev_list_lock);
2331 ++ /* Prevent use of resources before initialization. */
2332 ++ if (!smp_load_acquire(&nsim_bus_enable)) {
2333 ++ err = -EBUSY;
2334 ++ goto err;
2335 ++ }
2336 ++
2337 ++ nsim_bus_dev = nsim_bus_dev_new(id, port_count);
2338 ++ if (IS_ERR(nsim_bus_dev)) {
2339 ++ err = PTR_ERR(nsim_bus_dev);
2340 ++ goto err;
2341 ++ }
2342 ++
2343 ++ /* Allow using nsim_bus_dev */
2344 ++ smp_store_release(&nsim_bus_dev->init, true);
2345 ++
2346 + list_add_tail(&nsim_bus_dev->list, &nsim_bus_dev_list);
2347 + mutex_unlock(&nsim_bus_dev_list_lock);
2348 +
2349 + return count;
2350 ++err:
2351 ++ mutex_unlock(&nsim_bus_dev_list_lock);
2352 ++ return err;
2353 + }
2354 + static BUS_ATTR_WO(new_device);
2355 +
2356 +@@ -215,6 +255,11 @@ del_device_store(struct bus_type *bus, const char *buf, size_t count)
2357 +
2358 + err = -ENOENT;
2359 + mutex_lock(&nsim_bus_dev_list_lock);
2360 ++ /* Prevent use of resources before initialization. */
2361 ++ if (!smp_load_acquire(&nsim_bus_enable)) {
2362 ++ mutex_unlock(&nsim_bus_dev_list_lock);
2363 ++ return -EBUSY;
2364 ++ }
2365 + list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) {
2366 + if (nsim_bus_dev->dev.id != id)
2367 + continue;
2368 +@@ -284,6 +329,9 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count)
2369 + nsim_bus_dev->dev.type = &nsim_bus_dev_type;
2370 + nsim_bus_dev->port_count = port_count;
2371 + nsim_bus_dev->initial_net = current->nsproxy->net_ns;
2372 ++ mutex_init(&nsim_bus_dev->nsim_bus_reload_lock);
2373 ++ /* Disallow using nsim_bus_dev */
2374 ++ smp_store_release(&nsim_bus_dev->init, false);
2375 +
2376 + err = device_register(&nsim_bus_dev->dev);
2377 + if (err)
2378 +@@ -299,6 +347,8 @@ err_nsim_bus_dev_free:
2379 +
2380 + static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev)
2381 + {
2382 ++ /* Disallow using nsim_bus_dev */
2383 ++ smp_store_release(&nsim_bus_dev->init, false);
2384 + device_unregister(&nsim_bus_dev->dev);
2385 + ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
2386 + kfree(nsim_bus_dev);
2387 +@@ -320,6 +370,8 @@ int nsim_bus_init(void)
2388 + err = driver_register(&nsim_driver);
2389 + if (err)
2390 + goto err_bus_unregister;
2391 ++ /* Allow using resources */
2392 ++ smp_store_release(&nsim_bus_enable, true);
2393 + return 0;
2394 +
2395 + err_bus_unregister:
2396 +@@ -331,12 +383,16 @@ void nsim_bus_exit(void)
2397 + {
2398 + struct nsim_bus_dev *nsim_bus_dev, *tmp;
2399 +
2400 ++ /* Disallow using resources */
2401 ++ smp_store_release(&nsim_bus_enable, false);
2402 ++
2403 + mutex_lock(&nsim_bus_dev_list_lock);
2404 + list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) {
2405 + list_del(&nsim_bus_dev->list);
2406 + nsim_bus_dev_del(nsim_bus_dev);
2407 + }
2408 + mutex_unlock(&nsim_bus_dev_list_lock);
2409 ++
2410 + driver_unregister(&nsim_driver);
2411 + bus_unregister(&nsim_bus);
2412 + }
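[Illustrative aside, not part of the patch] The netdevsim changes above gate the sysfs stores on an init flag that is published with smp_store_release() and observed with smp_load_acquire(), so any reader that sees the flag set also sees the fully initialized device. A rough userspace analogue using C11 atomics (not the kernel primitives) looks like this; compile with -pthread.

#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>
#include <stdio.h>

struct dev {
	int port_count;		/* plain fields set up before publication */
	atomic_bool init;	/* publication flag */
};

static struct dev d;

static void *writer(void *arg)
{
	d.port_count = 4;	/* initialize first */
	/* then publish: release orders the stores above before the flag */
	atomic_store_explicit(&d.init, true, memory_order_release);
	return NULL;
}

static void *reader(void *arg)
{
	/* consumers check the flag with acquire before touching the fields */
	if (atomic_load_explicit(&d.init, memory_order_acquire))
		printf("ports: %d\n", d.port_count);
	else
		printf("not ready, would return -EBUSY\n");
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}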
2413 +diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
2414 +index 2a945b3c7c76..54bc089550b3 100644
2415 +--- a/drivers/net/netdevsim/dev.c
2416 ++++ b/drivers/net/netdevsim/dev.c
2417 +@@ -88,8 +88,11 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
2418 + &nsim_dev->max_macs);
2419 + debugfs_create_bool("test1", 0600, nsim_dev->ddir,
2420 + &nsim_dev->test1);
2421 +- debugfs_create_file("take_snapshot", 0200, nsim_dev->ddir, nsim_dev,
2422 +- &nsim_dev_take_snapshot_fops);
2423 ++ nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
2424 ++ 0200,
2425 ++ nsim_dev->ddir,
2426 ++ nsim_dev,
2427 ++ &nsim_dev_take_snapshot_fops);
2428 + debugfs_create_bool("dont_allow_reload", 0600, nsim_dev->ddir,
2429 + &nsim_dev->dont_allow_reload);
2430 + debugfs_create_bool("fail_reload", 0600, nsim_dev->ddir,
2431 +@@ -740,6 +743,11 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
2432 + if (err)
2433 + goto err_health_exit;
2434 +
2435 ++ nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
2436 ++ 0200,
2437 ++ nsim_dev->ddir,
2438 ++ nsim_dev,
2439 ++ &nsim_dev_take_snapshot_fops);
2440 + return 0;
2441 +
2442 + err_health_exit:
2443 +@@ -853,6 +861,7 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
2444 +
2445 + if (devlink_is_reload_failed(devlink))
2446 + return;
2447 ++ debugfs_remove(nsim_dev->take_snapshot);
2448 + nsim_dev_port_del_all(nsim_dev);
2449 + nsim_dev_health_exit(nsim_dev);
2450 + nsim_dev_traps_exit(devlink);
2451 +diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
2452 +index 9aa637d162eb..c06e0f8fbc10 100644
2453 +--- a/drivers/net/netdevsim/health.c
2454 ++++ b/drivers/net/netdevsim/health.c
2455 +@@ -82,7 +82,7 @@ static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len)
2456 + if (err)
2457 + return err;
2458 +
2459 +- binary = kmalloc(binary_len, GFP_KERNEL);
2460 ++ binary = kmalloc(binary_len, GFP_KERNEL | __GFP_NOWARN);
2461 + if (!binary)
2462 + return -ENOMEM;
2463 + get_random_bytes(binary, binary_len);
2464 +diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
2465 +index 94df795ef4d3..2eb7b0dc1594 100644
2466 +--- a/drivers/net/netdevsim/netdevsim.h
2467 ++++ b/drivers/net/netdevsim/netdevsim.h
2468 +@@ -160,6 +160,7 @@ struct nsim_dev {
2469 + struct nsim_trap_data *trap_data;
2470 + struct dentry *ddir;
2471 + struct dentry *ports_ddir;
2472 ++ struct dentry *take_snapshot;
2473 + struct bpf_offload_dev *bpf_dev;
2474 + bool bpf_bind_accept;
2475 + u32 bpf_bind_verifier_delay;
2476 +@@ -240,6 +241,9 @@ struct nsim_bus_dev {
2477 + */
2478 + unsigned int num_vfs;
2479 + struct nsim_vf_config *vfconfigs;
2480 ++ /* Lock for devlink->reload_enabled in netdevsim module */
2481 ++ struct mutex nsim_bus_reload_lock;
2482 ++ bool init;
2483 + };
2484 +
2485 + int nsim_bus_init(void);
2486 +diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
2487 +index bb44f5a0941b..4822a65f6f3c 100644
2488 +--- a/drivers/net/wireless/ath/ath10k/pci.c
2489 ++++ b/drivers/net/wireless/ath/ath10k/pci.c
2490 +@@ -1604,11 +1604,22 @@ static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
2491 + {
2492 + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2493 + u32 i;
2494 ++ int ret;
2495 ++
2496 ++ mutex_lock(&ar->conf_mutex);
2497 ++ if (ar->state != ATH10K_STATE_ON) {
2498 ++ ath10k_warn(ar, "Skipping pci_dump_memory_reg invalid state\n");
2499 ++ ret = -EIO;
2500 ++ goto done;
2501 ++ }
2502 +
2503 + for (i = 0; i < region->len; i += 4)
2504 + *(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);
2505 +
2506 +- return region->len;
2507 ++ ret = region->len;
2508 ++done:
2509 ++ mutex_unlock(&ar->conf_mutex);
2510 ++ return ret;
2511 + }
2512 +
2513 + /* if an error happened returns < 0, otherwise the length */
2514 +@@ -1704,7 +1715,11 @@ static void ath10k_pci_dump_memory(struct ath10k *ar,
2515 + count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
2516 + break;
2517 + case ATH10K_MEM_REGION_TYPE_IOREG:
2518 +- count = ath10k_pci_dump_memory_reg(ar, current_region, buf);
2519 ++ ret = ath10k_pci_dump_memory_reg(ar, current_region, buf);
2520 ++ if (ret < 0)
2521 ++ break;
2522 ++
2523 ++ count = ret;
2524 + break;
2525 + default:
2526 + ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);
2527 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
2528 +index 9f4b117db9d7..d47f76890cf9 100644
2529 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
2530 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
2531 +@@ -8,6 +8,7 @@
2532 + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
2533 + * Copyright (C) 2018 Intel Corporation
2534 + * Copyright (C) 2019 Intel Corporation
2535 ++ * Copyright (C) 2020 Intel Corporation
2536 + *
2537 + * This program is free software; you can redistribute it and/or modify
2538 + * it under the terms of version 2 of the GNU General Public License as
2539 +@@ -30,6 +31,7 @@
2540 + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
2541 + * Copyright (C) 2018 Intel Corporation
2542 + * Copyright (C) 2019 Intel Corporation
2543 ++ * Copyright (C) 2020 Intel Corporation
2544 + * All rights reserved.
2545 + *
2546 + * Redistribution and use in source and binary forms, with or without
2547 +@@ -389,6 +391,8 @@ void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
2548 + if (req != mvm->ftm_initiator.req)
2549 + return;
2550 +
2551 ++ iwl_mvm_ftm_reset(mvm);
2552 ++
2553 + if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD,
2554 + LOCATION_GROUP, 0),
2555 + 0, sizeof(cmd), &cmd))
2556 +@@ -502,7 +506,6 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
2557 + lockdep_assert_held(&mvm->mutex);
2558 +
2559 + if (!mvm->ftm_initiator.req) {
2560 +- IWL_ERR(mvm, "Got FTM response but have no request?\n");
2561 + return;
2562 + }
2563 +
2564 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
2565 +index 6717f25c46b1..efdf15f57f16 100644
2566 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
2567 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
2568 +@@ -3293,7 +3293,7 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
2569 + if (fw_has_capa(&mvm->fw->ucode_capa,
2570 + IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
2571 + iwl_mvm_schedule_session_protection(mvm, vif, 900,
2572 +- min_duration);
2573 ++ min_duration, false);
2574 + else
2575 + iwl_mvm_protect_session(mvm, vif, duration,
2576 + min_duration, 500, false);
2577 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
2578 +index 1851719e9f4b..d781777b6b96 100644
2579 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
2580 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
2581 +@@ -205,9 +205,15 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
2582 + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2583 + u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
2584 +
2585 +- mutex_lock(&mvm->mutex);
2586 + /* Protect the session to hear the TDLS setup response on the channel */
2587 +- iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
2588 ++ mutex_lock(&mvm->mutex);
2589 ++ if (fw_has_capa(&mvm->fw->ucode_capa,
2590 ++ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
2591 ++ iwl_mvm_schedule_session_protection(mvm, vif, duration,
2592 ++ duration, true);
2593 ++ else
2594 ++ iwl_mvm_protect_session(mvm, vif, duration,
2595 ++ duration, 100, true);
2596 + mutex_unlock(&mvm->mutex);
2597 + }
2598 +
2599 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
2600 +index 51b138673ddb..c0b420fe5e48 100644
2601 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
2602 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
2603 +@@ -1056,13 +1056,42 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
2604 + return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
2605 + }
2606 +
2607 ++static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait,
2608 ++ struct iwl_rx_packet *pkt, void *data)
2609 ++{
2610 ++ struct iwl_mvm *mvm =
2611 ++ container_of(notif_wait, struct iwl_mvm, notif_wait);
2612 ++ struct iwl_mvm_session_prot_notif *resp;
2613 ++ int resp_len = iwl_rx_packet_payload_len(pkt);
2614 ++
2615 ++ if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF ||
2616 ++ pkt->hdr.group_id != MAC_CONF_GROUP))
2617 ++ return true;
2618 ++
2619 ++ if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
2620 ++ IWL_ERR(mvm, "Invalid SESSION_PROTECTION_NOTIF response\n");
2621 ++ return true;
2622 ++ }
2623 ++
2624 ++ resp = (void *)pkt->data;
2625 ++
2626 ++ if (!resp->status)
2627 ++ IWL_ERR(mvm,
2628 ++ "TIME_EVENT_NOTIFICATION received but not executed\n");
2629 ++
2630 ++ return true;
2631 ++}
2632 ++
2633 + void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
2634 + struct ieee80211_vif *vif,
2635 +- u32 duration, u32 min_duration)
2636 ++ u32 duration, u32 min_duration,
2637 ++ bool wait_for_notif)
2638 + {
2639 + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2640 + struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
2641 +-
2642 ++ const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF,
2643 ++ MAC_CONF_GROUP, 0) };
2644 ++ struct iwl_notification_wait wait_notif;
2645 + struct iwl_mvm_session_prot_cmd cmd = {
2646 + .id_and_color =
2647 + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
2648 +@@ -1071,7 +1100,6 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
2649 + .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
2650 + .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
2651 + };
2652 +- int ret;
2653 +
2654 + lockdep_assert_held(&mvm->mutex);
2655 +
2656 +@@ -1092,14 +1120,35 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
2657 + IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
2658 + le32_to_cpu(cmd.duration_tu));
2659 +
2660 +- ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
2661 +- MAC_CONF_GROUP, 0),
2662 +- 0, sizeof(cmd), &cmd);
2663 +- if (ret) {
2664 ++ if (!wait_for_notif) {
2665 ++ if (iwl_mvm_send_cmd_pdu(mvm,
2666 ++ iwl_cmd_id(SESSION_PROTECTION_CMD,
2667 ++ MAC_CONF_GROUP, 0),
2668 ++ 0, sizeof(cmd), &cmd)) {
2669 ++ IWL_ERR(mvm,
2670 ++ "Couldn't send the SESSION_PROTECTION_CMD\n");
2671 ++ spin_lock_bh(&mvm->time_event_lock);
2672 ++ iwl_mvm_te_clear_data(mvm, te_data);
2673 ++ spin_unlock_bh(&mvm->time_event_lock);
2674 ++ }
2675 ++
2676 ++ return;
2677 ++ }
2678 ++
2679 ++ iwl_init_notification_wait(&mvm->notif_wait, &wait_notif,
2680 ++ notif, ARRAY_SIZE(notif),
2681 ++ iwl_mvm_session_prot_notif, NULL);
2682 ++
2683 ++ if (iwl_mvm_send_cmd_pdu(mvm,
2684 ++ iwl_cmd_id(SESSION_PROTECTION_CMD,
2685 ++ MAC_CONF_GROUP, 0),
2686 ++ 0, sizeof(cmd), &cmd)) {
2687 + IWL_ERR(mvm,
2688 +- "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
2689 +- spin_lock_bh(&mvm->time_event_lock);
2690 +- iwl_mvm_te_clear_data(mvm, te_data);
2691 +- spin_unlock_bh(&mvm->time_event_lock);
2692 ++ "Couldn't send the SESSION_PROTECTION_CMD\n");
2693 ++ iwl_remove_notification(&mvm->notif_wait, &wait_notif);
2694 ++ } else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif,
2695 ++ TU_TO_JIFFIES(100))) {
2696 ++ IWL_ERR(mvm,
2697 ++ "Failed to protect session until session protection\n");
2698 + }
2699 + }
2700 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
2701 +index df6832b79666..3186d7e40567 100644
2702 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
2703 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
2704 +@@ -250,10 +250,12 @@ iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data)
2705 + * @mvm: the mvm component
2706 + * @vif: the virtual interface for which the protection issued
2707 + * @duration: the duration of the protection
2708 ++ * @wait_for_notif: if true, will block until the start of the protection
2709 + */
2710 + void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
2711 + struct ieee80211_vif *vif,
2712 +- u32 duration, u32 min_duration);
2713 ++ u32 duration, u32 min_duration,
2714 ++ bool wait_for_notif);
2715 +
2716 + /**
2717 + * iwl_mvm_rx_session_protect_notif - handles %SESSION_PROTECTION_NOTIF
2718 +diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
2719 +index c9401c121a14..4e3de684928b 100644
2720 +--- a/drivers/net/wireless/marvell/libertas/cfg.c
2721 ++++ b/drivers/net/wireless/marvell/libertas/cfg.c
2722 +@@ -1785,6 +1785,8 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
2723 + rates_max = rates_eid[1];
2724 + if (rates_max > MAX_RATES) {
2725 + lbs_deb_join("invalid rates");
2726 ++ rcu_read_unlock();
2727 ++ ret = -EINVAL;
2728 + goto out;
2729 + }
2730 + rates = cmd.bss.rates;
2731 +diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
2732 +index 98f942b797f7..a7968a84aaf8 100644
2733 +--- a/drivers/net/wireless/marvell/mwifiex/scan.c
2734 ++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
2735 +@@ -2884,6 +2884,13 @@ mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv,
2736 + vs_param_set->header.len =
2737 + cpu_to_le16((((u16) priv->vs_ie[id].ie[1])
2738 + & 0x00FF) + 2);
2739 ++ if (le16_to_cpu(vs_param_set->header.len) >
2740 ++ MWIFIEX_MAX_VSIE_LEN) {
2741 ++ mwifiex_dbg(priv->adapter, ERROR,
2742 ++ "Invalid param length!\n");
2743 ++ break;
2744 ++ }
2745 ++
2746 + memcpy(vs_param_set->ie, priv->vs_ie[id].ie,
2747 + le16_to_cpu(vs_param_set->header.len));
2748 + *buffer += le16_to_cpu(vs_param_set->header.len) +
2749 +diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
2750 +index 41f0231376c0..132f9e8ed68c 100644
2751 +--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
2752 ++++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
2753 +@@ -970,6 +970,10 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
2754 + "WMM Parameter Set Count: %d\n",
2755 + wmm_param_ie->qos_info_bitmap & mask);
2756 +
2757 ++ if (wmm_param_ie->vend_hdr.len + 2 >
2758 ++ sizeof(struct ieee_types_wmm_parameter))
2759 ++ break;
2760 ++
2761 + memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
2762 + wmm_ie, wmm_param_ie,
2763 + wmm_param_ie->vend_hdr.len + 2);
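[Illustrative aside, not part of the patch] Both mwifiex hunks above add an upper bound check on an IE length field received over the air before copying it into a fixed-size structure. The general pattern - validate the declared length against both the bytes actually available and the destination size before memcpy() - can be sketched as follows; the structure and field names are illustrative only.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct wmm_param { uint8_t buf[24]; };

/* Hypothetical element layout: 1-byte id, 1-byte length, payload. */
static int copy_ie(struct wmm_param *dst, const uint8_t *ie, size_t avail)
{
	size_t total;

	if (avail < 2)
		return -1;
	total = (size_t)ie[1] + 2;	/* header plus declared payload */
	if (total > avail || total > sizeof(dst->buf))
		return -1;		/* reject before copying */
	memcpy(dst->buf, ie, total);
	return 0;
}

int main(void)
{
	struct wmm_param p;
	uint8_t bogus[4] = { 0xdd, 0xff, 0x00, 0x00 };	/* claims a 255-byte payload */

	printf("%d\n", copy_ie(&p, bogus, sizeof(bogus)));	/* prints -1 */
	return 0;
}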
2764 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
2765 +index eccad4987ac8..17e277bf39e0 100644
2766 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
2767 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
2768 +@@ -92,8 +92,9 @@ static int mt7615_check_eeprom(struct mt76_dev *dev)
2769 +
2770 + static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
2771 + {
2772 +- u8 val, *eeprom = dev->mt76.eeprom.data;
2773 ++ u8 *eeprom = dev->mt76.eeprom.data;
2774 + u8 tx_mask, rx_mask, max_nss;
2775 ++ u32 val;
2776 +
2777 + val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL,
2778 + eeprom[MT_EE_WIFI_CONF]);
2779 +diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
2780 +index 090b632965e2..ac93f5a0398e 100644
2781 +--- a/drivers/pci/controller/pci-tegra.c
2782 ++++ b/drivers/pci/controller/pci-tegra.c
2783 +@@ -2499,7 +2499,6 @@ static const struct tegra_pcie_soc tegra20_pcie = {
2784 + .num_ports = 2,
2785 + .ports = tegra20_pcie_ports,
2786 + .msi_base_shift = 0,
2787 +- .afi_pex2_ctrl = 0x128,
2788 + .pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
2789 + .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
2790 + .pads_refclk_cfg0 = 0xfa5cfa5c,
2791 +@@ -2528,6 +2527,7 @@ static const struct tegra_pcie_soc tegra30_pcie = {
2792 + .num_ports = 3,
2793 + .ports = tegra30_pcie_ports,
2794 + .msi_base_shift = 8,
2795 ++ .afi_pex2_ctrl = 0x128,
2796 + .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2797 + .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2798 + .pads_refclk_cfg0 = 0xfa5cfa5c,
2799 +diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
2800 +index 1e88fd427757..4d1f392b05f9 100644
2801 +--- a/drivers/pci/iov.c
2802 ++++ b/drivers/pci/iov.c
2803 +@@ -186,10 +186,10 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
2804 + sprintf(buf, "virtfn%u", id);
2805 + rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
2806 + if (rc)
2807 +- goto failed2;
2808 ++ goto failed1;
2809 + rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
2810 + if (rc)
2811 +- goto failed3;
2812 ++ goto failed2;
2813 +
2814 + kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
2815 +
2816 +@@ -197,11 +197,10 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
2817 +
2818 + return 0;
2819 +
2820 +-failed3:
2821 +- sysfs_remove_link(&dev->dev.kobj, buf);
2822 + failed2:
2823 +- pci_stop_and_remove_bus_device(virtfn);
2824 ++ sysfs_remove_link(&dev->dev.kobj, buf);
2825 + failed1:
2826 ++ pci_stop_and_remove_bus_device(virtfn);
2827 + pci_dev_put(dev);
2828 + failed0:
2829 + virtfn_remove_bus(dev->bus, bus);
2830 +diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
2831 +index 1ca86f2e0166..4a818b07a1af 100644
2832 +--- a/drivers/pci/pcie/aer.c
2833 ++++ b/drivers/pci/pcie/aer.c
2834 +@@ -1445,6 +1445,7 @@ static int aer_probe(struct pcie_device *dev)
2835 + return -ENOMEM;
2836 +
2837 + rpc->rpd = port;
2838 ++ INIT_KFIFO(rpc->aer_fifo);
2839 + set_service_data(dev, rpc);
2840 +
2841 + status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr,
2842 +diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
2843 +index f279826204eb..591161ce0f51 100644
2844 +--- a/drivers/pci/setup-bus.c
2845 ++++ b/drivers/pci/setup-bus.c
2846 +@@ -1803,12 +1803,18 @@ again:
2847 + /* Restore size and flags */
2848 + list_for_each_entry(fail_res, &fail_head, list) {
2849 + struct resource *res = fail_res->res;
2850 ++ int idx;
2851 +
2852 + res->start = fail_res->start;
2853 + res->end = fail_res->end;
2854 + res->flags = fail_res->flags;
2855 +- if (fail_res->dev->subordinate)
2856 +- res->flags = 0;
2857 ++
2858 ++ if (pci_is_bridge(fail_res->dev)) {
2859 ++ idx = res - &fail_res->dev->resource[0];
2860 ++ if (idx >= PCI_BRIDGE_RESOURCES &&
2861 ++ idx <= PCI_BRIDGE_RESOURCE_END)
2862 ++ res->flags = 0;
2863 ++ }
2864 + }
2865 + free_list(&fail_head);
2866 +
2867 +@@ -2055,12 +2061,18 @@ again:
2868 + /* Restore size and flags */
2869 + list_for_each_entry(fail_res, &fail_head, list) {
2870 + struct resource *res = fail_res->res;
2871 ++ int idx;
2872 +
2873 + res->start = fail_res->start;
2874 + res->end = fail_res->end;
2875 + res->flags = fail_res->flags;
2876 +- if (fail_res->dev->subordinate)
2877 +- res->flags = 0;
2878 ++
2879 ++ if (pci_is_bridge(fail_res->dev)) {
2880 ++ idx = res - &fail_res->dev->resource[0];
2881 ++ if (idx >= PCI_BRIDGE_RESOURCES &&
2882 ++ idx <= PCI_BRIDGE_RESOURCE_END)
2883 ++ res->flags = 0;
2884 ++ }
2885 + }
2886 + free_list(&fail_head);
2887 +
2888 +diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
2889 +index 88091bbfe77f..9c3ad09d3022 100644
2890 +--- a/drivers/pci/switch/switchtec.c
2891 ++++ b/drivers/pci/switch/switchtec.c
2892 +@@ -1276,7 +1276,7 @@ static int switchtec_init_isr(struct switchtec_dev *stdev)
2893 + if (nvecs < 0)
2894 + return nvecs;
2895 +
2896 +- event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
2897 ++ event_irq = ioread16(&stdev->mmio_part_cfg->vep_vector_number);
2898 + if (event_irq < 0 || event_irq >= nvecs)
2899 + return -EFAULT;
2900 +
2901 +@@ -1349,7 +1349,7 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
2902 + if (rc)
2903 + return rc;
2904 +
2905 +- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
2906 ++ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2907 + if (rc)
2908 + return rc;
2909 +
2910 +diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
2911 +index 55141d5de29e..72ffd19448e5 100644
2912 +--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
2913 ++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
2914 +@@ -107,6 +107,7 @@ struct byt_gpio_pin_context {
2915 +
2916 + struct byt_gpio {
2917 + struct gpio_chip chip;
2918 ++ struct irq_chip irqchip;
2919 + struct platform_device *pdev;
2920 + struct pinctrl_dev *pctl_dev;
2921 + struct pinctrl_desc pctl_desc;
2922 +@@ -1395,15 +1396,6 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
2923 + return 0;
2924 + }
2925 +
2926 +-static struct irq_chip byt_irqchip = {
2927 +- .name = "BYT-GPIO",
2928 +- .irq_ack = byt_irq_ack,
2929 +- .irq_mask = byt_irq_mask,
2930 +- .irq_unmask = byt_irq_unmask,
2931 +- .irq_set_type = byt_irq_type,
2932 +- .flags = IRQCHIP_SKIP_SET_WAKE,
2933 +-};
2934 +-
2935 + static void byt_gpio_irq_handler(struct irq_desc *desc)
2936 + {
2937 + struct irq_data *data = irq_desc_get_irq_data(desc);
2938 +@@ -1551,8 +1543,15 @@ static int byt_gpio_probe(struct byt_gpio *vg)
2939 + if (irq_rc && irq_rc->start) {
2940 + struct gpio_irq_chip *girq;
2941 +
2942 ++ vg->irqchip.name = "BYT-GPIO",
2943 ++ vg->irqchip.irq_ack = byt_irq_ack,
2944 ++ vg->irqchip.irq_mask = byt_irq_mask,
2945 ++ vg->irqchip.irq_unmask = byt_irq_unmask,
2946 ++ vg->irqchip.irq_set_type = byt_irq_type,
2947 ++ vg->irqchip.flags = IRQCHIP_SKIP_SET_WAKE,
2948 ++
2949 + girq = &gc->irq;
2950 +- girq->chip = &byt_irqchip;
2951 ++ girq->chip = &vg->irqchip;
2952 + girq->init_hw = byt_gpio_irq_init_hw;
2953 + girq->parent_handler = byt_gpio_irq_handler;
2954 + girq->num_parents = 1;
2955 +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
2956 +index 5d6f9f61ce02..9a8daa256a32 100644
2957 +--- a/drivers/pinctrl/qcom/pinctrl-msm.c
2958 ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
2959 +@@ -960,7 +960,6 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
2960 + {
2961 + struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
2962 + struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
2963 +- unsigned long flags;
2964 +
2965 + /*
2966 + * While they may not wake up when the TLMM is powered off,
2967 +@@ -971,12 +970,8 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
2968 + if (d->parent_data)
2969 + irq_chip_set_wake_parent(d, on);
2970 +
2971 +- raw_spin_lock_irqsave(&pctrl->lock, flags);
2972 +-
2973 + irq_set_irq_wake(pctrl->irq, on);
2974 +
2975 +- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
2976 +-
2977 + return 0;
2978 + }
2979 +
2980 +diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
2981 +index 24866a5958ae..a9875038ed9b 100644
2982 +--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
2983 ++++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
2984 +@@ -2305,7 +2305,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
2985 + FN_ATAG0_A, 0, FN_REMOCON_B, 0,
2986 + /* IP0_11_8 [4] */
2987 + FN_SD1_DAT2_A, FN_MMC_D2, 0, FN_BS,
2988 +- FN_ATADIR0_A, 0, FN_SDSELF_B, 0,
2989 ++ FN_ATADIR0_A, 0, FN_SDSELF_A, 0,
2990 + FN_PWM4_B, 0, 0, 0,
2991 + 0, 0, 0, 0,
2992 + /* IP0_7_5 [3] */
2993 +@@ -2349,7 +2349,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
2994 + FN_TS_SDAT0_A, 0, 0, 0,
2995 + 0, 0, 0, 0,
2996 + /* IP1_10_8 [3] */
2997 +- FN_SD1_CLK_B, FN_MMC_D6, 0, FN_A24,
2998 ++ FN_SD1_CD_A, FN_MMC_D6, 0, FN_A24,
2999 + FN_DREQ1_A, 0, FN_HRX0_B, FN_TS_SPSYNC0_A,
3000 + /* IP1_7_5 [3] */
3001 + FN_A23, FN_HTX0_B, FN_TX2_B, FN_DACK2_A,
3002 +diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
3003 +index 8bdf33c807f6..6616f5210b9d 100644
3004 +--- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
3005 ++++ b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
3006 +@@ -5998,7 +5998,7 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
3007 + { PIN_DU_DOTCLKIN1, 0, 2 }, /* DU_DOTCLKIN1 */
3008 + } },
3009 + { PINMUX_DRIVE_REG("DRVCTRL12", 0xe6060330) {
3010 +- { PIN_DU_DOTCLKIN3, 28, 2 }, /* DU_DOTCLKIN3 */
3011 ++ { PIN_DU_DOTCLKIN3, 24, 2 }, /* DU_DOTCLKIN3 */
3012 + { PIN_FSCLKST, 20, 2 }, /* FSCLKST */
3013 + { PIN_TMS, 4, 2 }, /* TMS */
3014 + } },
3015 +@@ -6254,8 +6254,8 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
3016 + [31] = PIN_DU_DOTCLKIN1, /* DU_DOTCLKIN1 */
3017 + } },
3018 + { PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
3019 +- [ 0] = PIN_DU_DOTCLKIN3, /* DU_DOTCLKIN3 */
3020 +- [ 1] = SH_PFC_PIN_NONE,
3021 ++ [ 0] = SH_PFC_PIN_NONE,
3022 ++ [ 1] = PIN_DU_DOTCLKIN3, /* DU_DOTCLKIN3 */
3023 + [ 2] = PIN_FSCLKST, /* FSCLKST */
3024 + [ 3] = PIN_EXTALR, /* EXTALR*/
3025 + [ 4] = PIN_TRST_N, /* TRST# */
3026 +diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
3027 +index 292bace83f1e..6f436836fe50 100644
3028 +--- a/drivers/platform/x86/intel_mid_powerbtn.c
3029 ++++ b/drivers/platform/x86/intel_mid_powerbtn.c
3030 +@@ -146,9 +146,10 @@ static int mid_pb_probe(struct platform_device *pdev)
3031 +
3032 + input_set_capability(input, EV_KEY, KEY_POWER);
3033 +
3034 +- ddata = (struct mid_pb_ddata *)id->driver_data;
3035 ++ ddata = devm_kmemdup(&pdev->dev, (void *)id->driver_data,
3036 ++ sizeof(*ddata), GFP_KERNEL);
3037 + if (!ddata)
3038 +- return -ENODATA;
3039 ++ return -ENOMEM;
3040 +
3041 + ddata->dev = &pdev->dev;
3042 + ddata->irq = irq;
3043 +diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
3044 +index 033303708c8b..cb28bbdc9e17 100644
3045 +--- a/drivers/rtc/rtc-cmos.c
3046 ++++ b/drivers/rtc/rtc-cmos.c
3047 +@@ -850,7 +850,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
3048 + rtc_cmos_int_handler = cmos_interrupt;
3049 +
3050 + retval = request_irq(rtc_irq, rtc_cmos_int_handler,
3051 +- IRQF_SHARED, dev_name(&cmos_rtc.rtc->dev),
3052 ++ 0, dev_name(&cmos_rtc.rtc->dev),
3053 + cmos_rtc.rtc);
3054 + if (retval < 0) {
3055 + dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq);
3056 +diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
3057 +index 443f6d05ce29..fb6d7967ec00 100644
3058 +--- a/drivers/rtc/rtc-hym8563.c
3059 ++++ b/drivers/rtc/rtc-hym8563.c
3060 +@@ -97,7 +97,7 @@ static int hym8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
3061 +
3062 + if (!hym8563->valid) {
3063 + dev_warn(&client->dev, "no valid clock/calendar values available\n");
3064 +- return -EPERM;
3065 ++ return -EINVAL;
3066 + }
3067 +
3068 + ret = i2c_smbus_read_i2c_block_data(client, HYM8563_SEC, 7, buf);
3069 +diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
3070 +index 9135e2101752..cda238dfe69b 100644
3071 +--- a/drivers/rtc/rtc-mt6397.c
3072 ++++ b/drivers/rtc/rtc-mt6397.c
3073 +@@ -297,15 +297,7 @@ static int mtk_rtc_probe(struct platform_device *pdev)
3074 +
3075 + rtc->rtc_dev->ops = &mtk_rtc_ops;
3076 +
3077 +- ret = rtc_register_device(rtc->rtc_dev);
3078 +- if (ret)
3079 +- goto out_free_irq;
3080 +-
3081 +- return 0;
3082 +-
3083 +-out_free_irq:
3084 +- free_irq(rtc->irq, rtc);
3085 +- return ret;
3086 ++ return rtc_register_device(rtc->rtc_dev);
3087 + }
3088 +
3089 + #ifdef CONFIG_PM_SLEEP
3090 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
3091 +index 03173f06ab96..3fbf9ea16c64 100644
3092 +--- a/drivers/scsi/ufs/ufshcd.c
3093 ++++ b/drivers/scsi/ufs/ufshcd.c
3094 +@@ -7030,7 +7030,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
3095 + ufshcd_init_icc_levels(hba);
3096 +
3097 + /* Add required well known logical units to scsi mid layer */
3098 +- if (ufshcd_scsi_add_wlus(hba))
3099 ++ ret = ufshcd_scsi_add_wlus(hba);
3100 ++ if (ret)
3101 + goto out;
3102 +
3103 + /* Initialize devfreq after UFS device is detected */
3104 +diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
3105 +index 5741ec3fa814..51850cc68b70 100644
3106 +--- a/drivers/soc/qcom/rpmhpd.c
3107 ++++ b/drivers/soc/qcom/rpmhpd.c
3108 +@@ -93,6 +93,7 @@ static struct rpmhpd sdm845_mx = {
3109 +
3110 + static struct rpmhpd sdm845_mx_ao = {
3111 + .pd = { .name = "mx_ao", },
3112 ++ .active_only = true,
3113 + .peer = &sdm845_mx,
3114 + .res_name = "mx.lvl",
3115 + };
3116 +@@ -107,6 +108,7 @@ static struct rpmhpd sdm845_cx = {
3117 +
3118 + static struct rpmhpd sdm845_cx_ao = {
3119 + .pd = { .name = "cx_ao", },
3120 ++ .active_only = true,
3121 + .peer = &sdm845_cx,
3122 + .parent = &sdm845_mx_ao.pd,
3123 + .res_name = "cx.lvl",
3124 +diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
3125 +index a494543d3ae1..eb47fe5ed280 100644
3126 +--- a/drivers/watchdog/qcom-wdt.c
3127 ++++ b/drivers/watchdog/qcom-wdt.c
3128 +@@ -246,7 +246,7 @@ static int qcom_wdt_probe(struct platform_device *pdev)
3129 + }
3130 +
3131 + /* check if there is pretimeout support */
3132 +- irq = platform_get_irq(pdev, 0);
3133 ++ irq = platform_get_irq_optional(pdev, 0);
3134 + if (irq > 0) {
3135 + ret = devm_request_irq(dev, irq, qcom_wdt_isr,
3136 + IRQF_TRIGGER_RISING,
3137 +diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
3138 +index a3a329011a06..25188d6bbe15 100644
3139 +--- a/drivers/watchdog/stm32_iwdg.c
3140 ++++ b/drivers/watchdog/stm32_iwdg.c
3141 +@@ -262,6 +262,24 @@ static int stm32_iwdg_probe(struct platform_device *pdev)
3142 + watchdog_set_nowayout(wdd, WATCHDOG_NOWAYOUT);
3143 + watchdog_init_timeout(wdd, 0, dev);
3144 +
3145 ++ /*
3146 ++ * In case of CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED is set
3147 ++ * (Means U-Boot/bootloaders leaves the watchdog running)
3148 ++ * When we get here we should make a decision to prevent
3149 ++ * any side effects before user space daemon will take care of it.
3150 ++ * The best option, taking into consideration that there is no
3151 ++ * way to read values back from hardware, is to enforce watchdog
3152 ++ * being run with deterministic values.
3153 ++ */
3154 ++ if (IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED)) {
3155 ++ ret = stm32_iwdg_start(wdd);
3156 ++ if (ret)
3157 ++ return ret;
3158 ++
3159 ++ /* Make sure the watchdog is serviced */
3160 ++ set_bit(WDOG_HW_RUNNING, &wdd->status);
3161 ++ }
3162 ++
3163 + ret = devm_watchdog_register_device(dev, wdd);
3164 + if (ret)
3165 + return ret;
3166 +diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
3167 +index 295a7a21b774..e7dd07f47825 100644
3168 +--- a/fs/nfs/Kconfig
3169 ++++ b/fs/nfs/Kconfig
3170 +@@ -90,7 +90,7 @@ config NFS_V4
3171 + config NFS_SWAP
3172 + bool "Provide swap over NFS support"
3173 + default n
3174 +- depends on NFS_FS
3175 ++ depends on NFS_FS && SWAP
3176 + select SUNRPC_SWAP
3177 + help
3178 + This option enables swapon to work on files located on NFS mounts.
3179 +diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
3180 +index 040a50fd9bf3..29f00da8a0b7 100644
3181 +--- a/fs/nfs/direct.c
3182 ++++ b/fs/nfs/direct.c
3183 +@@ -245,10 +245,10 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
3184 + data->ds_commit_index);
3185 +
3186 + /* verifier not set so always fail */
3187 +- if (verfp->committed < 0)
3188 ++ if (verfp->committed < 0 || data->res.verf->committed <= NFS_UNSTABLE)
3189 + return 1;
3190 +
3191 +- return nfs_direct_cmp_verf(verfp, &data->verf);
3192 ++ return nfs_direct_cmp_verf(verfp, data->res.verf);
3193 + }
3194 +
3195 + /**
3196 +diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
3197 +index 927eb680f161..69971f6c840d 100644
3198 +--- a/fs/nfs/nfs3xdr.c
3199 ++++ b/fs/nfs/nfs3xdr.c
3200 +@@ -2334,6 +2334,7 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
3201 + void *data)
3202 + {
3203 + struct nfs_commitres *result = data;
3204 ++ struct nfs_writeverf *verf = result->verf;
3205 + enum nfs_stat status;
3206 + int error;
3207 +
3208 +@@ -2346,7 +2347,9 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
3209 + result->op_status = status;
3210 + if (status != NFS3_OK)
3211 + goto out_status;
3212 +- error = decode_writeverf3(xdr, &result->verf->verifier);
3213 ++ error = decode_writeverf3(xdr, &verf->verifier);
3214 ++ if (!error)
3215 ++ verf->committed = NFS_FILE_SYNC;
3216 + out:
3217 + return error;
3218 + out_status:
3219 +diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
3220 +index 1fe83e0f663e..9637aad36bdc 100644
3221 +--- a/fs/nfs/nfs42proc.c
3222 ++++ b/fs/nfs/nfs42proc.c
3223 +@@ -61,8 +61,11 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
3224 +
3225 + status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
3226 + lock, FMODE_WRITE);
3227 +- if (status)
3228 ++ if (status) {
3229 ++ if (status == -EAGAIN)
3230 ++ status = -NFS4ERR_BAD_STATEID;
3231 + return status;
3232 ++ }
3233 +
3234 + res.falloc_fattr = nfs_alloc_fattr();
3235 + if (!res.falloc_fattr)
3236 +@@ -287,8 +290,11 @@ static ssize_t _nfs42_proc_copy(struct file *src,
3237 + } else {
3238 + status = nfs4_set_rw_stateid(&args->src_stateid,
3239 + src_lock->open_context, src_lock, FMODE_READ);
3240 +- if (status)
3241 ++ if (status) {
3242 ++ if (status == -EAGAIN)
3243 ++ status = -NFS4ERR_BAD_STATEID;
3244 + return status;
3245 ++ }
3246 + }
3247 + status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
3248 + pos_src, pos_src + (loff_t)count - 1);
3249 +@@ -297,8 +303,11 @@ static ssize_t _nfs42_proc_copy(struct file *src,
3250 +
3251 + status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
3252 + dst_lock, FMODE_WRITE);
3253 +- if (status)
3254 ++ if (status) {
3255 ++ if (status == -EAGAIN)
3256 ++ status = -NFS4ERR_BAD_STATEID;
3257 + return status;
3258 ++ }
3259 +
3260 + status = nfs_sync_inode(dst_inode);
3261 + if (status)
3262 +@@ -546,8 +555,11 @@ static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
3263 + status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
3264 + FMODE_READ);
3265 + nfs_put_lock_context(l_ctx);
3266 +- if (status)
3267 ++ if (status) {
3268 ++ if (status == -EAGAIN)
3269 ++ status = -NFS4ERR_BAD_STATEID;
3270 + return status;
3271 ++ }
3272 +
3273 + status = nfs4_call_sync(src_server->client, src_server, &msg,
3274 + &args->cna_seq_args, &res->cnr_seq_res, 0);
3275 +@@ -618,8 +630,11 @@ static loff_t _nfs42_proc_llseek(struct file *filep,
3276 +
3277 + status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
3278 + lock, FMODE_READ);
3279 +- if (status)
3280 ++ if (status) {
3281 ++ if (status == -EAGAIN)
3282 ++ status = -NFS4ERR_BAD_STATEID;
3283 + return status;
3284 ++ }
3285 +
3286 + status = nfs_filemap_write_and_wait_range(inode->i_mapping,
3287 + offset, LLONG_MAX);
3288 +@@ -994,13 +1009,18 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
3289 +
3290 + status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
3291 + src_lock, FMODE_READ);
3292 +- if (status)
3293 ++ if (status) {
3294 ++ if (status == -EAGAIN)
3295 ++ status = -NFS4ERR_BAD_STATEID;
3296 + return status;
3297 +-
3298 ++ }
3299 + status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
3300 + dst_lock, FMODE_WRITE);
3301 +- if (status)
3302 ++ if (status) {
3303 ++ if (status == -EAGAIN)
3304 ++ status = -NFS4ERR_BAD_STATEID;
3305 + return status;
3306 ++ }
3307 +
3308 + res.dst_fattr = nfs_alloc_fattr();
3309 + if (!res.dst_fattr)
3310 +diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
3311 +index a7a73b1d1fec..a5db055e2a9b 100644
3312 +--- a/fs/nfs/nfs4_fs.h
3313 ++++ b/fs/nfs/nfs4_fs.h
3314 +@@ -446,9 +446,7 @@ extern void nfs4_schedule_state_renewal(struct nfs_client *);
3315 + extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
3316 + extern void nfs4_kill_renewd(struct nfs_client *);
3317 + extern void nfs4_renew_state(struct work_struct *);
3318 +-extern void nfs4_set_lease_period(struct nfs_client *clp,
3319 +- unsigned long lease,
3320 +- unsigned long lastrenewed);
3321 ++extern void nfs4_set_lease_period(struct nfs_client *clp, unsigned long lease);
3322 +
3323 +
3324 + /* nfs4state.c */
3325 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3326 +index 76d37161409a..a2759b4062ae 100644
3327 +--- a/fs/nfs/nfs4proc.c
3328 ++++ b/fs/nfs/nfs4proc.c
3329 +@@ -3187,6 +3187,11 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
3330 + exception.retry = 1;
3331 + continue;
3332 + }
3333 ++ if (status == -NFS4ERR_EXPIRED) {
3334 ++ nfs4_schedule_lease_recovery(server->nfs_client);
3335 ++ exception.retry = 1;
3336 ++ continue;
3337 ++ }
3338 + if (status == -EAGAIN) {
3339 + /* We must have found a delegation */
3340 + exception.retry = 1;
3341 +@@ -3239,6 +3244,8 @@ static int _nfs4_do_setattr(struct inode *inode,
3342 + nfs_put_lock_context(l_ctx);
3343 + if (status == -EIO)
3344 + return -EBADF;
3345 ++ else if (status == -EAGAIN)
3346 ++ goto zero_stateid;
3347 + } else {
3348 + zero_stateid:
3349 + nfs4_stateid_copy(&arg->stateid, &zero_stateid);
3350 +@@ -5019,16 +5026,13 @@ static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, str
3351 + struct nfs4_exception exception = {
3352 + .interruptible = true,
3353 + };
3354 +- unsigned long now = jiffies;
3355 + int err;
3356 +
3357 + do {
3358 + err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
3359 + trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
3360 + if (err == 0) {
3361 +- nfs4_set_lease_period(server->nfs_client,
3362 +- fsinfo->lease_time * HZ,
3363 +- now);
3364 ++ nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ);
3365 + break;
3366 + }
3367 + err = nfs4_handle_exception(server, err, &exception);
3368 +@@ -6084,6 +6088,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
3369 + .callback_data = &setclientid,
3370 + .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
3371 + };
3372 ++ unsigned long now = jiffies;
3373 + int status;
3374 +
3375 + /* nfs_client_id4 */
3376 +@@ -6116,6 +6121,9 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
3377 + clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
3378 + put_rpccred(setclientid.sc_cred);
3379 + }
3380 ++
3381 ++ if (status == 0)
3382 ++ do_renew_lease(clp, now);
3383 + out:
3384 + trace_nfs4_setclientid(clp, status);
3385 + dprintk("NFS reply setclientid: %d\n", status);
3386 +@@ -8203,6 +8211,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
3387 + struct rpc_task *task;
3388 + struct nfs41_exchange_id_args *argp;
3389 + struct nfs41_exchange_id_res *resp;
3390 ++ unsigned long now = jiffies;
3391 + int status;
3392 +
3393 + task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL);
3394 +@@ -8223,6 +8232,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
3395 + if (status != 0)
3396 + goto out;
3397 +
3398 ++ do_renew_lease(clp, now);
3399 ++
3400 + clp->cl_clientid = resp->clientid;
3401 + clp->cl_exchange_flags = resp->flags;
3402 + clp->cl_seqid = resp->seqid;
3403 +diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
3404 +index 6ea431b067dd..ff876dda7f06 100644
3405 +--- a/fs/nfs/nfs4renewd.c
3406 ++++ b/fs/nfs/nfs4renewd.c
3407 +@@ -138,15 +138,12 @@ nfs4_kill_renewd(struct nfs_client *clp)
3408 + *
3409 + * @clp: pointer to nfs_client
3410 + * @lease: new value for lease period
3411 +- * @lastrenewed: time at which lease was last renewed
3412 + */
3413 + void nfs4_set_lease_period(struct nfs_client *clp,
3414 +- unsigned long lease,
3415 +- unsigned long lastrenewed)
3416 ++ unsigned long lease)
3417 + {
3418 + spin_lock(&clp->cl_lock);
3419 + clp->cl_lease_time = lease;
3420 +- clp->cl_last_renewal = lastrenewed;
3421 + spin_unlock(&clp->cl_lock);
3422 +
3423 + /* Cap maximum reconnect timeout at 1/2 lease period */
3424 +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
3425 +index 34552329233d..f0b002734355 100644
3426 +--- a/fs/nfs/nfs4state.c
3427 ++++ b/fs/nfs/nfs4state.c
3428 +@@ -92,17 +92,15 @@ static int nfs4_setup_state_renewal(struct nfs_client *clp)
3429 + {
3430 + int status;
3431 + struct nfs_fsinfo fsinfo;
3432 +- unsigned long now;
3433 +
3434 + if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
3435 + nfs4_schedule_state_renewal(clp);
3436 + return 0;
3437 + }
3438 +
3439 +- now = jiffies;
3440 + status = nfs4_proc_get_lease_time(clp, &fsinfo);
3441 + if (status == 0) {
3442 +- nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now);
3443 ++ nfs4_set_lease_period(clp, fsinfo.lease_time * HZ);
3444 + nfs4_schedule_state_renewal(clp);
3445 + }
3446 +
3447 +diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
3448 +index e60b6fbd5ada..d405557cb43f 100644
3449 +--- a/fs/nfs/nfs4trace.h
3450 ++++ b/fs/nfs/nfs4trace.h
3451 +@@ -352,7 +352,7 @@ DECLARE_EVENT_CLASS(nfs4_clientid_event,
3452 + ),
3453 +
3454 + TP_fast_assign(
3455 +- __entry->error = error;
3456 ++ __entry->error = error < 0 ? -error : 0;
3457 + __assign_str(dstaddr, clp->cl_hostname);
3458 + ),
3459 +
3460 +@@ -432,7 +432,8 @@ TRACE_EVENT(nfs4_sequence_done,
3461 + __entry->target_highest_slotid =
3462 + res->sr_target_highest_slotid;
3463 + __entry->status_flags = res->sr_status_flags;
3464 +- __entry->error = res->sr_status;
3465 ++ __entry->error = res->sr_status < 0 ?
3466 ++ -res->sr_status : 0;
3467 + ),
3468 + TP_printk(
3469 + "error=%ld (%s) session=0x%08x slot_nr=%u seq_nr=%u "
3470 +@@ -640,7 +641,7 @@ TRACE_EVENT(nfs4_state_mgr_failed,
3471 + ),
3472 +
3473 + TP_fast_assign(
3474 +- __entry->error = status;
3475 ++ __entry->error = status < 0 ? -status : 0;
3476 + __entry->state = clp->cl_state;
3477 + __assign_str(hostname, clp->cl_hostname);
3478 + __assign_str(section, section);
3479 +@@ -659,7 +660,7 @@ TRACE_EVENT(nfs4_xdr_status,
3480 + TP_PROTO(
3481 + const struct xdr_stream *xdr,
3482 + u32 op,
3483 +- int error
3484 ++ u32 error
3485 + ),
3486 +
3487 + TP_ARGS(xdr, op, error),
3488 +@@ -849,7 +850,7 @@ TRACE_EVENT(nfs4_close,
3489 + __entry->fileid = NFS_FILEID(inode);
3490 + __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
3491 + __entry->fmode = (__force unsigned int)state->state;
3492 +- __entry->error = error;
3493 ++ __entry->error = error < 0 ? -error : 0;
3494 + __entry->stateid_seq =
3495 + be32_to_cpu(args->stateid.seqid);
3496 + __entry->stateid_hash =
3497 +@@ -914,7 +915,7 @@ DECLARE_EVENT_CLASS(nfs4_lock_event,
3498 + TP_fast_assign(
3499 + const struct inode *inode = state->inode;
3500 +
3501 +- __entry->error = error;
3502 ++ __entry->error = error < 0 ? -error : 0;
3503 + __entry->cmd = cmd;
3504 + __entry->type = request->fl_type;
3505 + __entry->start = request->fl_start;
3506 +@@ -986,7 +987,7 @@ TRACE_EVENT(nfs4_set_lock,
3507 + TP_fast_assign(
3508 + const struct inode *inode = state->inode;
3509 +
3510 +- __entry->error = error;
3511 ++ __entry->error = error < 0 ? -error : 0;
3512 + __entry->cmd = cmd;
3513 + __entry->type = request->fl_type;
3514 + __entry->start = request->fl_start;
3515 +@@ -1164,7 +1165,7 @@ TRACE_EVENT(nfs4_delegreturn_exit,
3516 + TP_fast_assign(
3517 + __entry->dev = res->server->s_dev;
3518 + __entry->fhandle = nfs_fhandle_hash(args->fhandle);
3519 +- __entry->error = error;
3520 ++ __entry->error = error < 0 ? -error : 0;
3521 + __entry->stateid_seq =
3522 + be32_to_cpu(args->stateid->seqid);
3523 + __entry->stateid_hash =
3524 +@@ -1204,7 +1205,7 @@ DECLARE_EVENT_CLASS(nfs4_test_stateid_event,
3525 + TP_fast_assign(
3526 + const struct inode *inode = state->inode;
3527 +
3528 +- __entry->error = error;
3529 ++ __entry->error = error < 0 ? -error : 0;
3530 + __entry->dev = inode->i_sb->s_dev;
3531 + __entry->fileid = NFS_FILEID(inode);
3532 + __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
3533 +@@ -1306,7 +1307,7 @@ TRACE_EVENT(nfs4_lookupp,
3534 + TP_fast_assign(
3535 + __entry->dev = inode->i_sb->s_dev;
3536 + __entry->ino = NFS_FILEID(inode);
3537 +- __entry->error = error;
3538 ++ __entry->error = error < 0 ? -error : 0;
3539 + ),
3540 +
3541 + TP_printk(
3542 +@@ -1342,7 +1343,7 @@ TRACE_EVENT(nfs4_rename,
3543 + __entry->dev = olddir->i_sb->s_dev;
3544 + __entry->olddir = NFS_FILEID(olddir);
3545 + __entry->newdir = NFS_FILEID(newdir);
3546 +- __entry->error = error;
3547 ++ __entry->error = error < 0 ? -error : 0;
3548 + __assign_str(oldname, oldname->name);
3549 + __assign_str(newname, newname->name);
3550 + ),
3551 +@@ -1433,7 +1434,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_event,
3552 + __entry->dev = inode->i_sb->s_dev;
3553 + __entry->fileid = NFS_FILEID(inode);
3554 + __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
3555 +- __entry->error = error;
3556 ++ __entry->error = error < 0 ? -error : 0;
3557 + __entry->stateid_seq =
3558 + be32_to_cpu(stateid->seqid);
3559 + __entry->stateid_hash =
3560 +@@ -1489,7 +1490,7 @@ DECLARE_EVENT_CLASS(nfs4_getattr_event,
3561 + __entry->valid = fattr->valid;
3562 + __entry->fhandle = nfs_fhandle_hash(fhandle);
3563 + __entry->fileid = (fattr->valid & NFS_ATTR_FATTR_FILEID) ? fattr->fileid : 0;
3564 +- __entry->error = error;
3565 ++ __entry->error = error < 0 ? -error : 0;
3566 + ),
3567 +
3568 + TP_printk(
3569 +@@ -1536,7 +1537,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
3570 + ),
3571 +
3572 + TP_fast_assign(
3573 +- __entry->error = error;
3574 ++ __entry->error = error < 0 ? -error : 0;
3575 + __entry->fhandle = nfs_fhandle_hash(fhandle);
3576 + if (!IS_ERR_OR_NULL(inode)) {
3577 + __entry->fileid = NFS_FILEID(inode);
3578 +@@ -1593,7 +1594,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
3579 + ),
3580 +
3581 + TP_fast_assign(
3582 +- __entry->error = error;
3583 ++ __entry->error = error < 0 ? -error : 0;
3584 + __entry->fhandle = nfs_fhandle_hash(fhandle);
3585 + if (!IS_ERR_OR_NULL(inode)) {
3586 + __entry->fileid = NFS_FILEID(inode);
3587 +@@ -1896,7 +1897,7 @@ TRACE_EVENT(nfs4_layoutget,
3588 + __entry->iomode = args->iomode;
3589 + __entry->offset = args->offset;
3590 + __entry->count = args->length;
3591 +- __entry->error = error;
3592 ++ __entry->error = error < 0 ? -error : 0;
3593 + __entry->stateid_seq =
3594 + be32_to_cpu(state->stateid.seqid);
3595 + __entry->stateid_hash =
3596 +diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
3597 +index 936c57779ff4..d0feef17db50 100644
3598 +--- a/fs/nfs/nfs4xdr.c
3599 ++++ b/fs/nfs/nfs4xdr.c
3600 +@@ -4313,11 +4313,14 @@ static int decode_write_verifier(struct xdr_stream *xdr, struct nfs_write_verifi
3601 +
3602 + static int decode_commit(struct xdr_stream *xdr, struct nfs_commitres *res)
3603 + {
3604 ++ struct nfs_writeverf *verf = res->verf;
3605 + int status;
3606 +
3607 + status = decode_op_hdr(xdr, OP_COMMIT);
3608 + if (!status)
3609 +- status = decode_write_verifier(xdr, &res->verf->verifier);
3610 ++ status = decode_write_verifier(xdr, &verf->verifier);
3611 ++ if (!status)
3612 ++ verf->committed = NFS_FILE_SYNC;
3613 + return status;
3614 + }
3615 +
3616 +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
3617 +index cec3070ab577..542ea8dfd1bc 100644
3618 +--- a/fs/nfs/pnfs.c
3619 ++++ b/fs/nfs/pnfs.c
3620 +@@ -1425,7 +1425,7 @@ retry:
3621 + /* lo ref dropped in pnfs_roc_release() */
3622 + layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
3623 + /* If the creds don't match, we can't compound the layoutreturn */
3624 +- if (!layoutreturn || cred != lo->plh_lc_cred)
3625 ++ if (!layoutreturn || cred_fscmp(cred, lo->plh_lc_cred) != 0)
3626 + goto out_noroc;
3627 +
3628 + roc = layoutreturn;
3629 +@@ -1998,8 +1998,6 @@ lookup_again:
3630 + trace_pnfs_update_layout(ino, pos, count,
3631 + iomode, lo, lseg,
3632 + PNFS_UPDATE_LAYOUT_INVALID_OPEN);
3633 +- if (status != -EAGAIN)
3634 +- goto out_unlock;
3635 + spin_unlock(&ino->i_lock);
3636 + nfs4_schedule_stateid_recovery(server, ctx->state);
3637 + pnfs_clear_first_layoutget(lo);
3638 +diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
3639 +index 82af4809b869..8b37e7f8e789 100644
3640 +--- a/fs/nfs/pnfs_nfs.c
3641 ++++ b/fs/nfs/pnfs_nfs.c
3642 +@@ -31,12 +31,11 @@ EXPORT_SYMBOL_GPL(pnfs_generic_rw_release);
3643 + /* Fake up some data that will cause nfs_commit_release to retry the writes. */
3644 + void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data)
3645 + {
3646 +- struct nfs_page *first = nfs_list_entry(data->pages.next);
3647 ++ struct nfs_writeverf *verf = data->res.verf;
3648 +
3649 + data->task.tk_status = 0;
3650 +- memcpy(&data->verf.verifier, &first->wb_verf,
3651 +- sizeof(data->verf.verifier));
3652 +- data->verf.verifier.data[0]++; /* ensure verifier mismatch */
3653 ++ memset(&verf->verifier, 0, sizeof(verf->verifier));
3654 ++ verf->committed = NFS_UNSTABLE;
3655 + }
3656 + EXPORT_SYMBOL_GPL(pnfs_generic_prepare_to_resend_writes);
3657 +
3658 +diff --git a/fs/nfs/write.c b/fs/nfs/write.c
3659 +index 52cab65f91cf..913eb37c249b 100644
3660 +--- a/fs/nfs/write.c
3661 ++++ b/fs/nfs/write.c
3662 +@@ -243,7 +243,15 @@ out:
3663 + /* A writeback failed: mark the page as bad, and invalidate the page cache */
3664 + static void nfs_set_pageerror(struct address_space *mapping)
3665 + {
3666 ++ struct inode *inode = mapping->host;
3667 ++
3668 + nfs_zap_mapping(mapping->host, mapping);
3669 ++ /* Force file size revalidation */
3670 ++ spin_lock(&inode->i_lock);
3671 ++ NFS_I(inode)->cache_validity |= NFS_INO_REVAL_FORCED |
3672 ++ NFS_INO_REVAL_PAGECACHE |
3673 ++ NFS_INO_INVALID_SIZE;
3674 ++ spin_unlock(&inode->i_lock);
3675 + }
3676 +
3677 + static void nfs_mapping_set_error(struct page *page, int error)
3678 +@@ -1829,6 +1837,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
3679 +
3680 + static void nfs_commit_release_pages(struct nfs_commit_data *data)
3681 + {
3682 ++ const struct nfs_writeverf *verf = data->res.verf;
3683 + struct nfs_page *req;
3684 + int status = data->task.tk_status;
3685 + struct nfs_commit_info cinfo;
3686 +@@ -1856,7 +1865,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
3687 +
3688 + /* Okay, COMMIT succeeded, apparently. Check the verifier
3689 + * returned by the server against all stored verfs. */
3690 +- if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) {
3691 ++ if (verf->committed > NFS_UNSTABLE &&
3692 ++ !nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier)) {
3693 + /* We have a match */
3694 + if (req->wb_page)
3695 + nfs_inode_remove_request(req);
3696 +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
3697 +index 27200dea0297..a24937fc56b9 100644
3698 +--- a/include/linux/mlx5/driver.h
3699 ++++ b/include/linux/mlx5/driver.h
3700 +@@ -461,6 +461,11 @@ struct mlx5_vf_context {
3701 + int enabled;
3702 + u64 port_guid;
3703 + u64 node_guid;
3704 ++ /* Valid bits are used to validate administrative guid only.
3705 ++ * Enabled after ndo_set_vf_guid
3706 ++ */
3707 ++ u8 port_guid_valid:1;
3708 ++ u8 node_guid_valid:1;
3709 + enum port_state_policy policy;
3710 + };
3711 +
3712 +diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
3713 +index 5608e14e3aad..8d0f447e1faa 100644
3714 +--- a/include/rdma/ib_verbs.h
3715 ++++ b/include/rdma/ib_verbs.h
3716 +@@ -4300,6 +4300,9 @@ static inline int ib_check_mr_access(int flags)
3717 + !(flags & IB_ACCESS_LOCAL_WRITE))
3718 + return -EINVAL;
3719 +
3720 ++ if (flags & ~IB_ACCESS_SUPPORTED)
3721 ++ return -EINVAL;
3722 ++
3723 + return 0;
3724 + }
3725 +
3726 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
3727 +index 90e4b00ace89..bfe756dee129 100644
3728 +--- a/kernel/sched/core.c
3729 ++++ b/kernel/sched/core.c
3730 +@@ -7100,6 +7100,12 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
3731 +
3732 + if (parent)
3733 + sched_online_group(tg, parent);
3734 ++
3735 ++#ifdef CONFIG_UCLAMP_TASK_GROUP
3736 ++ /* Propagate the effective uclamp value for the new group */
3737 ++ cpu_util_update_eff(css);
3738 ++#endif
3739 ++
3740 + return 0;
3741 + }
3742 +
3743 +diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
3744 +index 458be6b3eda9..3ab23f698221 100644
3745 +--- a/net/core/bpf_sk_storage.c
3746 ++++ b/net/core/bpf_sk_storage.c
3747 +@@ -643,9 +643,10 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
3748 + return ERR_PTR(-ENOMEM);
3749 + bpf_map_init_from_attr(&smap->map, attr);
3750 +
3751 ++ nbuckets = roundup_pow_of_two(num_possible_cpus());
3752 + /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
3753 +- smap->bucket_log = max_t(u32, 1, ilog2(roundup_pow_of_two(num_possible_cpus())));
3754 +- nbuckets = 1U << smap->bucket_log;
3755 ++ nbuckets = max_t(u32, 2, nbuckets);
3756 ++ smap->bucket_log = ilog2(nbuckets);
3757 + cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);
3758 +
3759 + ret = bpf_map_charge_init(&smap->map.memory, cost);
3760 +diff --git a/net/core/sock_map.c b/net/core/sock_map.c
3761 +index 8998e356f423..085cef5857bb 100644
3762 +--- a/net/core/sock_map.c
3763 ++++ b/net/core/sock_map.c
3764 +@@ -234,7 +234,6 @@ static void sock_map_free(struct bpf_map *map)
3765 + int i;
3766 +
3767 + synchronize_rcu();
3768 +- rcu_read_lock();
3769 + raw_spin_lock_bh(&stab->lock);
3770 + for (i = 0; i < stab->map.max_entries; i++) {
3771 + struct sock **psk = &stab->sks[i];
3772 +@@ -243,13 +242,15 @@ static void sock_map_free(struct bpf_map *map)
3773 + sk = xchg(psk, NULL);
3774 + if (sk) {
3775 + lock_sock(sk);
3776 ++ rcu_read_lock();
3777 + sock_map_unref(sk, psk);
3778 ++ rcu_read_unlock();
3779 + release_sock(sk);
3780 + }
3781 + }
3782 + raw_spin_unlock_bh(&stab->lock);
3783 +- rcu_read_unlock();
3784 +
3785 ++ /* wait for psock readers accessing its map link */
3786 + synchronize_rcu();
3787 +
3788 + bpf_map_area_free(stab->sks);
3789 +@@ -416,14 +417,16 @@ static int sock_map_update_elem(struct bpf_map *map, void *key,
3790 + ret = -EINVAL;
3791 + goto out;
3792 + }
3793 +- if (!sock_map_sk_is_suitable(sk) ||
3794 +- sk->sk_state != TCP_ESTABLISHED) {
3795 ++ if (!sock_map_sk_is_suitable(sk)) {
3796 + ret = -EOPNOTSUPP;
3797 + goto out;
3798 + }
3799 +
3800 + sock_map_sk_acquire(sk);
3801 +- ret = sock_map_update_common(map, idx, sk, flags);
3802 ++ if (sk->sk_state != TCP_ESTABLISHED)
3803 ++ ret = -EOPNOTSUPP;
3804 ++ else
3805 ++ ret = sock_map_update_common(map, idx, sk, flags);
3806 + sock_map_sk_release(sk);
3807 + out:
3808 + fput(sock->file);
3809 +@@ -739,14 +742,16 @@ static int sock_hash_update_elem(struct bpf_map *map, void *key,
3810 + ret = -EINVAL;
3811 + goto out;
3812 + }
3813 +- if (!sock_map_sk_is_suitable(sk) ||
3814 +- sk->sk_state != TCP_ESTABLISHED) {
3815 ++ if (!sock_map_sk_is_suitable(sk)) {
3816 + ret = -EOPNOTSUPP;
3817 + goto out;
3818 + }
3819 +
3820 + sock_map_sk_acquire(sk);
3821 +- ret = sock_hash_update_common(map, key, sk, flags);
3822 ++ if (sk->sk_state != TCP_ESTABLISHED)
3823 ++ ret = -EOPNOTSUPP;
3824 ++ else
3825 ++ ret = sock_hash_update_common(map, key, sk, flags);
3826 + sock_map_sk_release(sk);
3827 + out:
3828 + fput(sock->file);
3829 +@@ -859,19 +864,22 @@ static void sock_hash_free(struct bpf_map *map)
3830 + int i;
3831 +
3832 + synchronize_rcu();
3833 +- rcu_read_lock();
3834 + for (i = 0; i < htab->buckets_num; i++) {
3835 + bucket = sock_hash_select_bucket(htab, i);
3836 + raw_spin_lock_bh(&bucket->lock);
3837 + hlist_for_each_entry_safe(elem, node, &bucket->head, node) {
3838 + hlist_del_rcu(&elem->node);
3839 + lock_sock(elem->sk);
3840 ++ rcu_read_lock();
3841 + sock_map_unref(elem->sk, elem);
3842 ++ rcu_read_unlock();
3843 + release_sock(elem->sk);
3844 + }
3845 + raw_spin_unlock_bh(&bucket->lock);
3846 + }
3847 +- rcu_read_unlock();
3848 ++
3849 ++ /* wait for psock readers accessing its map link */
3850 ++ synchronize_rcu();
3851 +
3852 + bpf_map_area_free(htab->buckets);
3853 + kfree(htab);
3854 +diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
3855 +index e33a73cb1f42..86eefb613b08 100644
3856 +--- a/net/netfilter/nf_flow_table_core.c
3857 ++++ b/net/netfilter/nf_flow_table_core.c
3858 +@@ -348,9 +348,6 @@ static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
3859 + {
3860 + struct nf_flowtable *flow_table = data;
3861 +
3862 +- if (flow->flags & FLOW_OFFLOAD_HW)
3863 +- nf_flow_offload_stats(flow_table, flow);
3864 +-
3865 + if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct) ||
3866 + (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))) {
3867 + if (flow->flags & FLOW_OFFLOAD_HW) {
3868 +@@ -361,6 +358,8 @@ static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
3869 + } else {
3870 + flow_offload_del(flow_table, flow);
3871 + }
3872 ++ } else if (flow->flags & FLOW_OFFLOAD_HW) {
3873 ++ nf_flow_offload_stats(flow_table, flow);
3874 + }
3875 + }
3876 +
3877 +@@ -530,9 +529,9 @@ static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
3878 + static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
3879 + struct net_device *dev)
3880 + {
3881 +- nf_flow_table_offload_flush(flowtable);
3882 + nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
3883 + flush_delayed_work(&flowtable->gc_work);
3884 ++ nf_flow_table_offload_flush(flowtable);
3885 + }
3886 +
3887 + void nf_flow_table_cleanup(struct net_device *dev)
3888 +@@ -554,6 +553,7 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
3889 + cancel_delayed_work_sync(&flow_table->gc_work);
3890 + nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
3891 + nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
3892 ++ nf_flow_table_offload_flush(flow_table);
3893 + rhashtable_destroy(&flow_table->rhashtable);
3894 + }
3895 + EXPORT_SYMBOL_GPL(nf_flow_table_free);
3896 +diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
3897 +index d06969af1085..b879e673953f 100644
3898 +--- a/net/netfilter/nf_flow_table_offload.c
3899 ++++ b/net/netfilter/nf_flow_table_offload.c
3900 +@@ -24,6 +24,7 @@ struct flow_offload_work {
3901 + };
3902 +
3903 + struct nf_flow_key {
3904 ++ struct flow_dissector_key_meta meta;
3905 + struct flow_dissector_key_control control;
3906 + struct flow_dissector_key_basic basic;
3907 + union {
3908 +@@ -55,6 +56,7 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
3909 + struct nf_flow_key *mask = &match->mask;
3910 + struct nf_flow_key *key = &match->key;
3911 +
3912 ++ NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_META, meta);
3913 + NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control);
3914 + NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_BASIC, basic);
3915 + NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
3916 +@@ -62,6 +64,9 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
3917 + NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp);
3918 + NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp);
3919 +
3920 ++ key->meta.ingress_ifindex = tuple->iifidx;
3921 ++ mask->meta.ingress_ifindex = 0xffffffff;
3922 ++
3923 + switch (tuple->l3proto) {
3924 + case AF_INET:
3925 + key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
3926 +@@ -105,7 +110,8 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
3927 + key->tp.dst = tuple->dst_port;
3928 + mask->tp.dst = 0xffff;
3929 +
3930 +- match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL) |
3931 ++ match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_META) |
3932 ++ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
3933 + BIT(FLOW_DISSECTOR_KEY_BASIC) |
3934 + BIT(FLOW_DISSECTOR_KEY_PORTS);
3935 + return 0;
3936 +@@ -784,8 +790,7 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
3937 + __s32 delta;
3938 +
3939 + delta = nf_flow_timeout_delta(flow->timeout);
3940 +- if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10) ||
3941 +- flow->flags & FLOW_OFFLOAD_HW_DYING)
3942 ++ if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10))
3943 + return;
3944 +
3945 + offload = kzalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
3946 +diff --git a/security/selinux/avc.c b/security/selinux/avc.c
3947 +index ecd3829996aa..23dc888ae305 100644
3948 +--- a/security/selinux/avc.c
3949 ++++ b/security/selinux/avc.c
3950 +@@ -424,7 +424,7 @@ static inline int avc_xperms_audit(struct selinux_state *state,
3951 + if (likely(!audited))
3952 + return 0;
3953 + return slow_avc_audit(state, ssid, tsid, tclass, requested,
3954 +- audited, denied, result, ad, 0);
3955 ++ audited, denied, result, ad);
3956 + }
3957 +
3958 + static void avc_node_free(struct rcu_head *rhead)
3959 +@@ -758,8 +758,7 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
3960 + noinline int slow_avc_audit(struct selinux_state *state,
3961 + u32 ssid, u32 tsid, u16 tclass,
3962 + u32 requested, u32 audited, u32 denied, int result,
3963 +- struct common_audit_data *a,
3964 +- unsigned int flags)
3965 ++ struct common_audit_data *a)
3966 + {
3967 + struct common_audit_data stack_data;
3968 + struct selinux_audit_data sad;
3969 +@@ -772,17 +771,6 @@ noinline int slow_avc_audit(struct selinux_state *state,
3970 + a->type = LSM_AUDIT_DATA_NONE;
3971 + }
3972 +
3973 +- /*
3974 +- * When in a RCU walk do the audit on the RCU retry. This is because
3975 +- * the collection of the dname in an inode audit message is not RCU
3976 +- * safe. Note this may drop some audits when the situation changes
3977 +- * during retry. However this is logically just as if the operation
3978 +- * happened a little later.
3979 +- */
3980 +- if ((a->type == LSM_AUDIT_DATA_INODE) &&
3981 +- (flags & MAY_NOT_BLOCK))
3982 +- return -ECHILD;
3983 +-
3984 + sad.tclass = tclass;
3985 + sad.requested = requested;
3986 + sad.ssid = ssid;
3987 +@@ -855,15 +843,14 @@ static int avc_update_node(struct selinux_avc *avc,
3988 + /*
3989 + * If we are in a non-blocking code path, e.g. VFS RCU walk,
3990 + * then we must not add permissions to a cache entry
3991 +- * because we cannot safely audit the denial. Otherwise,
3992 ++ * because we will not audit the denial. Otherwise,
3993 + * during the subsequent blocking retry (e.g. VFS ref walk), we
3994 + * will find the permissions already granted in the cache entry
3995 + * and won't audit anything at all, leading to silent denials in
3996 + * permissive mode that only appear when in enforcing mode.
3997 + *
3998 +- * See the corresponding handling in slow_avc_audit(), and the
3999 +- * logic in selinux_inode_permission for the MAY_NOT_BLOCK flag,
4000 +- * which is transliterated into AVC_NONBLOCKING.
4001 ++ * See the corresponding handling of MAY_NOT_BLOCK in avc_audit()
4002 ++ * and selinux_inode_permission().
4003 + */
4004 + if (flags & AVC_NONBLOCKING)
4005 + return 0;
4006 +@@ -1205,6 +1192,25 @@ int avc_has_perm(struct selinux_state *state, u32 ssid, u32 tsid, u16 tclass,
4007 + return rc;
4008 + }
4009 +
4010 ++int avc_has_perm_flags(struct selinux_state *state,
4011 ++ u32 ssid, u32 tsid, u16 tclass, u32 requested,
4012 ++ struct common_audit_data *auditdata,
4013 ++ int flags)
4014 ++{
4015 ++ struct av_decision avd;
4016 ++ int rc, rc2;
4017 ++
4018 ++ rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested,
4019 ++ (flags & MAY_NOT_BLOCK) ? AVC_NONBLOCKING : 0,
4020 ++ &avd);
4021 ++
4022 ++ rc2 = avc_audit(state, ssid, tsid, tclass, requested, &avd, rc,
4023 ++ auditdata, flags);
4024 ++ if (rc2)
4025 ++ return rc2;
4026 ++ return rc;
4027 ++}
4028 ++
4029 + u32 avc_policy_seqno(struct selinux_state *state)
4030 + {
4031 + return state->avc->avc_cache.latest_notif;
4032 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
4033 +index 116b4d644f68..db44c7eb4321 100644
4034 +--- a/security/selinux/hooks.c
4035 ++++ b/security/selinux/hooks.c
4036 +@@ -2762,6 +2762,14 @@ static int selinux_mount(const char *dev_name,
4037 + return path_has_perm(cred, path, FILE__MOUNTON);
4038 + }
4039 +
4040 ++static int selinux_move_mount(const struct path *from_path,
4041 ++ const struct path *to_path)
4042 ++{
4043 ++ const struct cred *cred = current_cred();
4044 ++
4045 ++ return path_has_perm(cred, to_path, FILE__MOUNTON);
4046 ++}
4047 ++
4048 + static int selinux_umount(struct vfsmount *mnt, int flags)
4049 + {
4050 + const struct cred *cred = current_cred();
4051 +@@ -3004,14 +3012,14 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct inode *inode,
4052 + if (IS_ERR(isec))
4053 + return PTR_ERR(isec);
4054 +
4055 +- return avc_has_perm(&selinux_state,
4056 +- sid, isec->sid, isec->sclass, FILE__READ, &ad);
4057 ++ return avc_has_perm_flags(&selinux_state,
4058 ++ sid, isec->sid, isec->sclass, FILE__READ, &ad,
4059 ++ rcu ? MAY_NOT_BLOCK : 0);
4060 + }
4061 +
4062 + static noinline int audit_inode_permission(struct inode *inode,
4063 + u32 perms, u32 audited, u32 denied,
4064 +- int result,
4065 +- unsigned flags)
4066 ++ int result)
4067 + {
4068 + struct common_audit_data ad;
4069 + struct inode_security_struct *isec = selinux_inode(inode);
4070 +@@ -3022,7 +3030,7 @@ static noinline int audit_inode_permission(struct inode *inode,
4071 +
4072 + rc = slow_avc_audit(&selinux_state,
4073 + current_sid(), isec->sid, isec->sclass, perms,
4074 +- audited, denied, result, &ad, flags);
4075 ++ audited, denied, result, &ad);
4076 + if (rc)
4077 + return rc;
4078 + return 0;
4079 +@@ -3069,7 +3077,11 @@ static int selinux_inode_permission(struct inode *inode, int mask)
4080 + if (likely(!audited))
4081 + return rc;
4082 +
4083 +- rc2 = audit_inode_permission(inode, perms, audited, denied, rc, flags);
4084 ++ /* fall back to ref-walk if we have to generate audit */
4085 ++ if (flags & MAY_NOT_BLOCK)
4086 ++ return -ECHILD;
4087 ++
4088 ++ rc2 = audit_inode_permission(inode, perms, audited, denied, rc);
4089 + if (rc2)
4090 + return rc2;
4091 + return rc;
4092 +@@ -6903,6 +6915,8 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
4093 + LSM_HOOK_INIT(sb_clone_mnt_opts, selinux_sb_clone_mnt_opts),
4094 + LSM_HOOK_INIT(sb_add_mnt_opt, selinux_add_mnt_opt),
4095 +
4096 ++ LSM_HOOK_INIT(move_mount, selinux_move_mount),
4097 ++
4098 + LSM_HOOK_INIT(dentry_init_security, selinux_dentry_init_security),
4099 + LSM_HOOK_INIT(dentry_create_files_as, selinux_dentry_create_files_as),
4100 +
4101 +diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
4102 +index 7be0e1e90e8b..cf4cc3ef959b 100644
4103 +--- a/security/selinux/include/avc.h
4104 ++++ b/security/selinux/include/avc.h
4105 +@@ -100,8 +100,7 @@ static inline u32 avc_audit_required(u32 requested,
4106 + int slow_avc_audit(struct selinux_state *state,
4107 + u32 ssid, u32 tsid, u16 tclass,
4108 + u32 requested, u32 audited, u32 denied, int result,
4109 +- struct common_audit_data *a,
4110 +- unsigned flags);
4111 ++ struct common_audit_data *a);
4112 +
4113 + /**
4114 + * avc_audit - Audit the granting or denial of permissions.
4115 +@@ -135,9 +134,12 @@ static inline int avc_audit(struct selinux_state *state,
4116 + audited = avc_audit_required(requested, avd, result, 0, &denied);
4117 + if (likely(!audited))
4118 + return 0;
4119 ++ /* fall back to ref-walk if we have to generate audit */
4120 ++ if (flags & MAY_NOT_BLOCK)
4121 ++ return -ECHILD;
4122 + return slow_avc_audit(state, ssid, tsid, tclass,
4123 + requested, audited, denied, result,
4124 +- a, flags);
4125 ++ a);
4126 + }
4127 +
4128 + #define AVC_STRICT 1 /* Ignore permissive mode. */
4129 +@@ -153,6 +155,11 @@ int avc_has_perm(struct selinux_state *state,
4130 + u32 ssid, u32 tsid,
4131 + u16 tclass, u32 requested,
4132 + struct common_audit_data *auditdata);
4133 ++int avc_has_perm_flags(struct selinux_state *state,
4134 ++ u32 ssid, u32 tsid,
4135 ++ u16 tclass, u32 requested,
4136 ++ struct common_audit_data *auditdata,
4137 ++ int flags);
4138 +
4139 + int avc_has_extended_perms(struct selinux_state *state,
4140 + u32 ssid, u32 tsid, u16 tclass, u32 requested,
4141 +diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
4142 +index a428ff393ea2..2b5f3b1b062b 100644
4143 +--- a/sound/soc/soc-generic-dmaengine-pcm.c
4144 ++++ b/sound/soc/soc-generic-dmaengine-pcm.c
4145 +@@ -117,7 +117,6 @@ dmaengine_pcm_set_runtime_hwparams(struct snd_soc_component *component,
4146 + struct dma_chan *chan = pcm->chan[substream->stream];
4147 + struct snd_dmaengine_dai_dma_data *dma_data;
4148 + struct snd_pcm_hardware hw;
4149 +- int ret;
4150 +
4151 + if (pcm->config && pcm->config->pcm_hardware)
4152 + return snd_soc_set_runtime_hwparams(substream,
4153 +@@ -138,12 +137,15 @@ dmaengine_pcm_set_runtime_hwparams(struct snd_soc_component *component,
4154 + if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
4155 + hw.info |= SNDRV_PCM_INFO_BATCH;
4156 +
4157 +- ret = snd_dmaengine_pcm_refine_runtime_hwparams(substream,
4158 +- dma_data,
4159 +- &hw,
4160 +- chan);
4161 +- if (ret)
4162 +- return ret;
4163 ++ /**
4164 ++ * FIXME: Remove the return value check to align with the code
4165 ++ * before adding snd_dmaengine_pcm_refine_runtime_hwparams
4166 ++ * function.
4167 ++ */
4168 ++ snd_dmaengine_pcm_refine_runtime_hwparams(substream,
4169 ++ dma_data,
4170 ++ &hw,
4171 ++ chan);
4172 +
4173 + return snd_soc_set_runtime_hwparams(substream, &hw);
4174 + }
4175 +diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
4176 +index 2ce9c5ba1934..9288be1d6bf0 100644
4177 +--- a/tools/bpf/bpftool/prog.c
4178 ++++ b/tools/bpf/bpftool/prog.c
4179 +@@ -500,7 +500,7 @@ static int do_dump(int argc, char **argv)
4180 + buf = (unsigned char *)(info->jited_prog_insns);
4181 + member_len = info->jited_prog_len;
4182 + } else { /* DUMP_XLATED */
4183 +- if (info->xlated_prog_len == 0) {
4184 ++ if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
4185 + p_err("error retrieving insn dump: kernel.kptr_restrict set?");
4186 + goto err_free;
4187 + }
4188 +diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config
4189 +index 0111d246d1ca..54a2857c2510 100644
4190 +--- a/tools/power/acpi/Makefile.config
4191 ++++ b/tools/power/acpi/Makefile.config
4192 +@@ -15,7 +15,7 @@ include $(srctree)/../../scripts/Makefile.include
4193 +
4194 + OUTPUT=$(srctree)/
4195 + ifeq ("$(origin O)", "command line")
4196 +- OUTPUT := $(O)/power/acpi/
4197 ++ OUTPUT := $(O)/tools/power/acpi/
4198 + endif
4199 + #$(info Determined 'OUTPUT' to be $(OUTPUT))
4200 +
4201 +diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
4202 +new file mode 100644
4203 +index 000000000000..07f5b462c2ef
4204 +--- /dev/null
4205 ++++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
4206 +@@ -0,0 +1,74 @@
4207 ++// SPDX-License-Identifier: GPL-2.0
4208 ++// Copyright (c) 2020 Cloudflare
4209 ++
4210 ++#include "test_progs.h"
4211 ++
4212 ++static int connected_socket_v4(void)
4213 ++{
4214 ++ struct sockaddr_in addr = {
4215 ++ .sin_family = AF_INET,
4216 ++ .sin_port = htons(80),
4217 ++ .sin_addr = { inet_addr("127.0.0.1") },
4218 ++ };
4219 ++ socklen_t len = sizeof(addr);
4220 ++ int s, repair, err;
4221 ++
4222 ++ s = socket(AF_INET, SOCK_STREAM, 0);
4223 ++ if (CHECK_FAIL(s == -1))
4224 ++ goto error;
4225 ++
4226 ++ repair = TCP_REPAIR_ON;
4227 ++ err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
4228 ++ if (CHECK_FAIL(err))
4229 ++ goto error;
4230 ++
4231 ++ err = connect(s, (struct sockaddr *)&addr, len);
4232 ++ if (CHECK_FAIL(err))
4233 ++ goto error;
4234 ++
4235 ++ repair = TCP_REPAIR_OFF_NO_WP;
4236 ++ err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
4237 ++ if (CHECK_FAIL(err))
4238 ++ goto error;
4239 ++
4240 ++ return s;
4241 ++error:
4242 ++ perror(__func__);
4243 ++ close(s);
4244 ++ return -1;
4245 ++}
4246 ++
4247 ++/* Create a map, populate it with one socket, and free the map. */
4248 ++static void test_sockmap_create_update_free(enum bpf_map_type map_type)
4249 ++{
4250 ++ const int zero = 0;
4251 ++ int s, map, err;
4252 ++
4253 ++ s = connected_socket_v4();
4254 ++ if (CHECK_FAIL(s == -1))
4255 ++ return;
4256 ++
4257 ++ map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0);
4258 ++ if (CHECK_FAIL(map == -1)) {
4259 ++ perror("bpf_create_map");
4260 ++ goto out;
4261 ++ }
4262 ++
4263 ++ err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST);
4264 ++ if (CHECK_FAIL(err)) {
4265 ++ perror("bpf_map_update");
4266 ++ goto out;
4267 ++ }
4268 ++
4269 ++out:
4270 ++ close(map);
4271 ++ close(s);
4272 ++}
4273 ++
4274 ++void test_sockmap_basic(void)
4275 ++{
4276 ++ if (test__start_subtest("sockmap create_update_free"))
4277 ++ test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP);
4278 ++ if (test__start_subtest("sockhash create_update_free"))
4279 ++ test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH);
4280 ++}
4281 +diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c
4282 +index 631d397ac81b..0a356aa91aa1 100644
4283 +--- a/virt/kvm/arm/aarch32.c
4284 ++++ b/virt/kvm/arm/aarch32.c
4285 +@@ -15,6 +15,10 @@
4286 + #include <asm/kvm_emulate.h>
4287 + #include <asm/kvm_hyp.h>
4288 +
4289 ++#define DFSR_FSC_EXTABT_LPAE 0x10
4290 ++#define DFSR_FSC_EXTABT_nLPAE 0x08
4291 ++#define DFSR_LPAE BIT(9)
4292 ++
4293 + /*
4294 + * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
4295 + */
4296 +@@ -181,10 +185,12 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
4297 +
4298 + /* Give the guest an IMPLEMENTATION DEFINED exception */
4299 + is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
4300 +- if (is_lpae)
4301 +- *fsr = 1 << 9 | 0x34;
4302 +- else
4303 +- *fsr = 0x14;
4304 ++ if (is_lpae) {
4305 ++ *fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
4306 ++ } else {
4307 ++ /* no need to shuffle FS[4] into DFSR[10] as its 0 */
4308 ++ *fsr = DFSR_FSC_EXTABT_nLPAE;
4309 ++ }
4310 + }
4311 +
4312 + void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
4313 +diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
4314 +index f182b2380345..c6c2a9dde00c 100644
4315 +--- a/virt/kvm/arm/arch_timer.c
4316 ++++ b/virt/kvm/arm/arch_timer.c
4317 +@@ -805,6 +805,7 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
4318 + switch (treg) {
4319 + case TIMER_REG_TVAL:
4320 + val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff;
4321 ++ val &= lower_32_bits(val);
4322 + break;
4323 +
4324 + case TIMER_REG_CTL:
4325 +@@ -850,7 +851,7 @@ static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
4326 + {
4327 + switch (treg) {
4328 + case TIMER_REG_TVAL:
4329 +- timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + val;
4330 ++ timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + (s32)val;
4331 + break;
4332 +
4333 + case TIMER_REG_CTL:
4334 +diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
4335 +index 0b32a904a1bb..a2777efb558e 100644
4336 +--- a/virt/kvm/arm/mmu.c
4337 ++++ b/virt/kvm/arm/mmu.c
4338 +@@ -2147,7 +2147,8 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
4339 + if (!kvm->arch.pgd)
4340 + return 0;
4341 + trace_kvm_test_age_hva(hva);
4342 +- return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
4343 ++ return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
4344 ++ kvm_test_age_hva_handler, NULL);
4345 + }
4346 +
4347 + void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
4348 +diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
4349 +index 8731dfeced8b..4c08fd009768 100644
4350 +--- a/virt/kvm/arm/pmu.c
4351 ++++ b/virt/kvm/arm/pmu.c
4352 +@@ -480,25 +480,45 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
4353 + */
4354 + void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
4355 + {
4356 ++ struct kvm_pmu *pmu = &vcpu->arch.pmu;
4357 + int i;
4358 +- u64 type, enable, reg;
4359 +
4360 +- if (val == 0)
4361 ++ if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
4362 + return;
4363 +
4364 +- enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
4365 ++ /* Weed out disabled counters */
4366 ++ val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
4367 ++
4368 + for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
4369 ++ u64 type, reg;
4370 ++
4371 + if (!(val & BIT(i)))
4372 + continue;
4373 +- type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
4374 +- & ARMV8_PMU_EVTYPE_EVENT;
4375 +- if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
4376 +- && (enable & BIT(i))) {
4377 +- reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
4378 ++
4379 ++ /* PMSWINC only applies to ... SW_INC! */
4380 ++ type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
4381 ++ type &= ARMV8_PMU_EVTYPE_EVENT;
4382 ++ if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
4383 ++ continue;
4384 ++
4385 ++ /* increment this even SW_INC counter */
4386 ++ reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
4387 ++ reg = lower_32_bits(reg);
4388 ++ __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
4389 ++
4390 ++ if (reg) /* no overflow on the low part */
4391 ++ continue;
4392 ++
4393 ++ if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
4394 ++ /* increment the high counter */
4395 ++ reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
4396 + reg = lower_32_bits(reg);
4397 +- __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
4398 +- if (!reg)
4399 +- __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
4400 ++ __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
4401 ++ if (!reg) /* mark overflow on the high counter */
4402 ++ __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
4403 ++ } else {
4404 ++ /* mark overflow on low counter */
4405 ++ __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
4406 + }
4407 + }
4408 + }
4409 +diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
4410 +index 98c7360d9fb7..17920d1b350a 100644
4411 +--- a/virt/kvm/arm/vgic/vgic-its.c
4412 ++++ b/virt/kvm/arm/vgic/vgic-its.c
4413 +@@ -2475,7 +2475,8 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
4414 + target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
4415 + coll_id = val & KVM_ITS_CTE_ICID_MASK;
4416 +
4417 +- if (target_addr >= atomic_read(&kvm->online_vcpus))
4418 ++ if (target_addr != COLLECTION_NOT_MAPPED &&
4419 ++ target_addr >= atomic_read(&kvm->online_vcpus))
4420 + return -EINVAL;
4421 +
4422 + collection = find_collection(its, coll_id);