From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Fri, 14 Feb 2020 23:55:46
Message-Id: 1581724527.5728ac7b1e9628a3cc928d07506d99820949dab4.mpagano@gentoo
1 commit: 5728ac7b1e9628a3cc928d07506d99820949dab4
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Fri Feb 14 23:55:27 2020 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Fri Feb 14 23:55:27 2020 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5728ac7b1e9628a3cc928d07506d99820949dab4
7
8 Linux patch 5.4.20
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1019_linux-5.4.20.patch | 3553 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 3557 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index b15a5b3..48dbccb 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -119,6 +119,10 @@ Patch: 1018_linux-5.4.19.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.4.19
23
24 +Patch: 1019_linux-5.4.20.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.4.20
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1019_linux-5.4.20.patch b/1019_linux-5.4.20.patch
33 new file mode 100644
34 index 0000000..8237f6a
35 --- /dev/null
36 +++ b/1019_linux-5.4.20.patch
37 @@ -0,0 +1,3553 @@
38 +diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
39 +index cc544fdc38be..bc8aed17800d 100644
40 +--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
41 ++++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
42 +@@ -85,7 +85,7 @@ properties:
43 + Must be the device tree identifier of the over-sampling
44 + mode pins. As the line is active high, it should be marked
45 + GPIO_ACTIVE_HIGH.
46 +- maxItems: 1
47 ++ maxItems: 3
48 +
49 + adi,sw-mode:
50 + description:
51 +@@ -128,9 +128,9 @@ examples:
52 + adi,conversion-start-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
53 + reset-gpios = <&gpio 27 GPIO_ACTIVE_HIGH>;
54 + adi,first-data-gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
55 +- adi,oversampling-ratio-gpios = <&gpio 18 GPIO_ACTIVE_HIGH
56 +- &gpio 23 GPIO_ACTIVE_HIGH
57 +- &gpio 26 GPIO_ACTIVE_HIGH>;
58 ++ adi,oversampling-ratio-gpios = <&gpio 18 GPIO_ACTIVE_HIGH>,
59 ++ <&gpio 23 GPIO_ACTIVE_HIGH>,
60 ++ <&gpio 26 GPIO_ACTIVE_HIGH>;
61 + standby-gpios = <&gpio 24 GPIO_ACTIVE_LOW>;
62 + adi,sw-mode;
63 + };
64 +diff --git a/Makefile b/Makefile
65 +index 2f55d377f0db..21e58bd54715 100644
66 +--- a/Makefile
67 ++++ b/Makefile
68 +@@ -1,7 +1,7 @@
69 + # SPDX-License-Identifier: GPL-2.0
70 + VERSION = 5
71 + PATCHLEVEL = 4
72 +-SUBLEVEL = 19
73 ++SUBLEVEL = 20
74 + EXTRAVERSION =
75 + NAME = Kleptomaniac Octopus
76 +
77 +diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
78 +index 08bcfed6b80f..134cc223ea81 100644
79 +--- a/arch/arc/boot/dts/axs10x_mb.dtsi
80 ++++ b/arch/arc/boot/dts/axs10x_mb.dtsi
81 +@@ -77,6 +77,7 @@
82 + interrupt-names = "macirq";
83 + phy-mode = "rgmii";
84 + snps,pbl = < 32 >;
85 ++ snps,multicast-filter-bins = <256>;
86 + clocks = <&apbclk>;
87 + clock-names = "stmmaceth";
88 + max-speed = <100>;
89 +diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
90 +index 091356f2a8c1..c726cd8dbdf1 100644
91 +--- a/arch/arm/boot/dts/am43xx-clocks.dtsi
92 ++++ b/arch/arm/boot/dts/am43xx-clocks.dtsi
93 +@@ -704,6 +704,60 @@
94 + ti,bit-shift = <8>;
95 + reg = <0x2a48>;
96 + };
97 ++
98 ++ clkout1_osc_div_ck: clkout1-osc-div-ck {
99 ++ #clock-cells = <0>;
100 ++ compatible = "ti,divider-clock";
101 ++ clocks = <&sys_clkin_ck>;
102 ++ ti,bit-shift = <20>;
103 ++ ti,max-div = <4>;
104 ++ reg = <0x4100>;
105 ++ };
106 ++
107 ++ clkout1_src2_mux_ck: clkout1-src2-mux-ck {
108 ++ #clock-cells = <0>;
109 ++ compatible = "ti,mux-clock";
110 ++ clocks = <&clk_rc32k_ck>, <&sysclk_div>, <&dpll_ddr_m2_ck>,
111 ++ <&dpll_per_m2_ck>, <&dpll_disp_m2_ck>,
112 ++ <&dpll_mpu_m2_ck>;
113 ++ reg = <0x4100>;
114 ++ };
115 ++
116 ++ clkout1_src2_pre_div_ck: clkout1-src2-pre-div-ck {
117 ++ #clock-cells = <0>;
118 ++ compatible = "ti,divider-clock";
119 ++ clocks = <&clkout1_src2_mux_ck>;
120 ++ ti,bit-shift = <4>;
121 ++ ti,max-div = <8>;
122 ++ reg = <0x4100>;
123 ++ };
124 ++
125 ++ clkout1_src2_post_div_ck: clkout1-src2-post-div-ck {
126 ++ #clock-cells = <0>;
127 ++ compatible = "ti,divider-clock";
128 ++ clocks = <&clkout1_src2_pre_div_ck>;
129 ++ ti,bit-shift = <8>;
130 ++ ti,max-div = <32>;
131 ++ ti,index-power-of-two;
132 ++ reg = <0x4100>;
133 ++ };
134 ++
135 ++ clkout1_mux_ck: clkout1-mux-ck {
136 ++ #clock-cells = <0>;
137 ++ compatible = "ti,mux-clock";
138 ++ clocks = <&clkout1_osc_div_ck>, <&clk_rc32k_ck>,
139 ++ <&clkout1_src2_post_div_ck>, <&dpll_extdev_m2_ck>;
140 ++ ti,bit-shift = <16>;
141 ++ reg = <0x4100>;
142 ++ };
143 ++
144 ++ clkout1_ck: clkout1-ck {
145 ++ #clock-cells = <0>;
146 ++ compatible = "ti,gate-clock";
147 ++ clocks = <&clkout1_mux_ck>;
148 ++ ti,bit-shift = <23>;
149 ++ reg = <0x4100>;
150 ++ };
151 + };
152 +
153 + &prcm {
154 +diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
155 +index dee9c0c8a096..16c6fd3c4246 100644
156 +--- a/arch/arm/boot/dts/at91sam9260.dtsi
157 ++++ b/arch/arm/boot/dts/at91sam9260.dtsi
158 +@@ -187,7 +187,7 @@
159 + usart0 {
160 + pinctrl_usart0: usart0-0 {
161 + atmel,pins =
162 +- <AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_NONE
163 ++ <AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
164 + AT91_PIOB 5 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
165 + };
166 +
167 +@@ -221,7 +221,7 @@
168 + usart1 {
169 + pinctrl_usart1: usart1-0 {
170 + atmel,pins =
171 +- <AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_NONE
172 ++ <AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
173 + AT91_PIOB 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
174 + };
175 +
176 +@@ -239,7 +239,7 @@
177 + usart2 {
178 + pinctrl_usart2: usart2-0 {
179 + atmel,pins =
180 +- <AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_NONE
181 ++ <AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
182 + AT91_PIOB 9 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
183 + };
184 +
185 +@@ -257,7 +257,7 @@
186 + usart3 {
187 + pinctrl_usart3: usart3-0 {
188 + atmel,pins =
189 +- <AT91_PIOB 10 AT91_PERIPH_A AT91_PINCTRL_NONE
190 ++ <AT91_PIOB 10 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
191 + AT91_PIOB 11 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
192 + };
193 +
194 +@@ -275,7 +275,7 @@
195 + uart0 {
196 + pinctrl_uart0: uart0-0 {
197 + atmel,pins =
198 +- <AT91_PIOA 31 AT91_PERIPH_B AT91_PINCTRL_NONE
199 ++ <AT91_PIOA 31 AT91_PERIPH_B AT91_PINCTRL_PULL_UP
200 + AT91_PIOA 30 AT91_PERIPH_B AT91_PINCTRL_PULL_UP>;
201 + };
202 + };
203 +@@ -283,7 +283,7 @@
204 + uart1 {
205 + pinctrl_uart1: uart1-0 {
206 + atmel,pins =
207 +- <AT91_PIOB 12 AT91_PERIPH_A AT91_PINCTRL_NONE
208 ++ <AT91_PIOB 12 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
209 + AT91_PIOB 13 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
210 + };
211 + };
212 +diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi
213 +index dba025a98527..5ed3d745ac86 100644
214 +--- a/arch/arm/boot/dts/at91sam9261.dtsi
215 ++++ b/arch/arm/boot/dts/at91sam9261.dtsi
216 +@@ -329,7 +329,7 @@
217 + usart0 {
218 + pinctrl_usart0: usart0-0 {
219 + atmel,pins =
220 +- <AT91_PIOC 8 AT91_PERIPH_A AT91_PINCTRL_NONE>,
221 ++ <AT91_PIOC 8 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
222 + <AT91_PIOC 9 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
223 + };
224 +
225 +@@ -347,7 +347,7 @@
226 + usart1 {
227 + pinctrl_usart1: usart1-0 {
228 + atmel,pins =
229 +- <AT91_PIOC 12 AT91_PERIPH_A AT91_PINCTRL_NONE>,
230 ++ <AT91_PIOC 12 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
231 + <AT91_PIOC 13 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
232 + };
233 +
234 +@@ -365,7 +365,7 @@
235 + usart2 {
236 + pinctrl_usart2: usart2-0 {
237 + atmel,pins =
238 +- <AT91_PIOC 14 AT91_PERIPH_A AT91_PINCTRL_NONE>,
239 ++ <AT91_PIOC 14 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
240 + <AT91_PIOC 15 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
241 + };
242 +
243 +diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
244 +index 99678abdda93..5c990cfae254 100644
245 +--- a/arch/arm/boot/dts/at91sam9263.dtsi
246 ++++ b/arch/arm/boot/dts/at91sam9263.dtsi
247 +@@ -183,7 +183,7 @@
248 + usart0 {
249 + pinctrl_usart0: usart0-0 {
250 + atmel,pins =
251 +- <AT91_PIOA 26 AT91_PERIPH_A AT91_PINCTRL_NONE
252 ++ <AT91_PIOA 26 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
253 + AT91_PIOA 27 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
254 + };
255 +
256 +@@ -201,7 +201,7 @@
257 + usart1 {
258 + pinctrl_usart1: usart1-0 {
259 + atmel,pins =
260 +- <AT91_PIOD 0 AT91_PERIPH_A AT91_PINCTRL_NONE
261 ++ <AT91_PIOD 0 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
262 + AT91_PIOD 1 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
263 + };
264 +
265 +@@ -219,7 +219,7 @@
266 + usart2 {
267 + pinctrl_usart2: usart2-0 {
268 + atmel,pins =
269 +- <AT91_PIOD 2 AT91_PERIPH_A AT91_PINCTRL_NONE
270 ++ <AT91_PIOD 2 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
271 + AT91_PIOD 3 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
272 + };
273 +
274 +diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
275 +index 691c95ea6175..fd179097a4bf 100644
276 +--- a/arch/arm/boot/dts/at91sam9g45.dtsi
277 ++++ b/arch/arm/boot/dts/at91sam9g45.dtsi
278 +@@ -556,7 +556,7 @@
279 + usart0 {
280 + pinctrl_usart0: usart0-0 {
281 + atmel,pins =
282 +- <AT91_PIOB 19 AT91_PERIPH_A AT91_PINCTRL_NONE
283 ++ <AT91_PIOB 19 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
284 + AT91_PIOB 18 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
285 + };
286 +
287 +@@ -574,7 +574,7 @@
288 + usart1 {
289 + pinctrl_usart1: usart1-0 {
290 + atmel,pins =
291 +- <AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_NONE
292 ++ <AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
293 + AT91_PIOB 5 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
294 + };
295 +
296 +@@ -592,7 +592,7 @@
297 + usart2 {
298 + pinctrl_usart2: usart2-0 {
299 + atmel,pins =
300 +- <AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_NONE
301 ++ <AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
302 + AT91_PIOB 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
303 + };
304 +
305 +@@ -610,7 +610,7 @@
306 + usart3 {
307 + pinctrl_usart3: usart3-0 {
308 + atmel,pins =
309 +- <AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_NONE
310 ++ <AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
311 + AT91_PIOB 9 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
312 + };
313 +
314 +diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi
315 +index 8643b7151565..ea024e4b6e09 100644
316 +--- a/arch/arm/boot/dts/at91sam9rl.dtsi
317 ++++ b/arch/arm/boot/dts/at91sam9rl.dtsi
318 +@@ -682,7 +682,7 @@
319 + usart0 {
320 + pinctrl_usart0: usart0-0 {
321 + atmel,pins =
322 +- <AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_NONE>,
323 ++ <AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
324 + <AT91_PIOA 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
325 + };
326 +
327 +@@ -721,7 +721,7 @@
328 + usart1 {
329 + pinctrl_usart1: usart1-0 {
330 + atmel,pins =
331 +- <AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_NONE>,
332 ++ <AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
333 + <AT91_PIOA 12 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
334 + };
335 +
336 +@@ -744,7 +744,7 @@
337 + usart2 {
338 + pinctrl_usart2: usart2-0 {
339 + atmel,pins =
340 +- <AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_NONE>,
341 ++ <AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
342 + <AT91_PIOA 14 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
343 + };
344 +
345 +@@ -767,7 +767,7 @@
346 + usart3 {
347 + pinctrl_usart3: usart3-0 {
348 + atmel,pins =
349 +- <AT91_PIOB 0 AT91_PERIPH_A AT91_PINCTRL_NONE>,
350 ++ <AT91_PIOB 0 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
351 + <AT91_PIOB 1 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
352 + };
353 +
354 +diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
355 +index 3c534cd50ee3..db2033f674c6 100644
356 +--- a/arch/arm/boot/dts/meson8.dtsi
357 ++++ b/arch/arm/boot/dts/meson8.dtsi
358 +@@ -129,8 +129,8 @@
359 + gpu_opp_table: gpu-opp-table {
360 + compatible = "operating-points-v2";
361 +
362 +- opp-182150000 {
363 +- opp-hz = /bits/ 64 <182150000>;
364 ++ opp-182142857 {
365 ++ opp-hz = /bits/ 64 <182142857>;
366 + opp-microvolt = <1150000>;
367 + };
368 + opp-318750000 {
369 +diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
370 +index 099bf8e711c9..1e8c5d7bc824 100644
371 +--- a/arch/arm/boot/dts/meson8b.dtsi
372 ++++ b/arch/arm/boot/dts/meson8b.dtsi
373 +@@ -125,8 +125,8 @@
374 + opp-hz = /bits/ 64 <255000000>;
375 + opp-microvolt = <1100000>;
376 + };
377 +- opp-364300000 {
378 +- opp-hz = /bits/ 64 <364300000>;
379 ++ opp-364285714 {
380 ++ opp-hz = /bits/ 64 <364285714>;
381 + opp-microvolt = <1100000>;
382 + };
383 + opp-425000000 {
384 +diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
385 +index f770aace0efd..203d40be70a5 100644
386 +--- a/arch/arm/boot/dts/sama5d3.dtsi
387 ++++ b/arch/arm/boot/dts/sama5d3.dtsi
388 +@@ -1188,49 +1188,49 @@
389 + usart0_clk: usart0_clk {
390 + #clock-cells = <0>;
391 + reg = <12>;
392 +- atmel,clk-output-range = <0 66000000>;
393 ++ atmel,clk-output-range = <0 83000000>;
394 + };
395 +
396 + usart1_clk: usart1_clk {
397 + #clock-cells = <0>;
398 + reg = <13>;
399 +- atmel,clk-output-range = <0 66000000>;
400 ++ atmel,clk-output-range = <0 83000000>;
401 + };
402 +
403 + usart2_clk: usart2_clk {
404 + #clock-cells = <0>;
405 + reg = <14>;
406 +- atmel,clk-output-range = <0 66000000>;
407 ++ atmel,clk-output-range = <0 83000000>;
408 + };
409 +
410 + usart3_clk: usart3_clk {
411 + #clock-cells = <0>;
412 + reg = <15>;
413 +- atmel,clk-output-range = <0 66000000>;
414 ++ atmel,clk-output-range = <0 83000000>;
415 + };
416 +
417 + uart0_clk: uart0_clk {
418 + #clock-cells = <0>;
419 + reg = <16>;
420 +- atmel,clk-output-range = <0 66000000>;
421 ++ atmel,clk-output-range = <0 83000000>;
422 + };
423 +
424 + twi0_clk: twi0_clk {
425 + reg = <18>;
426 + #clock-cells = <0>;
427 +- atmel,clk-output-range = <0 16625000>;
428 ++ atmel,clk-output-range = <0 41500000>;
429 + };
430 +
431 + twi1_clk: twi1_clk {
432 + #clock-cells = <0>;
433 + reg = <19>;
434 +- atmel,clk-output-range = <0 16625000>;
435 ++ atmel,clk-output-range = <0 41500000>;
436 + };
437 +
438 + twi2_clk: twi2_clk {
439 + #clock-cells = <0>;
440 + reg = <20>;
441 +- atmel,clk-output-range = <0 16625000>;
442 ++ atmel,clk-output-range = <0 41500000>;
443 + };
444 +
445 + mci0_clk: mci0_clk {
446 +@@ -1246,19 +1246,19 @@
447 + spi0_clk: spi0_clk {
448 + #clock-cells = <0>;
449 + reg = <24>;
450 +- atmel,clk-output-range = <0 133000000>;
451 ++ atmel,clk-output-range = <0 166000000>;
452 + };
453 +
454 + spi1_clk: spi1_clk {
455 + #clock-cells = <0>;
456 + reg = <25>;
457 +- atmel,clk-output-range = <0 133000000>;
458 ++ atmel,clk-output-range = <0 166000000>;
459 + };
460 +
461 + tcb0_clk: tcb0_clk {
462 + #clock-cells = <0>;
463 + reg = <26>;
464 +- atmel,clk-output-range = <0 133000000>;
465 ++ atmel,clk-output-range = <0 166000000>;
466 + };
467 +
468 + pwm_clk: pwm_clk {
469 +@@ -1269,7 +1269,7 @@
470 + adc_clk: adc_clk {
471 + #clock-cells = <0>;
472 + reg = <29>;
473 +- atmel,clk-output-range = <0 66000000>;
474 ++ atmel,clk-output-range = <0 83000000>;
475 + };
476 +
477 + dma0_clk: dma0_clk {
478 +@@ -1300,13 +1300,13 @@
479 + ssc0_clk: ssc0_clk {
480 + #clock-cells = <0>;
481 + reg = <38>;
482 +- atmel,clk-output-range = <0 66000000>;
483 ++ atmel,clk-output-range = <0 83000000>;
484 + };
485 +
486 + ssc1_clk: ssc1_clk {
487 + #clock-cells = <0>;
488 + reg = <39>;
489 +- atmel,clk-output-range = <0 66000000>;
490 ++ atmel,clk-output-range = <0 83000000>;
491 + };
492 +
493 + sha_clk: sha_clk {
494 +diff --git a/arch/arm/boot/dts/sama5d3_can.dtsi b/arch/arm/boot/dts/sama5d3_can.dtsi
495 +index cf06a018ed0f..2470dd3fff25 100644
496 +--- a/arch/arm/boot/dts/sama5d3_can.dtsi
497 ++++ b/arch/arm/boot/dts/sama5d3_can.dtsi
498 +@@ -36,13 +36,13 @@
499 + can0_clk: can0_clk {
500 + #clock-cells = <0>;
501 + reg = <40>;
502 +- atmel,clk-output-range = <0 66000000>;
503 ++ atmel,clk-output-range = <0 83000000>;
504 + };
505 +
506 + can1_clk: can1_clk {
507 + #clock-cells = <0>;
508 + reg = <41>;
509 +- atmel,clk-output-range = <0 66000000>;
510 ++ atmel,clk-output-range = <0 83000000>;
511 + };
512 + };
513 + };
514 +diff --git a/arch/arm/boot/dts/sama5d3_tcb1.dtsi b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
515 +index 1584035daf51..215802b8db30 100644
516 +--- a/arch/arm/boot/dts/sama5d3_tcb1.dtsi
517 ++++ b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
518 +@@ -22,6 +22,7 @@
519 + tcb1_clk: tcb1_clk {
520 + #clock-cells = <0>;
521 + reg = <27>;
522 ++ atmel,clk-output-range = <0 166000000>;
523 + };
524 + };
525 + };
526 +diff --git a/arch/arm/boot/dts/sama5d3_uart.dtsi b/arch/arm/boot/dts/sama5d3_uart.dtsi
527 +index 4316bdbdc25d..cb62adbd28ed 100644
528 +--- a/arch/arm/boot/dts/sama5d3_uart.dtsi
529 ++++ b/arch/arm/boot/dts/sama5d3_uart.dtsi
530 +@@ -41,13 +41,13 @@
531 + uart0_clk: uart0_clk {
532 + #clock-cells = <0>;
533 + reg = <16>;
534 +- atmel,clk-output-range = <0 66000000>;
535 ++ atmel,clk-output-range = <0 83000000>;
536 + };
537 +
538 + uart1_clk: uart1_clk {
539 + #clock-cells = <0>;
540 + reg = <17>;
541 +- atmel,clk-output-range = <0 66000000>;
542 ++ atmel,clk-output-range = <0 83000000>;
543 + };
544 + };
545 + };
546 +diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
547 +index d5af6aedc02c..52665f30d236 100644
548 +--- a/arch/arm/mach-at91/pm.c
549 ++++ b/arch/arm/mach-at91/pm.c
550 +@@ -691,6 +691,12 @@ static void __init at91_pm_use_default_mode(int pm_mode)
551 + soc_pm.data.suspend_mode = AT91_PM_ULP0;
552 + }
553 +
554 ++static const struct of_device_id atmel_shdwc_ids[] = {
555 ++ { .compatible = "atmel,sama5d2-shdwc" },
556 ++ { .compatible = "microchip,sam9x60-shdwc" },
557 ++ { /* sentinel. */ }
558 ++};
559 ++
560 + static void __init at91_pm_modes_init(void)
561 + {
562 + struct device_node *np;
563 +@@ -700,7 +706,7 @@ static void __init at91_pm_modes_init(void)
564 + !at91_is_pm_mode_active(AT91_PM_ULP1))
565 + return;
566 +
567 +- np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-shdwc");
568 ++ np = of_find_matching_node(NULL, atmel_shdwc_ids);
569 + if (!np) {
570 + pr_warn("%s: failed to find shdwc!\n", __func__);
571 + goto ulp1_default;
572 +@@ -751,6 +757,7 @@ static const struct of_device_id atmel_pmc_ids[] __initconst = {
573 + { .compatible = "atmel,sama5d3-pmc", .data = &pmc_infos[1] },
574 + { .compatible = "atmel,sama5d4-pmc", .data = &pmc_infos[1] },
575 + { .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] },
576 ++ { .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[1] },
577 + { /* sentinel */ },
578 + };
579 +
580 +diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
581 +index b4be3baa83d4..6f19ba53fd1f 100644
582 +--- a/arch/arm/mm/init.c
583 ++++ b/arch/arm/mm/init.c
584 +@@ -323,7 +323,7 @@ static inline void poison_init_mem(void *s, size_t count)
585 + *p++ = 0xe7fddef0;
586 + }
587 +
588 +-static inline void
589 ++static inline void __init
590 + free_memmap(unsigned long start_pfn, unsigned long end_pfn)
591 + {
592 + struct page *start_pg, *end_pg;
593 +diff --git a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
594 +index bd4aab6092e0..e31813a4f972 100644
595 +--- a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
596 ++++ b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
597 +@@ -143,6 +143,7 @@
598 + phy-mode = "sgmii";
599 + status = "okay";
600 + managed = "in-band-status";
601 ++ phys = <&comphy1 0>;
602 + sfp = <&sfp_eth0>;
603 + };
604 +
605 +@@ -150,11 +151,14 @@
606 + phy-mode = "sgmii";
607 + status = "okay";
608 + managed = "in-band-status";
609 ++ phys = <&comphy0 1>;
610 + sfp = <&sfp_eth1>;
611 + };
612 +
613 + &usb3 {
614 + status = "okay";
615 ++ phys = <&usb2_utmi_otg_phy>;
616 ++ phy-names = "usb2-utmi-otg-phy";
617 + };
618 +
619 + &uart0 {
620 +diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
621 +index bd881497b872..a211a046b2f2 100644
622 +--- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
623 ++++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
624 +@@ -408,6 +408,8 @@
625 + reg = <5>;
626 + label = "cpu";
627 + ethernet = <&cp1_eth2>;
628 ++ phy-mode = "2500base-x";
629 ++ managed = "in-band-status";
630 + };
631 + };
632 +
633 +diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
634 +index ffb64fc239ee..ccd535edbf4e 100644
635 +--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
636 ++++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
637 +@@ -985,7 +985,7 @@
638 +
639 + tcsr_mutex_regs: syscon@1f40000 {
640 + compatible = "syscon";
641 +- reg = <0x01f40000 0x20000>;
642 ++ reg = <0x01f40000 0x40000>;
643 + };
644 +
645 + tlmm: pinctrl@3400000 {
646 +diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
647 +index b38f9d442fc0..e6d700f8c194 100644
648 +--- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
649 ++++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
650 +@@ -636,7 +636,6 @@
651 + /* audio_clkout0/1/2/3 */
652 + #clock-cells = <1>;
653 + clock-frequency = <12288000 11289600>;
654 +- clkout-lr-synchronous;
655 +
656 + status = "okay";
657 +
658 +diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
659 +index 80f459ad0190..f400cb29b811 100644
660 +--- a/arch/arm64/kernel/cpufeature.c
661 ++++ b/arch/arm64/kernel/cpufeature.c
662 +@@ -32,9 +32,7 @@ static unsigned long elf_hwcap __read_mostly;
663 + #define COMPAT_ELF_HWCAP_DEFAULT \
664 + (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
665 + COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
666 +- COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
667 +- COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
668 +- COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
669 ++ COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
670 + COMPAT_HWCAP_LPAE)
671 + unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
672 + unsigned int compat_elf_hwcap2 __read_mostly;
673 +@@ -1367,7 +1365,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
674 + {
675 + /* FP/SIMD is not implemented */
676 + .capability = ARM64_HAS_NO_FPSIMD,
677 +- .type = ARM64_CPUCAP_SYSTEM_FEATURE,
678 ++ .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
679 + .min_field_value = 0,
680 + .matches = has_no_fpsimd,
681 + },
682 +@@ -1595,6 +1593,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
683 + .match_list = list, \
684 + }
685 +
686 ++#define HWCAP_CAP_MATCH(match, cap_type, cap) \
687 ++ { \
688 ++ __HWCAP_CAP(#cap, cap_type, cap) \
689 ++ .matches = match, \
690 ++ }
691 ++
692 + #ifdef CONFIG_ARM64_PTR_AUTH
693 + static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
694 + {
695 +@@ -1668,8 +1672,35 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
696 + {},
697 + };
698 +
699 ++#ifdef CONFIG_COMPAT
700 ++static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
701 ++{
702 ++ /*
703 ++ * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
704 ++ * in line with that of arm32 as in vfp_init(). We make sure that the
705 ++ * check is future proof, by making sure value is non-zero.
706 ++ */
707 ++ u32 mvfr1;
708 ++
709 ++ WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
710 ++ if (scope == SCOPE_SYSTEM)
711 ++ mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
712 ++ else
713 ++ mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
714 ++
715 ++ return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
716 ++ cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
717 ++ cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
718 ++}
719 ++#endif
720 ++
721 + static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
722 + #ifdef CONFIG_COMPAT
723 ++ HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
724 ++ HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
725 ++ /* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */
726 ++ HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
727 ++ HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
728 + HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
729 + HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
730 + HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
731 +diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
732 +index 37d3912cfe06..1765e5284994 100644
733 +--- a/arch/arm64/kernel/fpsimd.c
734 ++++ b/arch/arm64/kernel/fpsimd.c
735 +@@ -269,6 +269,7 @@ static void sve_free(struct task_struct *task)
736 + */
737 + static void task_fpsimd_load(void)
738 + {
739 ++ WARN_ON(!system_supports_fpsimd());
740 + WARN_ON(!have_cpu_fpsimd_context());
741 +
742 + if (system_supports_sve() && test_thread_flag(TIF_SVE))
743 +@@ -289,6 +290,7 @@ static void fpsimd_save(void)
744 + this_cpu_ptr(&fpsimd_last_state);
745 + /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
746 +
747 ++ WARN_ON(!system_supports_fpsimd());
748 + WARN_ON(!have_cpu_fpsimd_context());
749 +
750 + if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
751 +@@ -1092,6 +1094,7 @@ void fpsimd_bind_task_to_cpu(void)
752 + struct fpsimd_last_state_struct *last =
753 + this_cpu_ptr(&fpsimd_last_state);
754 +
755 ++ WARN_ON(!system_supports_fpsimd());
756 + last->st = &current->thread.uw.fpsimd_state;
757 + last->sve_state = current->thread.sve_state;
758 + last->sve_vl = current->thread.sve_vl;
759 +@@ -1114,6 +1117,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
760 + struct fpsimd_last_state_struct *last =
761 + this_cpu_ptr(&fpsimd_last_state);
762 +
763 ++ WARN_ON(!system_supports_fpsimd());
764 + WARN_ON(!in_softirq() && !irqs_disabled());
765 +
766 + last->st = st;
767 +@@ -1128,8 +1132,19 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
768 + */
769 + void fpsimd_restore_current_state(void)
770 + {
771 +- if (!system_supports_fpsimd())
772 ++ /*
773 ++ * For the tasks that were created before we detected the absence of
774 ++ * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(),
775 ++ * e.g, init. This could be then inherited by the children processes.
776 ++ * If we later detect that the system doesn't support FP/SIMD,
777 ++ * we must clear the flag for all the tasks to indicate that the
778 ++ * FPSTATE is clean (as we can't have one) to avoid looping for ever in
779 ++ * do_notify_resume().
780 ++ */
781 ++ if (!system_supports_fpsimd()) {
782 ++ clear_thread_flag(TIF_FOREIGN_FPSTATE);
783 + return;
784 ++ }
785 +
786 + get_cpu_fpsimd_context();
787 +
788 +@@ -1148,7 +1163,7 @@ void fpsimd_restore_current_state(void)
789 + */
790 + void fpsimd_update_current_state(struct user_fpsimd_state const *state)
791 + {
792 +- if (!system_supports_fpsimd())
793 ++ if (WARN_ON(!system_supports_fpsimd()))
794 + return;
795 +
796 + get_cpu_fpsimd_context();
797 +@@ -1179,7 +1194,13 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
798 + void fpsimd_flush_task_state(struct task_struct *t)
799 + {
800 + t->thread.fpsimd_cpu = NR_CPUS;
801 +-
802 ++ /*
803 ++ * If we don't support fpsimd, bail out after we have
804 ++ * reset the fpsimd_cpu for this task and clear the
805 ++ * FPSTATE.
806 ++ */
807 ++ if (!system_supports_fpsimd())
808 ++ return;
809 + barrier();
810 + set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
811 +
812 +@@ -1193,6 +1214,7 @@ void fpsimd_flush_task_state(struct task_struct *t)
813 + */
814 + static void fpsimd_flush_cpu_state(void)
815 + {
816 ++ WARN_ON(!system_supports_fpsimd());
817 + __this_cpu_write(fpsimd_last_state.st, NULL);
818 + set_thread_flag(TIF_FOREIGN_FPSTATE);
819 + }
820 +@@ -1203,6 +1225,8 @@ static void fpsimd_flush_cpu_state(void)
821 + */
822 + void fpsimd_save_and_flush_cpu_state(void)
823 + {
824 ++ if (!system_supports_fpsimd())
825 ++ return;
826 + WARN_ON(preemptible());
827 + __get_cpu_fpsimd_context();
828 + fpsimd_save();
829 +diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
830 +index 21176d02e21a..9168c4f1a37f 100644
831 +--- a/arch/arm64/kernel/ptrace.c
832 ++++ b/arch/arm64/kernel/ptrace.c
833 +@@ -615,6 +615,13 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
834 + return 0;
835 + }
836 +
837 ++static int fpr_active(struct task_struct *target, const struct user_regset *regset)
838 ++{
839 ++ if (!system_supports_fpsimd())
840 ++ return -ENODEV;
841 ++ return regset->n;
842 ++}
843 ++
844 + /*
845 + * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
846 + */
847 +@@ -637,6 +644,9 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
848 + unsigned int pos, unsigned int count,
849 + void *kbuf, void __user *ubuf)
850 + {
851 ++ if (!system_supports_fpsimd())
852 ++ return -EINVAL;
853 ++
854 + if (target == current)
855 + fpsimd_preserve_current_state();
856 +
857 +@@ -676,6 +686,9 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
858 + {
859 + int ret;
860 +
861 ++ if (!system_supports_fpsimd())
862 ++ return -EINVAL;
863 ++
864 + ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
865 + if (ret)
866 + return ret;
867 +@@ -1134,6 +1147,7 @@ static const struct user_regset aarch64_regsets[] = {
868 + */
869 + .size = sizeof(u32),
870 + .align = sizeof(u32),
871 ++ .active = fpr_active,
872 + .get = fpr_get,
873 + .set = fpr_set
874 + },
875 +@@ -1348,6 +1362,9 @@ static int compat_vfp_get(struct task_struct *target,
876 + compat_ulong_t fpscr;
877 + int ret, vregs_end_pos;
878 +
879 ++ if (!system_supports_fpsimd())
880 ++ return -EINVAL;
881 ++
882 + uregs = &target->thread.uw.fpsimd_state;
883 +
884 + if (target == current)
885 +@@ -1381,6 +1398,9 @@ static int compat_vfp_set(struct task_struct *target,
886 + compat_ulong_t fpscr;
887 + int ret, vregs_end_pos;
888 +
889 ++ if (!system_supports_fpsimd())
890 ++ return -EINVAL;
891 ++
892 + uregs = &target->thread.uw.fpsimd_state;
893 +
894 + vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
895 +@@ -1438,6 +1458,7 @@ static const struct user_regset aarch32_regsets[] = {
896 + .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
897 + .size = sizeof(compat_ulong_t),
898 + .align = sizeof(compat_ulong_t),
899 ++ .active = fpr_active,
900 + .get = compat_vfp_get,
901 + .set = compat_vfp_set
902 + },
903 +diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
904 +index 799e84a40335..d76a3d39b269 100644
905 +--- a/arch/arm64/kvm/hyp/switch.c
906 ++++ b/arch/arm64/kvm/hyp/switch.c
907 +@@ -28,7 +28,15 @@
908 + /* Check whether the FP regs were dirtied while in the host-side run loop: */
909 + static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
910 + {
911 +- if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
912 ++ /*
913 ++ * When the system doesn't support FP/SIMD, we cannot rely on
914 ++ * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
915 ++ * abort on the very first access to FP and thus we should never
916 ++ * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
917 ++ * trap the accesses.
918 ++ */
919 ++ if (!system_supports_fpsimd() ||
920 ++ vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
921 + vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
922 + KVM_ARM64_FP_HOST);
923 +
924 +diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
925 +index c59920920ddc..b915fe658979 100644
926 +--- a/arch/powerpc/Kconfig.debug
927 ++++ b/arch/powerpc/Kconfig.debug
928 +@@ -371,7 +371,7 @@ config PPC_PTDUMP
929 +
930 + config PPC_DEBUG_WX
931 + bool "Warn on W+X mappings at boot"
932 +- depends on PPC_PTDUMP
933 ++ depends on PPC_PTDUMP && STRICT_KERNEL_RWX
934 + help
935 + Generate a warning if any W+X mappings are found at boot.
936 +
937 +diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
938 +index 8ec5dfb65b2e..784cae9f5697 100644
939 +--- a/arch/powerpc/mm/pgtable_32.c
940 ++++ b/arch/powerpc/mm/pgtable_32.c
941 +@@ -221,6 +221,7 @@ void mark_rodata_ro(void)
942 +
943 + if (v_block_mapped((unsigned long)_sinittext)) {
944 + mmu_mark_rodata_ro();
945 ++ ptdump_check_wx();
946 + return;
947 + }
948 +
949 +diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
950 +index 6ba081dd61c9..b4ce9d472dfe 100644
951 +--- a/arch/powerpc/platforms/pseries/iommu.c
952 ++++ b/arch/powerpc/platforms/pseries/iommu.c
953 +@@ -36,7 +36,6 @@
954 + #include <asm/udbg.h>
955 + #include <asm/mmzone.h>
956 + #include <asm/plpar_wrappers.h>
957 +-#include <asm/svm.h>
958 +
959 + #include "pseries.h"
960 +
961 +@@ -133,10 +132,10 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
962 + return be64_to_cpu(*tcep);
963 + }
964 +
965 +-static void tce_free_pSeriesLP(struct iommu_table*, long, long);
966 ++static void tce_free_pSeriesLP(unsigned long liobn, long, long);
967 + static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
968 +
969 +-static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
970 ++static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
971 + long npages, unsigned long uaddr,
972 + enum dma_data_direction direction,
973 + unsigned long attrs)
974 +@@ -147,25 +146,25 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
975 + int ret = 0;
976 + long tcenum_start = tcenum, npages_start = npages;
977 +
978 +- rpn = __pa(uaddr) >> TCE_SHIFT;
979 ++ rpn = __pa(uaddr) >> tceshift;
980 + proto_tce = TCE_PCI_READ;
981 + if (direction != DMA_TO_DEVICE)
982 + proto_tce |= TCE_PCI_WRITE;
983 +
984 + while (npages--) {
985 +- tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
986 +- rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);
987 ++ tce = proto_tce | (rpn & TCE_RPN_MASK) << tceshift;
988 ++ rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce);
989 +
990 + if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
991 + ret = (int)rc;
992 +- tce_free_pSeriesLP(tbl, tcenum_start,
993 ++ tce_free_pSeriesLP(liobn, tcenum_start,
994 + (npages_start - (npages + 1)));
995 + break;
996 + }
997 +
998 + if (rc && printk_ratelimit()) {
999 + printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
1000 +- printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
1001 ++ printk("\tindex = 0x%llx\n", (u64)liobn);
1002 + printk("\ttcenum = 0x%llx\n", (u64)tcenum);
1003 + printk("\ttce val = 0x%llx\n", tce );
1004 + dump_stack();
1005 +@@ -194,7 +193,8 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
1006 + unsigned long flags;
1007 +
1008 + if ((npages == 1) || !firmware_has_feature(FW_FEATURE_MULTITCE)) {
1009 +- return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
1010 ++ return tce_build_pSeriesLP(tbl->it_index, tcenum,
1011 ++ tbl->it_page_shift, npages, uaddr,
1012 + direction, attrs);
1013 + }
1014 +
1015 +@@ -210,8 +210,9 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
1016 + /* If allocation fails, fall back to the loop implementation */
1017 + if (!tcep) {
1018 + local_irq_restore(flags);
1019 +- return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
1020 +- direction, attrs);
1021 ++ return tce_build_pSeriesLP(tbl->it_index, tcenum,
1022 ++ tbl->it_page_shift,
1023 ++ npages, uaddr, direction, attrs);
1024 + }
1025 + __this_cpu_write(tce_page, tcep);
1026 + }
1027 +@@ -262,16 +263,16 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
1028 + return ret;
1029 + }
1030 +
1031 +-static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
1032 ++static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long npages)
1033 + {
1034 + u64 rc;
1035 +
1036 + while (npages--) {
1037 +- rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);
1038 ++ rc = plpar_tce_put((u64)liobn, (u64)tcenum << 12, 0);
1039 +
1040 + if (rc && printk_ratelimit()) {
1041 + printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
1042 +- printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
1043 ++ printk("\tindex = 0x%llx\n", (u64)liobn);
1044 + printk("\ttcenum = 0x%llx\n", (u64)tcenum);
1045 + dump_stack();
1046 + }
1047 +@@ -286,7 +287,7 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
1048 + u64 rc;
1049 +
1050 + if (!firmware_has_feature(FW_FEATURE_MULTITCE))
1051 +- return tce_free_pSeriesLP(tbl, tcenum, npages);
1052 ++ return tce_free_pSeriesLP(tbl->it_index, tcenum, npages);
1053 +
1054 + rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);
1055 +
1056 +@@ -401,6 +402,19 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
1057 + u64 rc = 0;
1058 + long l, limit;
1059 +
1060 ++ if (!firmware_has_feature(FW_FEATURE_MULTITCE)) {
1061 ++ unsigned long tceshift = be32_to_cpu(maprange->tce_shift);
1062 ++ unsigned long dmastart = (start_pfn << PAGE_SHIFT) +
1063 ++ be64_to_cpu(maprange->dma_base);
1064 ++ unsigned long tcenum = dmastart >> tceshift;
1065 ++ unsigned long npages = num_pfn << PAGE_SHIFT >> tceshift;
1066 ++ void *uaddr = __va(start_pfn << PAGE_SHIFT);
1067 ++
1068 ++ return tce_build_pSeriesLP(be32_to_cpu(maprange->liobn),
1069 ++ tcenum, tceshift, npages, (unsigned long) uaddr,
1070 ++ DMA_BIDIRECTIONAL, 0);
1071 ++ }
1072 ++
1073 + local_irq_disable(); /* to protect tcep and the page behind it */
1074 + tcep = __this_cpu_read(tce_page);
1075 +
1076 +@@ -1320,15 +1334,7 @@ void iommu_init_early_pSeries(void)
1077 + of_reconfig_notifier_register(&iommu_reconfig_nb);
1078 + register_memory_notifier(&iommu_mem_nb);
1079 +
1080 +- /*
1081 +- * Secure guest memory is inacessible to devices so regular DMA isn't
1082 +- * possible.
1083 +- *
1084 +- * In that case keep devices' dma_map_ops as NULL so that the generic
1085 +- * DMA code path will use SWIOTLB to bounce buffers for DMA.
1086 +- */
1087 +- if (!is_secure_guest())
1088 +- set_pci_dma_ops(&dma_iommu_ops);
1089 ++ set_pci_dma_ops(&dma_iommu_ops);
1090 + }
1091 +
1092 + static int __init disable_multitce(char *str)
1093 +diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
1094 +index ee07d0718bf1..66fd517c4816 100644
1095 +--- a/arch/powerpc/platforms/pseries/papr_scm.c
1096 ++++ b/arch/powerpc/platforms/pseries/papr_scm.c
1097 +@@ -342,6 +342,7 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
1098 + p->bus = nvdimm_bus_register(NULL, &p->bus_desc);
1099 + if (!p->bus) {
1100 + dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn);
1101 ++ kfree(p->bus_desc.provider_name);
1102 + return -ENXIO;
1103 + }
1104 +
1105 +@@ -498,6 +499,7 @@ static int papr_scm_remove(struct platform_device *pdev)
1106 +
1107 + nvdimm_bus_unregister(p->bus);
1108 + drc_pmem_unbind(p);
1109 ++ kfree(p->bus_desc.provider_name);
1110 + kfree(p);
1111 +
1112 + return 0;
1113 +diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
1114 +index 79e2287991db..f682b7babc09 100644
1115 +--- a/arch/powerpc/platforms/pseries/vio.c
1116 ++++ b/arch/powerpc/platforms/pseries/vio.c
1117 +@@ -1176,6 +1176,8 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
1118 + if (tbl == NULL)
1119 + return NULL;
1120 +
1121 ++ kref_init(&tbl->it_kref);
1122 ++
1123 + of_parse_dma_window(dev->dev.of_node, dma_window,
1124 + &tbl->it_index, &offset, &size);
1125 +
1126 +diff --git a/arch/x86/boot/compressed/acpi.c b/arch/x86/boot/compressed/acpi.c
1127 +index 25019d42ae93..ef2ad7253cd5 100644
1128 +--- a/arch/x86/boot/compressed/acpi.c
1129 ++++ b/arch/x86/boot/compressed/acpi.c
1130 +@@ -393,7 +393,13 @@ int count_immovable_mem_regions(void)
1131 + table = table_addr + sizeof(struct acpi_table_srat);
1132 +
1133 + while (table + sizeof(struct acpi_subtable_header) < table_end) {
1134 ++
1135 + sub_table = (struct acpi_subtable_header *)table;
1136 ++ if (!sub_table->length) {
1137 ++ debug_putstr("Invalid zero length SRAT subtable.\n");
1138 ++ return 0;
1139 ++ }
1140 ++
1141 + if (sub_table->type == ACPI_SRAT_TYPE_MEMORY_AFFINITY) {
1142 + struct acpi_srat_mem_affinity *ma;
1143 +
1144 +diff --git a/crypto/testmgr.c b/crypto/testmgr.c
1145 +index c39e39e55dc2..7473c5bc06b1 100644
1146 +--- a/crypto/testmgr.c
1147 ++++ b/crypto/testmgr.c
1148 +@@ -2102,6 +2102,7 @@ static void generate_random_aead_testvec(struct aead_request *req,
1149 + * If the key or authentication tag size couldn't be set, no need to
1150 + * continue to encrypt.
1151 + */
1152 ++ vec->crypt_error = 0;
1153 + if (vec->setkey_error || vec->setauthsize_error)
1154 + goto done;
1155 +
1156 +@@ -2245,10 +2246,12 @@ static int test_aead_vs_generic_impl(const char *driver,
1157 + req, tsgls);
1158 + if (err)
1159 + goto out;
1160 +- err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, cfg,
1161 +- req, tsgls);
1162 +- if (err)
1163 +- goto out;
1164 ++ if (vec.crypt_error == 0) {
1165 ++ err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name,
1166 ++ cfg, req, tsgls);
1167 ++ if (err)
1168 ++ goto out;
1169 ++ }
1170 + cond_resched();
1171 + }
1172 + err = 0;
1173 +@@ -2678,6 +2681,15 @@ static void generate_random_cipher_testvec(struct skcipher_request *req,
1174 + skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
1175 + skcipher_request_set_crypt(req, &src, &dst, vec->len, iv);
1176 + vec->crypt_error = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
1177 ++ if (vec->crypt_error != 0) {
1178 ++ /*
1179 ++ * The only acceptable error here is for an invalid length, so
1180 ++ * skcipher decryption should fail with the same error too.
1181 ++ * We'll test for this. But to keep the API usage well-defined,
1182 ++ * explicitly initialize the ciphertext buffer too.
1183 ++ */
1184 ++ memset((u8 *)vec->ctext, 0, vec->len);
1185 ++ }
1186 + done:
1187 + snprintf(name, max_namelen, "\"random: len=%u klen=%u\"",
1188 + vec->len, vec->klen);
1189 +diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
1190 +index 19f57ccfbe1d..59f911e57719 100644
1191 +--- a/drivers/base/regmap/regmap.c
1192 ++++ b/drivers/base/regmap/regmap.c
1193 +@@ -1488,11 +1488,18 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
1194 +
1195 + WARN_ON(!map->bus);
1196 +
1197 +- /* Check for unwritable registers before we start */
1198 +- for (i = 0; i < val_len / map->format.val_bytes; i++)
1199 +- if (!regmap_writeable(map,
1200 +- reg + regmap_get_offset(map, i)))
1201 +- return -EINVAL;
1202 ++ /* Check for unwritable or noinc registers in range
1203 ++ * before we start
1204 ++ */
1205 ++ if (!regmap_writeable_noinc(map, reg)) {
1206 ++ for (i = 0; i < val_len / map->format.val_bytes; i++) {
1207 ++ unsigned int element =
1208 ++ reg + regmap_get_offset(map, i);
1209 ++ if (!regmap_writeable(map, element) ||
1210 ++ regmap_writeable_noinc(map, element))
1211 ++ return -EINVAL;
1212 ++ }
1213 ++ }
1214 +
1215 + if (!map->cache_bypass && map->format.parse_val) {
1216 + unsigned int ival;
1217 +diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
1218 +index b3af61cc6fb9..d2760a021301 100644
1219 +--- a/drivers/clk/meson/g12a.c
1220 ++++ b/drivers/clk/meson/g12a.c
1221 +@@ -4692,6 +4692,7 @@ static struct clk_regmap *const g12a_clk_regmaps[] = {
1222 + &g12a_bt656,
1223 + &g12a_usb1_to_ddr,
1224 + &g12a_mmc_pclk,
1225 ++ &g12a_uart2,
1226 + &g12a_vpu_intr,
1227 + &g12a_gic,
1228 + &g12a_sd_emmc_a_clk0,
1229 +diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
1230 +index d32626458e67..1f9c16395a3f 100644
1231 +--- a/drivers/crypto/atmel-sha.c
1232 ++++ b/drivers/crypto/atmel-sha.c
1233 +@@ -1918,12 +1918,7 @@ static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
1234 + {
1235 + struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1236 +
1237 +- if (atmel_sha_hmac_key_set(&hmac->hkey, key, keylen)) {
1238 +- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1239 +- return -EINVAL;
1240 +- }
1241 +-
1242 +- return 0;
1243 ++ return atmel_sha_hmac_key_set(&hmac->hkey, key, keylen);
1244 + }
1245 +
1246 + static int atmel_sha_hmac_init(struct ahash_request *req)
1247 +diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
1248 +index 4b20606983a4..22ebe40f09f5 100644
1249 +--- a/drivers/crypto/axis/artpec6_crypto.c
1250 ++++ b/drivers/crypto/axis/artpec6_crypto.c
1251 +@@ -1251,7 +1251,7 @@ static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
1252 +
1253 + if (len != 16 && len != 24 && len != 32) {
1254 + crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1255 +- return -1;
1256 ++ return -EINVAL;
1257 + }
1258 +
1259 + ctx->key_length = len;
1260 +diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
1261 +index 3443f6d6dd83..6863d7097674 100644
1262 +--- a/drivers/crypto/caam/caamalg_qi2.c
1263 ++++ b/drivers/crypto/caam/caamalg_qi2.c
1264 +@@ -2481,7 +2481,7 @@ static struct caam_aead_alg driver_aeads[] = {
1265 + .cra_name = "echainiv(authenc(hmac(sha256),"
1266 + "cbc(des)))",
1267 + .cra_driver_name = "echainiv-authenc-"
1268 +- "hmac-sha256-cbc-desi-"
1269 ++ "hmac-sha256-cbc-des-"
1270 + "caam-qi2",
1271 + .cra_blocksize = DES_BLOCK_SIZE,
1272 + },
1273 +diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
1274 +index a0ee404b736e..f1d149e32839 100644
1275 +--- a/drivers/dma/dma-axi-dmac.c
1276 ++++ b/drivers/dma/dma-axi-dmac.c
1277 +@@ -830,6 +830,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
1278 + struct dma_device *dma_dev;
1279 + struct axi_dmac *dmac;
1280 + struct resource *res;
1281 ++ struct regmap *regmap;
1282 + int ret;
1283 +
1284 + dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
1285 +@@ -921,10 +922,17 @@ static int axi_dmac_probe(struct platform_device *pdev)
1286 +
1287 + platform_set_drvdata(pdev, dmac);
1288 +
1289 +- devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config);
1290 ++ regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
1291 ++ &axi_dmac_regmap_config);
1292 ++ if (IS_ERR(regmap)) {
1293 ++ ret = PTR_ERR(regmap);
1294 ++ goto err_free_irq;
1295 ++ }
1296 +
1297 + return 0;
1298 +
1299 ++err_free_irq:
1300 ++ free_irq(dmac->irq, dmac);
1301 + err_unregister_of:
1302 + of_dma_controller_free(pdev->dev.of_node);
1303 + err_unregister_device:
1304 +diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
1305 +index 606fa6d86685..1753a9801b70 100644
1306 +--- a/drivers/infiniband/core/addr.c
1307 ++++ b/drivers/infiniband/core/addr.c
1308 +@@ -139,7 +139,7 @@ int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
1309 + if (ib_nl_is_good_ip_resp(nlh))
1310 + ib_nl_process_good_ip_rsep(nlh);
1311 +
1312 +- return skb->len;
1313 ++ return 0;
1314 + }
1315 +
1316 + static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
1317 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
1318 +index 50052e9a1731..9008937f8ed8 100644
1319 +--- a/drivers/infiniband/core/cma.c
1320 ++++ b/drivers/infiniband/core/cma.c
1321 +@@ -3091,6 +3091,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
1322 + rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1323 + rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
1324 +
1325 ++ atomic_inc(&id_priv->refcount);
1326 + cma_init_resolve_addr_work(work, id_priv);
1327 + queue_work(cma_wq, &work->work);
1328 + return 0;
1329 +@@ -3117,6 +3118,7 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
1330 + rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
1331 + &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
1332 +
1333 ++ atomic_inc(&id_priv->refcount);
1334 + cma_init_resolve_addr_work(work, id_priv);
1335 + queue_work(cma_wq, &work->work);
1336 + return 0;
1337 +diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
1338 +index 17fc2936c077..bddb5434fbed 100644
1339 +--- a/drivers/infiniband/core/sa_query.c
1340 ++++ b/drivers/infiniband/core/sa_query.c
1341 +@@ -1068,7 +1068,7 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
1342 + }
1343 +
1344 + settimeout_out:
1345 +- return skb->len;
1346 ++ return 0;
1347 + }
1348 +
1349 + static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
1350 +@@ -1139,7 +1139,7 @@ int ib_nl_handle_resolve_resp(struct sk_buff *skb,
1351 + }
1352 +
1353 + resp_out:
1354 +- return skb->len;
1355 ++ return 0;
1356 + }
1357 +
1358 + static void free_sm_ah(struct kref *kref)
1359 +diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
1360 +index 24244a2f68cc..0d42ba8c0b69 100644
1361 +--- a/drivers/infiniband/core/umem.c
1362 ++++ b/drivers/infiniband/core/umem.c
1363 +@@ -166,10 +166,13 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
1364 + * for any address.
1365 + */
1366 + mask |= (sg_dma_address(sg) + pgoff) ^ va;
1367 +- if (i && i != (umem->nmap - 1))
1368 +- /* restrict by length as well for interior SGEs */
1369 +- mask |= sg_dma_len(sg);
1370 + va += sg_dma_len(sg) - pgoff;
1371 ++ /* Except for the last entry, the ending iova alignment sets
1372 ++ * the maximum possible page size as the low bits of the iova
1373 ++ * must be zero when starting the next chunk.
1374 ++ */
1375 ++ if (i != (umem->nmap - 1))
1376 ++ mask |= va;
1377 + pgoff = 0;
1378 + }
1379 + best_pg_bit = rdma_find_pg_bit(mask, pgsz_bitmap);
1380 +diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
1381 +index db98111b47f4..f2a2d1246c19 100644
1382 +--- a/drivers/infiniband/core/uverbs_main.c
1383 ++++ b/drivers/infiniband/core/uverbs_main.c
1384 +@@ -220,7 +220,6 @@ void ib_uverbs_release_file(struct kref *ref)
1385 + }
1386 +
1387 + static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
1388 +- struct ib_uverbs_file *uverbs_file,
1389 + struct file *filp, char __user *buf,
1390 + size_t count, loff_t *pos,
1391 + size_t eventsz)
1392 +@@ -238,19 +237,16 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
1393 +
1394 + if (wait_event_interruptible(ev_queue->poll_wait,
1395 + (!list_empty(&ev_queue->event_list) ||
1396 +- /* The barriers built into wait_event_interruptible()
1397 +- * and wake_up() guarentee this will see the null set
1398 +- * without using RCU
1399 +- */
1400 +- !uverbs_file->device->ib_dev)))
1401 ++ ev_queue->is_closed)))
1402 + return -ERESTARTSYS;
1403 +
1404 ++ spin_lock_irq(&ev_queue->lock);
1405 ++
1406 + /* If device was disassociated and no event exists set an error */
1407 +- if (list_empty(&ev_queue->event_list) &&
1408 +- !uverbs_file->device->ib_dev)
1409 ++ if (list_empty(&ev_queue->event_list) && ev_queue->is_closed) {
1410 ++ spin_unlock_irq(&ev_queue->lock);
1411 + return -EIO;
1412 +-
1413 +- spin_lock_irq(&ev_queue->lock);
1414 ++ }
1415 + }
1416 +
1417 + event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);
1418 +@@ -285,8 +281,7 @@ static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
1419 + {
1420 + struct ib_uverbs_async_event_file *file = filp->private_data;
1421 +
1422 +- return ib_uverbs_event_read(&file->ev_queue, file->uverbs_file, filp,
1423 +- buf, count, pos,
1424 ++ return ib_uverbs_event_read(&file->ev_queue, filp, buf, count, pos,
1425 + sizeof(struct ib_uverbs_async_event_desc));
1426 + }
1427 +
1428 +@@ -296,9 +291,8 @@ static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
1429 + struct ib_uverbs_completion_event_file *comp_ev_file =
1430 + filp->private_data;
1431 +
1432 +- return ib_uverbs_event_read(&comp_ev_file->ev_queue,
1433 +- comp_ev_file->uobj.ufile, filp,
1434 +- buf, count, pos,
1435 ++ return ib_uverbs_event_read(&comp_ev_file->ev_queue, filp, buf, count,
1436 ++ pos,
1437 + sizeof(struct ib_uverbs_comp_event_desc));
1438 + }
1439 +
1440 +@@ -321,7 +315,9 @@ static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
1441 + static __poll_t ib_uverbs_async_event_poll(struct file *filp,
1442 + struct poll_table_struct *wait)
1443 + {
1444 +- return ib_uverbs_event_poll(filp->private_data, filp, wait);
1445 ++ struct ib_uverbs_async_event_file *file = filp->private_data;
1446 ++
1447 ++ return ib_uverbs_event_poll(&file->ev_queue, filp, wait);
1448 + }
1449 +
1450 + static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
1451 +@@ -335,9 +331,9 @@ static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
1452 +
1453 + static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
1454 + {
1455 +- struct ib_uverbs_event_queue *ev_queue = filp->private_data;
1456 ++ struct ib_uverbs_async_event_file *file = filp->private_data;
1457 +
1458 +- return fasync_helper(fd, filp, on, &ev_queue->async_queue);
1459 ++ return fasync_helper(fd, filp, on, &file->ev_queue.async_queue);
1460 + }
1461 +
1462 + static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
1463 +diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
1464 +index d44cf33df81a..238614370927 100644
1465 +--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
1466 ++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
1467 +@@ -1225,6 +1225,8 @@ static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
1468 + const struct in_ifaddr *ifa;
1469 +
1470 + idev = in_dev_get(dev);
1471 ++ if (!idev)
1472 ++ continue;
1473 + in_dev_for_each_ifa_rtnl(ifa, idev) {
1474 + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
1475 + "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
1476 +diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
1477 +index ecd6cadd529a..b591861934b3 100644
1478 +--- a/drivers/infiniband/hw/mlx4/cm.c
1479 ++++ b/drivers/infiniband/hw/mlx4/cm.c
1480 +@@ -186,23 +186,6 @@ out:
1481 + kfree(ent);
1482 + }
1483 +
1484 +-static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
1485 +-{
1486 +- struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
1487 +- struct rb_root *sl_id_map = &sriov->sl_id_map;
1488 +- struct id_map_entry *ent, *found_ent;
1489 +-
1490 +- spin_lock(&sriov->id_map_lock);
1491 +- ent = xa_erase(&sriov->pv_id_table, pv_cm_id);
1492 +- if (!ent)
1493 +- goto out;
1494 +- found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
1495 +- if (found_ent && found_ent == ent)
1496 +- rb_erase(&found_ent->node, sl_id_map);
1497 +-out:
1498 +- spin_unlock(&sriov->id_map_lock);
1499 +-}
1500 +-
1501 + static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
1502 + {
1503 + struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
1504 +@@ -294,7 +277,7 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
1505 + spin_lock(&sriov->id_map_lock);
1506 + spin_lock_irqsave(&sriov->going_down_lock, flags);
1507 + /*make sure that there is no schedule inside the scheduled work.*/
1508 +- if (!sriov->is_going_down) {
1509 ++ if (!sriov->is_going_down && !id->scheduled_delete) {
1510 + id->scheduled_delete = 1;
1511 + schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
1512 + }
1513 +@@ -341,9 +324,6 @@ cont:
1514 +
1515 + if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
1516 + schedule_delayed(ibdev, id);
1517 +- else if (mad->mad_hdr.attr_id == CM_DREP_ATTR_ID)
1518 +- id_map_find_del(ibdev, pv_cm_id);
1519 +-
1520 + return 0;
1521 + }
1522 +
1523 +@@ -382,12 +362,9 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
1524 + *slave = id->slave_id;
1525 + set_remote_comm_id(mad, id->sl_cm_id);
1526 +
1527 +- if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
1528 ++ if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID ||
1529 ++ mad->mad_hdr.attr_id == CM_REJ_ATTR_ID)
1530 + schedule_delayed(ibdev, id);
1531 +- else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
1532 +- mad->mad_hdr.attr_id == CM_DREP_ATTR_ID) {
1533 +- id_map_find_del(ibdev, (int) pv_cm_id);
1534 +- }
1535 +
1536 + return 0;
1537 + }
1538 +diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
1539 +index 907d99822bf0..369a203332a2 100644
1540 +--- a/drivers/infiniband/hw/mlx4/main.c
1541 ++++ b/drivers/infiniband/hw/mlx4/main.c
1542 +@@ -246,6 +246,13 @@ static int mlx4_ib_update_gids(struct gid_entry *gids,
1543 + return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
1544 + }
1545 +
1546 ++static void free_gid_entry(struct gid_entry *entry)
1547 ++{
1548 ++ memset(&entry->gid, 0, sizeof(entry->gid));
1549 ++ kfree(entry->ctx);
1550 ++ entry->ctx = NULL;
1551 ++}
1552 ++
1553 + static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
1554 + {
1555 + struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
1556 +@@ -306,6 +313,8 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
1557 + GFP_ATOMIC);
1558 + if (!gids) {
1559 + ret = -ENOMEM;
1560 ++ *context = NULL;
1561 ++ free_gid_entry(&port_gid_table->gids[free]);
1562 + } else {
1563 + for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
1564 + memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
1565 +@@ -317,6 +326,12 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
1566 +
1567 + if (!ret && hw_update) {
1568 + ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
1569 ++ if (ret) {
1570 ++ spin_lock_bh(&iboe->lock);
1571 ++ *context = NULL;
1572 ++ free_gid_entry(&port_gid_table->gids[free]);
1573 ++ spin_unlock_bh(&iboe->lock);
1574 ++ }
1575 + kfree(gids);
1576 + }
1577 +
1578 +@@ -346,10 +361,7 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
1579 + if (!ctx->refcount) {
1580 + unsigned int real_index = ctx->real_index;
1581 +
1582 +- memset(&port_gid_table->gids[real_index].gid, 0,
1583 +- sizeof(port_gid_table->gids[real_index].gid));
1584 +- kfree(port_gid_table->gids[real_index].ctx);
1585 +- port_gid_table->gids[real_index].ctx = NULL;
1586 ++ free_gid_entry(&port_gid_table->gids[real_index]);
1587 + hw_update = 1;
1588 + }
1589 + }
1590 +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
1591 +index b5960351bec0..8708ed5477e9 100644
1592 +--- a/drivers/infiniband/ulp/srp/ib_srp.c
1593 ++++ b/drivers/infiniband/ulp/srp/ib_srp.c
1594 +@@ -2536,7 +2536,8 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
1595 + if (lrsp->opcode == SRP_LOGIN_RSP) {
1596 + ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
1597 + ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
1598 +- ch->use_imm_data = lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP;
1599 ++ ch->use_imm_data = srp_use_imm_data &&
1600 ++ (lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
1601 + ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
1602 + ch->use_imm_data);
1603 + WARN_ON_ONCE(ch->max_it_iu_len >
1604 +diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
1605 +index ed90361b84dc..ee8d48d863e1 100644
1606 +--- a/drivers/iommu/arm-smmu-v3.c
1607 ++++ b/drivers/iommu/arm-smmu-v3.c
1608 +@@ -856,6 +856,7 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
1609 + cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
1610 + break;
1611 + case CMDQ_OP_TLBI_NH_VA:
1612 ++ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
1613 + cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
1614 + cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
1615 + cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
1616 +diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
1617 +index be2a2a201603..33ddc5269e8d 100644
1618 +--- a/drivers/md/bcache/journal.c
1619 ++++ b/drivers/md/bcache/journal.c
1620 +@@ -417,10 +417,14 @@ err:
1621 +
1622 + /* Journalling */
1623 +
1624 ++#define nr_to_fifo_front(p, front_p, mask) (((p) - (front_p)) & (mask))
1625 ++
1626 + static void btree_flush_write(struct cache_set *c)
1627 + {
1628 + struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
1629 +- unsigned int i, n;
1630 ++ unsigned int i, nr, ref_nr;
1631 ++ atomic_t *fifo_front_p, *now_fifo_front_p;
1632 ++ size_t mask;
1633 +
1634 + if (c->journal.btree_flushing)
1635 + return;
1636 +@@ -433,12 +437,50 @@ static void btree_flush_write(struct cache_set *c)
1637 + c->journal.btree_flushing = true;
1638 + spin_unlock(&c->journal.flush_write_lock);
1639 +
1640 ++ /* get the oldest journal entry and check its refcount */
1641 ++ spin_lock(&c->journal.lock);
1642 ++ fifo_front_p = &fifo_front(&c->journal.pin);
1643 ++ ref_nr = atomic_read(fifo_front_p);
1644 ++ if (ref_nr <= 0) {
1645 ++ /*
1646 ++ * do nothing if no btree node references
1647 ++ * the oldest journal entry
1648 ++ */
1649 ++ spin_unlock(&c->journal.lock);
1650 ++ goto out;
1651 ++ }
1652 ++ spin_unlock(&c->journal.lock);
1653 ++
1654 ++ mask = c->journal.pin.mask;
1655 ++ nr = 0;
1656 + atomic_long_inc(&c->flush_write);
1657 + memset(btree_nodes, 0, sizeof(btree_nodes));
1658 +- n = 0;
1659 +
1660 + mutex_lock(&c->bucket_lock);
1661 + list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
1662 ++ /*
1663 ++ * It is safe to get now_fifo_front_p without holding
1664 ++ * c->journal.lock here, because we don't need to know
1665 ++ * the exactly accurate value, just check whether the
1666 ++ * front pointer of c->journal.pin is changed.
1667 ++ */
1668 ++ now_fifo_front_p = &fifo_front(&c->journal.pin);
1669 ++ /*
1670 ++ * If the oldest journal entry is reclaimed and front
1671 ++ * pointer of c->journal.pin changes, it is unnecessary
1672 ++ * to scan c->btree_cache anymore, just quit the loop and
1673 ++ * flush out what we have already.
1674 ++ */
1675 ++ if (now_fifo_front_p != fifo_front_p)
1676 ++ break;
1677 ++ /*
1678 ++ * quit this loop if all matching btree nodes are
1679 ++ * scanned and record in btree_nodes[] already.
1680 ++ */
1681 ++ ref_nr = atomic_read(fifo_front_p);
1682 ++ if (nr >= ref_nr)
1683 ++ break;
1684 ++
1685 + if (btree_node_journal_flush(b))
1686 + pr_err("BUG: flush_write bit should not be set here!");
1687 +
1688 +@@ -454,17 +496,44 @@ static void btree_flush_write(struct cache_set *c)
1689 + continue;
1690 + }
1691 +
1692 ++ /*
1693 ++ * Only select the btree node which exactly references
1694 ++ * the oldest journal entry.
1695 ++ *
1696 ++ * If the journal entry pointed by fifo_front_p is
1697 ++ * reclaimed in parallel, don't worry:
1698 ++ * - the list_for_each_xxx loop will quit when checking
1699 ++ * next now_fifo_front_p.
1700 ++ * - If there are matched nodes recorded in btree_nodes[],
1701 ++ * they are clean now (this is why and how the oldest
1702 ++ * journal entry can be reclaimed). These selected nodes
1703 ++ *      will be ignored and skipped in the following for-loop.
1704 ++ */
1705 ++ if (nr_to_fifo_front(btree_current_write(b)->journal,
1706 ++ fifo_front_p,
1707 ++ mask) != 0) {
1708 ++ mutex_unlock(&b->write_lock);
1709 ++ continue;
1710 ++ }
1711 ++
1712 + set_btree_node_journal_flush(b);
1713 +
1714 + mutex_unlock(&b->write_lock);
1715 +
1716 +- btree_nodes[n++] = b;
1717 +- if (n == BTREE_FLUSH_NR)
1718 ++ btree_nodes[nr++] = b;
1719 ++ /*
1720 ++ * To avoid holding c->bucket_lock too long time,
1721 ++ * only scan for BTREE_FLUSH_NR matched btree nodes
1722 ++ * at most. If there are more btree nodes reference
1723 ++ * the oldest journal entry, try to flush them next
1724 ++ * time when btree_flush_write() is called.
1725 ++ */
1726 ++ if (nr == BTREE_FLUSH_NR)
1727 + break;
1728 + }
1729 + mutex_unlock(&c->bucket_lock);
1730 +
1731 +- for (i = 0; i < n; i++) {
1732 ++ for (i = 0; i < nr; i++) {
1733 + b = btree_nodes[i];
1734 + if (!b) {
1735 + pr_err("BUG: btree_nodes[%d] is NULL", i);
1736 +@@ -497,6 +566,7 @@ static void btree_flush_write(struct cache_set *c)
1737 + mutex_unlock(&b->write_lock);
1738 + }
1739 +
1740 ++out:
1741 + spin_lock(&c->journal.flush_write_lock);
1742 + c->journal.btree_flushing = false;
1743 + spin_unlock(&c->journal.flush_write_lock);
1744 +diff --git a/drivers/media/i2c/adv748x/adv748x.h b/drivers/media/i2c/adv748x/adv748x.h
1745 +index 5042f9e94aee..fccb388ce179 100644
1746 +--- a/drivers/media/i2c/adv748x/adv748x.h
1747 ++++ b/drivers/media/i2c/adv748x/adv748x.h
1748 +@@ -394,10 +394,10 @@ int adv748x_write_block(struct adv748x_state *state, int client_page,
1749 +
1750 + #define io_read(s, r) adv748x_read(s, ADV748X_PAGE_IO, r)
1751 + #define io_write(s, r, v) adv748x_write(s, ADV748X_PAGE_IO, r, v)
1752 +-#define io_clrset(s, r, m, v) io_write(s, r, (io_read(s, r) & ~m) | v)
1753 ++#define io_clrset(s, r, m, v) io_write(s, r, (io_read(s, r) & ~(m)) | (v))
1754 +
1755 + #define hdmi_read(s, r) adv748x_read(s, ADV748X_PAGE_HDMI, r)
1756 +-#define hdmi_read16(s, r, m) (((hdmi_read(s, r) << 8) | hdmi_read(s, r+1)) & m)
1757 ++#define hdmi_read16(s, r, m) (((hdmi_read(s, r) << 8) | hdmi_read(s, (r)+1)) & (m))
1758 + #define hdmi_write(s, r, v) adv748x_write(s, ADV748X_PAGE_HDMI, r, v)
1759 +
1760 + #define repeater_read(s, r) adv748x_read(s, ADV748X_PAGE_REPEATER, r)
1761 +@@ -405,11 +405,11 @@ int adv748x_write_block(struct adv748x_state *state, int client_page,
1762 +
1763 + #define sdp_read(s, r) adv748x_read(s, ADV748X_PAGE_SDP, r)
1764 + #define sdp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_SDP, r, v)
1765 +-#define sdp_clrset(s, r, m, v) sdp_write(s, r, (sdp_read(s, r) & ~m) | v)
1766 ++#define sdp_clrset(s, r, m, v) sdp_write(s, r, (sdp_read(s, r) & ~(m)) | (v))
1767 +
1768 + #define cp_read(s, r) adv748x_read(s, ADV748X_PAGE_CP, r)
1769 + #define cp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_CP, r, v)
1770 +-#define cp_clrset(s, r, m, v) cp_write(s, r, (cp_read(s, r) & ~m) | v)
1771 ++#define cp_clrset(s, r, m, v) cp_write(s, r, (cp_read(s, r) & ~(m)) | (v))
1772 +
1773 + #define tx_read(t, r) adv748x_read(t->state, t->page, r)
1774 + #define tx_write(t, r, v) adv748x_write(t->state, t->page, r, v)
1775 +diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
1776 +index ae24d3ea68ea..43169f25da1f 100644
1777 +--- a/drivers/mfd/Kconfig
1778 ++++ b/drivers/mfd/Kconfig
1779 +@@ -758,6 +758,7 @@ config MFD_MAX77650
1780 + depends on OF || COMPILE_TEST
1781 + select MFD_CORE
1782 + select REGMAP_I2C
1783 ++ select REGMAP_IRQ
1784 + help
1785 + Say Y here to add support for Maxim Semiconductor MAX77650 and
1786 + MAX77651 Power Management ICs. This is the core multifunction
1787 +diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
1788 +index 77bd32a683e1..9e81cd982dd3 100644
1789 +--- a/drivers/mtd/nand/onenand/onenand_base.c
1790 ++++ b/drivers/mtd/nand/onenand/onenand_base.c
1791 +@@ -1248,44 +1248,44 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1792 +
1793 + stats = mtd->ecc_stats;
1794 +
1795 +- /* Read-while-load method */
1796 ++ /* Read-while-load method */
1797 +
1798 +- /* Do first load to bufferRAM */
1799 +- if (read < len) {
1800 +- if (!onenand_check_bufferram(mtd, from)) {
1801 ++ /* Do first load to bufferRAM */
1802 ++ if (read < len) {
1803 ++ if (!onenand_check_bufferram(mtd, from)) {
1804 + this->command(mtd, ONENAND_CMD_READ, from, writesize);
1805 +- ret = this->wait(mtd, FL_READING);
1806 +- onenand_update_bufferram(mtd, from, !ret);
1807 ++ ret = this->wait(mtd, FL_READING);
1808 ++ onenand_update_bufferram(mtd, from, !ret);
1809 + if (mtd_is_eccerr(ret))
1810 + ret = 0;
1811 +- }
1812 +- }
1813 ++ }
1814 ++ }
1815 +
1816 + thislen = min_t(int, writesize, len - read);
1817 + column = from & (writesize - 1);
1818 + if (column + thislen > writesize)
1819 + thislen = writesize - column;
1820 +
1821 +- while (!ret) {
1822 +- /* If there is more to load then start next load */
1823 +- from += thislen;
1824 +- if (read + thislen < len) {
1825 ++ while (!ret) {
1826 ++ /* If there is more to load then start next load */
1827 ++ from += thislen;
1828 ++ if (read + thislen < len) {
1829 + this->command(mtd, ONENAND_CMD_READ, from, writesize);
1830 +- /*
1831 +- * Chip boundary handling in DDP
1832 +- * Now we issued chip 1 read and pointed chip 1
1833 ++ /*
1834 ++ * Chip boundary handling in DDP
1835 ++ * Now we issued chip 1 read and pointed chip 1
1836 + * bufferram so we have to point chip 0 bufferram.
1837 +- */
1838 +- if (ONENAND_IS_DDP(this) &&
1839 +- unlikely(from == (this->chipsize >> 1))) {
1840 +- this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
1841 +- boundary = 1;
1842 +- } else
1843 +- boundary = 0;
1844 +- ONENAND_SET_PREV_BUFFERRAM(this);
1845 +- }
1846 +- /* While load is going, read from last bufferRAM */
1847 +- this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
1848 ++ */
1849 ++ if (ONENAND_IS_DDP(this) &&
1850 ++ unlikely(from == (this->chipsize >> 1))) {
1851 ++ this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
1852 ++ boundary = 1;
1853 ++ } else
1854 ++ boundary = 0;
1855 ++ ONENAND_SET_PREV_BUFFERRAM(this);
1856 ++ }
1857 ++ /* While load is going, read from last bufferRAM */
1858 ++ this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
1859 +
1860 + /* Read oob area if needed */
1861 + if (oobbuf) {
1862 +@@ -1301,24 +1301,24 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1863 + oobcolumn = 0;
1864 + }
1865 +
1866 +- /* See if we are done */
1867 +- read += thislen;
1868 +- if (read == len)
1869 +- break;
1870 +- /* Set up for next read from bufferRAM */
1871 +- if (unlikely(boundary))
1872 +- this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
1873 +- ONENAND_SET_NEXT_BUFFERRAM(this);
1874 +- buf += thislen;
1875 ++ /* See if we are done */
1876 ++ read += thislen;
1877 ++ if (read == len)
1878 ++ break;
1879 ++ /* Set up for next read from bufferRAM */
1880 ++ if (unlikely(boundary))
1881 ++ this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
1882 ++ ONENAND_SET_NEXT_BUFFERRAM(this);
1883 ++ buf += thislen;
1884 + thislen = min_t(int, writesize, len - read);
1885 +- column = 0;
1886 +- cond_resched();
1887 +- /* Now wait for load */
1888 +- ret = this->wait(mtd, FL_READING);
1889 +- onenand_update_bufferram(mtd, from, !ret);
1890 ++ column = 0;
1891 ++ cond_resched();
1892 ++ /* Now wait for load */
1893 ++ ret = this->wait(mtd, FL_READING);
1894 ++ onenand_update_bufferram(mtd, from, !ret);
1895 + if (mtd_is_eccerr(ret))
1896 + ret = 0;
1897 +- }
1898 ++ }
1899 +
1900 + /*
1901 + * Return success, if no ECC failures, else -EBADMSG
1902 +diff --git a/drivers/mtd/parsers/sharpslpart.c b/drivers/mtd/parsers/sharpslpart.c
1903 +index e5ea6127ab5a..671a61845bd5 100644
1904 +--- a/drivers/mtd/parsers/sharpslpart.c
1905 ++++ b/drivers/mtd/parsers/sharpslpart.c
1906 +@@ -165,10 +165,10 @@ static int sharpsl_nand_get_logical_num(u8 *oob)
1907 +
1908 + static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
1909 + {
1910 +- unsigned int block_num, log_num, phymax;
1911 ++ unsigned int block_num, phymax;
1912 ++ int i, ret, log_num;
1913 + loff_t block_adr;
1914 + u8 *oob;
1915 +- int i, ret;
1916 +
1917 + oob = kzalloc(mtd->oobsize, GFP_KERNEL);
1918 + if (!oob)
1919 +diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
1920 +index 347bb92e4130..0a727502d14c 100644
1921 +--- a/drivers/net/wireless/ath/ath10k/pci.c
1922 ++++ b/drivers/net/wireless/ath/ath10k/pci.c
1923 +@@ -1604,11 +1604,22 @@ static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
1924 + {
1925 + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1926 + u32 i;
1927 ++ int ret;
1928 ++
1929 ++ mutex_lock(&ar->conf_mutex);
1930 ++ if (ar->state != ATH10K_STATE_ON) {
1931 ++ ath10k_warn(ar, "Skipping pci_dump_memory_reg invalid state\n");
1932 ++ ret = -EIO;
1933 ++ goto done;
1934 ++ }
1935 +
1936 + for (i = 0; i < region->len; i += 4)
1937 + *(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);
1938 +
1939 +- return region->len;
1940 ++ ret = region->len;
1941 ++done:
1942 ++ mutex_unlock(&ar->conf_mutex);
1943 ++ return ret;
1944 + }
1945 +
1946 + /* if an error happened returns < 0, otherwise the length */
1947 +@@ -1704,7 +1715,11 @@ static void ath10k_pci_dump_memory(struct ath10k *ar,
1948 + count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
1949 + break;
1950 + case ATH10K_MEM_REGION_TYPE_IOREG:
1951 +- count = ath10k_pci_dump_memory_reg(ar, current_region, buf);
1952 ++ ret = ath10k_pci_dump_memory_reg(ar, current_region, buf);
1953 ++ if (ret < 0)
1954 ++ break;
1955 ++
1956 ++ count = ret;
1957 + break;
1958 + default:
1959 + ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);
1960 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
1961 +index 9f4b117db9d7..d47f76890cf9 100644
1962 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
1963 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
1964 +@@ -8,6 +8,7 @@
1965 + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
1966 + * Copyright (C) 2018 Intel Corporation
1967 + * Copyright (C) 2019 Intel Corporation
1968 ++ * Copyright (C) 2020 Intel Corporation
1969 + *
1970 + * This program is free software; you can redistribute it and/or modify
1971 + * it under the terms of version 2 of the GNU General Public License as
1972 +@@ -30,6 +31,7 @@
1973 + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
1974 + * Copyright (C) 2018 Intel Corporation
1975 + * Copyright (C) 2019 Intel Corporation
1976 ++ * Copyright (C) 2020 Intel Corporation
1977 + * All rights reserved.
1978 + *
1979 + * Redistribution and use in source and binary forms, with or without
1980 +@@ -389,6 +391,8 @@ void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
1981 + if (req != mvm->ftm_initiator.req)
1982 + return;
1983 +
1984 ++ iwl_mvm_ftm_reset(mvm);
1985 ++
1986 + if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD,
1987 + LOCATION_GROUP, 0),
1988 + 0, sizeof(cmd), &cmd))
1989 +@@ -502,7 +506,6 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
1990 + lockdep_assert_held(&mvm->mutex);
1991 +
1992 + if (!mvm->ftm_initiator.req) {
1993 +- IWL_ERR(mvm, "Got FTM response but have no request?\n");
1994 + return;
1995 + }
1996 +
1997 +diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
1998 +index c9401c121a14..4e3de684928b 100644
1999 +--- a/drivers/net/wireless/marvell/libertas/cfg.c
2000 ++++ b/drivers/net/wireless/marvell/libertas/cfg.c
2001 +@@ -1785,6 +1785,8 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
2002 + rates_max = rates_eid[1];
2003 + if (rates_max > MAX_RATES) {
2004 + lbs_deb_join("invalid rates");
2005 ++ rcu_read_unlock();
2006 ++ ret = -EINVAL;
2007 + goto out;
2008 + }
2009 + rates = cmd.bss.rates;
2010 +diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
2011 +index 593c594982cb..59f0651d148b 100644
2012 +--- a/drivers/net/wireless/marvell/mwifiex/scan.c
2013 ++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
2014 +@@ -2886,6 +2886,13 @@ mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv,
2015 + vs_param_set->header.len =
2016 + cpu_to_le16((((u16) priv->vs_ie[id].ie[1])
2017 + & 0x00FF) + 2);
2018 ++ if (le16_to_cpu(vs_param_set->header.len) >
2019 ++ MWIFIEX_MAX_VSIE_LEN) {
2020 ++ mwifiex_dbg(priv->adapter, ERROR,
2021 ++ "Invalid param length!\n");
2022 ++ break;
2023 ++ }
2024 ++
2025 + memcpy(vs_param_set->ie, priv->vs_ie[id].ie,
2026 + le16_to_cpu(vs_param_set->header.len));
2027 + *buffer += le16_to_cpu(vs_param_set->header.len) +
2028 +diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
2029 +index 41f0231376c0..132f9e8ed68c 100644
2030 +--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
2031 ++++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
2032 +@@ -970,6 +970,10 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
2033 + "WMM Parameter Set Count: %d\n",
2034 + wmm_param_ie->qos_info_bitmap & mask);
2035 +
2036 ++ if (wmm_param_ie->vend_hdr.len + 2 >
2037 ++ sizeof(struct ieee_types_wmm_parameter))
2038 ++ break;
2039 ++
2040 + memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
2041 + wmm_ie, wmm_param_ie,
2042 + wmm_param_ie->vend_hdr.len + 2);
2043 +diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
2044 +index 090b632965e2..ac93f5a0398e 100644
2045 +--- a/drivers/pci/controller/pci-tegra.c
2046 ++++ b/drivers/pci/controller/pci-tegra.c
2047 +@@ -2499,7 +2499,6 @@ static const struct tegra_pcie_soc tegra20_pcie = {
2048 + .num_ports = 2,
2049 + .ports = tegra20_pcie_ports,
2050 + .msi_base_shift = 0,
2051 +- .afi_pex2_ctrl = 0x128,
2052 + .pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
2053 + .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
2054 + .pads_refclk_cfg0 = 0xfa5cfa5c,
2055 +@@ -2528,6 +2527,7 @@ static const struct tegra_pcie_soc tegra30_pcie = {
2056 + .num_ports = 3,
2057 + .ports = tegra30_pcie_ports,
2058 + .msi_base_shift = 8,
2059 ++ .afi_pex2_ctrl = 0x128,
2060 + .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2061 + .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2062 + .pads_refclk_cfg0 = 0xfa5cfa5c,
2063 +diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
2064 +index b3f972e8cfed..deec9f9e0b61 100644
2065 +--- a/drivers/pci/iov.c
2066 ++++ b/drivers/pci/iov.c
2067 +@@ -187,10 +187,10 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
2068 + sprintf(buf, "virtfn%u", id);
2069 + rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
2070 + if (rc)
2071 +- goto failed2;
2072 ++ goto failed1;
2073 + rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
2074 + if (rc)
2075 +- goto failed3;
2076 ++ goto failed2;
2077 +
2078 + kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
2079 +
2080 +@@ -198,11 +198,10 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
2081 +
2082 + return 0;
2083 +
2084 +-failed3:
2085 +- sysfs_remove_link(&dev->dev.kobj, buf);
2086 + failed2:
2087 +- pci_stop_and_remove_bus_device(virtfn);
2088 ++ sysfs_remove_link(&dev->dev.kobj, buf);
2089 + failed1:
2090 ++ pci_stop_and_remove_bus_device(virtfn);
2091 + pci_dev_put(dev);
2092 + failed0:
2093 + virtfn_remove_bus(dev->bus, bus);
2094 +diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
2095 +index b45bc47d04fe..271aecfbc3bf 100644
2096 +--- a/drivers/pci/pcie/aer.c
2097 ++++ b/drivers/pci/pcie/aer.c
2098 +@@ -1387,6 +1387,7 @@ static int aer_probe(struct pcie_device *dev)
2099 + return -ENOMEM;
2100 +
2101 + rpc->rpd = port;
2102 ++ INIT_KFIFO(rpc->aer_fifo);
2103 + set_service_data(dev, rpc);
2104 +
2105 + status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr,
2106 +diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
2107 +index e7dbe21705ba..5356630e0e48 100644
2108 +--- a/drivers/pci/setup-bus.c
2109 ++++ b/drivers/pci/setup-bus.c
2110 +@@ -1785,12 +1785,18 @@ again:
2111 + /* Restore size and flags */
2112 + list_for_each_entry(fail_res, &fail_head, list) {
2113 + struct resource *res = fail_res->res;
2114 ++ int idx;
2115 +
2116 + res->start = fail_res->start;
2117 + res->end = fail_res->end;
2118 + res->flags = fail_res->flags;
2119 +- if (fail_res->dev->subordinate)
2120 +- res->flags = 0;
2121 ++
2122 ++ if (pci_is_bridge(fail_res->dev)) {
2123 ++ idx = res - &fail_res->dev->resource[0];
2124 ++ if (idx >= PCI_BRIDGE_RESOURCES &&
2125 ++ idx <= PCI_BRIDGE_RESOURCE_END)
2126 ++ res->flags = 0;
2127 ++ }
2128 + }
2129 + free_list(&fail_head);
2130 +
2131 +@@ -2037,12 +2043,18 @@ again:
2132 + /* Restore size and flags */
2133 + list_for_each_entry(fail_res, &fail_head, list) {
2134 + struct resource *res = fail_res->res;
2135 ++ int idx;
2136 +
2137 + res->start = fail_res->start;
2138 + res->end = fail_res->end;
2139 + res->flags = fail_res->flags;
2140 +- if (fail_res->dev->subordinate)
2141 +- res->flags = 0;
2142 ++
2143 ++ if (pci_is_bridge(fail_res->dev)) {
2144 ++ idx = res - &fail_res->dev->resource[0];
2145 ++ if (idx >= PCI_BRIDGE_RESOURCES &&
2146 ++ idx <= PCI_BRIDGE_RESOURCE_END)
2147 ++ res->flags = 0;
2148 ++ }
2149 + }
2150 + free_list(&fail_head);
2151 +
2152 +diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
2153 +index 465d6afd826e..cc43c855452f 100644
2154 +--- a/drivers/pci/switch/switchtec.c
2155 ++++ b/drivers/pci/switch/switchtec.c
2156 +@@ -1276,7 +1276,7 @@ static int switchtec_init_isr(struct switchtec_dev *stdev)
2157 + if (nvecs < 0)
2158 + return nvecs;
2159 +
2160 +- event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
2161 ++ event_irq = ioread16(&stdev->mmio_part_cfg->vep_vector_number);
2162 + if (event_irq < 0 || event_irq >= nvecs)
2163 + return -EFAULT;
2164 +
2165 +@@ -1349,7 +1349,7 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
2166 + if (rc)
2167 + return rc;
2168 +
2169 +- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
2170 ++ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2171 + if (rc)
2172 + return rc;
2173 +
2174 +diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
2175 +index 24866a5958ae..a9875038ed9b 100644
2176 +--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
2177 ++++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
2178 +@@ -2305,7 +2305,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
2179 + FN_ATAG0_A, 0, FN_REMOCON_B, 0,
2180 + /* IP0_11_8 [4] */
2181 + FN_SD1_DAT2_A, FN_MMC_D2, 0, FN_BS,
2182 +- FN_ATADIR0_A, 0, FN_SDSELF_B, 0,
2183 ++ FN_ATADIR0_A, 0, FN_SDSELF_A, 0,
2184 + FN_PWM4_B, 0, 0, 0,
2185 + 0, 0, 0, 0,
2186 + /* IP0_7_5 [3] */
2187 +@@ -2349,7 +2349,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
2188 + FN_TS_SDAT0_A, 0, 0, 0,
2189 + 0, 0, 0, 0,
2190 + /* IP1_10_8 [3] */
2191 +- FN_SD1_CLK_B, FN_MMC_D6, 0, FN_A24,
2192 ++ FN_SD1_CD_A, FN_MMC_D6, 0, FN_A24,
2193 + FN_DREQ1_A, 0, FN_HRX0_B, FN_TS_SPSYNC0_A,
2194 + /* IP1_7_5 [3] */
2195 + FN_A23, FN_HTX0_B, FN_TX2_B, FN_DACK2_A,
2196 +diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
2197 +index 697c77a4ea95..773d3bc38c8c 100644
2198 +--- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
2199 ++++ b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
2200 +@@ -5984,7 +5984,7 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
2201 + { PIN_DU_DOTCLKIN1, 0, 2 }, /* DU_DOTCLKIN1 */
2202 + } },
2203 + { PINMUX_DRIVE_REG("DRVCTRL12", 0xe6060330) {
2204 +- { PIN_DU_DOTCLKIN3, 28, 2 }, /* DU_DOTCLKIN3 */
2205 ++ { PIN_DU_DOTCLKIN3, 24, 2 }, /* DU_DOTCLKIN3 */
2206 + { PIN_FSCLKST, 20, 2 }, /* FSCLKST */
2207 + { PIN_TMS, 4, 2 }, /* TMS */
2208 + } },
2209 +@@ -6240,8 +6240,8 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
2210 + [31] = PIN_DU_DOTCLKIN1, /* DU_DOTCLKIN1 */
2211 + } },
2212 + { PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
2213 +- [ 0] = PIN_DU_DOTCLKIN3, /* DU_DOTCLKIN3 */
2214 +- [ 1] = SH_PFC_PIN_NONE,
2215 ++ [ 0] = SH_PFC_PIN_NONE,
2216 ++ [ 1] = PIN_DU_DOTCLKIN3, /* DU_DOTCLKIN3 */
2217 + [ 2] = PIN_FSCLKST, /* FSCLKST */
2218 + [ 3] = PIN_EXTALR, /* EXTALR*/
2219 + [ 4] = PIN_TRST_N, /* TRST# */
2220 +diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
2221 +index 292bace83f1e..6f436836fe50 100644
2222 +--- a/drivers/platform/x86/intel_mid_powerbtn.c
2223 ++++ b/drivers/platform/x86/intel_mid_powerbtn.c
2224 +@@ -146,9 +146,10 @@ static int mid_pb_probe(struct platform_device *pdev)
2225 +
2226 + input_set_capability(input, EV_KEY, KEY_POWER);
2227 +
2228 +- ddata = (struct mid_pb_ddata *)id->driver_data;
2229 ++ ddata = devm_kmemdup(&pdev->dev, (void *)id->driver_data,
2230 ++ sizeof(*ddata), GFP_KERNEL);
2231 + if (!ddata)
2232 +- return -ENODATA;
2233 ++ return -ENOMEM;
2234 +
2235 + ddata->dev = &pdev->dev;
2236 + ddata->irq = irq;
2237 +diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
2238 +index 033303708c8b..cb28bbdc9e17 100644
2239 +--- a/drivers/rtc/rtc-cmos.c
2240 ++++ b/drivers/rtc/rtc-cmos.c
2241 +@@ -850,7 +850,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
2242 + rtc_cmos_int_handler = cmos_interrupt;
2243 +
2244 + retval = request_irq(rtc_irq, rtc_cmos_int_handler,
2245 +- IRQF_SHARED, dev_name(&cmos_rtc.rtc->dev),
2246 ++ 0, dev_name(&cmos_rtc.rtc->dev),
2247 + cmos_rtc.rtc);
2248 + if (retval < 0) {
2249 + dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq);
2250 +diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
2251 +index 443f6d05ce29..fb6d7967ec00 100644
2252 +--- a/drivers/rtc/rtc-hym8563.c
2253 ++++ b/drivers/rtc/rtc-hym8563.c
2254 +@@ -97,7 +97,7 @@ static int hym8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
2255 +
2256 + if (!hym8563->valid) {
2257 + dev_warn(&client->dev, "no valid clock/calendar values available\n");
2258 +- return -EPERM;
2259 ++ return -EINVAL;
2260 + }
2261 +
2262 + ret = i2c_smbus_read_i2c_block_data(client, HYM8563_SEC, 7, buf);
2263 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
2264 +index 0d41a7dc1d6b..b0d6978d78bf 100644
2265 +--- a/drivers/scsi/ufs/ufshcd.c
2266 ++++ b/drivers/scsi/ufs/ufshcd.c
2267 +@@ -6953,7 +6953,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
2268 + ufshcd_init_icc_levels(hba);
2269 +
2270 + /* Add required well known logical units to scsi mid layer */
2271 +- if (ufshcd_scsi_add_wlus(hba))
2272 ++ ret = ufshcd_scsi_add_wlus(hba);
2273 ++ if (ret)
2274 + goto out;
2275 +
2276 + /* Initialize devfreq after UFS device is detected */
2277 +diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
2278 +index 5741ec3fa814..51850cc68b70 100644
2279 +--- a/drivers/soc/qcom/rpmhpd.c
2280 ++++ b/drivers/soc/qcom/rpmhpd.c
2281 +@@ -93,6 +93,7 @@ static struct rpmhpd sdm845_mx = {
2282 +
2283 + static struct rpmhpd sdm845_mx_ao = {
2284 + .pd = { .name = "mx_ao", },
2285 ++ .active_only = true,
2286 + .peer = &sdm845_mx,
2287 + .res_name = "mx.lvl",
2288 + };
2289 +@@ -107,6 +108,7 @@ static struct rpmhpd sdm845_cx = {
2290 +
2291 + static struct rpmhpd sdm845_cx_ao = {
2292 + .pd = { .name = "cx_ao", },
2293 ++ .active_only = true,
2294 + .peer = &sdm845_cx,
2295 + .parent = &sdm845_mx_ao.pd,
2296 + .res_name = "cx.lvl",
2297 +diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
2298 +index a494543d3ae1..eb47fe5ed280 100644
2299 +--- a/drivers/watchdog/qcom-wdt.c
2300 ++++ b/drivers/watchdog/qcom-wdt.c
2301 +@@ -246,7 +246,7 @@ static int qcom_wdt_probe(struct platform_device *pdev)
2302 + }
2303 +
2304 + /* check if there is pretimeout support */
2305 +- irq = platform_get_irq(pdev, 0);
2306 ++ irq = platform_get_irq_optional(pdev, 0);
2307 + if (irq > 0) {
2308 + ret = devm_request_irq(dev, irq, qcom_wdt_isr,
2309 + IRQF_TRIGGER_RISING,
2310 +diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
2311 +index a3a329011a06..25188d6bbe15 100644
2312 +--- a/drivers/watchdog/stm32_iwdg.c
2313 ++++ b/drivers/watchdog/stm32_iwdg.c
2314 +@@ -262,6 +262,24 @@ static int stm32_iwdg_probe(struct platform_device *pdev)
2315 + watchdog_set_nowayout(wdd, WATCHDOG_NOWAYOUT);
2316 + watchdog_init_timeout(wdd, 0, dev);
2317 +
2318 ++ /*
2319 ++ * In case of CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED is set
2320 ++ * (Means U-Boot/bootloaders leaves the watchdog running)
2321 ++ * When we get here we should make a decision to prevent
2322 ++ * any side effects before user space daemon will take care of it.
2323 ++ * The best option, taking into consideration that there is no
2324 ++ * way to read values back from hardware, is to enforce watchdog
2325 ++ * being run with deterministic values.
2326 ++ */
2327 ++ if (IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED)) {
2328 ++ ret = stm32_iwdg_start(wdd);
2329 ++ if (ret)
2330 ++ return ret;
2331 ++
2332 ++ /* Make sure the watchdog is serviced */
2333 ++ set_bit(WDOG_HW_RUNNING, &wdd->status);
2334 ++ }
2335 ++
2336 + ret = devm_watchdog_register_device(dev, wdd);
2337 + if (ret)
2338 + return ret;
2339 +diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
2340 +index 295a7a21b774..e7dd07f47825 100644
2341 +--- a/fs/nfs/Kconfig
2342 ++++ b/fs/nfs/Kconfig
2343 +@@ -90,7 +90,7 @@ config NFS_V4
2344 + config NFS_SWAP
2345 + bool "Provide swap over NFS support"
2346 + default n
2347 +- depends on NFS_FS
2348 ++ depends on NFS_FS && SWAP
2349 + select SUNRPC_SWAP
2350 + help
2351 + This option enables swapon to work on files located on NFS mounts.
2352 +diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
2353 +index 040a50fd9bf3..29f00da8a0b7 100644
2354 +--- a/fs/nfs/direct.c
2355 ++++ b/fs/nfs/direct.c
2356 +@@ -245,10 +245,10 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
2357 + data->ds_commit_index);
2358 +
2359 + /* verifier not set so always fail */
2360 +- if (verfp->committed < 0)
2361 ++ if (verfp->committed < 0 || data->res.verf->committed <= NFS_UNSTABLE)
2362 + return 1;
2363 +
2364 +- return nfs_direct_cmp_verf(verfp, &data->verf);
2365 ++ return nfs_direct_cmp_verf(verfp, data->res.verf);
2366 + }
2367 +
2368 + /**
2369 +diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
2370 +index 602767850b36..1f60ab2535ee 100644
2371 +--- a/fs/nfs/nfs3xdr.c
2372 ++++ b/fs/nfs/nfs3xdr.c
2373 +@@ -2338,6 +2338,7 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
2374 + void *data)
2375 + {
2376 + struct nfs_commitres *result = data;
2377 ++ struct nfs_writeverf *verf = result->verf;
2378 + enum nfs_stat status;
2379 + int error;
2380 +
2381 +@@ -2350,7 +2351,9 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
2382 + result->op_status = status;
2383 + if (status != NFS3_OK)
2384 + goto out_status;
2385 +- error = decode_writeverf3(xdr, &result->verf->verifier);
2386 ++ error = decode_writeverf3(xdr, &verf->verifier);
2387 ++ if (!error)
2388 ++ verf->committed = NFS_FILE_SYNC;
2389 + out:
2390 + return error;
2391 + out_status:
2392 +diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
2393 +index 16b2e5cc3e94..bb322d9de313 100644
2394 +--- a/fs/nfs/nfs4_fs.h
2395 ++++ b/fs/nfs/nfs4_fs.h
2396 +@@ -439,9 +439,7 @@ extern void nfs4_schedule_state_renewal(struct nfs_client *);
2397 + extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
2398 + extern void nfs4_kill_renewd(struct nfs_client *);
2399 + extern void nfs4_renew_state(struct work_struct *);
2400 +-extern void nfs4_set_lease_period(struct nfs_client *clp,
2401 +- unsigned long lease,
2402 +- unsigned long lastrenewed);
2403 ++extern void nfs4_set_lease_period(struct nfs_client *clp, unsigned long lease);
2404 +
2405 +
2406 + /* nfs4state.c */
2407 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2408 +index f26d714f9f28..423960d480f1 100644
2409 +--- a/fs/nfs/nfs4proc.c
2410 ++++ b/fs/nfs/nfs4proc.c
2411 +@@ -3187,6 +3187,11 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
2412 + exception.retry = 1;
2413 + continue;
2414 + }
2415 ++ if (status == -NFS4ERR_EXPIRED) {
2416 ++ nfs4_schedule_lease_recovery(server->nfs_client);
2417 ++ exception.retry = 1;
2418 ++ continue;
2419 ++ }
2420 + if (status == -EAGAIN) {
2421 + /* We must have found a delegation */
2422 + exception.retry = 1;
2423 +@@ -5019,16 +5024,13 @@ static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, str
2424 + struct nfs4_exception exception = {
2425 + .interruptible = true,
2426 + };
2427 +- unsigned long now = jiffies;
2428 + int err;
2429 +
2430 + do {
2431 + err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
2432 + trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
2433 + if (err == 0) {
2434 +- nfs4_set_lease_period(server->nfs_client,
2435 +- fsinfo->lease_time * HZ,
2436 +- now);
2437 ++ nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ);
2438 + break;
2439 + }
2440 + err = nfs4_handle_exception(server, err, &exception);
2441 +@@ -6084,6 +6086,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
2442 + .callback_data = &setclientid,
2443 + .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
2444 + };
2445 ++ unsigned long now = jiffies;
2446 + int status;
2447 +
2448 + /* nfs_client_id4 */
2449 +@@ -6116,6 +6119,9 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
2450 + clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
2451 + put_rpccred(setclientid.sc_cred);
2452 + }
2453 ++
2454 ++ if (status == 0)
2455 ++ do_renew_lease(clp, now);
2456 + out:
2457 + trace_nfs4_setclientid(clp, status);
2458 + dprintk("NFS reply setclientid: %d\n", status);
2459 +@@ -8199,6 +8205,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
2460 + struct rpc_task *task;
2461 + struct nfs41_exchange_id_args *argp;
2462 + struct nfs41_exchange_id_res *resp;
2463 ++ unsigned long now = jiffies;
2464 + int status;
2465 +
2466 + task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL);
2467 +@@ -8219,6 +8226,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
2468 + if (status != 0)
2469 + goto out;
2470 +
2471 ++ do_renew_lease(clp, now);
2472 ++
2473 + clp->cl_clientid = resp->clientid;
2474 + clp->cl_exchange_flags = resp->flags;
2475 + clp->cl_seqid = resp->seqid;
2476 +diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
2477 +index 6ea431b067dd..ff876dda7f06 100644
2478 +--- a/fs/nfs/nfs4renewd.c
2479 ++++ b/fs/nfs/nfs4renewd.c
2480 +@@ -138,15 +138,12 @@ nfs4_kill_renewd(struct nfs_client *clp)
2481 + *
2482 + * @clp: pointer to nfs_client
2483 + * @lease: new value for lease period
2484 +- * @lastrenewed: time at which lease was last renewed
2485 + */
2486 + void nfs4_set_lease_period(struct nfs_client *clp,
2487 +- unsigned long lease,
2488 +- unsigned long lastrenewed)
2489 ++ unsigned long lease)
2490 + {
2491 + spin_lock(&clp->cl_lock);
2492 + clp->cl_lease_time = lease;
2493 +- clp->cl_last_renewal = lastrenewed;
2494 + spin_unlock(&clp->cl_lock);
2495 +
2496 + /* Cap maximum reconnect timeout at 1/2 lease period */
2497 +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
2498 +index 0c6d53dc3672..b53bcf40e2a7 100644
2499 +--- a/fs/nfs/nfs4state.c
2500 ++++ b/fs/nfs/nfs4state.c
2501 +@@ -91,17 +91,15 @@ static int nfs4_setup_state_renewal(struct nfs_client *clp)
2502 + {
2503 + int status;
2504 + struct nfs_fsinfo fsinfo;
2505 +- unsigned long now;
2506 +
2507 + if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
2508 + nfs4_schedule_state_renewal(clp);
2509 + return 0;
2510 + }
2511 +
2512 +- now = jiffies;
2513 + status = nfs4_proc_get_lease_time(clp, &fsinfo);
2514 + if (status == 0) {
2515 +- nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now);
2516 ++ nfs4_set_lease_period(clp, fsinfo.lease_time * HZ);
2517 + nfs4_schedule_state_renewal(clp);
2518 + }
2519 +
2520 +diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
2521 +index b2f395fa7350..9398c0b6e0a3 100644
2522 +--- a/fs/nfs/nfs4trace.h
2523 ++++ b/fs/nfs/nfs4trace.h
2524 +@@ -352,7 +352,7 @@ DECLARE_EVENT_CLASS(nfs4_clientid_event,
2525 + ),
2526 +
2527 + TP_fast_assign(
2528 +- __entry->error = error;
2529 ++ __entry->error = error < 0 ? -error : 0;
2530 + __assign_str(dstaddr, clp->cl_hostname);
2531 + ),
2532 +
2533 +@@ -432,7 +432,8 @@ TRACE_EVENT(nfs4_sequence_done,
2534 + __entry->target_highest_slotid =
2535 + res->sr_target_highest_slotid;
2536 + __entry->status_flags = res->sr_status_flags;
2537 +- __entry->error = res->sr_status;
2538 ++ __entry->error = res->sr_status < 0 ?
2539 ++ -res->sr_status : 0;
2540 + ),
2541 + TP_printk(
2542 + "error=%ld (%s) session=0x%08x slot_nr=%u seq_nr=%u "
2543 +@@ -566,7 +567,7 @@ TRACE_EVENT(nfs4_xdr_status,
2544 + TP_PROTO(
2545 + const struct xdr_stream *xdr,
2546 + u32 op,
2547 +- int error
2548 ++ u32 error
2549 + ),
2550 +
2551 + TP_ARGS(xdr, op, error),
2552 +@@ -756,7 +757,7 @@ TRACE_EVENT(nfs4_close,
2553 + __entry->fileid = NFS_FILEID(inode);
2554 + __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
2555 + __entry->fmode = (__force unsigned int)state->state;
2556 +- __entry->error = error;
2557 ++ __entry->error = error < 0 ? -error : 0;
2558 + __entry->stateid_seq =
2559 + be32_to_cpu(args->stateid.seqid);
2560 + __entry->stateid_hash =
2561 +@@ -821,7 +822,7 @@ DECLARE_EVENT_CLASS(nfs4_lock_event,
2562 + TP_fast_assign(
2563 + const struct inode *inode = state->inode;
2564 +
2565 +- __entry->error = error;
2566 ++ __entry->error = error < 0 ? -error : 0;
2567 + __entry->cmd = cmd;
2568 + __entry->type = request->fl_type;
2569 + __entry->start = request->fl_start;
2570 +@@ -893,7 +894,7 @@ TRACE_EVENT(nfs4_set_lock,
2571 + TP_fast_assign(
2572 + const struct inode *inode = state->inode;
2573 +
2574 +- __entry->error = error;
2575 ++ __entry->error = error < 0 ? -error : 0;
2576 + __entry->cmd = cmd;
2577 + __entry->type = request->fl_type;
2578 + __entry->start = request->fl_start;
2579 +@@ -989,7 +990,7 @@ TRACE_EVENT(nfs4_delegreturn_exit,
2580 + TP_fast_assign(
2581 + __entry->dev = res->server->s_dev;
2582 + __entry->fhandle = nfs_fhandle_hash(args->fhandle);
2583 +- __entry->error = error;
2584 ++ __entry->error = error < 0 ? -error : 0;
2585 + __entry->stateid_seq =
2586 + be32_to_cpu(args->stateid->seqid);
2587 + __entry->stateid_hash =
2588 +@@ -1029,7 +1030,7 @@ DECLARE_EVENT_CLASS(nfs4_test_stateid_event,
2589 + TP_fast_assign(
2590 + const struct inode *inode = state->inode;
2591 +
2592 +- __entry->error = error;
2593 ++ __entry->error = error < 0 ? -error : 0;
2594 + __entry->dev = inode->i_sb->s_dev;
2595 + __entry->fileid = NFS_FILEID(inode);
2596 + __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
2597 +@@ -1131,7 +1132,7 @@ TRACE_EVENT(nfs4_lookupp,
2598 + TP_fast_assign(
2599 + __entry->dev = inode->i_sb->s_dev;
2600 + __entry->ino = NFS_FILEID(inode);
2601 +- __entry->error = error;
2602 ++ __entry->error = error < 0 ? -error : 0;
2603 + ),
2604 +
2605 + TP_printk(
2606 +@@ -1167,7 +1168,7 @@ TRACE_EVENT(nfs4_rename,
2607 + __entry->dev = olddir->i_sb->s_dev;
2608 + __entry->olddir = NFS_FILEID(olddir);
2609 + __entry->newdir = NFS_FILEID(newdir);
2610 +- __entry->error = error;
2611 ++ __entry->error = error < 0 ? -error : 0;
2612 + __assign_str(oldname, oldname->name);
2613 + __assign_str(newname, newname->name);
2614 + ),
2615 +@@ -1258,7 +1259,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_event,
2616 + __entry->dev = inode->i_sb->s_dev;
2617 + __entry->fileid = NFS_FILEID(inode);
2618 + __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
2619 +- __entry->error = error;
2620 ++ __entry->error = error < 0 ? -error : 0;
2621 + __entry->stateid_seq =
2622 + be32_to_cpu(stateid->seqid);
2623 + __entry->stateid_hash =
2624 +@@ -1314,7 +1315,7 @@ DECLARE_EVENT_CLASS(nfs4_getattr_event,
2625 + __entry->valid = fattr->valid;
2626 + __entry->fhandle = nfs_fhandle_hash(fhandle);
2627 + __entry->fileid = (fattr->valid & NFS_ATTR_FATTR_FILEID) ? fattr->fileid : 0;
2628 +- __entry->error = error;
2629 ++ __entry->error = error < 0 ? -error : 0;
2630 + ),
2631 +
2632 + TP_printk(
2633 +@@ -1361,7 +1362,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
2634 + ),
2635 +
2636 + TP_fast_assign(
2637 +- __entry->error = error;
2638 ++ __entry->error = error < 0 ? -error : 0;
2639 + __entry->fhandle = nfs_fhandle_hash(fhandle);
2640 + if (!IS_ERR_OR_NULL(inode)) {
2641 + __entry->fileid = NFS_FILEID(inode);
2642 +@@ -1418,7 +1419,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
2643 + ),
2644 +
2645 + TP_fast_assign(
2646 +- __entry->error = error;
2647 ++ __entry->error = error < 0 ? -error : 0;
2648 + __entry->fhandle = nfs_fhandle_hash(fhandle);
2649 + if (!IS_ERR_OR_NULL(inode)) {
2650 + __entry->fileid = NFS_FILEID(inode);
2651 +@@ -1721,7 +1722,7 @@ TRACE_EVENT(nfs4_layoutget,
2652 + __entry->iomode = args->iomode;
2653 + __entry->offset = args->offset;
2654 + __entry->count = args->length;
2655 +- __entry->error = error;
2656 ++ __entry->error = error < 0 ? -error : 0;
2657 + __entry->stateid_seq =
2658 + be32_to_cpu(state->stateid.seqid);
2659 + __entry->stateid_hash =
2660 +diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
2661 +index ab07db0f07cd..7c0ff1a3b591 100644
2662 +--- a/fs/nfs/nfs4xdr.c
2663 ++++ b/fs/nfs/nfs4xdr.c
2664 +@@ -4316,11 +4316,14 @@ static int decode_write_verifier(struct xdr_stream *xdr, struct nfs_write_verifi
2665 +
2666 + static int decode_commit(struct xdr_stream *xdr, struct nfs_commitres *res)
2667 + {
2668 ++ struct nfs_writeverf *verf = res->verf;
2669 + int status;
2670 +
2671 + status = decode_op_hdr(xdr, OP_COMMIT);
2672 + if (!status)
2673 +- status = decode_write_verifier(xdr, &res->verf->verifier);
2674 ++ status = decode_write_verifier(xdr, &verf->verifier);
2675 ++ if (!status)
2676 ++ verf->committed = NFS_FILE_SYNC;
2677 + return status;
2678 + }
2679 +
2680 +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
2681 +index bb80034a7661..443639cbb0cf 100644
2682 +--- a/fs/nfs/pnfs.c
2683 ++++ b/fs/nfs/pnfs.c
2684 +@@ -1425,7 +1425,7 @@ retry:
2685 + /* lo ref dropped in pnfs_roc_release() */
2686 + layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
2687 + /* If the creds don't match, we can't compound the layoutreturn */
2688 +- if (!layoutreturn || cred != lo->plh_lc_cred)
2689 ++ if (!layoutreturn || cred_fscmp(cred, lo->plh_lc_cred) != 0)
2690 + goto out_noroc;
2691 +
2692 + roc = layoutreturn;
2693 +diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
2694 +index 82af4809b869..8b37e7f8e789 100644
2695 +--- a/fs/nfs/pnfs_nfs.c
2696 ++++ b/fs/nfs/pnfs_nfs.c
2697 +@@ -31,12 +31,11 @@ EXPORT_SYMBOL_GPL(pnfs_generic_rw_release);
2698 + /* Fake up some data that will cause nfs_commit_release to retry the writes. */
2699 + void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data)
2700 + {
2701 +- struct nfs_page *first = nfs_list_entry(data->pages.next);
2702 ++ struct nfs_writeverf *verf = data->res.verf;
2703 +
2704 + data->task.tk_status = 0;
2705 +- memcpy(&data->verf.verifier, &first->wb_verf,
2706 +- sizeof(data->verf.verifier));
2707 +- data->verf.verifier.data[0]++; /* ensure verifier mismatch */
2708 ++ memset(&verf->verifier, 0, sizeof(verf->verifier));
2709 ++ verf->committed = NFS_UNSTABLE;
2710 + }
2711 + EXPORT_SYMBOL_GPL(pnfs_generic_prepare_to_resend_writes);
2712 +
2713 +diff --git a/fs/nfs/write.c b/fs/nfs/write.c
2714 +index 52cab65f91cf..913eb37c249b 100644
2715 +--- a/fs/nfs/write.c
2716 ++++ b/fs/nfs/write.c
2717 +@@ -243,7 +243,15 @@ out:
2718 + /* A writeback failed: mark the page as bad, and invalidate the page cache */
2719 + static void nfs_set_pageerror(struct address_space *mapping)
2720 + {
2721 ++ struct inode *inode = mapping->host;
2722 ++
2723 + nfs_zap_mapping(mapping->host, mapping);
2724 ++ /* Force file size revalidation */
2725 ++ spin_lock(&inode->i_lock);
2726 ++ NFS_I(inode)->cache_validity |= NFS_INO_REVAL_FORCED |
2727 ++ NFS_INO_REVAL_PAGECACHE |
2728 ++ NFS_INO_INVALID_SIZE;
2729 ++ spin_unlock(&inode->i_lock);
2730 + }
2731 +
2732 + static void nfs_mapping_set_error(struct page *page, int error)
2733 +@@ -1829,6 +1837,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
2734 +
2735 + static void nfs_commit_release_pages(struct nfs_commit_data *data)
2736 + {
2737 ++ const struct nfs_writeverf *verf = data->res.verf;
2738 + struct nfs_page *req;
2739 + int status = data->task.tk_status;
2740 + struct nfs_commit_info cinfo;
2741 +@@ -1856,7 +1865,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
2742 +
2743 + /* Okay, COMMIT succeeded, apparently. Check the verifier
2744 + * returned by the server against all stored verfs. */
2745 +- if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) {
2746 ++ if (verf->committed > NFS_UNSTABLE &&
2747 ++ !nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier)) {
2748 + /* We have a match */
2749 + if (req->wb_page)
2750 + nfs_inode_remove_request(req);
2751 +diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
2752 +index 44c52639db55..75c7b5ed53c5 100644
2753 +--- a/include/rdma/ib_verbs.h
2754 ++++ b/include/rdma/ib_verbs.h
2755 +@@ -4252,6 +4252,9 @@ static inline int ib_check_mr_access(int flags)
2756 + !(flags & IB_ACCESS_LOCAL_WRITE))
2757 + return -EINVAL;
2758 +
2759 ++ if (flags & ~IB_ACCESS_SUPPORTED)
2760 ++ return -EINVAL;
2761 ++
2762 + return 0;
2763 + }
2764 +
2765 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
2766 +index 8dacda4b0362..00743684a549 100644
2767 +--- a/kernel/sched/core.c
2768 ++++ b/kernel/sched/core.c
2769 +@@ -7090,6 +7090,12 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
2770 +
2771 + if (parent)
2772 + sched_online_group(tg, parent);
2773 ++
2774 ++#ifdef CONFIG_UCLAMP_TASK_GROUP
2775 ++ /* Propagate the effective uclamp value for the new group */
2776 ++ cpu_util_update_eff(css);
2777 ++#endif
2778 ++
2779 + return 0;
2780 + }
2781 +
2782 +diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
2783 +index da5639a5bd3b..0147b26f585a 100644
2784 +--- a/net/core/bpf_sk_storage.c
2785 ++++ b/net/core/bpf_sk_storage.c
2786 +@@ -643,9 +643,10 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
2787 + return ERR_PTR(-ENOMEM);
2788 + bpf_map_init_from_attr(&smap->map, attr);
2789 +
2790 ++ nbuckets = roundup_pow_of_two(num_possible_cpus());
2791 + /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
2792 +- smap->bucket_log = max_t(u32, 1, ilog2(roundup_pow_of_two(num_possible_cpus())));
2793 +- nbuckets = 1U << smap->bucket_log;
2794 ++ nbuckets = max_t(u32, 2, nbuckets);
2795 ++ smap->bucket_log = ilog2(nbuckets);
2796 + cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);
2797 +
2798 + ret = bpf_map_charge_init(&smap->map.memory, cost);
2799 +diff --git a/net/core/sock_map.c b/net/core/sock_map.c
2800 +index 8998e356f423..085cef5857bb 100644
2801 +--- a/net/core/sock_map.c
2802 ++++ b/net/core/sock_map.c
2803 +@@ -234,7 +234,6 @@ static void sock_map_free(struct bpf_map *map)
2804 + int i;
2805 +
2806 + synchronize_rcu();
2807 +- rcu_read_lock();
2808 + raw_spin_lock_bh(&stab->lock);
2809 + for (i = 0; i < stab->map.max_entries; i++) {
2810 + struct sock **psk = &stab->sks[i];
2811 +@@ -243,13 +242,15 @@ static void sock_map_free(struct bpf_map *map)
2812 + sk = xchg(psk, NULL);
2813 + if (sk) {
2814 + lock_sock(sk);
2815 ++ rcu_read_lock();
2816 + sock_map_unref(sk, psk);
2817 ++ rcu_read_unlock();
2818 + release_sock(sk);
2819 + }
2820 + }
2821 + raw_spin_unlock_bh(&stab->lock);
2822 +- rcu_read_unlock();
2823 +
2824 ++ /* wait for psock readers accessing its map link */
2825 + synchronize_rcu();
2826 +
2827 + bpf_map_area_free(stab->sks);
2828 +@@ -416,14 +417,16 @@ static int sock_map_update_elem(struct bpf_map *map, void *key,
2829 + ret = -EINVAL;
2830 + goto out;
2831 + }
2832 +- if (!sock_map_sk_is_suitable(sk) ||
2833 +- sk->sk_state != TCP_ESTABLISHED) {
2834 ++ if (!sock_map_sk_is_suitable(sk)) {
2835 + ret = -EOPNOTSUPP;
2836 + goto out;
2837 + }
2838 +
2839 + sock_map_sk_acquire(sk);
2840 +- ret = sock_map_update_common(map, idx, sk, flags);
2841 ++ if (sk->sk_state != TCP_ESTABLISHED)
2842 ++ ret = -EOPNOTSUPP;
2843 ++ else
2844 ++ ret = sock_map_update_common(map, idx, sk, flags);
2845 + sock_map_sk_release(sk);
2846 + out:
2847 + fput(sock->file);
2848 +@@ -739,14 +742,16 @@ static int sock_hash_update_elem(struct bpf_map *map, void *key,
2849 + ret = -EINVAL;
2850 + goto out;
2851 + }
2852 +- if (!sock_map_sk_is_suitable(sk) ||
2853 +- sk->sk_state != TCP_ESTABLISHED) {
2854 ++ if (!sock_map_sk_is_suitable(sk)) {
2855 + ret = -EOPNOTSUPP;
2856 + goto out;
2857 + }
2858 +
2859 + sock_map_sk_acquire(sk);
2860 +- ret = sock_hash_update_common(map, key, sk, flags);
2861 ++ if (sk->sk_state != TCP_ESTABLISHED)
2862 ++ ret = -EOPNOTSUPP;
2863 ++ else
2864 ++ ret = sock_hash_update_common(map, key, sk, flags);
2865 + sock_map_sk_release(sk);
2866 + out:
2867 + fput(sock->file);
2868 +@@ -859,19 +864,22 @@ static void sock_hash_free(struct bpf_map *map)
2869 + int i;
2870 +
2871 + synchronize_rcu();
2872 +- rcu_read_lock();
2873 + for (i = 0; i < htab->buckets_num; i++) {
2874 + bucket = sock_hash_select_bucket(htab, i);
2875 + raw_spin_lock_bh(&bucket->lock);
2876 + hlist_for_each_entry_safe(elem, node, &bucket->head, node) {
2877 + hlist_del_rcu(&elem->node);
2878 + lock_sock(elem->sk);
2879 ++ rcu_read_lock();
2880 + sock_map_unref(elem->sk, elem);
2881 ++ rcu_read_unlock();
2882 + release_sock(elem->sk);
2883 + }
2884 + raw_spin_unlock_bh(&bucket->lock);
2885 + }
2886 +- rcu_read_unlock();
2887 ++
2888 ++ /* wait for psock readers accessing its map link */
2889 ++ synchronize_rcu();
2890 +
2891 + bpf_map_area_free(htab->buckets);
2892 + kfree(htab);
2893 +diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
2894 +index c443db7af8d4..463cefc1e5ae 100644
2895 +--- a/net/vmw_vsock/hyperv_transport.c
2896 ++++ b/net/vmw_vsock/hyperv_transport.c
2897 +@@ -136,28 +136,15 @@ struct hvsock {
2898 + ****************************************************************************
2899 + * The only valid Service GUIDs, from the perspectives of both the host and *
2900 + * Linux VM, that can be connected by the other end, must conform to this *
2901 +- * format: <port>-facb-11e6-bd58-64006a7986d3, and the "port" must be in *
2902 +- * this range [0, 0x7FFFFFFF]. *
2903 ++ * format: <port>-facb-11e6-bd58-64006a7986d3. *
2904 + ****************************************************************************
2905 + *
2906 + * When we write apps on the host to connect(), the GUID ServiceID is used.
2907 + * When we write apps in Linux VM to connect(), we only need to specify the
2908 + * port and the driver will form the GUID and use that to request the host.
2909 + *
2910 +- * From the perspective of Linux VM:
2911 +- * 1. the local ephemeral port (i.e. the local auto-bound port when we call
2912 +- * connect() without explicit bind()) is generated by __vsock_bind_stream(),
2913 +- * and the range is [1024, 0xFFFFFFFF).
2914 +- * 2. the remote ephemeral port (i.e. the auto-generated remote port for
2915 +- * a connect request initiated by the host's connect()) is generated by
2916 +- * hvs_remote_addr_init() and the range is [0x80000000, 0xFFFFFFFF).
2917 + */
2918 +
2919 +-#define MAX_LISTEN_PORT ((u32)0x7FFFFFFF)
2920 +-#define MAX_VM_LISTEN_PORT MAX_LISTEN_PORT
2921 +-#define MAX_HOST_LISTEN_PORT MAX_LISTEN_PORT
2922 +-#define MIN_HOST_EPHEMERAL_PORT (MAX_HOST_LISTEN_PORT + 1)
2923 +-
2924 + /* 00000000-facb-11e6-bd58-64006a7986d3 */
2925 + static const guid_t srv_id_template =
2926 + GUID_INIT(0x00000000, 0xfacb, 0x11e6, 0xbd, 0x58,
2927 +@@ -180,33 +167,6 @@ static void hvs_addr_init(struct sockaddr_vm *addr, const guid_t *svr_id)
2928 + vsock_addr_init(addr, VMADDR_CID_ANY, port);
2929 + }
2930 +
2931 +-static void hvs_remote_addr_init(struct sockaddr_vm *remote,
2932 +- struct sockaddr_vm *local)
2933 +-{
2934 +- static u32 host_ephemeral_port = MIN_HOST_EPHEMERAL_PORT;
2935 +- struct sock *sk;
2936 +-
2937 +- vsock_addr_init(remote, VMADDR_CID_ANY, VMADDR_PORT_ANY);
2938 +-
2939 +- while (1) {
2940 +- /* Wrap around ? */
2941 +- if (host_ephemeral_port < MIN_HOST_EPHEMERAL_PORT ||
2942 +- host_ephemeral_port == VMADDR_PORT_ANY)
2943 +- host_ephemeral_port = MIN_HOST_EPHEMERAL_PORT;
2944 +-
2945 +- remote->svm_port = host_ephemeral_port++;
2946 +-
2947 +- sk = vsock_find_connected_socket(remote, local);
2948 +- if (!sk) {
2949 +- /* Found an available ephemeral port */
2950 +- return;
2951 +- }
2952 +-
2953 +- /* Release refcnt got in vsock_find_connected_socket */
2954 +- sock_put(sk);
2955 +- }
2956 +-}
2957 +-
2958 + static void hvs_set_channel_pending_send_size(struct vmbus_channel *chan)
2959 + {
2960 + set_channel_pending_send_size(chan,
2961 +@@ -336,12 +296,7 @@ static void hvs_open_connection(struct vmbus_channel *chan)
2962 + if_type = &chan->offermsg.offer.if_type;
2963 + if_instance = &chan->offermsg.offer.if_instance;
2964 + conn_from_host = chan->offermsg.offer.u.pipe.user_def[0];
2965 +-
2966 +- /* The host or the VM should only listen on a port in
2967 +- * [0, MAX_LISTEN_PORT]
2968 +- */
2969 +- if (!is_valid_srv_id(if_type) ||
2970 +- get_port_by_srv_id(if_type) > MAX_LISTEN_PORT)
2971 ++ if (!is_valid_srv_id(if_type))
2972 + return;
2973 +
2974 + hvs_addr_init(&addr, conn_from_host ? if_type : if_instance);
2975 +@@ -365,6 +320,13 @@ static void hvs_open_connection(struct vmbus_channel *chan)
2976 +
2977 + new->sk_state = TCP_SYN_SENT;
2978 + vnew = vsock_sk(new);
2979 ++
2980 ++ hvs_addr_init(&vnew->local_addr, if_type);
2981 ++
2982 ++ /* Remote peer is always the host */
2983 ++ vsock_addr_init(&vnew->remote_addr,
2984 ++ VMADDR_CID_HOST, VMADDR_PORT_ANY);
2985 ++ vnew->remote_addr.svm_port = get_port_by_srv_id(if_instance);
2986 + hvs_new = vnew->trans;
2987 + hvs_new->chan = chan;
2988 + } else {
2989 +@@ -429,8 +391,6 @@ static void hvs_open_connection(struct vmbus_channel *chan)
2990 + sk->sk_ack_backlog++;
2991 +
2992 + hvs_addr_init(&vnew->local_addr, if_type);
2993 +- hvs_remote_addr_init(&vnew->remote_addr, &vnew->local_addr);
2994 +-
2995 + hvs_new->vm_srv_id = *if_type;
2996 + hvs_new->host_srv_id = *if_instance;
2997 +
2998 +@@ -753,16 +713,6 @@ static bool hvs_stream_is_active(struct vsock_sock *vsk)
2999 +
3000 + static bool hvs_stream_allow(u32 cid, u32 port)
3001 + {
3002 +- /* The host's port range [MIN_HOST_EPHEMERAL_PORT, 0xFFFFFFFF) is
3003 +- * reserved as ephemeral ports, which are used as the host's ports
3004 +- * when the host initiates connections.
3005 +- *
3006 +- * Perform this check in the guest so an immediate error is produced
3007 +- * instead of a timeout.
3008 +- */
3009 +- if (port > MAX_HOST_LISTEN_PORT)
3010 +- return false;
3011 +-
3012 + if (cid == VMADDR_CID_HOST)
3013 + return true;
3014 +
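
For orientation, the hv_sock hunks above drop the guest-side port-range policing: the remote address of a host-initiated connection is now derived from the host service GUID (if_instance) rather than a synthetic ephemeral port, and hvs_stream_allow() only insists that the peer CID is the host. A minimal userspace sketch, not from the patch, assuming (as the get_port_by_srv_id() calls above suggest) that the vsock port occupies the first four little-endian bytes of the 00000000-facb-11e6-bd58-64006a7986d3 template:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical helper: build an hv_sock service GUID from a vsock port by
 * substituting the port into the first four bytes of the fixed template. */
static void port_to_srv_id(uint32_t port, uint8_t guid[16])
{
    static const uint8_t template_id[16] = {
        0x00, 0x00, 0x00, 0x00, 0xcb, 0xfa, 0xe6, 0x11,
        0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3 };

    memcpy(guid, template_id, sizeof(template_id));
    guid[0] = port & 0xff;
    guid[1] = (port >> 8) & 0xff;
    guid[2] = (port >> 16) & 0xff;
    guid[3] = (port >> 24) & 0xff;
}

int main(void)
{
    uint8_t guid[16];

    port_to_srv_id(0x808, guid);   /* e.g. vsock port 2056 */
    for (int i = 0; i < 16; i++)
        printf("%02x%s", guid[i], i == 15 ? "\n" : "");
    return 0;
}
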
3015 +diff --git a/security/selinux/avc.c b/security/selinux/avc.c
3016 +index ecd3829996aa..23dc888ae305 100644
3017 +--- a/security/selinux/avc.c
3018 ++++ b/security/selinux/avc.c
3019 +@@ -424,7 +424,7 @@ static inline int avc_xperms_audit(struct selinux_state *state,
3020 + if (likely(!audited))
3021 + return 0;
3022 + return slow_avc_audit(state, ssid, tsid, tclass, requested,
3023 +- audited, denied, result, ad, 0);
3024 ++ audited, denied, result, ad);
3025 + }
3026 +
3027 + static void avc_node_free(struct rcu_head *rhead)
3028 +@@ -758,8 +758,7 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
3029 + noinline int slow_avc_audit(struct selinux_state *state,
3030 + u32 ssid, u32 tsid, u16 tclass,
3031 + u32 requested, u32 audited, u32 denied, int result,
3032 +- struct common_audit_data *a,
3033 +- unsigned int flags)
3034 ++ struct common_audit_data *a)
3035 + {
3036 + struct common_audit_data stack_data;
3037 + struct selinux_audit_data sad;
3038 +@@ -772,17 +771,6 @@ noinline int slow_avc_audit(struct selinux_state *state,
3039 + a->type = LSM_AUDIT_DATA_NONE;
3040 + }
3041 +
3042 +- /*
3043 +- * When in a RCU walk do the audit on the RCU retry. This is because
3044 +- * the collection of the dname in an inode audit message is not RCU
3045 +- * safe. Note this may drop some audits when the situation changes
3046 +- * during retry. However this is logically just as if the operation
3047 +- * happened a little later.
3048 +- */
3049 +- if ((a->type == LSM_AUDIT_DATA_INODE) &&
3050 +- (flags & MAY_NOT_BLOCK))
3051 +- return -ECHILD;
3052 +-
3053 + sad.tclass = tclass;
3054 + sad.requested = requested;
3055 + sad.ssid = ssid;
3056 +@@ -855,15 +843,14 @@ static int avc_update_node(struct selinux_avc *avc,
3057 + /*
3058 + * If we are in a non-blocking code path, e.g. VFS RCU walk,
3059 + * then we must not add permissions to a cache entry
3060 +- * because we cannot safely audit the denial. Otherwise,
3061 ++ * because we will not audit the denial. Otherwise,
3062 + * during the subsequent blocking retry (e.g. VFS ref walk), we
3063 + * will find the permissions already granted in the cache entry
3064 + * and won't audit anything at all, leading to silent denials in
3065 + * permissive mode that only appear when in enforcing mode.
3066 + *
3067 +- * See the corresponding handling in slow_avc_audit(), and the
3068 +- * logic in selinux_inode_permission for the MAY_NOT_BLOCK flag,
3069 +- * which is transliterated into AVC_NONBLOCKING.
3070 ++ * See the corresponding handling of MAY_NOT_BLOCK in avc_audit()
3071 ++ * and selinux_inode_permission().
3072 + */
3073 + if (flags & AVC_NONBLOCKING)
3074 + return 0;
3075 +@@ -1205,6 +1192,25 @@ int avc_has_perm(struct selinux_state *state, u32 ssid, u32 tsid, u16 tclass,
3076 + return rc;
3077 + }
3078 +
3079 ++int avc_has_perm_flags(struct selinux_state *state,
3080 ++ u32 ssid, u32 tsid, u16 tclass, u32 requested,
3081 ++ struct common_audit_data *auditdata,
3082 ++ int flags)
3083 ++{
3084 ++ struct av_decision avd;
3085 ++ int rc, rc2;
3086 ++
3087 ++ rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested,
3088 ++ (flags & MAY_NOT_BLOCK) ? AVC_NONBLOCKING : 0,
3089 ++ &avd);
3090 ++
3091 ++ rc2 = avc_audit(state, ssid, tsid, tclass, requested, &avd, rc,
3092 ++ auditdata, flags);
3093 ++ if (rc2)
3094 ++ return rc2;
3095 ++ return rc;
3096 ++}
3097 ++
3098 + u32 avc_policy_seqno(struct selinux_state *state)
3099 + {
3100 + return state->avc->avc_cache.latest_notif;
3101 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
3102 +index 9625b99e677f..39410913a694 100644
3103 +--- a/security/selinux/hooks.c
3104 ++++ b/security/selinux/hooks.c
3105 +@@ -2766,6 +2766,14 @@ static int selinux_mount(const char *dev_name,
3106 + return path_has_perm(cred, path, FILE__MOUNTON);
3107 + }
3108 +
3109 ++static int selinux_move_mount(const struct path *from_path,
3110 ++ const struct path *to_path)
3111 ++{
3112 ++ const struct cred *cred = current_cred();
3113 ++
3114 ++ return path_has_perm(cred, to_path, FILE__MOUNTON);
3115 ++}
3116 ++
3117 + static int selinux_umount(struct vfsmount *mnt, int flags)
3118 + {
3119 + const struct cred *cred = current_cred();
3120 +@@ -3008,14 +3016,14 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct inode *inode,
3121 + if (IS_ERR(isec))
3122 + return PTR_ERR(isec);
3123 +
3124 +- return avc_has_perm(&selinux_state,
3125 +- sid, isec->sid, isec->sclass, FILE__READ, &ad);
3126 ++ return avc_has_perm_flags(&selinux_state,
3127 ++ sid, isec->sid, isec->sclass, FILE__READ, &ad,
3128 ++ rcu ? MAY_NOT_BLOCK : 0);
3129 + }
3130 +
3131 + static noinline int audit_inode_permission(struct inode *inode,
3132 + u32 perms, u32 audited, u32 denied,
3133 +- int result,
3134 +- unsigned flags)
3135 ++ int result)
3136 + {
3137 + struct common_audit_data ad;
3138 + struct inode_security_struct *isec = selinux_inode(inode);
3139 +@@ -3026,7 +3034,7 @@ static noinline int audit_inode_permission(struct inode *inode,
3140 +
3141 + rc = slow_avc_audit(&selinux_state,
3142 + current_sid(), isec->sid, isec->sclass, perms,
3143 +- audited, denied, result, &ad, flags);
3144 ++ audited, denied, result, &ad);
3145 + if (rc)
3146 + return rc;
3147 + return 0;
3148 +@@ -3073,7 +3081,11 @@ static int selinux_inode_permission(struct inode *inode, int mask)
3149 + if (likely(!audited))
3150 + return rc;
3151 +
3152 +- rc2 = audit_inode_permission(inode, perms, audited, denied, rc, flags);
3153 ++ /* fall back to ref-walk if we have to generate audit */
3154 ++ if (flags & MAY_NOT_BLOCK)
3155 ++ return -ECHILD;
3156 ++
3157 ++ rc2 = audit_inode_permission(inode, perms, audited, denied, rc);
3158 + if (rc2)
3159 + return rc2;
3160 + return rc;
3161 +@@ -6834,6 +6846,8 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
3162 + LSM_HOOK_INIT(sb_clone_mnt_opts, selinux_sb_clone_mnt_opts),
3163 + LSM_HOOK_INIT(sb_add_mnt_opt, selinux_add_mnt_opt),
3164 +
3165 ++ LSM_HOOK_INIT(move_mount, selinux_move_mount),
3166 ++
3167 + LSM_HOOK_INIT(dentry_init_security, selinux_dentry_init_security),
3168 + LSM_HOOK_INIT(dentry_create_files_as, selinux_dentry_create_files_as),
3169 +
3170 +diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
3171 +index 7be0e1e90e8b..cf4cc3ef959b 100644
3172 +--- a/security/selinux/include/avc.h
3173 ++++ b/security/selinux/include/avc.h
3174 +@@ -100,8 +100,7 @@ static inline u32 avc_audit_required(u32 requested,
3175 + int slow_avc_audit(struct selinux_state *state,
3176 + u32 ssid, u32 tsid, u16 tclass,
3177 + u32 requested, u32 audited, u32 denied, int result,
3178 +- struct common_audit_data *a,
3179 +- unsigned flags);
3180 ++ struct common_audit_data *a);
3181 +
3182 + /**
3183 + * avc_audit - Audit the granting or denial of permissions.
3184 +@@ -135,9 +134,12 @@ static inline int avc_audit(struct selinux_state *state,
3185 + audited = avc_audit_required(requested, avd, result, 0, &denied);
3186 + if (likely(!audited))
3187 + return 0;
3188 ++ /* fall back to ref-walk if we have to generate audit */
3189 ++ if (flags & MAY_NOT_BLOCK)
3190 ++ return -ECHILD;
3191 + return slow_avc_audit(state, ssid, tsid, tclass,
3192 + requested, audited, denied, result,
3193 +- a, flags);
3194 ++ a);
3195 + }
3196 +
3197 + #define AVC_STRICT 1 /* Ignore permissive mode. */
3198 +@@ -153,6 +155,11 @@ int avc_has_perm(struct selinux_state *state,
3199 + u32 ssid, u32 tsid,
3200 + u16 tclass, u32 requested,
3201 + struct common_audit_data *auditdata);
3202 ++int avc_has_perm_flags(struct selinux_state *state,
3203 ++ u32 ssid, u32 tsid,
3204 ++ u16 tclass, u32 requested,
3205 ++ struct common_audit_data *auditdata,
3206 ++ int flags);
3207 +
3208 + int avc_has_extended_perms(struct selinux_state *state,
3209 + u32 ssid, u32 tsid, u16 tclass, u32 requested,
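
The SELinux hunks above move the "defer auditing during RCU walk" decision out of slow_avc_audit() and into the callers: avc_audit() and selinux_inode_permission() now return -ECHILD themselves when an audit record is needed but MAY_NOT_BLOCK is set, and the new avc_has_perm_flags() threads that flag through for selinux_inode_follow_link(). A standalone sketch of that fallback pattern, with illustrative stand-ins for the kernel constants and helpers:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MAY_NOT_BLOCK 0x80        /* illustrative value; the real one lives in linux/fs.h */

static int do_slow_audit(void)    /* stand-in for slow_avc_audit() */
{
    return 0;
}

/* If an audit record must be emitted but the caller is on a non-blocking
 * (RCU-walk) path, refuse with -ECHILD so the VFS retries in ref-walk
 * mode, where collecting the dentry name may sleep safely. */
static int audit_or_fall_back(unsigned int flags, bool need_audit)
{
    if (!need_audit)
        return 0;
    if (flags & MAY_NOT_BLOCK)
        return -ECHILD;
    return do_slow_audit();
}

int main(void)
{
    printf("%d\n", audit_or_fall_back(MAY_NOT_BLOCK, true));   /* -ECHILD: retry in ref-walk */
    printf("%d\n", audit_or_fall_back(0, true));               /* 0: audited normally */
    return 0;
}
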
3210 +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
3211 +index d07026a846b9..8712a91e0e3e 100644
3212 +--- a/sound/soc/soc-pcm.c
3213 ++++ b/sound/soc/soc-pcm.c
3214 +@@ -2297,42 +2297,81 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
3215 + }
3216 + EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger);
3217 +
3218 ++static int dpcm_dai_trigger_fe_be(struct snd_pcm_substream *substream,
3219 ++ int cmd, bool fe_first)
3220 ++{
3221 ++ struct snd_soc_pcm_runtime *fe = substream->private_data;
3222 ++ int ret;
3223 ++
3224 ++ /* call trigger on the frontend before the backend. */
3225 ++ if (fe_first) {
3226 ++ dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n",
3227 ++ fe->dai_link->name, cmd);
3228 ++
3229 ++ ret = soc_pcm_trigger(substream, cmd);
3230 ++ if (ret < 0)
3231 ++ return ret;
3232 ++
3233 ++ ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
3234 ++ return ret;
3235 ++ }
3236 ++
3237 ++ /* call trigger on the frontend after the backend. */
3238 ++ ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
3239 ++ if (ret < 0)
3240 ++ return ret;
3241 ++
3242 ++ dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n",
3243 ++ fe->dai_link->name, cmd);
3244 ++
3245 ++ ret = soc_pcm_trigger(substream, cmd);
3246 ++
3247 ++ return ret;
3248 ++}
3249 ++
3250 + static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
3251 + {
3252 + struct snd_soc_pcm_runtime *fe = substream->private_data;
3253 +- int stream = substream->stream, ret;
3254 ++ int stream = substream->stream;
3255 ++ int ret = 0;
3256 + enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream];
3257 +
3258 + fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
3259 +
3260 + switch (trigger) {
3261 + case SND_SOC_DPCM_TRIGGER_PRE:
3262 +- /* call trigger on the frontend before the backend. */
3263 +-
3264 +- dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n",
3265 +- fe->dai_link->name, cmd);
3266 +-
3267 +- ret = soc_pcm_trigger(substream, cmd);
3268 +- if (ret < 0) {
3269 +- dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
3270 +- goto out;
3271 ++ switch (cmd) {
3272 ++ case SNDRV_PCM_TRIGGER_START:
3273 ++ case SNDRV_PCM_TRIGGER_RESUME:
3274 ++ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
3275 ++ ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
3276 ++ break;
3277 ++ case SNDRV_PCM_TRIGGER_STOP:
3278 ++ case SNDRV_PCM_TRIGGER_SUSPEND:
3279 ++ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
3280 ++ ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
3281 ++ break;
3282 ++ default:
3283 ++ ret = -EINVAL;
3284 ++ break;
3285 + }
3286 +-
3287 +- ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
3288 + break;
3289 + case SND_SOC_DPCM_TRIGGER_POST:
3290 +- /* call trigger on the frontend after the backend. */
3291 +-
3292 +- ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
3293 +- if (ret < 0) {
3294 +- dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
3295 +- goto out;
3296 ++ switch (cmd) {
3297 ++ case SNDRV_PCM_TRIGGER_START:
3298 ++ case SNDRV_PCM_TRIGGER_RESUME:
3299 ++ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
3300 ++ ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
3301 ++ break;
3302 ++ case SNDRV_PCM_TRIGGER_STOP:
3303 ++ case SNDRV_PCM_TRIGGER_SUSPEND:
3304 ++ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
3305 ++ ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
3306 ++ break;
3307 ++ default:
3308 ++ ret = -EINVAL;
3309 ++ break;
3310 + }
3311 +-
3312 +- dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n",
3313 +- fe->dai_link->name, cmd);
3314 +-
3315 +- ret = soc_pcm_trigger(substream, cmd);
3316 + break;
3317 + case SND_SOC_DPCM_TRIGGER_BESPOKE:
3318 + /* bespoke trigger() - handles both FE and BEs */
3319 +@@ -2341,10 +2380,6 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
3320 + fe->dai_link->name, cmd);
3321 +
3322 + ret = soc_pcm_bespoke_trigger(substream, cmd);
3323 +- if (ret < 0) {
3324 +- dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
3325 +- goto out;
3326 +- }
3327 + break;
3328 + default:
3329 + dev_err(fe->dev, "ASoC: invalid trigger cmd %d for %s\n", cmd,
3330 +@@ -2353,6 +2388,12 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
3331 + goto out;
3332 + }
3333 +
3334 ++ if (ret < 0) {
3335 ++ dev_err(fe->dev, "ASoC: trigger FE cmd: %d failed: %d\n",
3336 ++ cmd, ret);
3337 ++ goto out;
3338 ++ }
3339 ++
3340 + switch (cmd) {
3341 + case SNDRV_PCM_TRIGGER_START:
3342 + case SNDRV_PCM_TRIGGER_RESUME:
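
The soc-pcm.c rework above factors the duplicated FE/BE sequencing into dpcm_dai_trigger_fe_be() and makes the order depend on the command: start-like commands keep the old order (frontend first for TRIGGER_PRE, backend first for TRIGGER_POST), while stop-like commands now run in the reverse order. A compile-only sketch of that decision, using stand-in enums rather than the real ALSA constants:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins; the real values come from the ALSA/ASoC headers. */
enum trigger_mode { TRIGGER_PRE, TRIGGER_POST };
enum trigger_cmd  { CMD_START, CMD_RESUME, CMD_PAUSE_RELEASE,
                    CMD_STOP, CMD_SUSPEND, CMD_PAUSE_PUSH };

/* The rule dpcm_fe_dai_do_trigger() now expresses through the fe_first
 * argument of dpcm_dai_trigger_fe_be(): start-like commands trigger the FE
 * first under TRIGGER_PRE and last under TRIGGER_POST; stop-like commands
 * invert that. */
static bool fe_goes_first(enum trigger_mode mode, enum trigger_cmd cmd)
{
    bool start_like = (cmd == CMD_START || cmd == CMD_RESUME ||
                       cmd == CMD_PAUSE_RELEASE);

    return (mode == TRIGGER_PRE) ? start_like : !start_like;
}

int main(void)
{
    printf("PRE/START : FE first? %d\n", fe_goes_first(TRIGGER_PRE, CMD_START));  /* 1 */
    printf("PRE/STOP  : FE first? %d\n", fe_goes_first(TRIGGER_PRE, CMD_STOP));   /* 0 */
    printf("POST/STOP : FE first? %d\n", fe_goes_first(TRIGGER_POST, CMD_STOP));  /* 1 */
    return 0;
}
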
3343 +diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
3344 +index ea0bcd58bcb9..2e388421c32f 100644
3345 +--- a/tools/bpf/bpftool/prog.c
3346 ++++ b/tools/bpf/bpftool/prog.c
3347 +@@ -500,7 +500,7 @@ static int do_dump(int argc, char **argv)
3348 + buf = (unsigned char *)(info->jited_prog_insns);
3349 + member_len = info->jited_prog_len;
3350 + } else { /* DUMP_XLATED */
3351 +- if (info->xlated_prog_len == 0) {
3352 ++ if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
3353 + p_err("error retrieving insn dump: kernel.kptr_restrict set?");
3354 + goto err_free;
3355 + }
3356 +diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config
3357 +index 0111d246d1ca..54a2857c2510 100644
3358 +--- a/tools/power/acpi/Makefile.config
3359 ++++ b/tools/power/acpi/Makefile.config
3360 +@@ -15,7 +15,7 @@ include $(srctree)/../../scripts/Makefile.include
3361 +
3362 + OUTPUT=$(srctree)/
3363 + ifeq ("$(origin O)", "command line")
3364 +- OUTPUT := $(O)/power/acpi/
3365 ++ OUTPUT := $(O)/tools/power/acpi/
3366 + endif
3367 + #$(info Determined 'OUTPUT' to be $(OUTPUT))
3368 +
3369 +diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
3370 +new file mode 100644
3371 +index 000000000000..07f5b462c2ef
3372 +--- /dev/null
3373 ++++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
3374 +@@ -0,0 +1,74 @@
3375 ++// SPDX-License-Identifier: GPL-2.0
3376 ++// Copyright (c) 2020 Cloudflare
3377 ++
3378 ++#include "test_progs.h"
3379 ++
3380 ++static int connected_socket_v4(void)
3381 ++{
3382 ++ struct sockaddr_in addr = {
3383 ++ .sin_family = AF_INET,
3384 ++ .sin_port = htons(80),
3385 ++ .sin_addr = { inet_addr("127.0.0.1") },
3386 ++ };
3387 ++ socklen_t len = sizeof(addr);
3388 ++ int s, repair, err;
3389 ++
3390 ++ s = socket(AF_INET, SOCK_STREAM, 0);
3391 ++ if (CHECK_FAIL(s == -1))
3392 ++ goto error;
3393 ++
3394 ++ repair = TCP_REPAIR_ON;
3395 ++ err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
3396 ++ if (CHECK_FAIL(err))
3397 ++ goto error;
3398 ++
3399 ++ err = connect(s, (struct sockaddr *)&addr, len);
3400 ++ if (CHECK_FAIL(err))
3401 ++ goto error;
3402 ++
3403 ++ repair = TCP_REPAIR_OFF_NO_WP;
3404 ++ err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
3405 ++ if (CHECK_FAIL(err))
3406 ++ goto error;
3407 ++
3408 ++ return s;
3409 ++error:
3410 ++ perror(__func__);
3411 ++ close(s);
3412 ++ return -1;
3413 ++}
3414 ++
3415 ++/* Create a map, populate it with one socket, and free the map. */
3416 ++static void test_sockmap_create_update_free(enum bpf_map_type map_type)
3417 ++{
3418 ++ const int zero = 0;
3419 ++ int s, map, err;
3420 ++
3421 ++ s = connected_socket_v4();
3422 ++ if (CHECK_FAIL(s == -1))
3423 ++ return;
3424 ++
3425 ++ map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0);
3426 ++ if (CHECK_FAIL(map == -1)) {
3427 ++ perror("bpf_create_map");
3428 ++ goto out;
3429 ++ }
3430 ++
3431 ++ err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST);
3432 ++ if (CHECK_FAIL(err)) {
3433 ++ perror("bpf_map_update");
3434 ++ goto out;
3435 ++ }
3436 ++
3437 ++out:
3438 ++ close(map);
3439 ++ close(s);
3440 ++}
3441 ++
3442 ++void test_sockmap_basic(void)
3443 ++{
3444 ++ if (test__start_subtest("sockmap create_update_free"))
3445 ++ test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP);
3446 ++ if (test__start_subtest("sockhash create_update_free"))
3447 ++ test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH);
3448 ++}
3449 +diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c
3450 +index 631d397ac81b..0a356aa91aa1 100644
3451 +--- a/virt/kvm/arm/aarch32.c
3452 ++++ b/virt/kvm/arm/aarch32.c
3453 +@@ -15,6 +15,10 @@
3454 + #include <asm/kvm_emulate.h>
3455 + #include <asm/kvm_hyp.h>
3456 +
3457 ++#define DFSR_FSC_EXTABT_LPAE 0x10
3458 ++#define DFSR_FSC_EXTABT_nLPAE 0x08
3459 ++#define DFSR_LPAE BIT(9)
3460 ++
3461 + /*
3462 + * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
3463 + */
3464 +@@ -181,10 +185,12 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
3465 +
3466 + /* Give the guest an IMPLEMENTATION DEFINED exception */
3467 + is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
3468 +- if (is_lpae)
3469 +- *fsr = 1 << 9 | 0x34;
3470 +- else
3471 +- *fsr = 0x14;
3472 ++ if (is_lpae) {
3473 ++ *fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
3474 ++ } else {
3475 ++ /* no need to shuffle FS[4] into DFSR[10] as its 0 */
3476 ++ *fsr = DFSR_FSC_EXTABT_nLPAE;
3477 ++ }
3478 + }
3479 +
3480 + void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
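
The aarch32.c hunk above replaces magic numbers with named DFSR fields and changes the injected fault status: with LPAE the value goes from 0x234 (LPAE format bit plus an IMPLEMENTATION DEFINED status) to 0x210 (LPAE format bit plus a synchronous external abort), and without LPAE from 0x14 to 0x08. A small arithmetic check of those encodings:

#include <stdint.h>
#include <stdio.h>

#define DFSR_LPAE             (1u << 9)
#define DFSR_FSC_EXTABT_LPAE  0x10u
#define DFSR_FSC_EXTABT_nLPAE 0x08u

int main(void)
{
    /* Values a 32-bit guest sees in its DFSR for an injected abort. */
    printf("old LPAE fsr:     0x%03x\n", DFSR_LPAE | 0x34u);                 /* 0x234 */
    printf("new LPAE fsr:     0x%03x\n", DFSR_LPAE | DFSR_FSC_EXTABT_LPAE);  /* 0x210 */
    printf("old non-LPAE fsr: 0x%03x\n", 0x14u);
    printf("new non-LPAE fsr: 0x%03x\n", DFSR_FSC_EXTABT_nLPAE);             /* 0x008 */
    return 0;
}
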
3481 +diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
3482 +index e2bb5bd60227..6b222100608f 100644
3483 +--- a/virt/kvm/arm/arch_timer.c
3484 ++++ b/virt/kvm/arm/arch_timer.c
3485 +@@ -805,6 +805,7 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
3486 + switch (treg) {
3487 + case TIMER_REG_TVAL:
3488 + val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff;
3489 ++ val &= lower_32_bits(val);
3490 + break;
3491 +
3492 + case TIMER_REG_CTL:
3493 +@@ -850,7 +851,7 @@ static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
3494 + {
3495 + switch (treg) {
3496 + case TIMER_REG_TVAL:
3497 +- timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + val;
3498 ++ timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + (s32)val;
3499 + break;
3500 +
3501 + case TIMER_REG_CTL:
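
The arch_timer.c changes above treat TVAL as the signed 32-bit quantity the architecture defines: writes are sign-extended before being added to the current counter, and reads are truncated to the low 32 bits. A quick demonstration of why the (s32) cast matters:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t now  = 1000000;      /* pretend value of the physical counter */
    uint32_t tval = 0xfffffff6;   /* guest writes TVAL = -10 */

    /* Zero-extending the write puts the compare value roughly 2^32 ticks
     * in the future; sign-extending it, as the patch now does, yields
     * now - 10 as intended. */
    uint64_t cval_wrong = now + tval;
    uint64_t cval_fixed = now + (int32_t)tval;

    printf("zero-extended cval: %llu\n", (unsigned long long)cval_wrong);
    printf("sign-extended cval: %llu\n", (unsigned long long)cval_fixed);   /* 999990 */
    return 0;
}
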
3502 +diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
3503 +index f23c9cd5684f..ce7fa37987e1 100644
3504 +--- a/virt/kvm/arm/mmu.c
3505 ++++ b/virt/kvm/arm/mmu.c
3506 +@@ -2147,7 +2147,8 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
3507 + if (!kvm->arch.pgd)
3508 + return 0;
3509 + trace_kvm_test_age_hva(hva);
3510 +- return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
3511 ++ return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
3512 ++ kvm_test_age_hva_handler, NULL);
3513 + }
3514 +
3515 + void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
3516 +diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
3517 +index 8731dfeced8b..4c08fd009768 100644
3518 +--- a/virt/kvm/arm/pmu.c
3519 ++++ b/virt/kvm/arm/pmu.c
3520 +@@ -480,25 +480,45 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
3521 + */
3522 + void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
3523 + {
3524 ++ struct kvm_pmu *pmu = &vcpu->arch.pmu;
3525 + int i;
3526 +- u64 type, enable, reg;
3527 +
3528 +- if (val == 0)
3529 ++ if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
3530 + return;
3531 +
3532 +- enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
3533 ++ /* Weed out disabled counters */
3534 ++ val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
3535 ++
3536 + for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
3537 ++ u64 type, reg;
3538 ++
3539 + if (!(val & BIT(i)))
3540 + continue;
3541 +- type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
3542 +- & ARMV8_PMU_EVTYPE_EVENT;
3543 +- if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
3544 +- && (enable & BIT(i))) {
3545 +- reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
3546 ++
3547 ++ /* PMSWINC only applies to ... SW_INC! */
3548 ++ type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
3549 ++ type &= ARMV8_PMU_EVTYPE_EVENT;
3550 ++ if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
3551 ++ continue;
3552 ++
3553 ++ /* increment this even SW_INC counter */
3554 ++ reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
3555 ++ reg = lower_32_bits(reg);
3556 ++ __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
3557 ++
3558 ++ if (reg) /* no overflow on the low part */
3559 ++ continue;
3560 ++
3561 ++ if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
3562 ++ /* increment the high counter */
3563 ++ reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
3564 + reg = lower_32_bits(reg);
3565 +- __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
3566 +- if (!reg)
3567 +- __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
3568 ++ __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
3569 ++ if (!reg) /* mark overflow on the high counter */
3570 ++ __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
3571 ++ } else {
3572 ++ /* mark overflow on low counter */
3573 ++ __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
3574 + }
3575 + }
3576 + }
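
The pmu.c rewrite above makes a software increment respect disabled counters and, for a chained pair, carry the low counter's wrap into the odd-numbered high counter instead of raising an overflow immediately. A sketch of that carry logic with plain integers (counter selection and system-register access are left out):

#include <stdint.h>
#include <stdio.h>

/* Carry behaviour the hunk implements for SW_INCR: bump the low 32-bit
 * counter; on wrap, either propagate into the high counter of a chained
 * pair (overflow flagged on the odd index) or flag overflow on the low
 * counter itself. */
static void sw_incr(uint32_t *lo, uint32_t *hi, int chained,
                    uint64_t *ovsset, unsigned int idx)
{
    *lo += 1;
    if (*lo)                          /* no carry out of the low half */
        return;

    if (chained) {
        *hi += 1;
        if (!*hi)                     /* high half wrapped as well */
            *ovsset |= 1ull << (idx + 1);
    } else {
        *ovsset |= 1ull << idx;
    }
}

int main(void)
{
    uint32_t lo = 0xffffffff, hi = 0;
    uint64_t ovsset = 0;

    sw_incr(&lo, &hi, 1, &ovsset, 0); /* chained: low wraps, high absorbs the carry */
    printf("lo=%u hi=%u ovsset=%#llx\n", lo, hi, (unsigned long long)ovsset);
    return 0;
}
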
3577 +diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
3578 +index 2be6b66b3856..f8ad7096555d 100644
3579 +--- a/virt/kvm/arm/vgic/vgic-its.c
3580 ++++ b/virt/kvm/arm/vgic/vgic-its.c
3581 +@@ -2472,7 +2472,8 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
3582 + target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
3583 + coll_id = val & KVM_ITS_CTE_ICID_MASK;
3584 +
3585 +- if (target_addr >= atomic_read(&kvm->online_vcpus))
3586 ++ if (target_addr != COLLECTION_NOT_MAPPED &&
3587 ++ target_addr >= atomic_read(&kvm->online_vcpus))
3588 + return -EINVAL;
3589 +
3590 + collection = find_collection(its, coll_id);