From: Mike Pagano <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.16 commit in: /
Date: Wed, 30 May 2018 11:44:53
Message-Id: 1527680678.8a5950d77db4cc1cc9e4b9b359bdd8d288d2167c.mpagano@gentoo
1 commit: 8a5950d77db4cc1cc9e4b9b359bdd8d288d2167c
2 Author: Mike Pagano <mpagano@gentoo.org>
3 AuthorDate: Wed May 30 11:44:38 2018 +0000
4 Commit: Mike Pagano <mpagano@gentoo.org>
5 CommitDate: Wed May 30 11:44:38 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8a5950d7
7
8 Linux patch 4.16.13
9
10 0000_README | 4 +
11 1012_linux-4.16.13.patch | 10200 +++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 10204 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 603fb6f..f199583 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -91,6 +91,10 @@ Patch: 1011_linux-4.16.12.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.16.12
21
22 +Patch: 1012_linux-4.16.13.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.16.13
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1012_linux-4.16.13.patch b/1012_linux-4.16.13.patch
31 new file mode 100644
32 index 0000000..8fb1dc5
33 --- /dev/null
34 +++ b/1012_linux-4.16.13.patch
35 @@ -0,0 +1,10200 @@
36 +diff --git a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
37 +index 4ca21c3a6fc9..460ef27b1008 100644
38 +--- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
39 ++++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
40 +@@ -20,6 +20,7 @@ Required properties :
41 + - "allwinner,sun50i-a64-ccu"
42 + - "allwinner,sun50i-a64-r-ccu"
43 + - "allwinner,sun50i-h5-ccu"
44 ++ - "allwinner,sun50i-h6-ccu"
45 + - "nextthing,gr8-ccu"
46 +
47 + - reg: Must contain the registers base address and length
48 +@@ -31,6 +32,9 @@ Required properties :
49 + - #clock-cells : must contain 1
50 + - #reset-cells : must contain 1
51 +
52 ++For the main CCU on H6, one more clock is needed:
53 ++- "iosc": the SoC's internal frequency oscillator
54 ++
55 + For the PRCM CCUs on A83T/H3/A64, two more clocks are needed:
56 + - "pll-periph": the SoC's peripheral PLL from the main CCU
57 + - "iosc": the SoC's internal frequency oscillator
58 +diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt
59 +index a6671bd2c85a..ae38a1ee9c29 100644
60 +--- a/Documentation/devicetree/bindings/display/msm/dsi.txt
61 ++++ b/Documentation/devicetree/bindings/display/msm/dsi.txt
62 +@@ -102,7 +102,11 @@ Required properties:
63 + - clocks: Phandles to device clocks. See [1] for details on clock bindings.
64 + - clock-names: the following clocks are required:
65 + * "iface"
66 ++ For 28nm HPM/LP, 28nm 8960 PHYs:
67 + - vddio-supply: phandle to vdd-io regulator device node
68 ++ For 20nm PHY:
69 ++- vddio-supply: phandle to vdd-io regulator device node
70 ++- vcca-supply: phandle to vcca regulator device node
71 +
72 + Optional properties:
73 + - qcom,dsi-phy-regulator-ldo-mode: Boolean value indicating if the LDO mode PHY
74 +diff --git a/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt
75 +index 47284f85ec80..c3f9826692bc 100644
76 +--- a/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt
77 ++++ b/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt
78 +@@ -20,7 +20,8 @@ Required subnode-properties:
79 + gpio: cpuclkoutgrp0, udlclkoutgrp0, i2c1grp0, i2c2grp0,
80 + i2c3grp0, i2s0grp0, i2s1grp0, i2srefclkgrp0, spi0grp0,
81 + spi1grp0, pciedebuggrp0, uart0grp0, uart0grp1, uart1grp0,
82 +- uart2grp0, uart2grp1, uart3grp0, uart4grp0, uart5grp0
83 ++ uart2grp0, uart2grp1, uart3grp0, uart4grp0, uart5grp0,
84 ++ uart5nocts
85 + cpuclkout: cpuclkoutgrp0
86 + udlclkout: udlclkoutgrp0
87 + i2c1: i2c1grp0
88 +@@ -37,7 +38,7 @@ Required subnode-properties:
89 + uart2: uart2grp0, uart2grp1
90 + uart3: uart3grp0
91 + uart4: uart4grp0
92 +- uart5: uart5grp0
93 ++ uart5: uart5grp0, uart5nocts
94 + nand: nandgrp0
95 + sdio0: sdio0grp0
96 + sdio1: sdio1grp0
97 +diff --git a/Makefile b/Makefile
98 +index ded9e8480d74..146e527a5e06 100644
99 +--- a/Makefile
100 ++++ b/Makefile
101 +@@ -1,7 +1,7 @@
102 + # SPDX-License-Identifier: GPL-2.0
103 + VERSION = 4
104 + PATCHLEVEL = 16
105 +-SUBLEVEL = 12
106 ++SUBLEVEL = 13
107 + EXTRAVERSION =
108 + NAME = Fearless Coyote
109 +
110 +diff --git a/arch/arm/boot/dts/at91-nattis-2-natte-2.dts b/arch/arm/boot/dts/at91-nattis-2-natte-2.dts
111 +index 3ea1d26e1c68..c457eff25911 100644
112 +--- a/arch/arm/boot/dts/at91-nattis-2-natte-2.dts
113 ++++ b/arch/arm/boot/dts/at91-nattis-2-natte-2.dts
114 +@@ -146,7 +146,7 @@
115 + };
116 +
117 + eeprom@50 {
118 +- compatible = "nxp,24c02";
119 ++ compatible = "nxp,se97b", "atmel,24c02";
120 + reg = <0x50>;
121 + pagesize = <16>;
122 + };
123 +diff --git a/arch/arm/boot/dts/at91-tse850-3.dts b/arch/arm/boot/dts/at91-tse850-3.dts
124 +index 9b82cc8843e1..97b227693658 100644
125 +--- a/arch/arm/boot/dts/at91-tse850-3.dts
126 ++++ b/arch/arm/boot/dts/at91-tse850-3.dts
127 +@@ -246,7 +246,7 @@
128 + };
129 +
130 + eeprom@50 {
131 +- compatible = "nxp,24c02", "atmel,24c02";
132 ++ compatible = "nxp,se97b", "atmel,24c02";
133 + reg = <0x50>;
134 + pagesize = <16>;
135 + };
136 +diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
137 +index 9d293decf8d3..8d9a0df207a4 100644
138 +--- a/arch/arm/boot/dts/bcm283x.dtsi
139 ++++ b/arch/arm/boot/dts/bcm283x.dtsi
140 +@@ -252,7 +252,7 @@
141 +
142 + jtag_gpio4: jtag_gpio4 {
143 + brcm,pins = <4 5 6 12 13>;
144 +- brcm,function = <BCM2835_FSEL_ALT4>;
145 ++ brcm,function = <BCM2835_FSEL_ALT5>;
146 + };
147 + jtag_gpio22: jtag_gpio22 {
148 + brcm,pins = <22 23 24 25 26 27>;
149 +@@ -397,8 +397,8 @@
150 +
151 + i2s: i2s@7e203000 {
152 + compatible = "brcm,bcm2835-i2s";
153 +- reg = <0x7e203000 0x20>,
154 +- <0x7e101098 0x02>;
155 ++ reg = <0x7e203000 0x24>;
156 ++ clocks = <&clocks BCM2835_CLOCK_PCM>;
157 +
158 + dmas = <&dma 2>,
159 + <&dma 3>;
160 +diff --git a/arch/arm/boot/dts/dra71-evm.dts b/arch/arm/boot/dts/dra71-evm.dts
161 +index 41c9132eb550..64363f75c01a 100644
162 +--- a/arch/arm/boot/dts/dra71-evm.dts
163 ++++ b/arch/arm/boot/dts/dra71-evm.dts
164 +@@ -24,13 +24,13 @@
165 +
166 + regulator-name = "vddshv8";
167 + regulator-min-microvolt = <1800000>;
168 +- regulator-max-microvolt = <3000000>;
169 ++ regulator-max-microvolt = <3300000>;
170 + regulator-boot-on;
171 + vin-supply = <&evm_5v0>;
172 +
173 + gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>;
174 + states = <1800000 0x0
175 +- 3000000 0x1>;
176 ++ 3300000 0x1>;
177 + };
178 +
179 + evm_1v8_sw: fixedregulator-evm_1v8 {
180 +diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
181 +index ae45af1ad062..3cc1fb9ce441 100644
182 +--- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
183 ++++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
184 +@@ -213,37 +213,37 @@
185 + &iomuxc {
186 + pinctrl_enet1: enet1grp {
187 + fsl,pins = <
188 +- MX7D_PAD_SD2_CD_B__ENET1_MDIO 0x3
189 +- MX7D_PAD_SD2_WP__ENET1_MDC 0x3
190 +- MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC 0x1
191 +- MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0 0x1
192 +- MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1 0x1
193 +- MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2 0x1
194 +- MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3 0x1
195 +- MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL 0x1
196 +- MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC 0x1
197 +- MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x1
198 +- MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x1
199 +- MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2 0x1
200 +- MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3 0x1
201 +- MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x1
202 ++ MX7D_PAD_SD2_CD_B__ENET1_MDIO 0x30
203 ++ MX7D_PAD_SD2_WP__ENET1_MDC 0x30
204 ++ MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC 0x11
205 ++ MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0 0x11
206 ++ MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1 0x11
207 ++ MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2 0x11
208 ++ MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3 0x11
209 ++ MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL 0x11
210 ++ MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC 0x11
211 ++ MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x11
212 ++ MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x11
213 ++ MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2 0x11
214 ++ MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3 0x11
215 ++ MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x11
216 + >;
217 + };
218 +
219 + pinctrl_enet2: enet2grp {
220 + fsl,pins = <
221 +- MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC 0x1
222 +- MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0 0x1
223 +- MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1 0x1
224 +- MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2 0x1
225 +- MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3 0x1
226 +- MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL 0x1
227 +- MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC 0x1
228 +- MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0 0x1
229 +- MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1 0x1
230 +- MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2 0x1
231 +- MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3 0x1
232 +- MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL 0x1
233 ++ MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC 0x11
234 ++ MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0 0x11
235 ++ MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1 0x11
236 ++ MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2 0x11
237 ++ MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3 0x11
238 ++ MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL 0x11
239 ++ MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC 0x11
240 ++ MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0 0x11
241 ++ MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1 0x11
242 ++ MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2 0x11
243 ++ MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3 0x11
244 ++ MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL 0x11
245 + >;
246 + };
247 +
248 +diff --git a/arch/arm/boot/dts/keystone-k2e-clocks.dtsi b/arch/arm/boot/dts/keystone-k2e-clocks.dtsi
249 +index 5e0e7d232161..f7592155a740 100644
250 +--- a/arch/arm/boot/dts/keystone-k2e-clocks.dtsi
251 ++++ b/arch/arm/boot/dts/keystone-k2e-clocks.dtsi
252 +@@ -42,7 +42,7 @@ clocks {
253 + domain-id = <0>;
254 + };
255 +
256 +- clkhyperlink0: clkhyperlink02350030 {
257 ++ clkhyperlink0: clkhyperlink0@2350030 {
258 + #clock-cells = <0>;
259 + compatible = "ti,keystone,psc-clock";
260 + clocks = <&chipclk12>;
261 +diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
262 +index eb374956294f..9a02d03b23c2 100644
263 +--- a/arch/arm/boot/dts/r8a7791-porter.dts
264 ++++ b/arch/arm/boot/dts/r8a7791-porter.dts
265 +@@ -425,7 +425,7 @@
266 + "dclkin.0", "dclkin.1";
267 +
268 + ports {
269 +- port@1 {
270 ++ port@0 {
271 + endpoint {
272 + remote-endpoint = <&adv7511_in>;
273 + };
274 +diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
275 +index c42ca7022e8c..486d4e7433ed 100644
276 +--- a/arch/arm/boot/dts/socfpga.dtsi
277 ++++ b/arch/arm/boot/dts/socfpga.dtsi
278 +@@ -831,7 +831,7 @@
279 + timer@fffec600 {
280 + compatible = "arm,cortex-a9-twd-timer";
281 + reg = <0xfffec600 0x100>;
282 +- interrupts = <1 13 0xf04>;
283 ++ interrupts = <1 13 0xf01>;
284 + clocks = <&mpu_periph_clk>;
285 + };
286 +
287 +diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
288 +index 4f2f2eea0755..5df34345a354 100644
289 +--- a/arch/arm/boot/dts/sun4i-a10.dtsi
290 ++++ b/arch/arm/boot/dts/sun4i-a10.dtsi
291 +@@ -76,7 +76,7 @@
292 + allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
293 + clocks = <&ccu CLK_AHB_LCD0>, <&ccu CLK_AHB_HDMI0>,
294 + <&ccu CLK_AHB_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
295 +- <&ccu CLK_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
296 ++ <&ccu CLK_DE_BE0>, <&ccu CLK_DE_FE0>,
297 + <&ccu CLK_TCON0_CH1>, <&ccu CLK_HDMI>,
298 + <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>;
299 + status = "disabled";
300 +@@ -88,7 +88,7 @@
301 + allwinner,pipeline = "de_fe0-de_be0-lcd0";
302 + clocks = <&ccu CLK_AHB_LCD0>, <&ccu CLK_AHB_DE_BE0>,
303 + <&ccu CLK_AHB_DE_FE0>, <&ccu CLK_DE_BE0>,
304 +- <&ccu CLK_AHB_DE_FE0>, <&ccu CLK_TCON0_CH0>,
305 ++ <&ccu CLK_DE_FE0>, <&ccu CLK_TCON0_CH0>,
306 + <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>;
307 + status = "disabled";
308 + };
309 +@@ -99,7 +99,7 @@
310 + allwinner,pipeline = "de_fe0-de_be0-lcd0-tve0";
311 + clocks = <&ccu CLK_AHB_TVE0>, <&ccu CLK_AHB_LCD0>,
312 + <&ccu CLK_AHB_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
313 +- <&ccu CLK_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
314 ++ <&ccu CLK_DE_BE0>, <&ccu CLK_DE_FE0>,
315 + <&ccu CLK_TCON0_CH1>, <&ccu CLK_DRAM_TVE0>,
316 + <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>;
317 + status = "disabled";
318 +diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
319 +index 0a6f7952bbb1..48b85653ad66 100644
320 +--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
321 ++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
322 +@@ -497,8 +497,8 @@
323 + blsp2_spi5: spi@75ba000{
324 + compatible = "qcom,spi-qup-v2.2.1";
325 + reg = <0x075ba000 0x600>;
326 +- interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
327 +- clocks = <&gcc GCC_BLSP2_QUP5_SPI_APPS_CLK>,
328 ++ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
329 ++ clocks = <&gcc GCC_BLSP2_QUP6_SPI_APPS_CLK>,
330 + <&gcc GCC_BLSP2_AHB_CLK>;
331 + clock-names = "core", "iface";
332 + pinctrl-names = "default", "sleep";
333 +diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
334 +index 9ef0797380cb..f9b0b09153e0 100644
335 +--- a/arch/arm64/include/asm/atomic_lse.h
336 ++++ b/arch/arm64/include/asm/atomic_lse.h
337 +@@ -117,7 +117,7 @@ static inline void atomic_and(int i, atomic_t *v)
338 + /* LSE atomics */
339 + " mvn %w[i], %w[i]\n"
340 + " stclr %w[i], %[v]")
341 +- : [i] "+r" (w0), [v] "+Q" (v->counter)
342 ++ : [i] "+&r" (w0), [v] "+Q" (v->counter)
343 + : "r" (x1)
344 + : __LL_SC_CLOBBERS);
345 + }
346 +@@ -135,7 +135,7 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v) \
347 + /* LSE atomics */ \
348 + " mvn %w[i], %w[i]\n" \
349 + " ldclr" #mb " %w[i], %w[i], %[v]") \
350 +- : [i] "+r" (w0), [v] "+Q" (v->counter) \
351 ++ : [i] "+&r" (w0), [v] "+Q" (v->counter) \
352 + : "r" (x1) \
353 + : __LL_SC_CLOBBERS, ##cl); \
354 + \
355 +@@ -161,7 +161,7 @@ static inline void atomic_sub(int i, atomic_t *v)
356 + /* LSE atomics */
357 + " neg %w[i], %w[i]\n"
358 + " stadd %w[i], %[v]")
359 +- : [i] "+r" (w0), [v] "+Q" (v->counter)
360 ++ : [i] "+&r" (w0), [v] "+Q" (v->counter)
361 + : "r" (x1)
362 + : __LL_SC_CLOBBERS);
363 + }
364 +@@ -180,7 +180,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \
365 + " neg %w[i], %w[i]\n" \
366 + " ldadd" #mb " %w[i], w30, %[v]\n" \
367 + " add %w[i], %w[i], w30") \
368 +- : [i] "+r" (w0), [v] "+Q" (v->counter) \
369 ++ : [i] "+&r" (w0), [v] "+Q" (v->counter) \
370 + : "r" (x1) \
371 + : __LL_SC_CLOBBERS , ##cl); \
372 + \
373 +@@ -207,7 +207,7 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
374 + /* LSE atomics */ \
375 + " neg %w[i], %w[i]\n" \
376 + " ldadd" #mb " %w[i], %w[i], %[v]") \
377 +- : [i] "+r" (w0), [v] "+Q" (v->counter) \
378 ++ : [i] "+&r" (w0), [v] "+Q" (v->counter) \
379 + : "r" (x1) \
380 + : __LL_SC_CLOBBERS, ##cl); \
381 + \
382 +@@ -314,7 +314,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
383 + /* LSE atomics */
384 + " mvn %[i], %[i]\n"
385 + " stclr %[i], %[v]")
386 +- : [i] "+r" (x0), [v] "+Q" (v->counter)
387 ++ : [i] "+&r" (x0), [v] "+Q" (v->counter)
388 + : "r" (x1)
389 + : __LL_SC_CLOBBERS);
390 + }
391 +@@ -332,7 +332,7 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
392 + /* LSE atomics */ \
393 + " mvn %[i], %[i]\n" \
394 + " ldclr" #mb " %[i], %[i], %[v]") \
395 +- : [i] "+r" (x0), [v] "+Q" (v->counter) \
396 ++ : [i] "+&r" (x0), [v] "+Q" (v->counter) \
397 + : "r" (x1) \
398 + : __LL_SC_CLOBBERS, ##cl); \
399 + \
400 +@@ -358,7 +358,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
401 + /* LSE atomics */
402 + " neg %[i], %[i]\n"
403 + " stadd %[i], %[v]")
404 +- : [i] "+r" (x0), [v] "+Q" (v->counter)
405 ++ : [i] "+&r" (x0), [v] "+Q" (v->counter)
406 + : "r" (x1)
407 + : __LL_SC_CLOBBERS);
408 + }
409 +@@ -377,7 +377,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
410 + " neg %[i], %[i]\n" \
411 + " ldadd" #mb " %[i], x30, %[v]\n" \
412 + " add %[i], %[i], x30") \
413 +- : [i] "+r" (x0), [v] "+Q" (v->counter) \
414 ++ : [i] "+&r" (x0), [v] "+Q" (v->counter) \
415 + : "r" (x1) \
416 + : __LL_SC_CLOBBERS, ##cl); \
417 + \
418 +@@ -404,7 +404,7 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
419 + /* LSE atomics */ \
420 + " neg %[i], %[i]\n" \
421 + " ldadd" #mb " %[i], %[i], %[v]") \
422 +- : [i] "+r" (x0), [v] "+Q" (v->counter) \
423 ++ : [i] "+&r" (x0), [v] "+Q" (v->counter) \
424 + : "r" (x1) \
425 + : __LL_SC_CLOBBERS, ##cl); \
426 + \
427 +@@ -435,7 +435,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
428 + " sub x30, x30, %[ret]\n"
429 + " cbnz x30, 1b\n"
430 + "2:")
431 +- : [ret] "+r" (x0), [v] "+Q" (v->counter)
432 ++ : [ret] "+&r" (x0), [v] "+Q" (v->counter)
433 + :
434 + : __LL_SC_CLOBBERS, "cc", "memory");
435 +
436 +@@ -516,7 +516,7 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
437 + " eor %[old1], %[old1], %[oldval1]\n" \
438 + " eor %[old2], %[old2], %[oldval2]\n" \
439 + " orr %[old1], %[old1], %[old2]") \
440 +- : [old1] "+r" (x0), [old2] "+r" (x1), \
441 ++ : [old1] "+&r" (x0), [old2] "+&r" (x1), \
442 + [v] "+Q" (*(unsigned long *)ptr) \
443 + : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
444 + [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
445 +diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
446 +index 66be504edb6c..d894a20b70b2 100644
447 +--- a/arch/arm64/kernel/arm64ksyms.c
448 ++++ b/arch/arm64/kernel/arm64ksyms.c
449 +@@ -75,3 +75,11 @@ NOKPROBE_SYMBOL(_mcount);
450 + /* arm-smccc */
451 + EXPORT_SYMBOL(__arm_smccc_smc);
452 + EXPORT_SYMBOL(__arm_smccc_hvc);
453 ++
454 ++ /* tishift.S */
455 ++extern long long __ashlti3(long long a, int b);
456 ++EXPORT_SYMBOL(__ashlti3);
457 ++extern long long __ashrti3(long long a, int b);
458 ++EXPORT_SYMBOL(__ashrti3);
459 ++extern long long __lshrti3(long long a, int b);
460 ++EXPORT_SYMBOL(__lshrti3);
461 +diff --git a/arch/arm64/lib/tishift.S b/arch/arm64/lib/tishift.S
462 +index d3db9b2cd479..0fdff97794de 100644
463 +--- a/arch/arm64/lib/tishift.S
464 ++++ b/arch/arm64/lib/tishift.S
465 +@@ -1,17 +1,6 @@
466 +-/*
467 +- * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
468 ++/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
469 + *
470 +- * This program is free software; you can redistribute it and/or modify
471 +- * it under the terms of the GNU General Public License version 2 as
472 +- * published by the Free Software Foundation.
473 +- *
474 +- * This program is distributed in the hope that it will be useful,
475 +- * but WITHOUT ANY WARRANTY; without even the implied warranty of
476 +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
477 +- * GNU General Public License for more details.
478 +- *
479 +- * You should have received a copy of the GNU General Public License
480 +- * along with this program. If not, see <http://www.gnu.org/licenses/>.
481 ++ * Copyright (C) 2017-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
482 + */
483 +
484 + #include <linux/linkage.h>
485 +diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c
486 +index 84938fdbbada..908d58347790 100644
487 +--- a/arch/m68k/coldfire/device.c
488 ++++ b/arch/m68k/coldfire/device.c
489 +@@ -135,7 +135,11 @@ static struct platform_device mcf_fec0 = {
490 + .id = 0,
491 + .num_resources = ARRAY_SIZE(mcf_fec0_resources),
492 + .resource = mcf_fec0_resources,
493 +- .dev.platform_data = FEC_PDATA,
494 ++ .dev = {
495 ++ .dma_mask = &mcf_fec0.dev.coherent_dma_mask,
496 ++ .coherent_dma_mask = DMA_BIT_MASK(32),
497 ++ .platform_data = FEC_PDATA,
498 ++ }
499 + };
500 +
501 + #ifdef MCFFEC_BASE1
502 +@@ -167,7 +171,11 @@ static struct platform_device mcf_fec1 = {
503 + .id = 1,
504 + .num_resources = ARRAY_SIZE(mcf_fec1_resources),
505 + .resource = mcf_fec1_resources,
506 +- .dev.platform_data = FEC_PDATA,
507 ++ .dev = {
508 ++ .dma_mask = &mcf_fec1.dev.coherent_dma_mask,
509 ++ .coherent_dma_mask = DMA_BIT_MASK(32),
510 ++ .platform_data = FEC_PDATA,
511 ++ }
512 + };
513 + #endif /* MCFFEC_BASE1 */
514 + #endif /* CONFIG_FEC */
515 +diff --git a/arch/mips/boot/compressed/uart-16550.c b/arch/mips/boot/compressed/uart-16550.c
516 +index b3043c08f769..aee8d7b8f091 100644
517 +--- a/arch/mips/boot/compressed/uart-16550.c
518 ++++ b/arch/mips/boot/compressed/uart-16550.c
519 +@@ -18,9 +18,9 @@
520 + #define PORT(offset) (CKSEG1ADDR(AR7_REGS_UART0) + (4 * offset))
521 + #endif
522 +
523 +-#if defined(CONFIG_MACH_JZ4740) || defined(CONFIG_MACH_JZ4780)
524 +-#include <asm/mach-jz4740/base.h>
525 +-#define PORT(offset) (CKSEG1ADDR(JZ4740_UART0_BASE_ADDR) + (4 * offset))
526 ++#ifdef CONFIG_MACH_INGENIC
527 ++#define INGENIC_UART0_BASE_ADDR 0x10030000
528 ++#define PORT(offset) (CKSEG1ADDR(INGENIC_UART0_BASE_ADDR) + (4 * offset))
529 + #endif
530 +
531 + #ifdef CONFIG_CPU_XLR
532 +diff --git a/arch/mips/boot/dts/xilfpga/Makefile b/arch/mips/boot/dts/xilfpga/Makefile
533 +index 9987e0e378c5..69ca00590b8d 100644
534 +--- a/arch/mips/boot/dts/xilfpga/Makefile
535 ++++ b/arch/mips/boot/dts/xilfpga/Makefile
536 +@@ -1,4 +1,2 @@
537 + # SPDX-License-Identifier: GPL-2.0
538 + dtb-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += nexys4ddr.dtb
539 +-
540 +-obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
541 +diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
542 +index d99f5242169e..b3aec101a65d 100644
543 +--- a/arch/mips/cavium-octeon/octeon-irq.c
544 ++++ b/arch/mips/cavium-octeon/octeon-irq.c
545 +@@ -2271,7 +2271,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
546 +
547 + parent_irq = irq_of_parse_and_map(ciu_node, 0);
548 + if (!parent_irq) {
549 +- pr_err("ERROR: Couldn't acquire parent_irq for %s\n.",
550 ++ pr_err("ERROR: Couldn't acquire parent_irq for %s\n",
551 + ciu_node->name);
552 + return -EINVAL;
553 + }
554 +@@ -2283,7 +2283,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
555 +
556 + addr = of_get_address(ciu_node, 0, NULL, NULL);
557 + if (!addr) {
558 +- pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name);
559 ++ pr_err("ERROR: Couldn't acquire reg(0) %s\n", ciu_node->name);
560 + return -EINVAL;
561 + }
562 + host_data->raw_reg = (u64)phys_to_virt(
563 +@@ -2291,7 +2291,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
564 +
565 + addr = of_get_address(ciu_node, 1, NULL, NULL);
566 + if (!addr) {
567 +- pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name);
568 ++ pr_err("ERROR: Couldn't acquire reg(1) %s\n", ciu_node->name);
569 + return -EINVAL;
570 + }
571 + host_data->en_reg = (u64)phys_to_virt(
572 +@@ -2299,7 +2299,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
573 +
574 + r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
575 + if (r) {
576 +- pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.",
577 ++ pr_err("ERROR: Couldn't read cavium,max-bits from %s\n",
578 + ciu_node->name);
579 + return r;
580 + }
581 +@@ -2309,7 +2309,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
582 + &octeon_irq_domain_cib_ops,
583 + host_data);
584 + if (!cib_domain) {
585 +- pr_err("ERROR: Couldn't irq_domain_add_linear()\n.");
586 ++ pr_err("ERROR: Couldn't irq_domain_add_linear()\n");
587 + return -ENOMEM;
588 + }
589 +
590 +diff --git a/arch/mips/generic/Platform b/arch/mips/generic/Platform
591 +index b51432dd10b6..0dd0d5d460a5 100644
592 +--- a/arch/mips/generic/Platform
593 ++++ b/arch/mips/generic/Platform
594 +@@ -16,3 +16,4 @@ all-$(CONFIG_MIPS_GENERIC) := vmlinux.gz.itb
595 + its-y := vmlinux.its.S
596 + its-$(CONFIG_FIT_IMAGE_FDT_BOSTON) += board-boston.its.S
597 + its-$(CONFIG_FIT_IMAGE_FDT_NI169445) += board-ni169445.its.S
598 ++its-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += board-xilfpga.its.S
599 +diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
600 +index aa3800c82332..d99ca862dae3 100644
601 +--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
602 ++++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
603 +@@ -167,7 +167,7 @@
604 + #define AR71XX_AHB_DIV_MASK 0x7
605 +
606 + #define AR724X_PLL_REG_CPU_CONFIG 0x00
607 +-#define AR724X_PLL_REG_PCIE_CONFIG 0x18
608 ++#define AR724X_PLL_REG_PCIE_CONFIG 0x10
609 +
610 + #define AR724X_PLL_FB_SHIFT 0
611 + #define AR724X_PLL_FB_MASK 0x3ff
612 +diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
613 +index 0b23b1ad99e6..8d098b9f395c 100644
614 +--- a/arch/mips/kernel/ptrace.c
615 ++++ b/arch/mips/kernel/ptrace.c
616 +@@ -463,7 +463,7 @@ static int fpr_get_msa(struct task_struct *target,
617 + /*
618 + * Copy the floating-point context to the supplied NT_PRFPREG buffer.
619 + * Choose the appropriate helper for general registers, and then copy
620 +- * the FCSR register separately.
621 ++ * the FCSR and FIR registers separately.
622 + */
623 + static int fpr_get(struct task_struct *target,
624 + const struct user_regset *regset,
625 +@@ -471,6 +471,7 @@ static int fpr_get(struct task_struct *target,
626 + void *kbuf, void __user *ubuf)
627 + {
628 + const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
629 ++ const int fir_pos = fcr31_pos + sizeof(u32);
630 + int err;
631 +
632 + if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
633 +@@ -483,6 +484,12 @@ static int fpr_get(struct task_struct *target,
634 + err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
635 + &target->thread.fpu.fcr31,
636 + fcr31_pos, fcr31_pos + sizeof(u32));
637 ++ if (err)
638 ++ return err;
639 ++
640 ++ err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
641 ++ &boot_cpu_data.fpu_id,
642 ++ fir_pos, fir_pos + sizeof(u32));
643 +
644 + return err;
645 + }
646 +@@ -531,7 +538,8 @@ static int fpr_set_msa(struct task_struct *target,
647 + /*
648 + * Copy the supplied NT_PRFPREG buffer to the floating-point context.
649 + * Choose the appropriate helper for general registers, and then copy
650 +- * the FCSR register separately.
651 ++ * the FCSR register separately. Ignore the incoming FIR register
652 ++ * contents though, as the register is read-only.
653 + *
654 + * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
655 + * which is supposed to have been guaranteed by the kernel before
656 +@@ -545,6 +553,7 @@ static int fpr_set(struct task_struct *target,
657 + const void *kbuf, const void __user *ubuf)
658 + {
659 + const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
660 ++ const int fir_pos = fcr31_pos + sizeof(u32);
661 + u32 fcr31;
662 + int err;
663 +
664 +@@ -572,6 +581,11 @@ static int fpr_set(struct task_struct *target,
665 + ptrace_setfcr31(target, fcr31);
666 + }
667 +
668 ++ if (count > 0)
669 ++ err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
670 ++ fir_pos,
671 ++ fir_pos + sizeof(u32));
672 ++
673 + return err;
674 + }
675 +
676 +@@ -793,7 +807,7 @@ long arch_ptrace(struct task_struct *child, long request,
677 + fregs = get_fpu_regs(child);
678 +
679 + #ifdef CONFIG_32BIT
680 +- if (test_thread_flag(TIF_32BIT_FPREGS)) {
681 ++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
682 + /*
683 + * The odd registers are actually the high
684 + * order bits of the values stored in the even
685 +@@ -888,7 +902,7 @@ long arch_ptrace(struct task_struct *child, long request,
686 +
687 + init_fp_ctx(child);
688 + #ifdef CONFIG_32BIT
689 +- if (test_thread_flag(TIF_32BIT_FPREGS)) {
690 ++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
691 + /*
692 + * The odd registers are actually the high
693 + * order bits of the values stored in the even
694 +diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
695 +index 2b9260f92ccd..656a137c1fe2 100644
696 +--- a/arch/mips/kernel/ptrace32.c
697 ++++ b/arch/mips/kernel/ptrace32.c
698 +@@ -99,7 +99,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
699 + break;
700 + }
701 + fregs = get_fpu_regs(child);
702 +- if (test_thread_flag(TIF_32BIT_FPREGS)) {
703 ++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
704 + /*
705 + * The odd registers are actually the high
706 + * order bits of the values stored in the even
707 +@@ -212,7 +212,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
708 + sizeof(child->thread.fpu));
709 + child->thread.fpu.fcr31 = 0;
710 + }
711 +- if (test_thread_flag(TIF_32BIT_FPREGS)) {
712 ++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
713 + /*
714 + * The odd registers are actually the high
715 + * order bits of the values stored in the even
716 +diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
717 +index 2549fdd27ee1..0f725e9cee8f 100644
718 +--- a/arch/mips/kvm/mips.c
719 ++++ b/arch/mips/kvm/mips.c
720 +@@ -45,7 +45,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
721 + { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
722 + { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
723 + { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
724 +- { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
725 ++ { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
726 + { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
727 + { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
728 + { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
729 +diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
730 +index 6f534b209971..e12dfa48b478 100644
731 +--- a/arch/mips/mm/c-r4k.c
732 ++++ b/arch/mips/mm/c-r4k.c
733 +@@ -851,9 +851,12 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
734 + /*
735 + * Either no secondary cache or the available caches don't have the
736 + * subset property so we have to flush the primary caches
737 +- * explicitly
738 ++ * explicitly.
739 ++ * If we would need IPI to perform an INDEX-type operation, then
740 ++ * we have to use the HIT-type alternative as IPI cannot be used
741 ++ * here due to interrupts possibly being disabled.
742 + */
743 +- if (size >= dcache_size) {
744 ++ if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
745 + r4k_blast_dcache();
746 + } else {
747 + R4600_HIT_CACHEOP_WAR_IMPL;
748 +@@ -890,7 +893,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
749 + return;
750 + }
751 +
752 +- if (size >= dcache_size) {
753 ++ if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
754 + r4k_blast_dcache();
755 + } else {
756 + R4600_HIT_CACHEOP_WAR_IMPL;
757 +diff --git a/arch/powerpc/include/asm/book3s/64/slice.h b/arch/powerpc/include/asm/book3s/64/slice.h
758 +new file mode 100644
759 +index 000000000000..db0dedab65ee
760 +--- /dev/null
761 ++++ b/arch/powerpc/include/asm/book3s/64/slice.h
762 +@@ -0,0 +1,27 @@
763 ++/* SPDX-License-Identifier: GPL-2.0 */
764 ++#ifndef _ASM_POWERPC_BOOK3S_64_SLICE_H
765 ++#define _ASM_POWERPC_BOOK3S_64_SLICE_H
766 ++
767 ++#ifdef CONFIG_PPC_MM_SLICES
768 ++
769 ++#define SLICE_LOW_SHIFT 28
770 ++#define SLICE_LOW_TOP (0x100000000ul)
771 ++#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
772 ++#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
773 ++
774 ++#define SLICE_HIGH_SHIFT 40
775 ++#define SLICE_NUM_HIGH (H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT)
776 ++#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
777 ++
778 ++#else /* CONFIG_PPC_MM_SLICES */
779 ++
780 ++#define get_slice_psize(mm, addr) ((mm)->context.user_psize)
781 ++#define slice_set_user_psize(mm, psize) \
782 ++do { \
783 ++ (mm)->context.user_psize = (psize); \
784 ++ (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
785 ++} while (0)
786 ++
787 ++#endif /* CONFIG_PPC_MM_SLICES */
788 ++
789 ++#endif /* _ASM_POWERPC_BOOK3S_64_SLICE_H */
790 +diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
791 +index c6d3078bd8c3..b8b0be8f1a07 100644
792 +--- a/arch/powerpc/include/asm/irq_work.h
793 ++++ b/arch/powerpc/include/asm/irq_work.h
794 +@@ -6,5 +6,6 @@ static inline bool arch_irq_work_has_interrupt(void)
795 + {
796 + return true;
797 + }
798 ++extern void arch_irq_work_raise(void);
799 +
800 + #endif /* _ASM_POWERPC_IRQ_WORK_H */
801 +diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
802 +index 2f806e329648..b324ab46d838 100644
803 +--- a/arch/powerpc/include/asm/mmu-8xx.h
804 ++++ b/arch/powerpc/include/asm/mmu-8xx.h
805 +@@ -191,6 +191,12 @@ typedef struct {
806 + unsigned int id;
807 + unsigned int active;
808 + unsigned long vdso_base;
809 ++#ifdef CONFIG_PPC_MM_SLICES
810 ++ u16 user_psize; /* page size index */
811 ++ u64 low_slices_psize; /* page size encodings */
812 ++ unsigned char high_slices_psize[0];
813 ++ unsigned long slb_addr_limit;
814 ++#endif
815 + } mm_context_t;
816 +
817 + #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
818 +diff --git a/arch/powerpc/include/asm/nohash/32/slice.h b/arch/powerpc/include/asm/nohash/32/slice.h
819 +new file mode 100644
820 +index 000000000000..95d532e18092
821 +--- /dev/null
822 ++++ b/arch/powerpc/include/asm/nohash/32/slice.h
823 +@@ -0,0 +1,18 @@
824 ++/* SPDX-License-Identifier: GPL-2.0 */
825 ++#ifndef _ASM_POWERPC_NOHASH_32_SLICE_H
826 ++#define _ASM_POWERPC_NOHASH_32_SLICE_H
827 ++
828 ++#ifdef CONFIG_PPC_MM_SLICES
829 ++
830 ++#define SLICE_LOW_SHIFT 28
831 ++#define SLICE_LOW_TOP (0x100000000ull)
832 ++#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
833 ++#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
834 ++
835 ++#define SLICE_HIGH_SHIFT 0
836 ++#define SLICE_NUM_HIGH 0ul
837 ++#define GET_HIGH_SLICE_INDEX(addr) (addr & 0)
838 ++
839 ++#endif /* CONFIG_PPC_MM_SLICES */
840 ++
841 ++#endif /* _ASM_POWERPC_NOHASH_32_SLICE_H */
842 +diff --git a/arch/powerpc/include/asm/nohash/64/slice.h b/arch/powerpc/include/asm/nohash/64/slice.h
843 +new file mode 100644
844 +index 000000000000..ad0d6e3cc1c5
845 +--- /dev/null
846 ++++ b/arch/powerpc/include/asm/nohash/64/slice.h
847 +@@ -0,0 +1,12 @@
848 ++/* SPDX-License-Identifier: GPL-2.0 */
849 ++#ifndef _ASM_POWERPC_NOHASH_64_SLICE_H
850 ++#define _ASM_POWERPC_NOHASH_64_SLICE_H
851 ++
852 ++#ifdef CONFIG_PPC_64K_PAGES
853 ++#define get_slice_psize(mm, addr) MMU_PAGE_64K
854 ++#else /* CONFIG_PPC_64K_PAGES */
855 ++#define get_slice_psize(mm, addr) MMU_PAGE_4K
856 ++#endif /* !CONFIG_PPC_64K_PAGES */
857 ++#define slice_set_user_psize(mm, psize) do { BUG(); } while (0)
858 ++
859 ++#endif /* _ASM_POWERPC_NOHASH_64_SLICE_H */
860 +diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
861 +index 8da5d4c1cab2..d5f1c41b7dba 100644
862 +--- a/arch/powerpc/include/asm/page.h
863 ++++ b/arch/powerpc/include/asm/page.h
864 +@@ -344,5 +344,6 @@ typedef struct page *pgtable_t;
865 +
866 + #include <asm-generic/memory_model.h>
867 + #endif /* __ASSEMBLY__ */
868 ++#include <asm/slice.h>
869 +
870 + #endif /* _ASM_POWERPC_PAGE_H */
871 +diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
872 +index 56234c6fcd61..af04acdb873f 100644
873 +--- a/arch/powerpc/include/asm/page_64.h
874 ++++ b/arch/powerpc/include/asm/page_64.h
875 +@@ -86,65 +86,6 @@ extern u64 ppc64_pft_size;
876 +
877 + #endif /* __ASSEMBLY__ */
878 +
879 +-#ifdef CONFIG_PPC_MM_SLICES
880 +-
881 +-#define SLICE_LOW_SHIFT 28
882 +-#define SLICE_HIGH_SHIFT 40
883 +-
884 +-#define SLICE_LOW_TOP (0x100000000ul)
885 +-#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
886 +-#define SLICE_NUM_HIGH (H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT)
887 +-
888 +-#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
889 +-#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
890 +-
891 +-#ifndef __ASSEMBLY__
892 +-struct mm_struct;
893 +-
894 +-extern unsigned long slice_get_unmapped_area(unsigned long addr,
895 +- unsigned long len,
896 +- unsigned long flags,
897 +- unsigned int psize,
898 +- int topdown);
899 +-
900 +-extern unsigned int get_slice_psize(struct mm_struct *mm,
901 +- unsigned long addr);
902 +-
903 +-extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
904 +-extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
905 +- unsigned long len, unsigned int psize);
906 +-
907 +-#endif /* __ASSEMBLY__ */
908 +-#else
909 +-#define slice_init()
910 +-#ifdef CONFIG_PPC_BOOK3S_64
911 +-#define get_slice_psize(mm, addr) ((mm)->context.user_psize)
912 +-#define slice_set_user_psize(mm, psize) \
913 +-do { \
914 +- (mm)->context.user_psize = (psize); \
915 +- (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
916 +-} while (0)
917 +-#else /* !CONFIG_PPC_BOOK3S_64 */
918 +-#ifdef CONFIG_PPC_64K_PAGES
919 +-#define get_slice_psize(mm, addr) MMU_PAGE_64K
920 +-#else /* CONFIG_PPC_64K_PAGES */
921 +-#define get_slice_psize(mm, addr) MMU_PAGE_4K
922 +-#endif /* !CONFIG_PPC_64K_PAGES */
923 +-#define slice_set_user_psize(mm, psize) do { BUG(); } while(0)
924 +-#endif /* CONFIG_PPC_BOOK3S_64 */
925 +-
926 +-#define slice_set_range_psize(mm, start, len, psize) \
927 +- slice_set_user_psize((mm), (psize))
928 +-#endif /* CONFIG_PPC_MM_SLICES */
929 +-
930 +-#ifdef CONFIG_HUGETLB_PAGE
931 +-
932 +-#ifdef CONFIG_PPC_MM_SLICES
933 +-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
934 +-#endif
935 +-
936 +-#endif /* !CONFIG_HUGETLB_PAGE */
937 +-
938 + #define VM_DATA_DEFAULT_FLAGS \
939 + (is_32bit_task() ? \
940 + VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
941 +diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h
942 +new file mode 100644
943 +index 000000000000..172711fadb1c
944 +--- /dev/null
945 ++++ b/arch/powerpc/include/asm/slice.h
946 +@@ -0,0 +1,42 @@
947 ++/* SPDX-License-Identifier: GPL-2.0 */
948 ++#ifndef _ASM_POWERPC_SLICE_H
949 ++#define _ASM_POWERPC_SLICE_H
950 ++
951 ++#ifdef CONFIG_PPC_BOOK3S_64
952 ++#include <asm/book3s/64/slice.h>
953 ++#elif defined(CONFIG_PPC64)
954 ++#include <asm/nohash/64/slice.h>
955 ++#elif defined(CONFIG_PPC_MMU_NOHASH)
956 ++#include <asm/nohash/32/slice.h>
957 ++#endif
958 ++
959 ++#ifdef CONFIG_PPC_MM_SLICES
960 ++
961 ++#ifdef CONFIG_HUGETLB_PAGE
962 ++#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
963 ++#endif
964 ++#define HAVE_ARCH_UNMAPPED_AREA
965 ++#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
966 ++
967 ++#ifndef __ASSEMBLY__
968 ++
969 ++struct mm_struct;
970 ++
971 ++unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
972 ++ unsigned long flags, unsigned int psize,
973 ++ int topdown);
974 ++
975 ++unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr);
976 ++
977 ++void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
978 ++void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
979 ++ unsigned long len, unsigned int psize);
980 ++#endif /* __ASSEMBLY__ */
981 ++
982 ++#else /* CONFIG_PPC_MM_SLICES */
983 ++
984 ++#define slice_set_range_psize(mm, start, len, psize) \
985 ++ slice_set_user_psize((mm), (psize))
986 ++#endif /* CONFIG_PPC_MM_SLICES */
987 ++
988 ++#endif /* _ASM_POWERPC_SLICE_H */
989 +diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
990 +index 3f30c994e931..458b928dbd84 100644
991 +--- a/arch/powerpc/kernel/cpu_setup_power.S
992 ++++ b/arch/powerpc/kernel/cpu_setup_power.S
993 +@@ -28,6 +28,7 @@ _GLOBAL(__setup_cpu_power7)
994 + beqlr
995 + li r0,0
996 + mtspr SPRN_LPID,r0
997 ++ mtspr SPRN_PCR,r0
998 + mfspr r3,SPRN_LPCR
999 + li r4,(LPCR_LPES1 >> LPCR_LPES_SH)
1000 + bl __init_LPCR_ISA206
1001 +@@ -41,6 +42,7 @@ _GLOBAL(__restore_cpu_power7)
1002 + beqlr
1003 + li r0,0
1004 + mtspr SPRN_LPID,r0
1005 ++ mtspr SPRN_PCR,r0
1006 + mfspr r3,SPRN_LPCR
1007 + li r4,(LPCR_LPES1 >> LPCR_LPES_SH)
1008 + bl __init_LPCR_ISA206
1009 +@@ -57,6 +59,7 @@ _GLOBAL(__setup_cpu_power8)
1010 + beqlr
1011 + li r0,0
1012 + mtspr SPRN_LPID,r0
1013 ++ mtspr SPRN_PCR,r0
1014 + mfspr r3,SPRN_LPCR
1015 + ori r3, r3, LPCR_PECEDH
1016 + li r4,0 /* LPES = 0 */
1017 +@@ -78,6 +81,7 @@ _GLOBAL(__restore_cpu_power8)
1018 + beqlr
1019 + li r0,0
1020 + mtspr SPRN_LPID,r0
1021 ++ mtspr SPRN_PCR,r0
1022 + mfspr r3,SPRN_LPCR
1023 + ori r3, r3, LPCR_PECEDH
1024 + li r4,0 /* LPES = 0 */
1025 +@@ -99,6 +103,7 @@ _GLOBAL(__setup_cpu_power9)
1026 + mtspr SPRN_PSSCR,r0
1027 + mtspr SPRN_LPID,r0
1028 + mtspr SPRN_PID,r0
1029 ++ mtspr SPRN_PCR,r0
1030 + mfspr r3,SPRN_LPCR
1031 + LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
1032 + or r3, r3, r4
1033 +@@ -123,6 +128,7 @@ _GLOBAL(__restore_cpu_power9)
1034 + mtspr SPRN_PSSCR,r0
1035 + mtspr SPRN_LPID,r0
1036 + mtspr SPRN_PID,r0
1037 ++ mtspr SPRN_PCR,r0
1038 + mfspr r3,SPRN_LPCR
1039 + LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
1040 + or r3, r3, r4
1041 +diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
1042 +index 078553a177de..afe6808d7a41 100644
1043 +--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
1044 ++++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
1045 +@@ -114,6 +114,7 @@ static void __restore_cpu_cpufeatures(void)
1046 + if (hv_mode) {
1047 + mtspr(SPRN_LPID, 0);
1048 + mtspr(SPRN_HFSCR, system_registers.hfscr);
1049 ++ mtspr(SPRN_PCR, 0);
1050 + }
1051 + mtspr(SPRN_FSCR, system_registers.fscr);
1052 +
1053 +diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
1054 +index 01e1c1997893..2fce278446f5 100644
1055 +--- a/arch/powerpc/kernel/idle_book3s.S
1056 ++++ b/arch/powerpc/kernel/idle_book3s.S
1057 +@@ -834,6 +834,8 @@ BEGIN_FTR_SECTION
1058 + mtspr SPRN_PTCR,r4
1059 + ld r4,_RPR(r1)
1060 + mtspr SPRN_RPR,r4
1061 ++ ld r4,_AMOR(r1)
1062 ++ mtspr SPRN_AMOR,r4
1063 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1064 +
1065 + ld r4,_TSCR(r1)
1066 +diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
1067 +index d73ec518ef80..a6002f9449b1 100644
1068 +--- a/arch/powerpc/kernel/setup-common.c
1069 ++++ b/arch/powerpc/kernel/setup-common.c
1070 +@@ -919,6 +919,8 @@ void __init setup_arch(char **cmdline_p)
1071 + #ifdef CONFIG_PPC64
1072 + if (!radix_enabled())
1073 + init_mm.context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
1074 ++#elif defined(CONFIG_PPC_8xx)
1075 ++ init_mm.context.slb_addr_limit = DEFAULT_MAP_WINDOW;
1076 + #else
1077 + #error "context.addr_limit not initialized."
1078 + #endif
1079 +diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
1080 +index 1e48d157196a..578c5e80aa14 100644
1081 +--- a/arch/powerpc/kernel/traps.c
1082 ++++ b/arch/powerpc/kernel/traps.c
1083 +@@ -208,6 +208,12 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
1084 + }
1085 + raw_local_irq_restore(flags);
1086 +
1087 ++ /*
1088 ++ * system_reset_excption handles debugger, crash dump, panic, for 0x100
1089 ++ */
1090 ++ if (TRAP(regs) == 0x100)
1091 ++ return;
1092 ++
1093 + crash_fadump(regs, "die oops");
1094 +
1095 + if (kexec_should_crash(current))
1096 +@@ -272,8 +278,13 @@ void die(const char *str, struct pt_regs *regs, long err)
1097 + {
1098 + unsigned long flags;
1099 +
1100 +- if (debugger(regs))
1101 +- return;
1102 ++ /*
1103 ++ * system_reset_excption handles debugger, crash dump, panic, for 0x100
1104 ++ */
1105 ++ if (TRAP(regs) != 0x100) {
1106 ++ if (debugger(regs))
1107 ++ return;
1108 ++ }
1109 +
1110 + flags = oops_begin(regs);
1111 + if (__die(str, regs, err))
1112 +@@ -1612,6 +1623,22 @@ void facility_unavailable_exception(struct pt_regs *regs)
1113 + value = mfspr(SPRN_FSCR);
1114 +
1115 + status = value >> 56;
1116 ++ if ((hv || status >= 2) &&
1117 ++ (status < ARRAY_SIZE(facility_strings)) &&
1118 ++ facility_strings[status])
1119 ++ facility = facility_strings[status];
1120 ++
1121 ++ /* We should not have taken this interrupt in kernel */
1122 ++ if (!user_mode(regs)) {
1123 ++ pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n",
1124 ++ facility, status, regs->nip);
1125 ++ die("Unexpected facility unavailable exception", regs, SIGABRT);
1126 ++ }
1127 ++
1128 ++ /* We restore the interrupt state now */
1129 ++ if (!arch_irq_disabled_regs(regs))
1130 ++ local_irq_enable();
1131 ++
1132 + if (status == FSCR_DSCR_LG) {
1133 + /*
1134 + * User is accessing the DSCR register using the problem
1135 +@@ -1678,25 +1705,11 @@ void facility_unavailable_exception(struct pt_regs *regs)
1136 + return;
1137 + }
1138 +
1139 +- if ((hv || status >= 2) &&
1140 +- (status < ARRAY_SIZE(facility_strings)) &&
1141 +- facility_strings[status])
1142 +- facility = facility_strings[status];
1143 +-
1144 +- /* We restore the interrupt state now */
1145 +- if (!arch_irq_disabled_regs(regs))
1146 +- local_irq_enable();
1147 +-
1148 + pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
1149 + hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);
1150 +
1151 + out:
1152 +- if (user_mode(regs)) {
1153 +- _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1154 +- return;
1155 +- }
1156 +-
1157 +- die("Unexpected facility unavailable exception", regs, SIGABRT);
1158 ++ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1159 + }
1160 + #endif
1161 +
1162 +diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
1163 +index 849f50cd62f2..cf77d755246d 100644
1164 +--- a/arch/powerpc/mm/8xx_mmu.c
1165 ++++ b/arch/powerpc/mm/8xx_mmu.c
1166 +@@ -192,7 +192,7 @@ void set_context(unsigned long id, pgd_t *pgd)
1167 + mtspr(SPRN_M_TW, __pa(pgd) - offset);
1168 +
1169 + /* Update context */
1170 +- mtspr(SPRN_M_CASID, id);
1171 ++ mtspr(SPRN_M_CASID, id - 1);
1172 + /* sync */
1173 + mb();
1174 + }
1175 +diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
1176 +index 876da2bc1796..590be3fa0ce2 100644
1177 +--- a/arch/powerpc/mm/hugetlbpage.c
1178 ++++ b/arch/powerpc/mm/hugetlbpage.c
1179 +@@ -553,9 +553,11 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
1180 + struct hstate *hstate = hstate_file(file);
1181 + int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
1182 +
1183 ++#ifdef CONFIG_PPC_RADIX_MMU
1184 + if (radix_enabled())
1185 + return radix__hugetlb_get_unmapped_area(file, addr, len,
1186 + pgoff, flags);
1187 ++#endif
1188 + return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
1189 + }
1190 + #endif
1191 +diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
1192 +index 4554d6527682..d98f7e5c141b 100644
1193 +--- a/arch/powerpc/mm/mmu_context_nohash.c
1194 ++++ b/arch/powerpc/mm/mmu_context_nohash.c
1195 +@@ -331,6 +331,20 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm)
1196 + {
1197 + pr_hard("initing context for mm @%p\n", mm);
1198 +
1199 ++#ifdef CONFIG_PPC_MM_SLICES
1200 ++ if (!mm->context.slb_addr_limit)
1201 ++ mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW;
1202 ++
1203 ++ /*
1204 ++ * We have MMU_NO_CONTEXT set to be ~0. Hence check
1205 ++ * explicitly against context.id == 0. This ensures that we properly
1206 ++ * initialize context slice details for newly allocated mm's (which will
1207 ++ * have id == 0) and don't alter context slice inherited via fork (which
1208 ++ * will have id != 0).
1209 ++ */
1210 ++ if (mm->context.id == 0)
1211 ++ slice_set_user_psize(mm, mmu_virtual_psize);
1212 ++#endif
1213 + mm->context.id = MMU_NO_CONTEXT;
1214 + mm->context.active = 0;
1215 + return 0;
1216 +@@ -428,8 +442,8 @@ void __init mmu_context_init(void)
1217 + * -- BenH
1218 + */
1219 + if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
1220 +- first_context = 0;
1221 +- last_context = 15;
1222 ++ first_context = 1;
1223 ++ last_context = 16;
1224 + no_selective_tlbil = true;
1225 + } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
1226 + first_context = 1;
1227 +diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
1228 +index 23ec2c5e3b78..0beca1ba2282 100644
1229 +--- a/arch/powerpc/mm/slice.c
1230 ++++ b/arch/powerpc/mm/slice.c
1231 +@@ -73,10 +73,12 @@ static void slice_range_to_mask(unsigned long start, unsigned long len,
1232 + unsigned long end = start + len - 1;
1233 +
1234 + ret->low_slices = 0;
1235 +- bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
1236 ++ if (SLICE_NUM_HIGH)
1237 ++ bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
1238 +
1239 + if (start < SLICE_LOW_TOP) {
1240 +- unsigned long mend = min(end, (SLICE_LOW_TOP - 1));
1241 ++ unsigned long mend = min(end,
1242 ++ (unsigned long)(SLICE_LOW_TOP - 1));
1243 +
1244 + ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
1245 + - (1u << GET_LOW_SLICE_INDEX(start));
1246 +@@ -113,11 +115,13 @@ static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
1247 + unsigned long start = slice << SLICE_HIGH_SHIFT;
1248 + unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);
1249 +
1250 ++#ifdef CONFIG_PPC64
1251 + /* Hack, so that each addresses is controlled by exactly one
1252 + * of the high or low area bitmaps, the first high area starts
1253 + * at 4GB, not 0 */
1254 + if (start == 0)
1255 + start = SLICE_LOW_TOP;
1256 ++#endif
1257 +
1258 + return !slice_area_is_free(mm, start, end - start);
1259 + }
1260 +@@ -128,7 +132,8 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
1261 + unsigned long i;
1262 +
1263 + ret->low_slices = 0;
1264 +- bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
1265 ++ if (SLICE_NUM_HIGH)
1266 ++ bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
1267 +
1268 + for (i = 0; i < SLICE_NUM_LOW; i++)
1269 + if (!slice_low_has_vma(mm, i))
1270 +@@ -151,7 +156,8 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma
1271 + u64 lpsizes;
1272 +
1273 + ret->low_slices = 0;
1274 +- bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
1275 ++ if (SLICE_NUM_HIGH)
1276 ++ bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
1277 +
1278 + lpsizes = mm->context.low_slices_psize;
1279 + for (i = 0; i < SLICE_NUM_LOW; i++)
1280 +@@ -180,6 +186,10 @@ static int slice_check_fit(struct mm_struct *mm,
1281 + */
1282 + unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit);
1283 +
1284 ++ if (!SLICE_NUM_HIGH)
1285 ++ return (mask.low_slices & available.low_slices) ==
1286 ++ mask.low_slices;
1287 ++
1288 + bitmap_and(result, mask.high_slices,
1289 + available.high_slices, slice_count);
1290 +
1291 +@@ -189,6 +199,7 @@ static int slice_check_fit(struct mm_struct *mm,
1292 +
1293 + static void slice_flush_segments(void *parm)
1294 + {
1295 ++#ifdef CONFIG_PPC64
1296 + struct mm_struct *mm = parm;
1297 + unsigned long flags;
1298 +
1299 +@@ -200,6 +211,7 @@ static void slice_flush_segments(void *parm)
1300 + local_irq_save(flags);
1301 + slb_flush_and_rebolt();
1302 + local_irq_restore(flags);
1303 ++#endif
1304 + }
1305 +
1306 + static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
1307 +@@ -388,21 +400,21 @@ static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
1308 +
1309 + static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src)
1310 + {
1311 +- DECLARE_BITMAP(result, SLICE_NUM_HIGH);
1312 +-
1313 + dst->low_slices |= src->low_slices;
1314 +- bitmap_or(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
1315 +- bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
1316 ++ if (!SLICE_NUM_HIGH)
1317 ++ return;
1318 ++ bitmap_or(dst->high_slices, dst->high_slices, src->high_slices,
1319 ++ SLICE_NUM_HIGH);
1320 + }
1321 +
1322 + static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask *src)
1323 + {
1324 +- DECLARE_BITMAP(result, SLICE_NUM_HIGH);
1325 +-
1326 + dst->low_slices &= ~src->low_slices;
1327 +
1328 +- bitmap_andnot(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
1329 +- bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
1330 ++ if (!SLICE_NUM_HIGH)
1331 ++ return;
1332 ++ bitmap_andnot(dst->high_slices, dst->high_slices, src->high_slices,
1333 ++ SLICE_NUM_HIGH);
1334 + }
1335 +
1336 + #ifdef CONFIG_PPC_64K_PAGES
1337 +@@ -450,14 +462,17 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
1338 + * init different masks
1339 + */
1340 + mask.low_slices = 0;
1341 +- bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);
1342 +
1343 + /* silence stupid warning */;
1344 + potential_mask.low_slices = 0;
1345 +- bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);
1346 +
1347 + compat_mask.low_slices = 0;
1348 +- bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);
1349 ++
1350 ++ if (SLICE_NUM_HIGH) {
1351 ++ bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);
1352 ++ bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);
1353 ++ bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);
1354 ++ }
1355 +
1356 + /* Sanity checks */
1357 + BUG_ON(mm->task_size == 0);
1358 +@@ -595,7 +610,9 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
1359 + convert:
1360 + slice_andnot_mask(&mask, &good_mask);
1361 + slice_andnot_mask(&mask, &compat_mask);
1362 +- if (mask.low_slices || !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH)) {
1363 ++ if (mask.low_slices ||
1364 ++ (SLICE_NUM_HIGH &&
1365 ++ !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH))) {
1366 + slice_convert(mm, mask, psize);
1367 + if (psize > MMU_PAGE_BASE)
1368 + on_each_cpu(slice_flush_segments, mm, 1);
1369 +diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
1370 +index f89bbd54ecec..1e55ae2f2afd 100644
1371 +--- a/arch/powerpc/perf/core-book3s.c
1372 ++++ b/arch/powerpc/perf/core-book3s.c
1373 +@@ -457,6 +457,16 @@ static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
1374 + /* invalid entry */
1375 + continue;
1376 +
1377 ++ /*
1378 ++ * BHRB rolling buffer could very much contain the kernel
1379 ++ * addresses at this point. Check the privileges before
1380 ++ * exporting it to userspace (avoid exposure of regions
1381 ++ * where we could have speculative execution)
1382 ++ */
1383 ++ if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) &&
1384 ++ is_kernel_addr(addr))
1385 ++ continue;
1386 ++
1387 + /* Branches are read most recent first (ie. mfbhrb 0 is
1388 + * the most recent branch).
1389 + * There are two types of valid entries:
1390 +@@ -1226,6 +1236,7 @@ static void power_pmu_disable(struct pmu *pmu)
1391 + */
1392 + write_mmcr0(cpuhw, val);
1393 + mb();
1394 ++ isync();
1395 +
1396 + /*
1397 + * Disable instruction sampling if it was enabled
1398 +@@ -1234,12 +1245,26 @@ static void power_pmu_disable(struct pmu *pmu)
1399 + mtspr(SPRN_MMCRA,
1400 + cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
1401 + mb();
1402 ++ isync();
1403 + }
1404 +
1405 + cpuhw->disabled = 1;
1406 + cpuhw->n_added = 0;
1407 +
1408 + ebb_switch_out(mmcr0);
1409 ++
1410 ++#ifdef CONFIG_PPC64
1411 ++ /*
1412 ++ * These are readable by userspace, may contain kernel
1413 ++ * addresses and are not switched by context switch, so clear
1414 ++ * them now to avoid leaking anything to userspace in general
1415 ++ * including to another process.
1416 ++ */
1417 ++ if (ppmu->flags & PPMU_ARCH_207S) {
1418 ++ mtspr(SPRN_SDAR, 0);
1419 ++ mtspr(SPRN_SIAR, 0);
1420 ++ }
1421 ++#endif
1422 + }
1423 +
1424 + local_irq_restore(flags);
1425 +diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
1426 +index a429d859f15d..5a8b1bf1e819 100644
1427 +--- a/arch/powerpc/platforms/Kconfig.cputype
1428 ++++ b/arch/powerpc/platforms/Kconfig.cputype
1429 +@@ -326,6 +326,7 @@ config PPC_BOOK3E_MMU
1430 + config PPC_MM_SLICES
1431 + bool
1432 + default y if PPC_BOOK3S_64
1433 ++ default y if PPC_8xx && HUGETLB_PAGE
1434 + default n
1435 +
1436 + config PPC_HAVE_PMU_SUPPORT
1437 +diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
1438 +index e7b621f619b2..a9a3d62c34d6 100644
1439 +--- a/arch/powerpc/platforms/powernv/npu-dma.c
1440 ++++ b/arch/powerpc/platforms/powernv/npu-dma.c
1441 +@@ -417,6 +417,11 @@ struct npu_context {
1442 + void *priv;
1443 + };
1444 +
1445 ++struct mmio_atsd_reg {
1446 ++ struct npu *npu;
1447 ++ int reg;
1448 ++};
1449 ++
1450 + /*
1451 + * Find a free MMIO ATSD register and mark it in use. Return -ENOSPC
1452 + * if none are available.
1453 +@@ -426,7 +431,7 @@ static int get_mmio_atsd_reg(struct npu *npu)
1454 + int i;
1455 +
1456 + for (i = 0; i < npu->mmio_atsd_count; i++) {
1457 +- if (!test_and_set_bit(i, &npu->mmio_atsd_usage))
1458 ++ if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
1459 + return i;
1460 + }
1461 +
1462 +@@ -435,86 +440,90 @@ static int get_mmio_atsd_reg(struct npu *npu)
1463 +
1464 + static void put_mmio_atsd_reg(struct npu *npu, int reg)
1465 + {
1466 +- clear_bit(reg, &npu->mmio_atsd_usage);
1467 ++ clear_bit_unlock(reg, &npu->mmio_atsd_usage);
1468 + }
1469 +
1470 + /* MMIO ATSD register offsets */
1471 + #define XTS_ATSD_AVA 1
1472 + #define XTS_ATSD_STAT 2
1473 +
1474 +-static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
1475 +- unsigned long va)
1476 ++static void mmio_launch_invalidate(struct mmio_atsd_reg *mmio_atsd_reg,
1477 ++ unsigned long launch, unsigned long va)
1478 + {
1479 +- int mmio_atsd_reg;
1480 +-
1481 +- do {
1482 +- mmio_atsd_reg = get_mmio_atsd_reg(npu);
1483 +- cpu_relax();
1484 +- } while (mmio_atsd_reg < 0);
1485 ++ struct npu *npu = mmio_atsd_reg->npu;
1486 ++ int reg = mmio_atsd_reg->reg;
1487 +
1488 + __raw_writeq(cpu_to_be64(va),
1489 +- npu->mmio_atsd_regs[mmio_atsd_reg] + XTS_ATSD_AVA);
1490 ++ npu->mmio_atsd_regs[reg] + XTS_ATSD_AVA);
1491 + eieio();
1492 +- __raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[mmio_atsd_reg]);
1493 +-
1494 +- return mmio_atsd_reg;
1495 ++ __raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[reg]);
1496 + }
1497 +
1498 +-static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
1499 ++static void mmio_invalidate_pid(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS],
1500 ++ unsigned long pid, bool flush)
1501 + {
1502 ++ int i;
1503 + unsigned long launch;
1504 +
1505 +- /* IS set to invalidate matching PID */
1506 +- launch = PPC_BIT(12);
1507 ++ for (i = 0; i <= max_npu2_index; i++) {
1508 ++ if (mmio_atsd_reg[i].reg < 0)
1509 ++ continue;
1510 ++
1511 ++ /* IS set to invalidate matching PID */
1512 ++ launch = PPC_BIT(12);
1513 +
1514 +- /* PRS set to process-scoped */
1515 +- launch |= PPC_BIT(13);
1516 ++ /* PRS set to process-scoped */
1517 ++ launch |= PPC_BIT(13);
1518 +
1519 +- /* AP */
1520 +- launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);
1521 ++ /* AP */
1522 ++ launch |= (u64)
1523 ++ mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);
1524 +
1525 +- /* PID */
1526 +- launch |= pid << PPC_BITLSHIFT(38);
1527 ++ /* PID */
1528 ++ launch |= pid << PPC_BITLSHIFT(38);
1529 +
1530 +- /* No flush */
1531 +- launch |= !flush << PPC_BITLSHIFT(39);
1532 ++ /* No flush */
1533 ++ launch |= !flush << PPC_BITLSHIFT(39);
1534 +
1535 +- /* Invalidating the entire process doesn't use a va */
1536 +- return mmio_launch_invalidate(npu, launch, 0);
1537 ++ /* Invalidating the entire process doesn't use a va */
1538 ++ mmio_launch_invalidate(&mmio_atsd_reg[i], launch, 0);
1539 ++ }
1540 + }
1541 +
1542 +-static int mmio_invalidate_va(struct npu *npu, unsigned long va,
1543 +- unsigned long pid, bool flush)
1544 ++static void mmio_invalidate_va(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS],
1545 ++ unsigned long va, unsigned long pid, bool flush)
1546 + {
1547 ++ int i;
1548 + unsigned long launch;
1549 +
1550 +- /* IS set to invalidate target VA */
1551 +- launch = 0;
1552 ++ for (i = 0; i <= max_npu2_index; i++) {
1553 ++ if (mmio_atsd_reg[i].reg < 0)
1554 ++ continue;
1555 ++
1556 ++ /* IS set to invalidate target VA */
1557 ++ launch = 0;
1558 +
1559 +- /* PRS set to process scoped */
1560 +- launch |= PPC_BIT(13);
1561 ++ /* PRS set to process scoped */
1562 ++ launch |= PPC_BIT(13);
1563 +
1564 +- /* AP */
1565 +- launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);
1566 ++ /* AP */
1567 ++ launch |= (u64)
1568 ++ mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);
1569 +
1570 +- /* PID */
1571 +- launch |= pid << PPC_BITLSHIFT(38);
1572 ++ /* PID */
1573 ++ launch |= pid << PPC_BITLSHIFT(38);
1574 +
1575 +- /* No flush */
1576 +- launch |= !flush << PPC_BITLSHIFT(39);
1577 ++ /* No flush */
1578 ++ launch |= !flush << PPC_BITLSHIFT(39);
1579 +
1580 +- return mmio_launch_invalidate(npu, launch, va);
1581 ++ mmio_launch_invalidate(&mmio_atsd_reg[i], launch, va);
1582 ++ }
1583 + }
1584 +
1585 + #define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
1586 +
1587 +-struct mmio_atsd_reg {
1588 +- struct npu *npu;
1589 +- int reg;
1590 +-};
1591 +-
1592 + static void mmio_invalidate_wait(
1593 +- struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
1594 ++ struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
1595 + {
1596 + struct npu *npu;
1597 + int i, reg;
1598 +@@ -529,16 +538,67 @@ static void mmio_invalidate_wait(
1599 + reg = mmio_atsd_reg[i].reg;
1600 + while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
1601 + cpu_relax();
1602 ++ }
1603 ++}
1604 ++
1605 ++/*
1606 ++ * Acquires all the address translation shootdown (ATSD) registers required to
1607 ++ * launch an ATSD on all links this npu_context is active on.
1608 ++ */
1609 ++static void acquire_atsd_reg(struct npu_context *npu_context,
1610 ++ struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
1611 ++{
1612 ++ int i, j;
1613 ++ struct npu *npu;
1614 ++ struct pci_dev *npdev;
1615 ++ struct pnv_phb *nphb;
1616 +
1617 +- put_mmio_atsd_reg(npu, reg);
1618 ++ for (i = 0; i <= max_npu2_index; i++) {
1619 ++ mmio_atsd_reg[i].reg = -1;
1620 ++ for (j = 0; j < NV_MAX_LINKS; j++) {
1621 ++ /*
1622 ++ * There are no ordering requirements with respect to
1623 ++ * the setup of struct npu_context, but to ensure
1624 ++ * consistent behaviour we need to ensure npdev[][] is
1625 ++ * only read once.
1626 ++ */
1627 ++ npdev = READ_ONCE(npu_context->npdev[i][j]);
1628 ++ if (!npdev)
1629 ++ continue;
1630 +
1631 ++ nphb = pci_bus_to_host(npdev->bus)->private_data;
1632 ++ npu = &nphb->npu;
1633 ++ mmio_atsd_reg[i].npu = npu;
1634 ++ mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
1635 ++ while (mmio_atsd_reg[i].reg < 0) {
1636 ++ mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
1637 ++ cpu_relax();
1638 ++ }
1639 ++ break;
1640 ++ }
1641 ++ }
1642 ++}
1643 ++
1644 ++/*
1645 ++ * Release previously acquired ATSD registers. To avoid deadlocks the registers
1646 ++ * must be released in the same order they were acquired above in
1647 ++ * acquire_atsd_reg.
1648 ++ */
1649 ++static void release_atsd_reg(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
1650 ++{
1651 ++ int i;
1652 ++
1653 ++ for (i = 0; i <= max_npu2_index; i++) {
1654 + /*
1655 +- * The GPU requires two flush ATSDs to ensure all entries have
1656 +- * been flushed. We use PID 0 as it will never be used for a
1657 +- * process on the GPU.
1658 ++ * We can't rely on npu_context->npdev[][] being the same here
1659 ++ * as when acquire_atsd_reg() was called, hence we use the
1660 ++ * values stored in mmio_atsd_reg during the acquire phase
1661 ++ * rather than re-reading npdev[][].
1662 + */
1663 +- if (flush)
1664 +- mmio_invalidate_pid(npu, 0, true);
1665 ++ if (mmio_atsd_reg[i].reg < 0)
1666 ++ continue;
1667 ++
1668 ++ put_mmio_atsd_reg(mmio_atsd_reg[i].npu, mmio_atsd_reg[i].reg);
1669 + }
1670 + }
1671 +
1672 +@@ -549,10 +609,6 @@ static void mmio_invalidate_wait(
1673 + static void mmio_invalidate(struct npu_context *npu_context, int va,
1674 + unsigned long address, bool flush)
1675 + {
1676 +- int i, j;
1677 +- struct npu *npu;
1678 +- struct pnv_phb *nphb;
1679 +- struct pci_dev *npdev;
1680 + struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
1681 + unsigned long pid = npu_context->mm->context.id;
1682 +
1683 +@@ -568,37 +624,25 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
1684 + * Loop over all the NPUs this process is active on and launch
1685 + * an invalidate.
1686 + */
1687 +- for (i = 0; i <= max_npu2_index; i++) {
1688 +- mmio_atsd_reg[i].reg = -1;
1689 +- for (j = 0; j < NV_MAX_LINKS; j++) {
1690 +- npdev = npu_context->npdev[i][j];
1691 +- if (!npdev)
1692 +- continue;
1693 +-
1694 +- nphb = pci_bus_to_host(npdev->bus)->private_data;
1695 +- npu = &nphb->npu;
1696 +- mmio_atsd_reg[i].npu = npu;
1697 +-
1698 +- if (va)
1699 +- mmio_atsd_reg[i].reg =
1700 +- mmio_invalidate_va(npu, address, pid,
1701 +- flush);
1702 +- else
1703 +- mmio_atsd_reg[i].reg =
1704 +- mmio_invalidate_pid(npu, pid, flush);
1705 +-
1706 +- /*
1707 +- * The NPU hardware forwards the shootdown to all GPUs
1708 +- * so we only have to launch one shootdown per NPU.
1709 +- */
1710 +- break;
1711 +- }
1712 ++ acquire_atsd_reg(npu_context, mmio_atsd_reg);
1713 ++ if (va)
1714 ++ mmio_invalidate_va(mmio_atsd_reg, address, pid, flush);
1715 ++ else
1716 ++ mmio_invalidate_pid(mmio_atsd_reg, pid, flush);
1717 ++
1718 ++ mmio_invalidate_wait(mmio_atsd_reg);
1719 ++ if (flush) {
1720 ++ /*
1721 ++ * The GPU requires two flush ATSDs to ensure all entries have
1722 ++ * been flushed. We use PID 0 as it will never be used for a
1723 ++ * process on the GPU.
1724 ++ */
1725 ++ mmio_invalidate_pid(mmio_atsd_reg, 0, true);
1726 ++ mmio_invalidate_wait(mmio_atsd_reg);
1727 ++ mmio_invalidate_pid(mmio_atsd_reg, 0, true);
1728 ++ mmio_invalidate_wait(mmio_atsd_reg);
1729 + }
1730 +-
1731 +- mmio_invalidate_wait(mmio_atsd_reg, flush);
1732 +- if (flush)
1733 +- /* Wait for the flush to complete */
1734 +- mmio_invalidate_wait(mmio_atsd_reg, false);
1735 ++ release_atsd_reg(mmio_atsd_reg);
1736 + }
1737 +
1738 + static void pnv_npu2_mn_release(struct mmu_notifier *mn,
1739 +@@ -741,7 +785,16 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
1740 + if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
1741 + &nvlink_index)))
1742 + return ERR_PTR(-ENODEV);
1743 +- npu_context->npdev[npu->index][nvlink_index] = npdev;
1744 ++
1745 ++ /*
1746 ++ * npdev is a pci_dev pointer set up by the PCI code. We assign it to
1747 ++ * npdev[][] to indicate to the mmu notifiers that an invalidation
1748 ++ * should also be sent over this nvlink. The notifiers don't use any
1749 ++ * other fields in npu_context, so we just need to ensure that when they
1750 ++ * dereference npu_context->npdev[][] it is either a valid pointer or
1751 ++ * NULL.
1752 ++ */
1753 ++ WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], npdev);
1754 +
1755 + if (!nphb->npu.nmmu_flush) {
1756 + /*
1757 +@@ -793,7 +846,7 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
1758 + if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
1759 + &nvlink_index)))
1760 + return;
1761 +- npu_context->npdev[npu->index][nvlink_index] = NULL;
1762 ++ WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL);
1763 + opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
1764 + PCI_DEVID(gpdev->bus->number, gpdev->devfn));
1765 + kref_put(&npu_context->kref, pnv_npu2_release_context);
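
The npu-dma.c refactor above separates acquiring ATSD registers from launching invalidates, with the stated invariant that registers are acquired, and later released, walking NPU indexes in the same ascending order. A compacted single-threaded model of that discipline (sizes and helpers made up; the real driver uses atomic test_and_set_bit_lock()/clear_bit_unlock() and spins with cpu_relax()):

    #include <stdio.h>

    #define SKETCH_NPUS 4

    static unsigned long usage;	/* one bit per ATSD register, as in the driver */

    static int get_reg(int i)
    {
    	if (usage & (1UL << i))
    		return -1;	/* busy, caller retries */
    	usage |= 1UL << i;
    	return i;
    }

    static void put_reg(int i)
    {
    	usage &= ~(1UL << i);
    }

    int main(void)
    {
    	int regs[SKETCH_NPUS];

    	/* Acquire phase: ascending index order, spin per slot. */
    	for (int i = 0; i < SKETCH_NPUS; i++)
    		while ((regs[i] = get_reg(i)) < 0)
    			;	/* cpu_relax() in the real code */

    	/* ... launch invalidates and wait for completion here ... */

    	/* Release phase: the same ascending order, as the comment
    	 * on release_atsd_reg() requires to avoid deadlock. */
    	for (int i = 0; i < SKETCH_NPUS; i++)
    		put_reg(regs[i]);

    	printf("usage mask after release: %#lx\n", usage);
    	return 0;
    }
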
1766 +diff --git a/arch/powerpc/platforms/powernv/vas-debug.c b/arch/powerpc/platforms/powernv/vas-debug.c
1767 +index ca22f1eae050..3161e39eea1d 100644
1768 +--- a/arch/powerpc/platforms/powernv/vas-debug.c
1769 ++++ b/arch/powerpc/platforms/powernv/vas-debug.c
1770 +@@ -179,6 +179,7 @@ void vas_instance_init_dbgdir(struct vas_instance *vinst)
1771 + {
1772 + struct dentry *d;
1773 +
1774 ++ vas_init_dbgdir();
1775 + if (!vas_debugfs)
1776 + return;
1777 +
1778 +@@ -201,8 +202,18 @@ void vas_instance_init_dbgdir(struct vas_instance *vinst)
1779 + vinst->dbgdir = NULL;
1780 + }
1781 +
1782 ++/*
1783 ++ * Set up the "root" VAS debugfs dir. Return if we already set it up
1784 ++ * (or failed to) in an earlier instance of VAS.
1785 ++ */
1786 + void vas_init_dbgdir(void)
1787 + {
1788 ++ static bool first_time = true;
1789 ++
1790 ++ if (!first_time)
1791 ++ return;
1792 ++
1793 ++ first_time = false;
1794 + vas_debugfs = debugfs_create_dir("vas", NULL);
1795 + if (IS_ERR(vas_debugfs))
1796 + vas_debugfs = NULL;
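
The guard that makes vas_init_dbgdir() safe to call from every instance init is just a static once-flag. Distilled to its essentials (note the flag is not atomic, so this assumes callers are serialized, as instance bring-up is here):

    #include <stdbool.h>
    #include <stdio.h>

    static void init_once(void)
    {
    	static bool first_time = true;

    	if (!first_time)
    		return;		/* already set up (or failed to) */
    	first_time = false;

    	puts("expensive one-time setup runs here");
    }

    int main(void)
    {
    	init_once();	/* does the work  */
    	init_once();	/* returns early  */
    	return 0;
    }
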
1797 +diff --git a/arch/powerpc/platforms/powernv/vas.c b/arch/powerpc/platforms/powernv/vas.c
1798 +index aebbe95c9230..5a2b24cbbc88 100644
1799 +--- a/arch/powerpc/platforms/powernv/vas.c
1800 ++++ b/arch/powerpc/platforms/powernv/vas.c
1801 +@@ -160,8 +160,6 @@ static int __init vas_init(void)
1802 + int found = 0;
1803 + struct device_node *dn;
1804 +
1805 +- vas_init_dbgdir();
1806 +-
1807 + platform_driver_register(&vas_driver);
1808 +
1809 + for_each_compatible_node(dn, NULL, "ibm,vas") {
1810 +@@ -169,8 +167,10 @@ static int __init vas_init(void)
1811 + found++;
1812 + }
1813 +
1814 +- if (!found)
1815 ++ if (!found) {
1816 ++ platform_driver_unregister(&vas_driver);
1817 + return -ENODEV;
1818 ++ }
1819 +
1820 + pr_devel("Found %d instances\n", found);
1821 +
1822 +diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
1823 +index 73067805300a..1d4e0ef658d3 100644
1824 +--- a/arch/powerpc/sysdev/mpic.c
1825 ++++ b/arch/powerpc/sysdev/mpic.c
1826 +@@ -626,7 +626,7 @@ static inline u32 mpic_physmask(u32 cpumask)
1827 + int i;
1828 + u32 mask = 0;
1829 +
1830 +- for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1)
1831 ++ for (i = 0; i < min(32, NR_CPUS) && cpu_possible(i); ++i, cpumask >>= 1)
1832 + mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
1833 + return mask;
1834 + }
1835 +diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h
1836 +new file mode 100644
1837 +index 000000000000..2b443a3a487f
1838 +--- /dev/null
1839 ++++ b/arch/riscv/include/asm/fence.h
1840 +@@ -0,0 +1,12 @@
1841 ++#ifndef _ASM_RISCV_FENCE_H
1842 ++#define _ASM_RISCV_FENCE_H
1843 ++
1844 ++#ifdef CONFIG_SMP
1845 ++#define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n"
1846 ++#define RISCV_RELEASE_BARRIER "\tfence rw, w\n"
1847 ++#else
1848 ++#define RISCV_ACQUIRE_BARRIER
1849 ++#define RISCV_RELEASE_BARRIER
1850 ++#endif
1851 ++
1852 ++#endif /* _ASM_RISCV_FENCE_H */
1853 +diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
1854 +index 2fd27e8ef1fd..8eb26d1ede81 100644
1855 +--- a/arch/riscv/include/asm/spinlock.h
1856 ++++ b/arch/riscv/include/asm/spinlock.h
1857 +@@ -17,6 +17,7 @@
1858 +
1859 + #include <linux/kernel.h>
1860 + #include <asm/current.h>
1861 ++#include <asm/fence.h>
1862 +
1863 + /*
1864 + * Simple spin lock operations. These provide no fairness guarantees.
1865 +@@ -28,10 +29,7 @@
1866 +
1867 + static inline void arch_spin_unlock(arch_spinlock_t *lock)
1868 + {
1869 +- __asm__ __volatile__ (
1870 +- "amoswap.w.rl x0, x0, %0"
1871 +- : "=A" (lock->lock)
1872 +- :: "memory");
1873 ++ smp_store_release(&lock->lock, 0);
1874 + }
1875 +
1876 + static inline int arch_spin_trylock(arch_spinlock_t *lock)
1877 +@@ -39,7 +37,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
1878 + int tmp = 1, busy;
1879 +
1880 + __asm__ __volatile__ (
1881 +- "amoswap.w.aq %0, %2, %1"
1882 ++ " amoswap.w %0, %2, %1\n"
1883 ++ RISCV_ACQUIRE_BARRIER
1884 + : "=r" (busy), "+A" (lock->lock)
1885 + : "r" (tmp)
1886 + : "memory");
1887 +@@ -68,8 +67,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
1888 + "1: lr.w %1, %0\n"
1889 + " bltz %1, 1b\n"
1890 + " addi %1, %1, 1\n"
1891 +- " sc.w.aq %1, %1, %0\n"
1892 ++ " sc.w %1, %1, %0\n"
1893 + " bnez %1, 1b\n"
1894 ++ RISCV_ACQUIRE_BARRIER
1895 + : "+A" (lock->lock), "=&r" (tmp)
1896 + :: "memory");
1897 + }
1898 +@@ -82,8 +82,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock)
1899 + "1: lr.w %1, %0\n"
1900 + " bnez %1, 1b\n"
1901 + " li %1, -1\n"
1902 +- " sc.w.aq %1, %1, %0\n"
1903 ++ " sc.w %1, %1, %0\n"
1904 + " bnez %1, 1b\n"
1905 ++ RISCV_ACQUIRE_BARRIER
1906 + : "+A" (lock->lock), "=&r" (tmp)
1907 + :: "memory");
1908 + }
1909 +@@ -96,8 +97,9 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
1910 + "1: lr.w %1, %0\n"
1911 + " bltz %1, 1f\n"
1912 + " addi %1, %1, 1\n"
1913 +- " sc.w.aq %1, %1, %0\n"
1914 ++ " sc.w %1, %1, %0\n"
1915 + " bnez %1, 1b\n"
1916 ++ RISCV_ACQUIRE_BARRIER
1917 + "1:\n"
1918 + : "+A" (lock->lock), "=&r" (busy)
1919 + :: "memory");
1920 +@@ -113,8 +115,9 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
1921 + "1: lr.w %1, %0\n"
1922 + " bnez %1, 1f\n"
1923 + " li %1, -1\n"
1924 +- " sc.w.aq %1, %1, %0\n"
1925 ++ " sc.w %1, %1, %0\n"
1926 + " bnez %1, 1b\n"
1927 ++ RISCV_ACQUIRE_BARRIER
1928 + "1:\n"
1929 + : "+A" (lock->lock), "=&r" (busy)
1930 + :: "memory");
1931 +@@ -125,7 +128,8 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
1932 + static inline void arch_read_unlock(arch_rwlock_t *lock)
1933 + {
1934 + __asm__ __volatile__(
1935 +- "amoadd.w.rl x0, %1, %0"
1936 ++ RISCV_RELEASE_BARRIER
1937 ++ " amoadd.w x0, %1, %0\n"
1938 + : "+A" (lock->lock)
1939 + : "r" (-1)
1940 + : "memory");
1941 +@@ -133,10 +137,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
1942 +
1943 + static inline void arch_write_unlock(arch_rwlock_t *lock)
1944 + {
1945 +- __asm__ __volatile__ (
1946 +- "amoswap.w.rl x0, x0, %0"
1947 +- : "=A" (lock->lock)
1948 +- :: "memory");
1949 ++ smp_store_release(&lock->lock, 0);
1950 + }
1951 +
1952 + #endif /* _ASM_RISCV_SPINLOCK_H */
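
Semantically, the RISC-V spinlock rewrite turns unlock into a plain store-release and trylock into a plain swap followed by an acquire fence, with both fences compiled out on !SMP. The same contract expressed with C11 atomics (a portable sketch of the ordering, not the kernel's implementation):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { atomic_int lock; } sketch_spinlock_t;

    static bool sketch_trylock(sketch_spinlock_t *l)
    {
    	/* Plain swap, no ordering by itself ... */
    	int busy = atomic_exchange_explicit(&l->lock, 1, memory_order_relaxed);
    	/* ... then the acquire barrier, like RISCV_ACQUIRE_BARRIER. */
    	atomic_thread_fence(memory_order_acquire);
    	return !busy;
    }

    static void sketch_unlock(sketch_spinlock_t *l)
    {
    	/* smp_store_release() equivalent. */
    	atomic_store_explicit(&l->lock, 0, memory_order_release);
    }

    int main(void)
    {
    	sketch_spinlock_t l = { 0 };

    	printf("first trylock:  %d\n", sketch_trylock(&l));	/* 1 */
    	printf("second trylock: %d\n", sketch_trylock(&l));	/* 0 */
    	sketch_unlock(&l);
    	printf("after unlock:   %d\n", sketch_trylock(&l));	/* 1 */
    	return 0;
    }
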
1953 +diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
1954 +index 8961e3970901..969882b54266 100644
1955 +--- a/arch/s390/kvm/vsie.c
1956 ++++ b/arch/s390/kvm/vsie.c
1957 +@@ -578,7 +578,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1958 +
1959 + gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
1960 + if (gpa && (scb_s->ecb & ECB_TE)) {
1961 +- if (!(gpa & ~0x1fffU)) {
1962 ++ if (!(gpa & ~0x1fffUL)) {
1963 + rc = set_validity_icpt(scb_s, 0x0080U);
1964 + goto unpin;
1965 + }
1966 +diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
1967 +index c001f782c5f1..28cc61216b64 100644
1968 +--- a/arch/sh/kernel/entry-common.S
1969 ++++ b/arch/sh/kernel/entry-common.S
1970 +@@ -255,7 +255,7 @@ debug_trap:
1971 + mov.l @r8, r8
1972 + jsr @r8
1973 + nop
1974 +- bra __restore_all
1975 ++ bra ret_from_exception
1976 + nop
1977 + CFI_ENDPROC
1978 +
1979 +diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
1980 +index abad97edf736..28db058d471b 100644
1981 +--- a/arch/sparc/include/asm/atomic_64.h
1982 ++++ b/arch/sparc/include/asm/atomic_64.h
1983 +@@ -83,7 +83,11 @@ ATOMIC_OPS(xor)
1984 + #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
1985 +
1986 + #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
1987 +-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1988 ++
1989 ++static inline int atomic_xchg(atomic_t *v, int new)
1990 ++{
1991 ++ return xchg(&v->counter, new);
1992 ++}
1993 +
1994 + static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1995 + {
1996 +diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
1997 +index 06086439b7bd..70610604c360 100644
1998 +--- a/arch/x86/events/core.c
1999 ++++ b/arch/x86/events/core.c
2000 +@@ -1162,16 +1162,13 @@ int x86_perf_event_set_period(struct perf_event *event)
2001 +
2002 + per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
2003 +
2004 +- if (!(hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) ||
2005 +- local64_read(&hwc->prev_count) != (u64)-left) {
2006 +- /*
2007 +- * The hw event starts counting from this event offset,
2008 +- * mark it to be able to extract future deltas:
2009 +- */
2010 +- local64_set(&hwc->prev_count, (u64)-left);
2011 ++ /*
2012 ++ * The hw event starts counting from this event offset,
2013 ++ * mark it to be able to extract future deltas:
2014 ++ */
2015 ++ local64_set(&hwc->prev_count, (u64)-left);
2016 +
2017 +- wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
2018 +- }
2019 ++ wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
2020 +
2021 + /*
2022 + * Due to erratum on certain CPUs we need
2023 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
2024 +index 1e41d7508d99..39cd0b36c790 100644
2025 +--- a/arch/x86/events/intel/core.c
2026 ++++ b/arch/x86/events/intel/core.c
2027 +@@ -2201,9 +2201,15 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
2028 + int bit, loops;
2029 + u64 status;
2030 + int handled;
2031 ++ int pmu_enabled;
2032 +
2033 + cpuc = this_cpu_ptr(&cpu_hw_events);
2034 +
2035 ++ /*
2036 ++ * Save the PMU state.
2037 ++ * It needs to be restored when leaving the handler.
2038 ++ */
2039 ++ pmu_enabled = cpuc->enabled;
2040 + /*
2041 + * No known reason to not always do late ACK,
2042 + * but just in case do it opt-in.
2043 +@@ -2211,6 +2217,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
2044 + if (!x86_pmu.late_ack)
2045 + apic_write(APIC_LVTPC, APIC_DM_NMI);
2046 + intel_bts_disable_local();
2047 ++ cpuc->enabled = 0;
2048 + __intel_pmu_disable_all();
2049 + handled = intel_pmu_drain_bts_buffer();
2050 + handled += intel_bts_interrupt();
2051 +@@ -2320,7 +2327,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
2052 +
2053 + done:
2054 + /* Only restore PMU state when it's active. See x86_pmu_disable(). */
2055 +- if (cpuc->enabled)
2056 ++ cpuc->enabled = pmu_enabled;
2057 ++ if (pmu_enabled)
2058 + __intel_pmu_enable_all(0, true);
2059 + intel_bts_enable_local();
2060 +
2061 +@@ -3188,7 +3196,7 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2062 + * Therefore the effective (average) period matches the requested period,
2063 + * despite coarser hardware granularity.
2064 + */
2065 +-static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
2066 ++static u64 bdw_limit_period(struct perf_event *event, u64 left)
2067 + {
2068 + if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
2069 + X86_CONFIG(.event=0xc0, .umask=0x01)) {
2070 +diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
2071 +index 5e526c54247e..cc0eb543cc70 100644
2072 +--- a/arch/x86/events/intel/ds.c
2073 ++++ b/arch/x86/events/intel/ds.c
2074 +@@ -1315,17 +1315,84 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit)
2075 + return NULL;
2076 + }
2077 +
2078 ++/*
2079 ++ * Special variant of intel_pmu_save_and_restart() for auto-reload.
2080 ++ */
2081 ++static int
2082 ++intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
2083 ++{
2084 ++ struct hw_perf_event *hwc = &event->hw;
2085 ++ int shift = 64 - x86_pmu.cntval_bits;
2086 ++ u64 period = hwc->sample_period;
2087 ++ u64 prev_raw_count, new_raw_count;
2088 ++ s64 new, old;
2089 ++
2090 ++ WARN_ON(!period);
2091 ++
2092 ++ /*
2093 ++ * drain_pebs() only happens when the PMU is disabled.
2094 ++ */
2095 ++ WARN_ON(this_cpu_read(cpu_hw_events.enabled));
2096 ++
2097 ++ prev_raw_count = local64_read(&hwc->prev_count);
2098 ++ rdpmcl(hwc->event_base_rdpmc, new_raw_count);
2099 ++ local64_set(&hwc->prev_count, new_raw_count);
2100 ++
2101 ++ /*
2102 ++ * Since the counter increments a negative counter value and
2103 ++ * overflows on the sign switch, giving the interval:
2104 ++ *
2105 ++ * [-period, 0]
2106 ++ *
2107 ++ * the difference between two consecutive reads is:
2108 ++ *
2109 ++ * A) value2 - value1;
2110 ++ * when no overflows have happened in between,
2111 ++ *
2112 ++ * B) (0 - value1) + (value2 - (-period));
2113 ++ * when one overflow happened in between,
2114 ++ *
2115 ++ * C) (0 - value1) + (n - 1) * (period) + (value2 - (-period));
2116 ++ * when @n overflows happened in between.
2117 ++ *
2118 ++ * Here A) is the obvious difference, B) is the extension to the
2119 ++ * discrete interval, where the first term is to the top of the
2120 ++ * interval and the second term is from the bottom of the next
2121 ++ * interval and C) the extension to multiple intervals, where the
2122 ++ * middle term is the whole intervals covered.
2123 ++ *
2124 ++ * An equivalent of C, by reduction, is:
2125 ++ *
2126 ++ * value2 - value1 + n * period
2127 ++ */
2128 ++ new = ((s64)(new_raw_count << shift) >> shift);
2129 ++ old = ((s64)(prev_raw_count << shift) >> shift);
2130 ++ local64_add(new - old + count * period, &event->count);
2131 ++
2132 ++ perf_event_update_userpage(event);
2133 ++
2134 ++ return 0;
2135 ++}
2136 ++
2137 + static void __intel_pmu_pebs_event(struct perf_event *event,
2138 + struct pt_regs *iregs,
2139 + void *base, void *top,
2140 + int bit, int count)
2141 + {
2142 ++ struct hw_perf_event *hwc = &event->hw;
2143 + struct perf_sample_data data;
2144 + struct pt_regs regs;
2145 + void *at = get_next_pebs_record_by_bit(base, top, bit);
2146 +
2147 +- if (!intel_pmu_save_and_restart(event) &&
2148 +- !(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD))
2149 ++ if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
2150 ++ /*
2151 ++ * Now, auto-reload is only enabled in fixed period mode.
2152 ++ * The reload value is always hwc->sample_period.
2153 ++ * This may need to change if auto-reload is enabled in
2154 ++ * freq mode later.
2155 ++ */
2156 ++ intel_pmu_save_and_restart_reload(event, count);
2157 ++ } else if (!intel_pmu_save_and_restart(event))
2158 + return;
2159 +
2160 + while (count > 1) {
2161 +@@ -1377,8 +1444,11 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
2162 + return;
2163 +
2164 + n = top - at;
2165 +- if (n <= 0)
2166 ++ if (n <= 0) {
2167 ++ if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2168 ++ intel_pmu_save_and_restart_reload(event, 0);
2169 + return;
2170 ++ }
2171 +
2172 + __intel_pmu_pebs_event(event, iregs, at, top, 0, n);
2173 + }
2174 +@@ -1401,8 +1471,22 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
2175 +
2176 + ds->pebs_index = ds->pebs_buffer_base;
2177 +
2178 +- if (unlikely(base >= top))
2179 ++ if (unlikely(base >= top)) {
2180 ++ /*
2181 ++ * drain_pebs() could be called twice in a short period
2182 ++ * for an auto-reload event in pmu::read(), with no
2183 ++ * overflows having happened in between.
2184 ++ * It needs to call intel_pmu_save_and_restart_reload() to
2185 ++ * update the event->count for this case.
2186 ++ */
2187 ++ for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled,
2188 ++ x86_pmu.max_pebs_events) {
2189 ++ event = cpuc->events[bit];
2190 ++ if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2191 ++ intel_pmu_save_and_restart_reload(event, 0);
2192 ++ }
2193 + return;
2194 ++ }
2195 +
2196 + for (at = base; at < top; at += x86_pmu.pebs_record_size) {
2197 + struct pebs_record_nhm *p = at;
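
The interval argument in intel_pmu_save_and_restart_reload() above reduces to event->count += value2 - value1 + n * period. A worked check of that reduction with made-up numbers, assuming a 48-bit counter (hence the 64 - 48 shift used for sign extension, as in the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	int shift = 64 - 48;			/* x86_pmu.cntval_bits == 48 assumed */
    	int64_t period = 1000;			/* hwc->sample_period (made up)      */
    	uint64_t prev_raw = (uint64_t)-300;	/* value1: 300 counts to overflow    */
    	uint64_t new_raw  = (uint64_t)-800;	/* value2: 800 counts to overflow    */
    	int count = 2;				/* overflows seen in between         */

    	/* Sign-extend the raw counter values, like the patch does. */
    	int64_t new = (int64_t)(new_raw << shift) >> shift;
    	int64_t old = (int64_t)(prev_raw << shift) >> shift;

    	/* value2 - value1 + n * period = -800 - (-300) + 2 * 1000 = 1500 */
    	printf("%lld\n", (long long)(new - old + count * period));
    	return 0;
    }

Cross-checking against case C) of the comment: (0 - (-300)) + (2 - 1) * 1000 + (-800 - (-1000)) = 300 + 1000 + 200 = 1500, the same total.
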
2198 +diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
2199 +index 39cd0615f04f..5e2ef399ac86 100644
2200 +--- a/arch/x86/events/perf_event.h
2201 ++++ b/arch/x86/events/perf_event.h
2202 +@@ -557,7 +557,7 @@ struct x86_pmu {
2203 + struct x86_pmu_quirk *quirks;
2204 + int perfctr_second_write;
2205 + bool late_ack;
2206 +- unsigned (*limit_period)(struct perf_event *event, unsigned l);
2207 ++ u64 (*limit_period)(struct perf_event *event, u64 l);
2208 +
2209 + /*
2210 + * sysfs attrs
2211 +diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
2212 +index 84137c22fdfa..6690cd3fc8b1 100644
2213 +--- a/arch/x86/include/asm/tlbflush.h
2214 ++++ b/arch/x86/include/asm/tlbflush.h
2215 +@@ -131,7 +131,12 @@ static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
2216 + static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
2217 + {
2218 + VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
2219 +- VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID));
2220 ++ /*
2221 ++ * Use boot_cpu_has() instead of this_cpu_has() as this function
2222 ++ * might be called during early boot. This should work even after
2223 ++ * boot because all CPUs have the same capabilities:
2224 ++ */
2225 ++ VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID));
2226 + return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
2227 + }
2228 +
2229 +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
2230 +index b203af0855b5..5071cc7972ea 100644
2231 +--- a/arch/x86/kernel/apic/apic.c
2232 ++++ b/arch/x86/kernel/apic/apic.c
2233 +@@ -1570,7 +1570,7 @@ void setup_local_APIC(void)
2234 + * TODO: set up through-local-APIC from through-I/O-APIC? --macro
2235 + */
2236 + value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
2237 +- if (!cpu && (pic_mode || !value)) {
2238 ++ if (!cpu && (pic_mode || !value || skip_ioapic_setup)) {
2239 + value = APIC_DM_EXTINT;
2240 + apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
2241 + } else {
2242 +diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
2243 +index 25de5f6ca997..5cd387fcc777 100644
2244 +--- a/arch/x86/kernel/devicetree.c
2245 ++++ b/arch/x86/kernel/devicetree.c
2246 +@@ -11,6 +11,7 @@
2247 + #include <linux/of_address.h>
2248 + #include <linux/of_platform.h>
2249 + #include <linux/of_irq.h>
2250 ++#include <linux/libfdt.h>
2251 + #include <linux/slab.h>
2252 + #include <linux/pci.h>
2253 + #include <linux/of_pci.h>
2254 +@@ -194,19 +195,22 @@ static struct of_ioapic_type of_ioapic_type[] =
2255 + static int dt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
2256 + unsigned int nr_irqs, void *arg)
2257 + {
2258 +- struct of_phandle_args *irq_data = (void *)arg;
2259 ++ struct irq_fwspec *fwspec = (struct irq_fwspec *)arg;
2260 + struct of_ioapic_type *it;
2261 + struct irq_alloc_info tmp;
2262 ++ int type_index;
2263 +
2264 +- if (WARN_ON(irq_data->args_count < 2))
2265 ++ if (WARN_ON(fwspec->param_count < 2))
2266 + return -EINVAL;
2267 +- if (irq_data->args[1] >= ARRAY_SIZE(of_ioapic_type))
2268 ++
2269 ++ type_index = fwspec->param[1];
2270 ++ if (type_index >= ARRAY_SIZE(of_ioapic_type))
2271 + return -EINVAL;
2272 +
2273 +- it = &of_ioapic_type[irq_data->args[1]];
2274 ++ it = &of_ioapic_type[type_index];
2275 + ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->trigger, it->polarity);
2276 + tmp.ioapic_id = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain));
2277 +- tmp.ioapic_pin = irq_data->args[0];
2278 ++ tmp.ioapic_pin = fwspec->param[0];
2279 +
2280 + return mp_irqdomain_alloc(domain, virq, nr_irqs, &tmp);
2281 + }
2282 +@@ -270,14 +274,15 @@ static void __init x86_flattree_get_config(void)
2283 +
2284 + map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), (u64)128);
2285 +
2286 +- initial_boot_params = dt = early_memremap(initial_dtb, map_len);
2287 +- size = of_get_flat_dt_size();
2288 ++ dt = early_memremap(initial_dtb, map_len);
2289 ++ size = fdt_totalsize(dt);
2290 + if (map_len < size) {
2291 + early_memunmap(dt, map_len);
2292 +- initial_boot_params = dt = early_memremap(initial_dtb, size);
2293 ++ dt = early_memremap(initial_dtb, size);
2294 + map_len = size;
2295 + }
2296 +
2297 ++ early_init_dt_verify(dt);
2298 + unflatten_and_copy_device_tree();
2299 + early_memunmap(dt, map_len);
2300 + }
2301 +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
2302 +index 3f400004f602..752f361ef453 100644
2303 +--- a/arch/x86/kvm/cpuid.c
2304 ++++ b/arch/x86/kvm/cpuid.c
2305 +@@ -402,8 +402,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
2306 +
2307 + /* cpuid 7.0.edx*/
2308 + const u32 kvm_cpuid_7_0_edx_x86_features =
2309 +- F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SSBD) |
2310 +- F(ARCH_CAPABILITIES);
2311 ++ F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
2312 ++ F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);
2313 +
2314 + /* all calls to cpuid_count() should be made on the same cpu */
2315 + get_cpu();
2316 +@@ -490,6 +490,11 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
2317 + entry->ecx &= ~F(PKU);
2318 + entry->edx &= kvm_cpuid_7_0_edx_x86_features;
2319 + cpuid_mask(&entry->edx, CPUID_7_EDX);
2320 ++ /*
2321 ++ * We emulate ARCH_CAPABILITIES in software even
2322 ++ * if the host doesn't support it.
2323 ++ */
2324 ++ entry->edx |= F(ARCH_CAPABILITIES);
2325 + } else {
2326 + entry->ebx = 0;
2327 + entry->ecx = 0;
2328 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
2329 +index 7cf470a3755f..3773c4625114 100644
2330 +--- a/arch/x86/kvm/lapic.c
2331 ++++ b/arch/x86/kvm/lapic.c
2332 +@@ -321,8 +321,16 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu)
2333 + if (!lapic_in_kernel(vcpu))
2334 + return;
2335 +
2336 ++ /*
2337 ++ * KVM emulates 82093AA datasheet (with in-kernel IOAPIC implementation)
2338 ++ * which doesn't have an EOI register; some buggy OSes (e.g. Windows with
2339 ++ * the Hyper-V role) disable EOI broadcast in the lapic without checking
2340 ++ * the IOAPIC version first, so level-triggered interrupts never get
2341 ++ * EOIed in the IOAPIC.
2342 ++ */
2343 + feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
2344 +- if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
2345 ++ if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))) &&
2346 ++ !ioapic_in_kernel(vcpu->kvm))
2347 + v |= APIC_LVR_DIRECTED_EOI;
2348 + kvm_lapic_set_reg(apic, APIC_LVR, v);
2349 + }
2350 +@@ -1514,11 +1522,23 @@ static bool set_target_expiration(struct kvm_lapic *apic)
2351 +
2352 + static void advance_periodic_target_expiration(struct kvm_lapic *apic)
2353 + {
2354 +- apic->lapic_timer.tscdeadline +=
2355 +- nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
2356 ++ ktime_t now = ktime_get();
2357 ++ u64 tscl = rdtsc();
2358 ++ ktime_t delta;
2359 ++
2360 ++ /*
2361 ++ * Synchronize both deadlines to the same time source or
2362 ++ * differences in the periods (caused by differences in the
2363 ++ * underlying clocks or numerical approximation errors) will
2364 ++ * cause the two to drift apart over time as the errors
2365 ++ * accumulate.
2366 ++ */
2367 + apic->lapic_timer.target_expiration =
2368 + ktime_add_ns(apic->lapic_timer.target_expiration,
2369 + apic->lapic_timer.period);
2370 ++ delta = ktime_sub(apic->lapic_timer.target_expiration, now);
2371 ++ apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
2372 ++ nsec_to_cycles(apic->vcpu, delta);
2373 + }
2374 +
2375 + static void start_sw_period(struct kvm_lapic *apic)
2376 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
2377 +index 3deb153bf9d9..11e2147c3824 100644
2378 +--- a/arch/x86/kvm/vmx.c
2379 ++++ b/arch/x86/kvm/vmx.c
2380 +@@ -2561,6 +2561,8 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu)
2381 + return;
2382 + }
2383 +
2384 ++ WARN_ON_ONCE(vmx->emulation_required);
2385 ++
2386 + if (kvm_exception_is_soft(nr)) {
2387 + vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2388 + vmx->vcpu.arch.event_exit_inst_len);
2389 +@@ -6854,12 +6856,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
2390 + goto out;
2391 + }
2392 +
2393 +- if (err != EMULATE_DONE) {
2394 +- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2395 +- vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
2396 +- vcpu->run->internal.ndata = 0;
2397 +- return 0;
2398 +- }
2399 ++ if (err != EMULATE_DONE)
2400 ++ goto emulation_error;
2401 ++
2402 ++ if (vmx->emulation_required && !vmx->rmode.vm86_active &&
2403 ++ vcpu->arch.exception.pending)
2404 ++ goto emulation_error;
2405 +
2406 + if (vcpu->arch.halt_request) {
2407 + vcpu->arch.halt_request = 0;
2408 +@@ -6875,6 +6877,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
2409 +
2410 + out:
2411 + return ret;
2412 ++
2413 ++emulation_error:
2414 ++ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2415 ++ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
2416 ++ vcpu->run->internal.ndata = 0;
2417 ++ return 0;
2418 + }
2419 +
2420 + static int __grow_ple_window(int val)
2421 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
2422 +index f3df3a934733..999560ff12b5 100644
2423 +--- a/arch/x86/kvm/x86.c
2424 ++++ b/arch/x86/kvm/x86.c
2425 +@@ -7777,6 +7777,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2426 + {
2427 + struct msr_data apic_base_msr;
2428 + int mmu_reset_needed = 0;
2429 ++ int cpuid_update_needed = 0;
2430 + int pending_vec, max_bits, idx;
2431 + struct desc_ptr dt;
2432 + int ret = -EINVAL;
2433 +@@ -7817,8 +7818,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2434 + vcpu->arch.cr0 = sregs->cr0;
2435 +
2436 + mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
2437 ++ cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) &
2438 ++ (X86_CR4_OSXSAVE | X86_CR4_PKE));
2439 + kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
2440 +- if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE))
2441 ++ if (cpuid_update_needed)
2442 + kvm_update_cpuid(vcpu);
2443 +
2444 + idx = srcu_read_lock(&vcpu->kvm->srcu);
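
The kvm_arch_vcpu_ioctl_set_sregs() change switches the condition from "is a relevant CR4 bit set?" to "did a relevant CR4 bit change?", via XOR of the old and new values before masking. The predicate in isolation (CR4 bit positions per the Intel SDM; the sample values are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define X86_CR4_OSXSAVE (1UL << 18)
    #define X86_CR4_PKE     (1UL << 22)

    int main(void)
    {
    	uint64_t old_cr4 = X86_CR4_OSXSAVE;	/* already set */
    	uint64_t new_cr4 = X86_CR4_OSXSAVE;	/* unchanged   */

    	/* Old behaviour: bit set => always re-run the cpuid update. */
    	int old_rule = (new_cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE)) != 0;

    	/* Patched behaviour: only when a relevant bit actually flips. */
    	int new_rule = ((old_cr4 ^ new_cr4) &
    			(X86_CR4_OSXSAVE | X86_CR4_PKE)) != 0;

    	printf("old rule: %d, new rule: %d\n", old_rule, new_rule); /* 1, 0 */
    	return 0;
    }
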
2445 +diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
2446 +index 85cf12219dea..94c41044a578 100644
2447 +--- a/arch/x86/mm/pageattr.c
2448 ++++ b/arch/x86/mm/pageattr.c
2449 +@@ -298,9 +298,11 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
2450 +
2451 + /*
2452 + * The .rodata section needs to be read-only. Using the pfn
2453 +- * catches all aliases.
2454 ++ * catches all aliases. This also includes __ro_after_init,
2455 ++ * so do not enforce until kernel_set_to_readonly is true.
2456 + */
2457 +- if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
2458 ++ if (kernel_set_to_readonly &&
2459 ++ within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
2460 + __pa_symbol(__end_rodata) >> PAGE_SHIFT))
2461 + pgprot_val(forbidden) |= _PAGE_RW;
2462 +
2463 +diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
2464 +index 34cda7e0551b..c03c85e4fb6a 100644
2465 +--- a/arch/x86/mm/pgtable.c
2466 ++++ b/arch/x86/mm/pgtable.c
2467 +@@ -1,6 +1,7 @@
2468 + // SPDX-License-Identifier: GPL-2.0
2469 + #include <linux/mm.h>
2470 + #include <linux/gfp.h>
2471 ++#include <linux/hugetlb.h>
2472 + #include <asm/pgalloc.h>
2473 + #include <asm/pgtable.h>
2474 + #include <asm/tlb.h>
2475 +@@ -636,6 +637,10 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
2476 + (mtrr != MTRR_TYPE_WRBACK))
2477 + return 0;
2478 +
2479 ++ /* Bail out if we are on a populated non-leaf entry: */
2480 ++ if (pud_present(*pud) && !pud_huge(*pud))
2481 ++ return 0;
2482 ++
2483 + prot = pgprot_4k_2_large(prot);
2484 +
2485 + set_pte((pte_t *)pud, pfn_pte(
2486 +@@ -664,6 +669,10 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
2487 + return 0;
2488 + }
2489 +
2490 ++ /* Bail out if we are on a populated non-leaf entry: */
2491 ++ if (pmd_present(*pmd) && !pmd_huge(*pmd))
2492 ++ return 0;
2493 ++
2494 + prot = pgprot_4k_2_large(prot);
2495 +
2496 + set_pte((pte_t *)pmd, pfn_pte(
2497 +diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
2498 +index 754431031282..552c1f725b6c 100644
2499 +--- a/drivers/acpi/acpi_pad.c
2500 ++++ b/drivers/acpi/acpi_pad.c
2501 +@@ -110,6 +110,7 @@ static void round_robin_cpu(unsigned int tsk_index)
2502 + cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
2503 + if (cpumask_empty(tmp)) {
2504 + mutex_unlock(&round_robin_lock);
2505 ++ free_cpumask_var(tmp);
2506 + return;
2507 + }
2508 + for_each_cpu(cpu, tmp) {
2509 +@@ -127,6 +128,8 @@ static void round_robin_cpu(unsigned int tsk_index)
2510 + mutex_unlock(&round_robin_lock);
2511 +
2512 + set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
2513 ++
2514 ++ free_cpumask_var(tmp);
2515 + }
2516 +
2517 + static void exit_round_robin(unsigned int tsk_index)
2518 +diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
2519 +index 4b2b0b44a16b..a65c186114eb 100644
2520 +--- a/drivers/acpi/acpica/evevent.c
2521 ++++ b/drivers/acpi/acpica/evevent.c
2522 +@@ -204,6 +204,7 @@ u32 acpi_ev_fixed_event_detect(void)
2523 + u32 fixed_status;
2524 + u32 fixed_enable;
2525 + u32 i;
2526 ++ acpi_status status;
2527 +
2528 + ACPI_FUNCTION_NAME(ev_fixed_event_detect);
2529 +
2530 +@@ -211,8 +212,12 @@ u32 acpi_ev_fixed_event_detect(void)
2531 + * Read the fixed feature status and enable registers, as all the cases
2532 + * depend on their values. Ignore errors here.
2533 + */
2534 +- (void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
2535 +- (void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
2536 ++ status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
2537 ++ status |=
2538 ++ acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
2539 ++ if (ACPI_FAILURE(status)) {
2540 ++ return (int_status);
2541 ++ }
2542 +
2543 + ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
2544 + "Fixed Event Block: Enable %08X Status %08X\n",
2545 +diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
2546 +index c2d883b8c45e..a18e61081013 100644
2547 +--- a/drivers/acpi/acpica/nseval.c
2548 ++++ b/drivers/acpi/acpica/nseval.c
2549 +@@ -308,6 +308,14 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
2550 + /* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */
2551 +
2552 + status = AE_OK;
2553 ++ } else if (ACPI_FAILURE(status)) {
2554 ++
2555 ++ /* If return_object exists, delete it */
2556 ++
2557 ++ if (info->return_object) {
2558 ++ acpi_ut_remove_reference(info->return_object);
2559 ++ info->return_object = NULL;
2560 ++ }
2561 + }
2562 +
2563 + ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
2564 +diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
2565 +index dbc51bc5fdd6..5ca895da3b10 100644
2566 +--- a/drivers/acpi/acpica/psargs.c
2567 ++++ b/drivers/acpi/acpica/psargs.c
2568 +@@ -890,6 +890,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
2569 + ACPI_POSSIBLE_METHOD_CALL);
2570 +
2571 + if (arg->common.aml_opcode == AML_INT_METHODCALL_OP) {
2572 ++
2573 ++ /* Free method call op and corresponding namestring sub-object */
2574 ++
2575 ++ acpi_ps_free_op(arg->common.value.arg);
2576 + acpi_ps_free_op(arg);
2577 + arg = NULL;
2578 + walk_state->arg_count = 1;
2579 +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
2580 +index 1ff17799769d..1d396b6e6000 100644
2581 +--- a/drivers/ata/ahci.c
2582 ++++ b/drivers/ata/ahci.c
2583 +@@ -334,6 +334,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
2584 + { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_mobile }, /* Lynx LP RAID */
2585 + { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_mobile }, /* Lynx LP RAID */
2586 + { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_mobile }, /* Lynx LP RAID */
2587 ++ { PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_mobile }, /* Cannon Lake PCH-LP AHCI */
2588 + { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
2589 + { PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
2590 + { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
2591 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
2592 +index 0df21f046fc6..d4fb9e0c29ee 100644
2593 +--- a/drivers/ata/libata-core.c
2594 ++++ b/drivers/ata/libata-core.c
2595 +@@ -4493,6 +4493,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
2596 + /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
2597 + { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
2598 +
2599 ++ /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on
2600 ++ SD7SN6S256G and SD8SN8U256G */
2601 ++ { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, },
2602 ++
2603 + /* devices which puke on READ_NATIVE_MAX */
2604 + { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
2605 + { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
2606 +@@ -4553,6 +4557,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
2607 + { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
2608 +
2609 + /* devices that don't properly handle queued TRIM commands */
2610 ++ { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
2611 ++ ATA_HORKAGE_ZERO_AFTER_TRIM, },
2612 + { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
2613 + ATA_HORKAGE_ZERO_AFTER_TRIM, },
2614 + { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
2615 +diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
2616 +index 7dd36ace6152..103b5a38ee38 100644
2617 +--- a/drivers/base/firmware_class.c
2618 ++++ b/drivers/base/firmware_class.c
2619 +@@ -524,7 +524,7 @@ static int fw_add_devm_name(struct device *dev, const char *name)
2620 +
2621 + fwn = fw_find_devm_name(dev, name);
2622 + if (fwn)
2623 +- return 1;
2624 ++ return 0;
2625 +
2626 + fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
2627 + GFP_KERNEL);
2628 +@@ -552,6 +552,7 @@ static int assign_fw(struct firmware *fw, struct device *device,
2629 + unsigned int opt_flags)
2630 + {
2631 + struct fw_priv *fw_priv = fw->priv;
2632 ++ int ret;
2633 +
2634 + mutex_lock(&fw_lock);
2635 + if (!fw_priv->size || fw_state_is_aborted(fw_priv)) {
2636 +@@ -568,8 +569,13 @@ static int assign_fw(struct firmware *fw, struct device *device,
2637 + */
2638 + /* don't cache firmware handled without uevent */
2639 + if (device && (opt_flags & FW_OPT_UEVENT) &&
2640 +- !(opt_flags & FW_OPT_NOCACHE))
2641 +- fw_add_devm_name(device, fw_priv->fw_name);
2642 ++ !(opt_flags & FW_OPT_NOCACHE)) {
2643 ++ ret = fw_add_devm_name(device, fw_priv->fw_name);
2644 ++ if (ret) {
2645 ++ mutex_unlock(&fw_lock);
2646 ++ return ret;
2647 ++ }
2648 ++ }
2649 +
2650 + /*
2651 + * After caching firmware image is started, let it piggyback
2652 +diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
2653 +index 02a497e7c785..e5e067091572 100644
2654 +--- a/drivers/base/power/main.c
2655 ++++ b/drivers/base/power/main.c
2656 +@@ -1923,10 +1923,8 @@ static int device_prepare(struct device *dev, pm_message_t state)
2657 +
2658 + dev->power.wakeup_path = false;
2659 +
2660 +- if (dev->power.no_pm_callbacks) {
2661 +- ret = 1; /* Let device go direct_complete */
2662 ++ if (dev->power.no_pm_callbacks)
2663 + goto unlock;
2664 +- }
2665 +
2666 + if (dev->pm_domain)
2667 + callback = dev->pm_domain->ops.prepare;
2668 +@@ -1960,7 +1958,8 @@ static int device_prepare(struct device *dev, pm_message_t state)
2669 + */
2670 + spin_lock_irq(&dev->power.lock);
2671 + dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
2672 +- pm_runtime_suspended(dev) && ret > 0 &&
2673 ++ ((pm_runtime_suspended(dev) && ret > 0) ||
2674 ++ dev->power.no_pm_callbacks) &&
2675 + !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
2676 + spin_unlock_irq(&dev->power.lock);
2677 + return 0;
2678 +diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
2679 +index 453116fd4362..c7b7c5fa73ab 100644
2680 +--- a/drivers/base/regmap/regmap.c
2681 ++++ b/drivers/base/regmap/regmap.c
2682 +@@ -99,7 +99,7 @@ bool regmap_cached(struct regmap *map, unsigned int reg)
2683 + int ret;
2684 + unsigned int val;
2685 +
2686 +- if (map->cache == REGCACHE_NONE)
2687 ++ if (map->cache_type == REGCACHE_NONE)
2688 + return false;
2689 +
2690 + if (!map->cache_ops)
2691 +diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
2692 +index f040aba48d50..27e9686b6d3a 100644
2693 +--- a/drivers/bcma/driver_mips.c
2694 ++++ b/drivers/bcma/driver_mips.c
2695 +@@ -184,7 +184,7 @@ static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
2696 + {
2697 + int i;
2698 + static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
2699 +- char interrupts[20];
2700 ++ char interrupts[25];
2701 + char *ints = interrupts;
2702 +
2703 + for (i = 0; i < ARRAY_SIZE(irq_name); i++)
2704 +diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
2705 +index 287a09611c0f..763f06603131 100644
2706 +--- a/drivers/block/null_blk.c
2707 ++++ b/drivers/block/null_blk.c
2708 +@@ -72,6 +72,7 @@ enum nullb_device_flags {
2709 + NULLB_DEV_FL_CACHE = 3,
2710 + };
2711 +
2712 ++#define MAP_SZ ((PAGE_SIZE >> SECTOR_SHIFT) + 2)
2713 + /*
2714 + * nullb_page is a page in memory for nullb devices.
2715 + *
2716 +@@ -86,10 +87,10 @@ enum nullb_device_flags {
2717 + */
2718 + struct nullb_page {
2719 + struct page *page;
2720 +- unsigned long bitmap;
2721 ++ DECLARE_BITMAP(bitmap, MAP_SZ);
2722 + };
2723 +-#define NULLB_PAGE_LOCK (sizeof(unsigned long) * 8 - 1)
2724 +-#define NULLB_PAGE_FREE (sizeof(unsigned long) * 8 - 2)
2725 ++#define NULLB_PAGE_LOCK (MAP_SZ - 1)
2726 ++#define NULLB_PAGE_FREE (MAP_SZ - 2)
2727 +
2728 + struct nullb_device {
2729 + struct nullb *nullb;
2730 +@@ -728,7 +729,7 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
2731 + if (!t_page->page)
2732 + goto out_freepage;
2733 +
2734 +- t_page->bitmap = 0;
2735 ++ memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
2736 + return t_page;
2737 + out_freepage:
2738 + kfree(t_page);
2739 +@@ -738,13 +739,20 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
2740 +
2741 + static void null_free_page(struct nullb_page *t_page)
2742 + {
2743 +- __set_bit(NULLB_PAGE_FREE, &t_page->bitmap);
2744 +- if (test_bit(NULLB_PAGE_LOCK, &t_page->bitmap))
2745 ++ __set_bit(NULLB_PAGE_FREE, t_page->bitmap);
2746 ++ if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
2747 + return;
2748 + __free_page(t_page->page);
2749 + kfree(t_page);
2750 + }
2751 +
2752 ++static bool null_page_empty(struct nullb_page *page)
2753 ++{
2754 ++ int size = MAP_SZ - 2;
2755 ++
2756 ++ return find_first_bit(page->bitmap, size) == size;
2757 ++}
2758 ++
2759 + static void null_free_sector(struct nullb *nullb, sector_t sector,
2760 + bool is_cache)
2761 + {
2762 +@@ -759,9 +767,9 @@ static void null_free_sector(struct nullb *nullb, sector_t sector,
2763 +
2764 + t_page = radix_tree_lookup(root, idx);
2765 + if (t_page) {
2766 +- __clear_bit(sector_bit, &t_page->bitmap);
2767 ++ __clear_bit(sector_bit, t_page->bitmap);
2768 +
2769 +- if (!t_page->bitmap) {
2770 ++ if (null_page_empty(t_page)) {
2771 + ret = radix_tree_delete_item(root, idx, t_page);
2772 + WARN_ON(ret != t_page);
2773 + null_free_page(ret);
2774 +@@ -832,7 +840,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb,
2775 + t_page = radix_tree_lookup(root, idx);
2776 + WARN_ON(t_page && t_page->page->index != idx);
2777 +
2778 +- if (t_page && (for_write || test_bit(sector_bit, &t_page->bitmap)))
2779 ++ if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
2780 + return t_page;
2781 +
2782 + return NULL;
2783 +@@ -895,10 +903,10 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
2784 +
2785 + t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
2786 +
2787 +- __clear_bit(NULLB_PAGE_LOCK, &c_page->bitmap);
2788 +- if (test_bit(NULLB_PAGE_FREE, &c_page->bitmap)) {
2789 ++ __clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
2790 ++ if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
2791 + null_free_page(c_page);
2792 +- if (t_page && t_page->bitmap == 0) {
2793 ++ if (t_page && null_page_empty(t_page)) {
2794 + ret = radix_tree_delete_item(&nullb->dev->data,
2795 + idx, t_page);
2796 + null_free_page(t_page);
2797 +@@ -914,11 +922,11 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
2798 +
2799 + for (i = 0; i < PAGE_SECTORS;
2800 + i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
2801 +- if (test_bit(i, &c_page->bitmap)) {
2802 ++ if (test_bit(i, c_page->bitmap)) {
2803 + offset = (i << SECTOR_SHIFT);
2804 + memcpy(dst + offset, src + offset,
2805 + nullb->dev->blocksize);
2806 +- __set_bit(i, &t_page->bitmap);
2807 ++ __set_bit(i, t_page->bitmap);
2808 + }
2809 + }
2810 +
2811 +@@ -955,10 +963,10 @@ static int null_make_cache_space(struct nullb *nullb, unsigned long n)
2812 + * We found the page which is being flushed to disk by other
2813 + * threads
2814 + */
2815 +- if (test_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap))
2816 ++ if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
2817 + c_pages[i] = NULL;
2818 + else
2819 +- __set_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap);
2820 ++ __set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
2821 + }
2822 +
2823 + one_round = 0;
2824 +@@ -1011,7 +1019,7 @@ static int copy_to_nullb(struct nullb *nullb, struct page *source,
2825 + kunmap_atomic(dst);
2826 + kunmap_atomic(src);
2827 +
2828 +- __set_bit(sector & SECTOR_MASK, &t_page->bitmap);
2829 ++ __set_bit(sector & SECTOR_MASK, t_page->bitmap);
2830 +
2831 + if (is_fua)
2832 + null_free_sector(nullb, sector, true);
2833 +@@ -1802,10 +1810,6 @@ static int __init null_init(void)
2834 + struct nullb *nullb;
2835 + struct nullb_device *dev;
2836 +
2837 +- /* check for nullb_page.bitmap */
2838 +- if (sizeof(unsigned long) * 8 - 2 < (PAGE_SIZE >> SECTOR_SHIFT))
2839 +- return -EINVAL;
2840 +-
2841 + if (g_bs > PAGE_SIZE) {
2842 + pr_warn("null_blk: invalid block size\n");
2843 + pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
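
After the null_blk conversion above, "this page holds no data" means "no bit below the two control bits is set", which null_page_empty() expresses as find_first_bit() over a search size of MAP_SZ - 2. A standalone version of that check (sizes made up, with a tiny stand-in for the kernel's find_first_bit()):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAP_SZ		10		/* 8 data bits + LOCK + FREE, for the sketch */
    #define PAGE_LOCK	(MAP_SZ - 1)
    #define PAGE_FREE	(MAP_SZ - 2)

    static unsigned long find_first_bit_sketch(const unsigned long *map, int size)
    {
    	for (int i = 0; i < size; i++)
    		if (map[0] & (1UL << i))
    			return i;
    	return size;			/* none set, like the kernel helper */
    }

    static bool page_empty(const unsigned long *bitmap)
    {
    	int size = MAP_SZ - 2;		/* ignore the two control bits */

    	return find_first_bit_sketch(bitmap, size) == size;
    }

    int main(void)
    {
    	unsigned long bitmap[1] = { 0 };

    	bitmap[0] |= 1UL << PAGE_LOCK;	/* control bit only */
    	printf("empty: %d\n", page_empty(bitmap));	/* 1 */

    	bitmap[0] |= 1UL << 3;		/* a data sector bit */
    	printf("empty: %d\n", page_empty(bitmap));	/* 0 */
    	return 0;
    }
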
2844 +diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
2845 +index 7b8c6368beb7..a026211afb51 100644
2846 +--- a/drivers/block/paride/pcd.c
2847 ++++ b/drivers/block/paride/pcd.c
2848 +@@ -230,6 +230,8 @@ static int pcd_block_open(struct block_device *bdev, fmode_t mode)
2849 + struct pcd_unit *cd = bdev->bd_disk->private_data;
2850 + int ret;
2851 +
2852 ++ check_disk_change(bdev);
2853 ++
2854 + mutex_lock(&pcd_mutex);
2855 + ret = cdrom_open(&cd->info, bdev, mode);
2856 + mutex_unlock(&pcd_mutex);
2857 +diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
2858 +index 5f7d86509f2f..bfc566d3f31a 100644
2859 +--- a/drivers/cdrom/cdrom.c
2860 ++++ b/drivers/cdrom/cdrom.c
2861 +@@ -1152,9 +1152,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
2862 +
2863 + cd_dbg(CD_OPEN, "entering cdrom_open\n");
2864 +
2865 +- /* open is event synchronization point, check events first */
2866 +- check_disk_change(bdev);
2867 +-
2868 + /* if this was a O_NONBLOCK open and we should honor the flags,
2869 + * do a quick open without drive/disc integrity checks. */
2870 + cdi->use_count++;
2871 +diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
2872 +index 6495b03f576c..ae3a7537cf0f 100644
2873 +--- a/drivers/cdrom/gdrom.c
2874 ++++ b/drivers/cdrom/gdrom.c
2875 +@@ -497,6 +497,9 @@ static const struct cdrom_device_ops gdrom_ops = {
2876 + static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
2877 + {
2878 + int ret;
2879 ++
2880 ++ check_disk_change(bdev);
2881 ++
2882 + mutex_lock(&gdrom_mutex);
2883 + ret = cdrom_open(gd.cd_info, bdev, mode);
2884 + mutex_unlock(&gdrom_mutex);
2885 +diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
2886 +index 7a84cec30c3a..6767d965c36c 100644
2887 +--- a/drivers/char/hw_random/bcm2835-rng.c
2888 ++++ b/drivers/char/hw_random/bcm2835-rng.c
2889 +@@ -163,6 +163,8 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
2890 +
2891 + /* Clock is optional on most platforms */
2892 + priv->clk = devm_clk_get(dev, NULL);
2893 ++ if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
2894 ++ return -EPROBE_DEFER;
2895 +
2896 + priv->rng.name = pdev->name;
2897 + priv->rng.init = bcm2835_rng_init;
2898 +diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
2899 +index 63d84e6f1891..83c695938a2d 100644
2900 +--- a/drivers/char/hw_random/stm32-rng.c
2901 ++++ b/drivers/char/hw_random/stm32-rng.c
2902 +@@ -21,6 +21,7 @@
2903 + #include <linux/of_address.h>
2904 + #include <linux/of_platform.h>
2905 + #include <linux/pm_runtime.h>
2906 ++#include <linux/reset.h>
2907 + #include <linux/slab.h>
2908 +
2909 + #define RNG_CR 0x00
2910 +@@ -46,6 +47,7 @@ struct stm32_rng_private {
2911 + struct hwrng rng;
2912 + void __iomem *base;
2913 + struct clk *clk;
2914 ++ struct reset_control *rst;
2915 + };
2916 +
2917 + static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
2918 +@@ -140,6 +142,13 @@ static int stm32_rng_probe(struct platform_device *ofdev)
2919 + if (IS_ERR(priv->clk))
2920 + return PTR_ERR(priv->clk);
2921 +
2922 ++ priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
2923 ++ if (!IS_ERR(priv->rst)) {
2924 ++ reset_control_assert(priv->rst);
2925 ++ udelay(2);
2926 ++ reset_control_deassert(priv->rst);
2927 ++ }
2928 ++
2929 + dev_set_drvdata(dev, priv);
2930 +
2931 + priv->rng.name = dev_driver_string(dev),
2932 +diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
2933 +index f929e72bdac8..16d7fb563718 100644
2934 +--- a/drivers/char/ipmi/ipmi_ssif.c
2935 ++++ b/drivers/char/ipmi/ipmi_ssif.c
2936 +@@ -761,7 +761,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
2937 + ssif_info->ssif_state = SSIF_NORMAL;
2938 + ipmi_ssif_unlock_cond(ssif_info, flags);
2939 + pr_warn(PFX "Error getting flags: %d %d, %x\n",
2940 +- result, len, data[2]);
2941 ++ result, len, (len >= 3) ? data[2] : 0);
2942 + } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
2943 + || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
2944 + /*
2945 +@@ -783,7 +783,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
2946 + if ((result < 0) || (len < 3) || (data[2] != 0)) {
2947 + /* Error clearing flags */
2948 + pr_warn(PFX "Error clearing flags: %d %d, %x\n",
2949 +- result, len, data[2]);
2950 ++ result, len, (len >= 3) ? data[2] : 0);
2951 + } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
2952 + || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) {
2953 + pr_warn(PFX "Invalid response clearing flags: %x %x\n",
2954 +diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
2955 +index dcb1cb9a4572..8b432d6e846d 100644
2956 +--- a/drivers/cpufreq/cppc_cpufreq.c
2957 ++++ b/drivers/cpufreq/cppc_cpufreq.c
2958 +@@ -167,9 +167,19 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
2959 + NSEC_PER_USEC;
2960 + policy->shared_type = cpu->shared_type;
2961 +
2962 +- if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
2963 ++ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
2964 ++ int i;
2965 ++
2966 + cpumask_copy(policy->cpus, cpu->shared_cpu_map);
2967 +- else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
2968 ++
2969 ++ for_each_cpu(i, policy->cpus) {
2970 ++ if (unlikely(i == policy->cpu))
2971 ++ continue;
2972 ++
2973 ++ memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps,
2974 ++ sizeof(cpu->perf_caps));
2975 ++ }
2976 ++ } else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
2977 + /* Support only SW_ANY for now. */
2978 + pr_debug("Unsupported CPU co-ord type\n");
2979 + return -EFAULT;
2980 +@@ -233,8 +243,13 @@ static int __init cppc_cpufreq_init(void)
2981 + return ret;
2982 +
2983 + out:
2984 +- for_each_possible_cpu(i)
2985 +- kfree(all_cpu_data[i]);
2986 ++ for_each_possible_cpu(i) {
2987 ++ cpu = all_cpu_data[i];
2988 ++ if (!cpu)
2989 ++ break;
2990 ++ free_cpumask_var(cpu->shared_cpu_map);
2991 ++ kfree(cpu);
2992 ++ }
2993 +
2994 + kfree(all_cpu_data);
2995 + return -ENODEV;
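
The old error path blindly kfree()'d every slot. Since allocation proceeds in CPU order, the first NULL slot marks the end of what was actually allocated, and each entry's cpumask has to be released before the entry itself. A compact userspace model of the same cleanup order (types hypothetical, allocation-failure checks omitted for brevity):

    #include <stdlib.h>

    struct cpu_entry {
            unsigned long *shared_cpu_map; /* stands in for the cpumask */
    };

    /* free a partially-initialized array: inner resource first, and
     * stop at the first slot that was never allocated */
    static void free_all(struct cpu_entry **all, int n)
    {
            for (int i = 0; i < n; i++) {
                    if (!all[i])
                            break;
                    free(all[i]->shared_cpu_map);
                    free(all[i]);
            }
            free(all);
    }

    int main(void)
    {
            struct cpu_entry **all = calloc(4, sizeof(*all));

            all[0] = calloc(1, sizeof(*all[0]));
            all[0]->shared_cpu_map = calloc(1, sizeof(unsigned long));
            free_all(all, 4); /* slots 1..3 were never allocated */
            return 0;
    }
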
2996 +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
2997 +index de33ebf008ad..8814c572e263 100644
2998 +--- a/drivers/cpufreq/cpufreq.c
2999 ++++ b/drivers/cpufreq/cpufreq.c
3000 +@@ -1327,14 +1327,14 @@ static int cpufreq_online(unsigned int cpu)
3001 + return 0;
3002 +
3003 + out_exit_policy:
3004 ++ for_each_cpu(j, policy->real_cpus)
3005 ++ remove_cpu_dev_symlink(policy, get_cpu_device(j));
3006 ++
3007 + up_write(&policy->rwsem);
3008 +
3009 + if (cpufreq_driver->exit)
3010 + cpufreq_driver->exit(policy);
3011 +
3012 +- for_each_cpu(j, policy->real_cpus)
3013 +- remove_cpu_dev_symlink(policy, get_cpu_device(j));
3014 +-
3015 + out_free_policy:
3016 + cpufreq_policy_free(policy);
3017 + return ret;
3018 +diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
3019 +index d7327fd5f445..de1fd59fe136 100644
3020 +--- a/drivers/dma/pl330.c
3021 ++++ b/drivers/dma/pl330.c
3022 +@@ -1510,7 +1510,7 @@ static void pl330_dotask(unsigned long data)
3023 + /* Returns 1 if state was updated, 0 otherwise */
3024 + static int pl330_update(struct pl330_dmac *pl330)
3025 + {
3026 +- struct dma_pl330_desc *descdone, *tmp;
3027 ++ struct dma_pl330_desc *descdone;
3028 + unsigned long flags;
3029 + void __iomem *regs;
3030 + u32 val;
3031 +@@ -1588,7 +1588,9 @@ static int pl330_update(struct pl330_dmac *pl330)
3032 + }
3033 +
3034 + /* Now that we are in no hurry, do the callbacks */
3035 +- list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) {
3036 ++ while (!list_empty(&pl330->req_done)) {
3037 ++ descdone = list_first_entry(&pl330->req_done,
3038 ++ struct dma_pl330_desc, rqd);
3039 + list_del(&descdone->rqd);
3040 + spin_unlock_irqrestore(&pl330->lock, flags);
3041 + dma_pl330_rqcb(descdone, PL330_ERR_NONE);
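
list_for_each_entry_safe() only tolerates the loop body deleting the *current* node: it caches the next pointer, and that cache goes stale here because the lock is dropped around the callback, letting another context modify the list. Re-reading the head on every pass avoids the stale pointer. A hedged sketch with hypothetical names:

    spin_lock_irqsave(&lock, flags);
    while (!list_empty(&done)) {
            desc = list_first_entry(&done, struct my_desc, node);
            list_del(&desc->node);            /* detach while still locked */
            spin_unlock_irqrestore(&lock, flags);
            my_complete_callback(desc);       /* may sleep or re-enter */
            spin_lock_irqsave(&lock, flags);  /* re-take before re-reading */
    }
    spin_unlock_irqrestore(&lock, flags);
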
3042 +diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
3043 +index d076940e0c69..4cc58904ee52 100644
3044 +--- a/drivers/dma/qcom/bam_dma.c
3045 ++++ b/drivers/dma/qcom/bam_dma.c
3046 +@@ -393,6 +393,7 @@ struct bam_device {
3047 + struct device_dma_parameters dma_parms;
3048 + struct bam_chan *channels;
3049 + u32 num_channels;
3050 ++ u32 num_ees;
3051 +
3052 + /* execution environment ID, from DT */
3053 + u32 ee;
3054 +@@ -1128,15 +1129,19 @@ static int bam_init(struct bam_device *bdev)
3055 + u32 val;
3056 +
3057 + /* read revision and configuration information */
3058 +- val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT;
3059 +- val &= NUM_EES_MASK;
3060 ++ if (!bdev->num_ees) {
3061 ++ val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
3062 ++ bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
3063 ++ }
3064 +
3065 + /* check that configured EE is within range */
3066 +- if (bdev->ee >= val)
3067 ++ if (bdev->ee >= bdev->num_ees)
3068 + return -EINVAL;
3069 +
3070 +- val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
3071 +- bdev->num_channels = val & BAM_NUM_PIPES_MASK;
3072 ++ if (!bdev->num_channels) {
3073 ++ val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
3074 ++ bdev->num_channels = val & BAM_NUM_PIPES_MASK;
3075 ++ }
3076 +
3077 + if (bdev->controlled_remotely)
3078 + return 0;
3079 +@@ -1232,6 +1237,18 @@ static int bam_dma_probe(struct platform_device *pdev)
3080 + bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
3081 + "qcom,controlled-remotely");
3082 +
3083 ++ if (bdev->controlled_remotely) {
3084 ++ ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
3085 ++ &bdev->num_channels);
3086 ++ if (ret)
3087 ++ dev_err(bdev->dev, "num-channels unspecified in dt\n");
3088 ++
3089 ++ ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees",
3090 ++ &bdev->num_ees);
3091 ++ if (ret)
3092 ++ dev_err(bdev->dev, "num-ees unspecified in dt\n");
3093 ++ }
3094 ++
3095 + bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
3096 + if (IS_ERR(bdev->bamclk))
3097 + return PTR_ERR(bdev->bamclk);
3098 +diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
3099 +index d0cacdb0713e..2a2ccd9c78e4 100644
3100 +--- a/drivers/dma/sh/rcar-dmac.c
3101 ++++ b/drivers/dma/sh/rcar-dmac.c
3102 +@@ -1301,8 +1301,17 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
3103 + * If the cookie doesn't correspond to the currently running transfer
3104 + * then the descriptor hasn't been processed yet, and the residue is
3105 + * equal to the full descriptor size.
3106 ++ * Also, a client driver may call this function before
3107 ++ * rcar_dmac_isr_channel_thread() runs. In that case, "desc.running"
3108 ++ * already points to the next descriptor, and the completed one sits
3109 ++ * on the done list. So, if the argument cookie matches a descriptor
3110 ++ * on the done list, we can assume the residue is zero.

3111 + */
3112 + if (cookie != desc->async_tx.cookie) {
3113 ++ list_for_each_entry(desc, &chan->desc.done, node) {
3114 ++ if (cookie == desc->async_tx.cookie)
3115 ++ return 0;
3116 ++ }
3117 + list_for_each_entry(desc, &chan->desc.pending, node) {
3118 + if (cookie == desc->async_tx.cookie)
3119 + return desc->size;
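
With the new check the residue lookup becomes a three-way decision, and checking the done list first matters: previously a completed-but-not-yet-reaped cookie fell through to the "running" case and produced a bogus residue. A summary sketch of the decision ladder, fragment only, using the driver's own field names:

    /* done     -> transfer complete, residue 0
     * pending  -> not started yet, residue = full descriptor size
     * otherwise the cookie belongs to the running descriptor and the
     * residue is computed from the hardware transfer counters */
    list_for_each_entry(desc, &chan->desc.done, node)
            if (cookie == desc->async_tx.cookie)
                    return 0;
    list_for_each_entry(desc, &chan->desc.pending, node)
            if (cookie == desc->async_tx.cookie)
                    return desc->size;
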
3120 +@@ -1677,8 +1686,8 @@ static const struct dev_pm_ops rcar_dmac_pm = {
3121 + * - Wait for the current transfer to complete and stop the device,
3122 + * - Resume transfers, if any.
3123 + */
3124 +- SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
3125 +- pm_runtime_force_resume)
3126 ++ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
3127 ++ pm_runtime_force_resume)
3128 + SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
3129 + NULL)
3130 + };
3131 +diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
3132 +index e763e1484331..c3be8ef9243f 100644
3133 +--- a/drivers/firmware/dmi_scan.c
3134 ++++ b/drivers/firmware/dmi_scan.c
3135 +@@ -186,7 +186,7 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
3136 + char *s;
3137 + int is_ff = 1, is_00 = 1, i;
3138 +
3139 +- if (dmi_ident[slot] || dm->length <= index + 16)
3140 ++ if (dmi_ident[slot] || dm->length < index + 16)
3141 + return;
3142 +
3143 + d = (u8 *) dm + index;
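
A 16-byte UUID at offset index fits exactly when the record provides index + 16 bytes; the old <= test also rejected that exactly-fitting case. A standalone illustration of the corrected bound:

    #include <stdio.h>

    /* only length < index + 16 is actually too short */
    static int uuid_fits(int length, int index)
    {
            return !(length < index + 16);
    }

    int main(void)
    {
            printf("len=24 idx=8 -> %d (exactly fits)\n", uuid_fits(24, 8));
            printf("len=23 idx=8 -> %d (too short)\n", uuid_fits(23, 8));
            return 0;
    }
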
3144 +diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
3145 +index 1cc41c3d6315..86a1ad17a32e 100644
3146 +--- a/drivers/firmware/efi/arm-runtime.c
3147 ++++ b/drivers/firmware/efi/arm-runtime.c
3148 +@@ -54,6 +54,9 @@ static struct ptdump_info efi_ptdump_info = {
3149 +
3150 + static int __init ptdump_init(void)
3151 + {
3152 ++ if (!efi_enabled(EFI_RUNTIME_SERVICES))
3153 ++ return 0;
3154 ++
3155 + return ptdump_debugfs_register(&efi_ptdump_info, "efi_page_tables");
3156 + }
3157 + device_initcall(ptdump_init);
3158 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
3159 +index 2a519f9062ee..e515ca01ffb2 100644
3160 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
3161 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
3162 +@@ -26,6 +26,7 @@
3163 + #define AMDGPU_AMDKFD_H_INCLUDED
3164 +
3165 + #include <linux/types.h>
3166 ++#include <linux/mm.h>
3167 + #include <linux/mmu_context.h>
3168 + #include <kgd_kfd_interface.h>
3169 +
3170 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
3171 +index a162d87ca0c8..b552a9416e92 100644
3172 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
3173 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
3174 +@@ -321,14 +321,45 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
3175 + {
3176 + unsigned i;
3177 + int r, ret = 0;
3178 ++ long tmo_gfx, tmo_mm;
3179 ++
3180 ++ tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
3181 ++ if (amdgpu_sriov_vf(adev)) {
3182 ++ /* MM engines on the hypervisor side are not scheduled together
3183 ++ * with the CP and SDMA engines, so even in exclusive mode an MM
3184 ++ * engine could still be running on another VF; the IB test timeout
3185 ++ * for MM engines under SR-IOV should therefore be long. 8 sec
3186 ++ * should be enough for the MM engine to come back to this VF.
3187 ++ */
3188 ++ tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
3189 ++ }
3190 ++
3191 ++ if (amdgpu_sriov_runtime(adev)) {
3192 ++ /* CP & SDMA engines are scheduled together, so the timeout
3193 ++ * needs to be wide enough to cover the time spent waiting
3194 ++ * for them to come back, under RUNTIME only
3195 ++ */
3196 ++ tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
3197 ++ }
3198 +
3199 + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3200 + struct amdgpu_ring *ring = adev->rings[i];
3201 ++ long tmo;
3202 +
3203 + if (!ring || !ring->ready)
3204 + continue;
3205 +
3206 +- r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT);
3207 ++ /* MM engines need more time */
3208 ++ if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
3209 ++ ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
3210 ++ ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
3211 ++ ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
3212 ++ ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
3213 ++ tmo = tmo_mm;
3214 ++ else
3215 ++ tmo = tmo_gfx;
3216 ++
3217 ++ r = amdgpu_ring_test_ib(ring, tmo);
3218 + if (r) {
3219 + ring->ready = false;
3220 +
3221 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
3222 +index c06479615e8a..d7bbccd67eb9 100644
3223 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
3224 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
3225 +@@ -2954,7 +2954,13 @@ static int gfx_v9_0_hw_fini(void *handle)
3226 + gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
3227 +
3228 + if (amdgpu_sriov_vf(adev)) {
3229 +- pr_debug("For SRIOV client, shouldn't do anything.\n");
3230 ++ gfx_v9_0_cp_gfx_enable(adev, false);
3231 ++ /* must disable polling for SRIOV when hw is finished, otherwise
3232 ++ * the CPC engine may keep fetching the WB address, which is
3233 ++ * already invalid once sw is finished, and trigger a DMAR read
3234 ++ * error on the hypervisor side.
3235 ++ */
3236 ++ WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3237 + return 0;
3238 + }
3239 + gfx_v9_0_cp_enable(adev, false);
3240 +diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
3241 +index fa63c564cf91..7657cc5784a5 100644
3242 +--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
3243 ++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
3244 +@@ -719,14 +719,17 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
3245 + WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI + sdma_offsets[i],
3246 + upper_32_bits(wptr_gpu_addr));
3247 + wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]);
3248 +- if (ring->use_pollmem)
3249 ++ if (ring->use_pollmem) {
3250 ++ /* wptr polling is not fast enough, directly clear the wptr register */
3251 ++ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
3252 + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
3253 + SDMA0_GFX_RB_WPTR_POLL_CNTL,
3254 + ENABLE, 1);
3255 +- else
3256 ++ } else {
3257 + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
3258 + SDMA0_GFX_RB_WPTR_POLL_CNTL,
3259 + ENABLE, 0);
3260 ++ }
3261 + WREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i], wptr_poll_cntl);
3262 +
3263 + /* enable DMA RB */
3264 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
3265 +index 4d07ffebfd31..6d1dd64f50c3 100644
3266 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
3267 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
3268 +@@ -35,6 +35,7 @@
3269 + #include "core_types.h"
3270 + #include "set_mode_types.h"
3271 + #include "virtual/virtual_stream_encoder.h"
3272 ++#include "dpcd_defs.h"
3273 +
3274 + #include "dce80/dce80_resource.h"
3275 + #include "dce100/dce100_resource.h"
3276 +@@ -2428,7 +2429,8 @@ static void set_vsc_info_packet(
3277 + unsigned int vscPacketRevision = 0;
3278 + unsigned int i;
3279 +
3280 +- if (stream->sink->link->psr_enabled) {
3281 ++ /* VSC packet revision is set to 2 when DP revision >= 1.2 */
3282 ++ if (stream->sink->link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
3283 + vscPacketRevision = 2;
3284 + }
3285 +
3286 +diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
3287 +index b1ab4ab09532..60373d7eb220 100644
3288 +--- a/drivers/gpu/drm/bridge/sii902x.c
3289 ++++ b/drivers/gpu/drm/bridge/sii902x.c
3290 +@@ -137,7 +137,9 @@ static int sii902x_get_modes(struct drm_connector *connector)
3291 + struct sii902x *sii902x = connector_to_sii902x(connector);
3292 + struct regmap *regmap = sii902x->regmap;
3293 + u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
3294 ++ struct device *dev = &sii902x->i2c->dev;
3295 + unsigned long timeout;
3296 ++ unsigned int retries;
3297 + unsigned int status;
3298 + struct edid *edid;
3299 + int num = 0;
3300 +@@ -159,7 +161,7 @@ static int sii902x_get_modes(struct drm_connector *connector)
3301 + time_before(jiffies, timeout));
3302 +
3303 + if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
3304 +- dev_err(&sii902x->i2c->dev, "failed to acquire the i2c bus\n");
3305 ++ dev_err(dev, "failed to acquire the i2c bus\n");
3306 + return -ETIMEDOUT;
3307 + }
3308 +
3309 +@@ -179,9 +181,19 @@ static int sii902x_get_modes(struct drm_connector *connector)
3310 + if (ret)
3311 + return ret;
3312 +
3313 +- ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
3314 ++ /*
3315 ++ * Sometimes the I2C bus can stall after failure to use the
3316 ++ * EDID channel. Retry a few times to see if things clear
3317 ++ * up, else continue anyway.
3318 ++ */
3319 ++ retries = 5;
3320 ++ do {
3321 ++ ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA,
3322 ++ &status);
3323 ++ retries--;
3324 ++ } while (ret && retries);
3325 + if (ret)
3326 +- return ret;
3327 ++ dev_err(dev, "failed to read status (%d)\n", ret);
3328 +
3329 + ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
3330 + SII902X_SYS_CTRL_DDC_BUS_REQ |
3331 +@@ -201,7 +213,7 @@ static int sii902x_get_modes(struct drm_connector *connector)
3332 +
3333 + if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
3334 + SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
3335 +- dev_err(&sii902x->i2c->dev, "failed to release the i2c bus\n");
3336 ++ dev_err(dev, "failed to release the i2c bus\n");
3337 + return -ETIMEDOUT;
3338 + }
3339 +
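
The single status read above becomes a bounded retry, and a failure is downgraded from aborting the function to a warning, since the bus often recovers on its own. A compact standalone model of that loop shape:

    #include <stdio.h>

    static int attempts;

    /* a stand-in read that fails twice before succeeding */
    static int flaky_read(void)
    {
            return ++attempts < 3 ? -1 : 0;
    }

    int main(void)
    {
            int retries = 5;
            int ret;

            do {
                    ret = flaky_read();
                    retries--;
            } while (ret && retries);

            /* ret holds the last status: 0 here, or the error if the
             * budget ran out - the driver logs it and carries on */
            printf("ret=%d after %d attempts\n", ret, attempts);
            return 0;
    }
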
3340 +diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
3341 +index 32d9bcf5be7f..f0d3ed5f2528 100644
3342 +--- a/drivers/gpu/drm/drm_vblank.c
3343 ++++ b/drivers/gpu/drm/drm_vblank.c
3344 +@@ -271,7 +271,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
3345 + store_vblank(dev, pipe, diff, t_vblank, cur_vblank);
3346 + }
3347 +
3348 +-static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
3349 ++static u64 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
3350 + {
3351 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
3352 +
3353 +@@ -292,11 +292,11 @@ static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
3354 + * This is mostly useful for hardware that can obtain the scanout position, but
3355 + * doesn't have a hardware frame counter.
3356 + */
3357 +-u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
3358 ++u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
3359 + {
3360 + struct drm_device *dev = crtc->dev;
3361 + unsigned int pipe = drm_crtc_index(crtc);
3362 +- u32 vblank;
3363 ++ u64 vblank;
3364 + unsigned long flags;
3365 +
3366 + WARN_ONCE(drm_debug & DRM_UT_VBL && !dev->driver->get_vblank_timestamp,
3367 +@@ -1055,7 +1055,7 @@ void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
3368 + {
3369 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
3370 + int ret;
3371 +- u32 last;
3372 ++ u64 last;
3373 +
3374 + if (WARN_ON(pipe >= dev->num_crtcs))
3375 + return;
3376 +diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
3377 +index f9ad0e960263..2751b9107fc5 100644
3378 +--- a/drivers/gpu/drm/meson/meson_drv.c
3379 ++++ b/drivers/gpu/drm/meson/meson_drv.c
3380 +@@ -189,40 +189,51 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
3381 +
3382 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu");
3383 + regs = devm_ioremap_resource(dev, res);
3384 +- if (IS_ERR(regs))
3385 +- return PTR_ERR(regs);
3386 ++ if (IS_ERR(regs)) {
3387 ++ ret = PTR_ERR(regs);
3388 ++ goto free_drm;
3389 ++ }
3390 +
3391 + priv->io_base = regs;
3392 +
3393 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi");
3394 + /* Simply ioremap since it may be a shared register zone */
3395 + regs = devm_ioremap(dev, res->start, resource_size(res));
3396 +- if (!regs)
3397 +- return -EADDRNOTAVAIL;
3398 ++ if (!regs) {
3399 ++ ret = -EADDRNOTAVAIL;
3400 ++ goto free_drm;
3401 ++ }
3402 +
3403 + priv->hhi = devm_regmap_init_mmio(dev, regs,
3404 + &meson_regmap_config);
3405 + if (IS_ERR(priv->hhi)) {
3406 + dev_err(&pdev->dev, "Couldn't create the HHI regmap\n");
3407 +- return PTR_ERR(priv->hhi);
3408 ++ ret = PTR_ERR(priv->hhi);
3409 ++ goto free_drm;
3410 + }
3411 +
3412 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
3413 + /* Simply ioremap since it may be a shared register zone */
3414 + regs = devm_ioremap(dev, res->start, resource_size(res));
3415 +- if (!regs)
3416 +- return -EADDRNOTAVAIL;
3417 ++ if (!regs) {
3418 ++ ret = -EADDRNOTAVAIL;
3419 ++ goto free_drm;
3420 ++ }
3421 +
3422 + priv->dmc = devm_regmap_init_mmio(dev, regs,
3423 + &meson_regmap_config);
3424 + if (IS_ERR(priv->dmc)) {
3425 + dev_err(&pdev->dev, "Couldn't create the DMC regmap\n");
3426 +- return PTR_ERR(priv->dmc);
3427 ++ ret = PTR_ERR(priv->dmc);
3428 ++ goto free_drm;
3429 + }
3430 +
3431 + priv->vsync_irq = platform_get_irq(pdev, 0);
3432 +
3433 +- drm_vblank_init(drm, 1);
3434 ++ ret = drm_vblank_init(drm, 1);
3435 ++ if (ret)
3436 ++ goto free_drm;
3437 ++
3438 + drm_mode_config_init(drm);
3439 + drm->mode_config.max_width = 3840;
3440 + drm->mode_config.max_height = 2160;
3441 +diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
3442 +index 04300b2da1b1..f2a38727fa85 100644
3443 +--- a/drivers/gpu/drm/omapdrm/dss/dss.c
3444 ++++ b/drivers/gpu/drm/omapdrm/dss/dss.c
3445 +@@ -1300,88 +1300,18 @@ static const struct soc_device_attribute dss_soc_devices[] = {
3446 +
3447 + static int dss_bind(struct device *dev)
3448 + {
3449 +- struct platform_device *pdev = to_platform_device(dev);
3450 +- struct resource *dss_mem;
3451 +- u32 rev;
3452 + int r;
3453 +
3454 +- dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
3455 +- dss.base = devm_ioremap_resource(&pdev->dev, dss_mem);
3456 +- if (IS_ERR(dss.base))
3457 +- return PTR_ERR(dss.base);
3458 +-
3459 +- r = dss_get_clocks();
3460 ++ r = component_bind_all(dev, NULL);
3461 + if (r)
3462 + return r;
3463 +
3464 +- r = dss_setup_default_clock();
3465 +- if (r)
3466 +- goto err_setup_clocks;
3467 +-
3468 +- r = dss_video_pll_probe(pdev);
3469 +- if (r)
3470 +- goto err_pll_init;
3471 +-
3472 +- r = dss_init_ports(pdev);
3473 +- if (r)
3474 +- goto err_init_ports;
3475 +-
3476 +- pm_runtime_enable(&pdev->dev);
3477 +-
3478 +- r = dss_runtime_get();
3479 +- if (r)
3480 +- goto err_runtime_get;
3481 +-
3482 +- dss.dss_clk_rate = clk_get_rate(dss.dss_clk);
3483 +-
3484 +- /* Select DPLL */
3485 +- REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
3486 +-
3487 +- dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
3488 +-
3489 +-#ifdef CONFIG_OMAP2_DSS_VENC
3490 +- REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
3491 +- REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
3492 +- REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
3493 +-#endif
3494 +- dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK;
3495 +- dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK;
3496 +- dss.dispc_clk_source = DSS_CLK_SRC_FCK;
3497 +- dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK;
3498 +- dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK;
3499 +-
3500 +- rev = dss_read_reg(DSS_REVISION);
3501 +- pr_info("OMAP DSS rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
3502 +-
3503 +- dss_runtime_put();
3504 +-
3505 +- r = component_bind_all(&pdev->dev, NULL);
3506 +- if (r)
3507 +- goto err_component;
3508 +-
3509 +- dss_debugfs_create_file("dss", dss_dump_regs);
3510 +-
3511 + pm_set_vt_switch(0);
3512 +
3513 + omapdss_gather_components(dev);
3514 + omapdss_set_is_initialized(true);
3515 +
3516 + return 0;
3517 +-
3518 +-err_component:
3519 +-err_runtime_get:
3520 +- pm_runtime_disable(&pdev->dev);
3521 +- dss_uninit_ports(pdev);
3522 +-err_init_ports:
3523 +- if (dss.video1_pll)
3524 +- dss_video_pll_uninit(dss.video1_pll);
3525 +-
3526 +- if (dss.video2_pll)
3527 +- dss_video_pll_uninit(dss.video2_pll);
3528 +-err_pll_init:
3529 +-err_setup_clocks:
3530 +- dss_put_clocks();
3531 +- return r;
3532 + }
3533 +
3534 + static void dss_unbind(struct device *dev)
3535 +@@ -1391,18 +1321,6 @@ static void dss_unbind(struct device *dev)
3536 + omapdss_set_is_initialized(false);
3537 +
3538 + component_unbind_all(&pdev->dev, NULL);
3539 +-
3540 +- if (dss.video1_pll)
3541 +- dss_video_pll_uninit(dss.video1_pll);
3542 +-
3543 +- if (dss.video2_pll)
3544 +- dss_video_pll_uninit(dss.video2_pll);
3545 +-
3546 +- dss_uninit_ports(pdev);
3547 +-
3548 +- pm_runtime_disable(&pdev->dev);
3549 +-
3550 +- dss_put_clocks();
3551 + }
3552 +
3553 + static const struct component_master_ops dss_component_ops = {
3554 +@@ -1434,10 +1352,46 @@ static int dss_add_child_component(struct device *dev, void *data)
3555 + return 0;
3556 + }
3557 +
3558 ++static int dss_probe_hardware(void)
3559 ++{
3560 ++ u32 rev;
3561 ++ int r;
3562 ++
3563 ++ r = dss_runtime_get();
3564 ++ if (r)
3565 ++ return r;
3566 ++
3567 ++ dss.dss_clk_rate = clk_get_rate(dss.dss_clk);
3568 ++
3569 ++ /* Select DPLL */
3570 ++ REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
3571 ++
3572 ++ dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
3573 ++
3574 ++#ifdef CONFIG_OMAP2_DSS_VENC
3575 ++ REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
3576 ++ REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
3577 ++ REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
3578 ++#endif
3579 ++ dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK;
3580 ++ dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK;
3581 ++ dss.dispc_clk_source = DSS_CLK_SRC_FCK;
3582 ++ dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK;
3583 ++ dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK;
3584 ++
3585 ++ rev = dss_read_reg(DSS_REVISION);
3586 ++ pr_info("OMAP DSS rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
3587 ++
3588 ++ dss_runtime_put();
3589 ++
3590 ++ return 0;
3591 ++}
3592 ++
3593 + static int dss_probe(struct platform_device *pdev)
3594 + {
3595 + const struct soc_device_attribute *soc;
3596 + struct component_match *match = NULL;
3597 ++ struct resource *dss_mem;
3598 + int r;
3599 +
3600 + dss.pdev = pdev;
3601 +@@ -1458,20 +1412,69 @@ static int dss_probe(struct platform_device *pdev)
3602 + else
3603 + dss.feat = of_match_device(dss_of_match, &pdev->dev)->data;
3604 +
3605 +- r = dss_initialize_debugfs();
3606 ++ /* Map I/O registers, get and setup clocks. */
3607 ++ dss_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3608 ++ dss.base = devm_ioremap_resource(&pdev->dev, dss_mem);
3609 ++ if (IS_ERR(dss.base))
3610 ++ return PTR_ERR(dss.base);
3611 ++
3612 ++ r = dss_get_clocks();
3613 + if (r)
3614 + return r;
3615 +
3616 +- /* add all the child devices as components */
3617 ++ r = dss_setup_default_clock();
3618 ++ if (r)
3619 ++ goto err_put_clocks;
3620 ++
3621 ++ /* Setup the video PLLs and the DPI and SDI ports. */
3622 ++ r = dss_video_pll_probe(pdev);
3623 ++ if (r)
3624 ++ goto err_put_clocks;
3625 ++
3626 ++ r = dss_init_ports(pdev);
3627 ++ if (r)
3628 ++ goto err_uninit_plls;
3629 ++
3630 ++ /* Enable runtime PM and probe the hardware. */
3631 ++ pm_runtime_enable(&pdev->dev);
3632 ++
3633 ++ r = dss_probe_hardware();
3634 ++ if (r)
3635 ++ goto err_pm_runtime_disable;
3636 ++
3637 ++ /* Initialize debugfs. */
3638 ++ r = dss_initialize_debugfs();
3639 ++ if (r)
3640 ++ goto err_pm_runtime_disable;
3641 ++
3642 ++ dss_debugfs_create_file("dss", dss_dump_regs);
3643 ++
3644 ++ /* Add all the child devices as components. */
3645 + device_for_each_child(&pdev->dev, &match, dss_add_child_component);
3646 +
3647 + r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
3648 +- if (r) {
3649 +- dss_uninitialize_debugfs();
3650 +- return r;
3651 +- }
3652 ++ if (r)
3653 ++ goto err_uninit_debugfs;
3654 +
3655 + return 0;
3656 ++
3657 ++err_uninit_debugfs:
3658 ++ dss_uninitialize_debugfs();
3659 ++
3660 ++err_pm_runtime_disable:
3661 ++ pm_runtime_disable(&pdev->dev);
3662 ++ dss_uninit_ports(pdev);
3663 ++
3664 ++err_uninit_plls:
3665 ++ if (dss.video1_pll)
3666 ++ dss_video_pll_uninit(dss.video1_pll);
3667 ++ if (dss.video2_pll)
3668 ++ dss_video_pll_uninit(dss.video2_pll);
3669 ++
3670 ++err_put_clocks:
3671 ++ dss_put_clocks();
3672 ++
3673 ++ return r;
3674 + }
3675 +
3676 + static int dss_remove(struct platform_device *pdev)
3677 +@@ -1480,6 +1483,18 @@ static int dss_remove(struct platform_device *pdev)
3678 +
3679 + dss_uninitialize_debugfs();
3680 +
3681 ++ pm_runtime_disable(&pdev->dev);
3682 ++
3683 ++ dss_uninit_ports(pdev);
3684 ++
3685 ++ if (dss.video1_pll)
3686 ++ dss_video_pll_uninit(dss.video1_pll);
3687 ++
3688 ++ if (dss.video2_pll)
3689 ++ dss_video_pll_uninit(dss.video2_pll);
3690 ++
3691 ++ dss_put_clocks();
3692 ++
3693 + return 0;
3694 + }
3695 +
3696 +diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
3697 +index 5591984a392b..f9649bded63f 100644
3698 +--- a/drivers/gpu/drm/panel/panel-simple.c
3699 ++++ b/drivers/gpu/drm/panel/panel-simple.c
3700 +@@ -1597,7 +1597,7 @@ static const struct panel_desc ontat_yx700wv03 = {
3701 + .width = 154,
3702 + .height = 83,
3703 + },
3704 +- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
3705 ++ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
3706 + };
3707 +
3708 + static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
3709 +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
3710 +index 12d22f3db1af..6a4b8c98a719 100644
3711 +--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
3712 ++++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
3713 +@@ -59,11 +59,8 @@ static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds,
3714 +
3715 + rcar_lvds_write(lvds, LVDPLLCR, pllcr);
3716 +
3717 +- /*
3718 +- * Select the input, hardcode mode 0, enable LVDS operation and turn
3719 +- * bias circuitry on.
3720 +- */
3721 +- lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_BEN | LVDCR0_LVEN;
3722 ++ /* Select the input and set the LVDS mode. */
3723 ++ lvdcr0 = lvds->mode << LVDCR0_LVMD_SHIFT;
3724 + if (rcrtc->index == 2)
3725 + lvdcr0 |= LVDCR0_DUSEL;
3726 + rcar_lvds_write(lvds, LVDCR0, lvdcr0);
3727 +@@ -74,6 +71,10 @@ static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds,
3728 + LVDCR1_CHSTBY_GEN2(1) | LVDCR1_CHSTBY_GEN2(0) |
3729 + LVDCR1_CLKSTBY_GEN2);
3730 +
3731 ++ /* Enable LVDS operation and turn bias circuitry on. */
3732 ++ lvdcr0 |= LVDCR0_BEN | LVDCR0_LVEN;
3733 ++ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
3734 ++
3735 + /*
3736 + * Turn the PLL on, wait for the startup delay, and turn the output
3737 + * on.
3738 +@@ -95,7 +96,7 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
3739 + u32 lvdcr0;
3740 + u32 pllcr;
3741 +
3742 +- /* PLL clock configuration */
3743 ++ /* Set the PLL clock configuration and LVDS mode. */
3744 + if (freq < 42000)
3745 + pllcr = LVDPLLCR_PLLDIVCNT_42M;
3746 + else if (freq < 85000)
3747 +@@ -107,6 +108,9 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
3748 +
3749 + rcar_lvds_write(lvds, LVDPLLCR, pllcr);
3750 +
3751 ++ lvdcr0 = lvds->mode << LVDCR0_LVMD_SHIFT;
3752 ++ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
3753 ++
3754 + /* Turn all the channels on. */
3755 + rcar_lvds_write(lvds, LVDCR1,
3756 + LVDCR1_CHSTBY_GEN3(3) | LVDCR1_CHSTBY_GEN3(2) |
3757 +@@ -117,7 +121,7 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
3758 + * Turn the PLL on, set it to LVDS normal mode, wait for the startup
3759 + * delay and turn the output on.
3760 + */
3761 +- lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_PLLON;
3762 ++ lvdcr0 |= LVDCR0_PLLON;
3763 + rcar_lvds_write(lvds, LVDCR0, lvdcr0);
3764 +
3765 + lvdcr0 |= LVDCR0_PWD;
3766 +diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
3767 +index 1d9655576b6e..6bf2f8289847 100644
3768 +--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
3769 ++++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
3770 +@@ -262,7 +262,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
3771 + * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
3772 + */
3773 + vma->vm_flags &= ~VM_PFNMAP;
3774 +- vma->vm_pgoff = 0;
3775 +
3776 + if (rk_obj->pages)
3777 + ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
3778 +@@ -297,6 +296,12 @@ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
3779 + if (ret)
3780 + return ret;
3781 +
3782 ++ /*
3783 ++ * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
3784 ++ * whole buffer from the start.
3785 ++ */
3786 ++ vma->vm_pgoff = 0;
3787 ++
3788 + obj = vma->vm_private_data;
3789 +
3790 + return rockchip_drm_gem_object_mmap(obj, vma);
3791 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
3792 +index 557a033fb610..8545488aa0cf 100644
3793 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
3794 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
3795 +@@ -135,17 +135,24 @@
3796 +
3797 + #else
3798 +
3799 +-/* In the 32-bit version of this macro, we use "m" because there is no
3800 +- * more register left for bp
3801 ++/*
3802 ++ * In the 32-bit version of this macro, we store bp in a memory location
3803 ++ * because we've run out of registers.
3804 ++ * We can't reference that memory location once %esp or %ebp has
3805 ++ * been modified, so we first push it on the stack, just before we
3806 ++ * push %ebp, and then when we need it we read it back from the
3807 ++ * stack where we just pushed it.
3808 + */
3809 + #define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
3810 + port_num, magic, bp, \
3811 + eax, ebx, ecx, edx, si, di) \
3812 + ({ \
3813 +- asm volatile ("push %%ebp;" \
3814 +- "mov %12, %%ebp;" \
3815 ++ asm volatile ("push %12;" \
3816 ++ "push %%ebp;" \
3817 ++ "mov 0x04(%%esp), %%ebp;" \
3818 + "rep outsb;" \
3819 +- "pop %%ebp;" : \
3820 ++ "pop %%ebp;" \
3821 ++ "add $0x04, %%esp;" : \
3822 + "=a"(eax), \
3823 + "=b"(ebx), \
3824 + "=c"(ecx), \
3825 +@@ -167,10 +174,12 @@
3826 + port_num, magic, bp, \
3827 + eax, ebx, ecx, edx, si, di) \
3828 + ({ \
3829 +- asm volatile ("push %%ebp;" \
3830 +- "mov %12, %%ebp;" \
3831 ++ asm volatile ("push %12;" \
3832 ++ "push %%ebp;" \
3833 ++ "mov 0x04(%%esp), %%ebp;" \
3834 + "rep insb;" \
3835 +- "pop %%ebp" : \
3836 ++ "pop %%ebp;" \
3837 ++ "add $0x04, %%esp;" : \
3838 + "=a"(eax), \
3839 + "=b"(ebx), \
3840 + "=c"(ecx), \
3841 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
3842 +index 3ec9eae831b8..f9413c0199f0 100644
3843 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
3844 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
3845 +@@ -453,7 +453,11 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
3846 + struct drm_plane_state *old_state)
3847 + {
3848 + struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
3849 ++ struct drm_crtc *crtc = plane->state->crtc ?
3850 ++ plane->state->crtc : old_state->crtc;
3851 +
3852 ++ if (vps->dmabuf)
3853 ++ vmw_dmabuf_unpin(vmw_priv(crtc->dev), vps->dmabuf, false);
3854 + vmw_dmabuf_unreference(&vps->dmabuf);
3855 + vps->dmabuf_size = 0;
3856 +
3857 +@@ -491,10 +495,17 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
3858 + }
3859 +
3860 + size = new_state->crtc_w * new_state->crtc_h * 4;
3861 ++ dev_priv = vmw_priv(crtc->dev);
3862 +
3863 + if (vps->dmabuf) {
3864 +- if (vps->dmabuf_size == size)
3865 +- return 0;
3866 ++ if (vps->dmabuf_size == size) {
3867 ++ /*
3868 ++ * Note that this might temporarily up the pin-count
3869 ++ * to 2, until cleanup_fb() is called.
3870 ++ */
3871 ++ return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf,
3872 ++ true);
3873 ++ }
3874 +
3875 + vmw_dmabuf_unreference(&vps->dmabuf);
3876 + vps->dmabuf_size = 0;
3877 +@@ -504,7 +515,6 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
3878 + if (!vps->dmabuf)
3879 + return -ENOMEM;
3880 +
3881 +- dev_priv = vmw_priv(crtc->dev);
3882 + vmw_svga_enable(dev_priv);
3883 +
3884 + /* After we have alloced the backing store might not be able to
3885 +@@ -515,13 +525,18 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
3886 + &vmw_vram_ne_placement,
3887 + false, &vmw_dmabuf_bo_free);
3888 + vmw_overlay_resume_all(dev_priv);
3889 +-
3890 +- if (ret != 0)
3891 ++ if (ret) {
3892 + vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
3893 +- else
3894 +- vps->dmabuf_size = size;
3895 ++ return ret;
3896 ++ }
3897 +
3898 +- return ret;
3899 ++ vps->dmabuf_size = size;
3900 ++
3901 ++ /*
3902 ++ * TTM already thinks the buffer is pinned, but make sure the
3903 ++ * pin_count is upped.
3904 ++ */
3905 ++ return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, true);
3906 + }
3907 +
3908 +
3909 +diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
3910 +index c219e43b8f02..f5f3f8cf57ea 100644
3911 +--- a/drivers/hwmon/nct6775.c
3912 ++++ b/drivers/hwmon/nct6775.c
3913 +@@ -1469,7 +1469,7 @@ static void nct6775_update_pwm(struct device *dev)
3914 + duty_is_dc = data->REG_PWM_MODE[i] &&
3915 + (nct6775_read_value(data, data->REG_PWM_MODE[i])
3916 + & data->PWM_MODE_MASK[i]);
3917 +- data->pwm_mode[i] = duty_is_dc;
3918 ++ data->pwm_mode[i] = !duty_is_dc;
3919 +
3920 + fanmodecfg = nct6775_read_value(data, data->REG_FAN_MODE[i]);
3921 + for (j = 0; j < ARRAY_SIZE(data->REG_PWM); j++) {
3922 +@@ -2350,7 +2350,7 @@ show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf)
3923 + struct nct6775_data *data = nct6775_update_device(dev);
3924 + struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
3925 +
3926 +- return sprintf(buf, "%d\n", !data->pwm_mode[sattr->index]);
3927 ++ return sprintf(buf, "%d\n", data->pwm_mode[sattr->index]);
3928 + }
3929 +
3930 + static ssize_t
3931 +@@ -2371,9 +2371,9 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
3932 + if (val > 1)
3933 + return -EINVAL;
3934 +
3935 +- /* Setting DC mode is not supported for all chips/channels */
3936 ++ /* Setting DC mode (0) is not supported for all chips/channels */
3937 + if (data->REG_PWM_MODE[nr] == 0) {
3938 +- if (val)
3939 ++ if (!val)
3940 + return -EINVAL;
3941 + return count;
3942 + }
3943 +@@ -2382,7 +2382,7 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
3944 + data->pwm_mode[nr] = val;
3945 + reg = nct6775_read_value(data, data->REG_PWM_MODE[nr]);
3946 + reg &= ~data->PWM_MODE_MASK[nr];
3947 +- if (val)
3948 ++ if (!val)
3949 + reg |= data->PWM_MODE_MASK[nr];
3950 + nct6775_write_value(data, data->REG_PWM_MODE[nr], reg);
3951 + mutex_unlock(&data->update_lock);
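
The chip register bit means "DC mode", while the documented hwmon sysfs ABI for pwmX_mode is 1 = PWM, 0 = DC. The driver used to store the raw bit and negate it on show; the fix stores the sysfs-oriented value everywhere and inverts only at the register boundary. A standalone illustration:

    #include <stdio.h>

    /* the register bit means DC mode; sysfs reports 1 = PWM, 0 = DC */
    static int pwm_mode_from_reg(unsigned int reg, unsigned int mask)
    {
            int duty_is_dc = !!(reg & mask);

            return !duty_is_dc;
    }

    int main(void)
    {
            printf("bit set   -> %d (DC)\n", pwm_mode_from_reg(0x80, 0x80));
            printf("bit clear -> %d (PWM)\n", pwm_mode_from_reg(0x00, 0x80));
            return 0;
    }
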
3952 +diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
3953 +index 00d6995af4c2..8a44e94d5679 100644
3954 +--- a/drivers/hwmon/pmbus/adm1275.c
3955 ++++ b/drivers/hwmon/pmbus/adm1275.c
3956 +@@ -154,7 +154,7 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
3957 + const struct adm1275_data *data = to_adm1275_data(info);
3958 + int ret = 0;
3959 +
3960 +- if (page)
3961 ++ if (page > 0)
3962 + return -ENXIO;
3963 +
3964 + switch (reg) {
3965 +@@ -240,7 +240,7 @@ static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
3966 + const struct adm1275_data *data = to_adm1275_data(info);
3967 + int ret;
3968 +
3969 +- if (page)
3970 ++ if (page > 0)
3971 + return -ENXIO;
3972 +
3973 + switch (reg) {
3974 +diff --git a/drivers/hwmon/pmbus/max8688.c b/drivers/hwmon/pmbus/max8688.c
3975 +index dd4883a19045..e951f9b87abb 100644
3976 +--- a/drivers/hwmon/pmbus/max8688.c
3977 ++++ b/drivers/hwmon/pmbus/max8688.c
3978 +@@ -45,7 +45,7 @@ static int max8688_read_word_data(struct i2c_client *client, int page, int reg)
3979 + {
3980 + int ret;
3981 +
3982 +- if (page)
3983 ++ if (page > 0)
3984 + return -ENXIO;
3985 +
3986 + switch (reg) {
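
The pmbus core can pass page == -1 to mean that no page needs to be selected; "if (page)" rejected that too, while "page > 0" only refuses real pages beyond the single supported page 0. A standalone illustration:

    #include <errno.h>
    #include <stdio.h>

    /* -1 means "page unspecified"; only explicit pages above 0 are
     * rejected by a single-page device */
    static int check_page(int page)
    {
            if (page > 0)
                    return -ENXIO;
            return 0;
    }

    int main(void)
    {
            printf("page -1 -> %d\n", check_page(-1)); /* accepted now */
            printf("page  1 -> %d\n", check_page(1));  /* still rejected */
            return 0;
    }
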
3987 +diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
3988 +index 6ea62c62ff27..9cdb3fbc8c1f 100644
3989 +--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
3990 ++++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
3991 +@@ -315,7 +315,7 @@ static void debug_dump_regs(struct debug_drvdata *drvdata)
3992 + }
3993 +
3994 + pc = debug_adjust_pc(drvdata);
3995 +- dev_emerg(dev, " EDPCSR: [<%p>] %pS\n", (void *)pc, (void *)pc);
3996 ++ dev_emerg(dev, " EDPCSR: [<%px>] %pS\n", (void *)pc, (void *)pc);
3997 +
3998 + if (drvdata->edcidsr_present)
3999 + dev_emerg(dev, " EDCIDSR: %08x\n", drvdata->edcidsr);
4000 +diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
4001 +index 1a023e30488c..c1793313bb08 100644
4002 +--- a/drivers/hwtracing/intel_th/core.c
4003 ++++ b/drivers/hwtracing/intel_th/core.c
4004 +@@ -935,7 +935,7 @@ EXPORT_SYMBOL_GPL(intel_th_trace_disable);
4005 + int intel_th_set_output(struct intel_th_device *thdev,
4006 + unsigned int master)
4007 + {
4008 +- struct intel_th_device *hub = to_intel_th_device(thdev->dev.parent);
4009 ++ struct intel_th_device *hub = to_intel_th_hub(thdev);
4010 + struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver);
4011 +
4012 + if (!hubdrv->set_output)
4013 +diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
4014 +index 440fe4a96e68..a5a95ea5b81a 100644
4015 +--- a/drivers/i2c/busses/i2c-mv64xxx.c
4016 ++++ b/drivers/i2c/busses/i2c-mv64xxx.c
4017 +@@ -845,12 +845,16 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
4018 + */
4019 + if (of_device_is_compatible(np, "marvell,mv78230-i2c")) {
4020 + drv_data->offload_enabled = true;
4021 +- drv_data->errata_delay = true;
4022 ++ /* The delay is only needed in standard mode (100kHz) */
4023 ++ if (bus_freq <= 100000)
4024 ++ drv_data->errata_delay = true;
4025 + }
4026 +
4027 + if (of_device_is_compatible(np, "marvell,mv78230-a0-i2c")) {
4028 + drv_data->offload_enabled = false;
4029 +- drv_data->errata_delay = true;
4030 ++ /* The delay is only needed in standard mode (100kHz) */
4031 ++ if (bus_freq <= 100000)
4032 ++ drv_data->errata_delay = true;
4033 + }
4034 +
4035 + if (of_device_is_compatible(np, "allwinner,sun6i-a31-i2c"))
4036 +diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
4037 +index 7c3ed7c9af77..5613cc2d51fc 100644
4038 +--- a/drivers/ide/ide-cd.c
4039 ++++ b/drivers/ide/ide-cd.c
4040 +@@ -1613,6 +1613,8 @@ static int idecd_open(struct block_device *bdev, fmode_t mode)
4041 + struct cdrom_info *info;
4042 + int rc = -ENXIO;
4043 +
4044 ++ check_disk_change(bdev);
4045 ++
4046 + mutex_lock(&ide_cd_mutex);
4047 + info = ide_cd_get(bdev->bd_disk);
4048 + if (!info)
4049 +diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
4050 +index 45f2f095f793..4eb72ff539fc 100644
4051 +--- a/drivers/infiniband/core/multicast.c
4052 ++++ b/drivers/infiniband/core/multicast.c
4053 +@@ -724,21 +724,19 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
4054 + {
4055 + int ret;
4056 + u16 gid_index;
4057 +- u8 p;
4058 +-
4059 +- if (rdma_protocol_roce(device, port_num)) {
4060 +- ret = ib_find_cached_gid_by_port(device, &rec->port_gid,
4061 +- gid_type, port_num,
4062 +- ndev,
4063 +- &gid_index);
4064 +- } else if (rdma_protocol_ib(device, port_num)) {
4065 +- ret = ib_find_cached_gid(device, &rec->port_gid,
4066 +- IB_GID_TYPE_IB, NULL, &p,
4067 +- &gid_index);
4068 +- } else {
4069 +- ret = -EINVAL;
4070 +- }
4071 +
4072 ++ /* GID table is not based on the netdevice for IB link layer,
4073 ++ * so ignore ndev during search.
4074 ++ */
4075 ++ if (rdma_protocol_ib(device, port_num))
4076 ++ ndev = NULL;
4077 ++ else if (!rdma_protocol_roce(device, port_num))
4078 ++ return -EINVAL;
4079 ++
4080 ++ ret = ib_find_cached_gid_by_port(device, &rec->port_gid,
4081 ++ gid_type, port_num,
4082 ++ ndev,
4083 ++ &gid_index);
4084 + if (ret)
4085 + return ret;
4086 +
4087 +diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
4088 +index 9a4e899d94b3..2b6c9b516070 100644
4089 +--- a/drivers/infiniband/core/umem.c
4090 ++++ b/drivers/infiniband/core/umem.c
4091 +@@ -119,7 +119,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
4092 + umem->length = size;
4093 + umem->address = addr;
4094 + umem->page_shift = PAGE_SHIFT;
4095 +- umem->pid = get_task_pid(current, PIDTYPE_PID);
4096 + /*
4097 + * We ask for writable memory if any of the following
4098 + * access flags are set. "Local write" and "remote write"
4099 +@@ -132,7 +131,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
4100 + IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
4101 +
4102 + if (access & IB_ACCESS_ON_DEMAND) {
4103 +- put_pid(umem->pid);
4104 + ret = ib_umem_odp_get(context, umem, access);
4105 + if (ret) {
4106 + kfree(umem);
4107 +@@ -148,7 +146,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
4108 +
4109 + page_list = (struct page **) __get_free_page(GFP_KERNEL);
4110 + if (!page_list) {
4111 +- put_pid(umem->pid);
4112 + kfree(umem);
4113 + return ERR_PTR(-ENOMEM);
4114 + }
4115 +@@ -231,7 +228,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
4116 + if (ret < 0) {
4117 + if (need_release)
4118 + __ib_umem_release(context->device, umem, 0);
4119 +- put_pid(umem->pid);
4120 + kfree(umem);
4121 + } else
4122 + current->mm->pinned_vm = locked;
4123 +@@ -274,8 +270,7 @@ void ib_umem_release(struct ib_umem *umem)
4124 +
4125 + __ib_umem_release(umem->context->device, umem, 1);
4126 +
4127 +- task = get_pid_task(umem->pid, PIDTYPE_PID);
4128 +- put_pid(umem->pid);
4129 ++ task = get_pid_task(umem->context->tgid, PIDTYPE_PID);
4130 + if (!task)
4131 + goto out;
4132 + mm = get_task_mm(task);
4133 +diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
4134 +index e6a60fa59f2b..e6bdd0c1e80a 100644
4135 +--- a/drivers/infiniband/hw/hfi1/chip.c
4136 ++++ b/drivers/infiniband/hw/hfi1/chip.c
4137 +@@ -5944,6 +5944,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
4138 + u64 status;
4139 + u32 sw_index;
4140 + int i = 0;
4141 ++ unsigned long irq_flags;
4142 +
4143 + sw_index = dd->hw_to_sw[hw_context];
4144 + if (sw_index >= dd->num_send_contexts) {
4145 +@@ -5953,10 +5954,12 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
4146 + return;
4147 + }
4148 + sci = &dd->send_contexts[sw_index];
4149 ++ spin_lock_irqsave(&dd->sc_lock, irq_flags);
4150 + sc = sci->sc;
4151 + if (!sc) {
4152 + dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
4153 + sw_index, hw_context);
4154 ++ spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
4155 + return;
4156 + }
4157 +
4158 +@@ -5978,6 +5981,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
4159 + */
4160 + if (sc->type != SC_USER)
4161 + queue_work(dd->pport->hfi1_wq, &sc->halt_work);
4162 ++ spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
4163 +
4164 + /*
4165 + * Update the counters for the corresponding status bits.
4166 +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
4167 +index 0881f7907848..c14ed9cc9c9e 100644
4168 +--- a/drivers/infiniband/hw/mlx5/main.c
4169 ++++ b/drivers/infiniband/hw/mlx5/main.c
4170 +@@ -388,6 +388,9 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
4171 + if (err)
4172 + goto out;
4173 +
4174 ++ props->active_width = IB_WIDTH_4X;
4175 ++ props->active_speed = IB_SPEED_QDR;
4176 ++
4177 + translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
4178 + &props->active_width);
4179 +
4180 +diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
4181 +index 45594091353c..7ef21fa2c3f0 100644
4182 +--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
4183 ++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
4184 +@@ -1206,7 +1206,7 @@ int rxe_register_device(struct rxe_dev *rxe)
4185 + rxe->ndev->dev_addr);
4186 + dev->dev.dma_ops = &dma_virt_ops;
4187 + dma_coerce_mask_and_coherent(&dev->dev,
4188 +- dma_get_required_mask(dev->dev.parent));
4189 ++ dma_get_required_mask(&dev->dev));
4190 +
4191 + dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
4192 + dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
4193 +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
4194 +index 74788fdeb773..8b591c192daf 100644
4195 +--- a/drivers/iommu/amd_iommu.c
4196 ++++ b/drivers/iommu/amd_iommu.c
4197 +@@ -310,6 +310,8 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
4198 +
4199 + if (dev_data == NULL) {
4200 + dev_data = alloc_dev_data(devid);
4201 ++ if (!dev_data)
4202 ++ return NULL;
4203 +
4204 + if (translation_pre_enabled(iommu))
4205 + dev_data->defer_attach = true;
4206 +diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
4207 +index f227d73e7bf6..f2832a10fcea 100644
4208 +--- a/drivers/iommu/mtk_iommu.c
4209 ++++ b/drivers/iommu/mtk_iommu.c
4210 +@@ -60,7 +60,7 @@
4211 + (((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data))
4212 +
4213 + #define REG_MMU_IVRP_PADDR 0x114
4214 +-#define F_MMU_IVRP_PA_SET(pa, ext) (((pa) >> 1) | ((!!(ext)) << 31))
4215 ++
4216 + #define REG_MMU_VLD_PA_RNG 0x118
4217 + #define F_MMU_VLD_PA_RNG(EA, SA) (((EA) << 8) | (SA))
4218 +
4219 +@@ -539,8 +539,13 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
4220 + F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
4221 + writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
4222 +
4223 +- writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
4224 +- data->base + REG_MMU_IVRP_PADDR);
4225 ++ if (data->m4u_plat == M4U_MT8173)
4226 ++ regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
4227 ++ else
4228 ++ regval = lower_32_bits(data->protect_base) |
4229 ++ upper_32_bits(data->protect_base);
4230 ++ writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);
4231 ++
4232 + if (data->enable_4GB && data->m4u_plat != M4U_MT8173) {
4233 + /*
4234 + * If 4GB mode is enabled, the validate PA range is from
4235 +@@ -695,6 +700,7 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev)
4236 + reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
4237 + reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
4238 + reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
4239 ++ reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
4240 + clk_disable_unprepare(data->bclk);
4241 + return 0;
4242 + }
4243 +@@ -717,8 +723,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
4244 + writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
4245 + writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
4246 + writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
4247 +- writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
4248 +- base + REG_MMU_IVRP_PADDR);
4249 ++ writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
4250 + if (data->m4u_dom)
4251 + writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
4252 + base + REG_MMU_PT_BASE_ADDR);
4253 +diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
4254 +index b4451a1c7c2f..778498b8633f 100644
4255 +--- a/drivers/iommu/mtk_iommu.h
4256 ++++ b/drivers/iommu/mtk_iommu.h
4257 +@@ -32,6 +32,7 @@ struct mtk_iommu_suspend_reg {
4258 + u32 ctrl_reg;
4259 + u32 int_control0;
4260 + u32 int_main_control;
4261 ++ u32 ivrp_paddr;
4262 + };
4263 +
4264 + enum mtk_iommu_plat {
4265 +diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
4266 +index 910b5b6f96b1..eb65b6e78d57 100644
4267 +--- a/drivers/macintosh/rack-meter.c
4268 ++++ b/drivers/macintosh/rack-meter.c
4269 +@@ -154,8 +154,8 @@ static void rackmeter_do_pause(struct rackmeter *rm, int pause)
4270 + DBDMA_DO_STOP(rm->dma_regs);
4271 + return;
4272 + }
4273 +- memset(rdma->buf1, 0, ARRAY_SIZE(rdma->buf1));
4274 +- memset(rdma->buf2, 0, ARRAY_SIZE(rdma->buf2));
4275 ++ memset(rdma->buf1, 0, sizeof(rdma->buf1));
4276 ++ memset(rdma->buf2, 0, sizeof(rdma->buf2));
4277 +
4278 + rm->dma_buf_v->mark = 0;
4279 +
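
ARRAY_SIZE() counts elements while memset() counts bytes, so the old calls only cleared a fraction of each DMA buffer. A standalone illustration of the difference:

    #include <stdio.h>
    #include <string.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
            unsigned int buf[32];

            /* 32 elements but 128 bytes: ARRAY_SIZE as a memset length
             * would clear only the first quarter of the buffer */
            printf("elements: %zu, bytes: %zu\n",
                   (size_t)ARRAY_SIZE(buf), sizeof(buf));
            memset(buf, 0, sizeof(buf)); /* clears the whole buffer */
            return 0;
    }
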
4280 +diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
4281 +index 12e5197f186c..b5ddb848cd31 100644
4282 +--- a/drivers/md/bcache/bcache.h
4283 ++++ b/drivers/md/bcache/bcache.h
4284 +@@ -258,10 +258,11 @@ struct bcache_device {
4285 + struct gendisk *disk;
4286 +
4287 + unsigned long flags;
4288 +-#define BCACHE_DEV_CLOSING 0
4289 +-#define BCACHE_DEV_DETACHING 1
4290 +-#define BCACHE_DEV_UNLINK_DONE 2
4291 +-
4292 ++#define BCACHE_DEV_CLOSING 0
4293 ++#define BCACHE_DEV_DETACHING 1
4294 ++#define BCACHE_DEV_UNLINK_DONE 2
4295 ++#define BCACHE_DEV_WB_RUNNING 3
4296 ++#define BCACHE_DEV_RATE_DW_RUNNING 4
4297 + unsigned nr_stripes;
4298 + unsigned stripe_size;
4299 + atomic_t *stripe_sectors_dirty;
4300 +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
4301 +index f2273143b3cb..432088adc497 100644
4302 +--- a/drivers/md/bcache/super.c
4303 ++++ b/drivers/md/bcache/super.c
4304 +@@ -899,6 +899,31 @@ void bch_cached_dev_run(struct cached_dev *dc)
4305 + pr_debug("error creating sysfs link");
4306 + }
4307 +
4308 ++/*
4309 ++ * If BCACHE_DEV_RATE_DW_RUNNING is set, the routine of the delayed
4310 ++ * work dc->writeback_rate_update is running. Wait until the routine
4311 ++ * quits (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to
4312 ++ * cancel it. If BCACHE_DEV_RATE_DW_RUNNING is still set when the
4313 ++ * timeout expires, give up waiting and cancel it anyway.
4314 ++ */
4315 ++static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
4316 ++{
4317 ++ int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ;
4318 ++
4319 ++ do {
4320 ++ if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING,
4321 ++ &dc->disk.flags))
4322 ++ break;
4323 ++ time_out--;
4324 ++ schedule_timeout_interruptible(1);
4325 ++ } while (time_out > 0);
4326 ++
4327 ++ if (time_out == 0)
4328 ++ pr_warn("give up waiting for dc->writeback_rate_update to quit");
4329 ++
4330 ++ cancel_delayed_work_sync(&dc->writeback_rate_update);
4331 ++}
4332 ++
4333 + static void cached_dev_detach_finish(struct work_struct *w)
4334 + {
4335 + struct cached_dev *dc = container_of(w, struct cached_dev, detach);
4336 +@@ -911,7 +936,9 @@ static void cached_dev_detach_finish(struct work_struct *w)
4337 +
4338 + mutex_lock(&bch_register_lock);
4339 +
4340 +- cancel_delayed_work_sync(&dc->writeback_rate_update);
4341 ++ if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
4342 ++ cancel_writeback_rate_update_dwork(dc);
4343 ++
4344 + if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
4345 + kthread_stop(dc->writeback_thread);
4346 + dc->writeback_thread = NULL;
4347 +@@ -954,6 +981,7 @@ void bch_cached_dev_detach(struct cached_dev *dc)
4348 + closure_get(&dc->disk.cl);
4349 +
4350 + bch_writeback_queue(dc);
4351 ++
4352 + cached_dev_put(dc);
4353 + }
4354 +
4355 +@@ -1065,7 +1093,6 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
4356 + if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
4357 + bch_sectors_dirty_init(&dc->disk);
4358 + atomic_set(&dc->has_dirty, 1);
4359 +- refcount_inc(&dc->count);
4360 + bch_writeback_queue(dc);
4361 + }
4362 +
4363 +@@ -1093,14 +1120,16 @@ static void cached_dev_free(struct closure *cl)
4364 + {
4365 + struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
4366 +
4367 +- cancel_delayed_work_sync(&dc->writeback_rate_update);
4368 ++ mutex_lock(&bch_register_lock);
4369 ++
4370 ++ if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
4371 ++ cancel_writeback_rate_update_dwork(dc);
4372 ++
4373 + if (!IS_ERR_OR_NULL(dc->writeback_thread))
4374 + kthread_stop(dc->writeback_thread);
4375 + if (dc->writeback_write_wq)
4376 + destroy_workqueue(dc->writeback_write_wq);
4377 +
4378 +- mutex_lock(&bch_register_lock);
4379 +-
4380 + if (atomic_read(&dc->running))
4381 + bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
4382 + bcache_device_free(&dc->disk);
4383 +diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
4384 +index 78cd7bd50fdd..55673508628f 100644
4385 +--- a/drivers/md/bcache/sysfs.c
4386 ++++ b/drivers/md/bcache/sysfs.c
4387 +@@ -309,7 +309,8 @@ STORE(bch_cached_dev)
4388 + bch_writeback_queue(dc);
4389 +
4390 + if (attr == &sysfs_writeback_percent)
4391 +- schedule_delayed_work(&dc->writeback_rate_update,
4392 ++ if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
4393 ++ schedule_delayed_work(&dc->writeback_rate_update,
4394 + dc->writeback_rate_update_seconds * HZ);
4395 +
4396 + mutex_unlock(&bch_register_lock);
4397 +diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
4398 +index f1d2fc15abcc..8f98ef1038d3 100644
4399 +--- a/drivers/md/bcache/writeback.c
4400 ++++ b/drivers/md/bcache/writeback.c
4401 +@@ -115,6 +115,21 @@ static void update_writeback_rate(struct work_struct *work)
4402 + struct cached_dev,
4403 + writeback_rate_update);
4404 +
4405 ++ /*
4406 ++ * should check BCACHE_DEV_RATE_DW_RUNNING before calling
4407 ++ * cancel_delayed_work_sync().
4408 ++ */
4409 ++ set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
4410 ++ /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
4411 ++ smp_mb();
4412 ++
4413 ++ if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) {
4414 ++ clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
4415 ++ /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
4416 ++ smp_mb();
4417 ++ return;
4418 ++ }
4419 ++
4420 + down_read(&dc->writeback_lock);
4421 +
4422 + if (atomic_read(&dc->has_dirty) &&
4423 +@@ -123,8 +138,18 @@ static void update_writeback_rate(struct work_struct *work)
4424 +
4425 + up_read(&dc->writeback_lock);
4426 +
4427 +- schedule_delayed_work(&dc->writeback_rate_update,
4428 ++ if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) {
4429 ++ schedule_delayed_work(&dc->writeback_rate_update,
4430 + dc->writeback_rate_update_seconds * HZ);
4431 ++ }
4432 ++
4433 ++ /*
4434 ++ * should check BCACHE_DEV_RATE_DW_RUNNING before calling
4435 ++ * cancel_delayed_work_sync().
4436 ++ */
4437 ++ clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
4438 ++ /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
4439 ++ smp_mb();
4440 + }
4441 +
4442 + static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
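
The two flags form a handshake: the delayed work announces it is running via BCACHE_DEV_RATE_DW_RUNNING, while the cancel path clears BCACHE_DEV_WB_RUNNING first and then waits for the announcement to drop before calling cancel_delayed_work_sync(), so the work cannot re-arm itself mid-cancel. A hedged userspace model, with sequentially-consistent C11 atomics standing in for set_bit/clear_bit plus smp_mb():

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int wb_running = 1;  /* ~ BCACHE_DEV_WB_RUNNING */
    static atomic_int rate_dw_running; /* ~ BCACHE_DEV_RATE_DW_RUNNING */

    static void rate_update_worker(void)
    {
            atomic_store(&rate_dw_running, 1); /* announce: inside */
            if (!atomic_load(&wb_running)) {   /* cancel requested? */
                    atomic_store(&rate_dw_running, 0);
                    return;                    /* quit without re-arming */
            }
            /* ... recompute the rate, re-arm the delayed work ... */
            atomic_store(&rate_dw_running, 0);
    }

    static void cancel_rate_update(void)
    {
            atomic_store(&wb_running, 0);      /* ask the worker to stop */
            while (atomic_load(&rate_dw_running))
                    ;                          /* wait until it has left */
            /* a sync cancel can no longer race with a self re-arm */
    }

    int main(void)
    {
            rate_update_worker(); /* normal pass: would re-arm */
            cancel_rate_update(); /* flag the stop and wait */
            rate_update_worker(); /* sees the stop flag and bails out */
            printf("handshake ok\n");
            return 0;
    }
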
4443 +@@ -565,14 +590,20 @@ static int bch_writeback_thread(void *arg)
4444 + while (!kthread_should_stop()) {
4445 + down_write(&dc->writeback_lock);
4446 + set_current_state(TASK_INTERRUPTIBLE);
4447 +- if (!atomic_read(&dc->has_dirty) ||
4448 +- (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
4449 +- !dc->writeback_running)) {
4450 ++ /*
4451 ++ * If the bcache device is detaching, skip here and continue
4452 ++ * to perform writeback. Otherwise, if there is no dirty data
4453 ++ * on the cache, or there is dirty data but writeback is
4454 ++ * disabled, the writeback thread should sleep here and wait
4455 ++ * for others to wake it up.
4456 ++ */
4457 ++ if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
4458 ++ (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
4459 + up_write(&dc->writeback_lock);
4460 +
4461 + if (kthread_should_stop()) {
4462 + set_current_state(TASK_RUNNING);
4463 +- return 0;
4464 ++ break;
4465 + }
4466 +
4467 + schedule();
4468 +@@ -585,9 +616,16 @@ static int bch_writeback_thread(void *arg)
4469 + if (searched_full_index &&
4470 + RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
4471 + atomic_set(&dc->has_dirty, 0);
4472 +- cached_dev_put(dc);
4473 + SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
4474 + bch_write_bdev_super(dc, NULL);
4475 ++ /*
4476 ++ * If bcache device is detaching via sysfs interface,
4477 ++ * writeback thread should stop after there is no dirty
4478 ++ * data on cache. BCACHE_DEV_DETACHING flag is set in
4479 ++ * bch_cached_dev_detach().
4480 ++ */
4481 ++ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
4482 ++ break;
4483 + }
4484 +
4485 + up_write(&dc->writeback_lock);
4486 +@@ -606,6 +644,9 @@ static int bch_writeback_thread(void *arg)
4487 + }
4488 + }
4489 +
4490 ++ dc->writeback_thread = NULL;
4491 ++ cached_dev_put(dc);
4492 ++
4493 + return 0;
4494 + }
4495 +
4496 +@@ -659,6 +700,7 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
4497 + dc->writeback_rate_p_term_inverse = 40;
4498 + dc->writeback_rate_i_term_inverse = 10000;
4499 +
4500 ++ WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
4501 + INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
4502 + }
4503 +
4504 +@@ -669,11 +711,15 @@ int bch_cached_dev_writeback_start(struct cached_dev *dc)
4505 + if (!dc->writeback_write_wq)
4506 + return -ENOMEM;
4507 +
4508 ++ cached_dev_get(dc);
4509 + dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
4510 + "bcache_writeback");
4511 +- if (IS_ERR(dc->writeback_thread))
4512 ++ if (IS_ERR(dc->writeback_thread)) {
4513 ++ cached_dev_put(dc);
4514 + return PTR_ERR(dc->writeback_thread);
4515 ++ }
4516 +
4517 ++ WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
4518 + schedule_delayed_work(&dc->writeback_rate_update,
4519 + dc->writeback_rate_update_seconds * HZ);
4520 +
4521 +diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
4522 +index 587b25599856..0bba8f1c6cdf 100644
4523 +--- a/drivers/md/bcache/writeback.h
4524 ++++ b/drivers/md/bcache/writeback.h
4525 +@@ -105,8 +105,6 @@ static inline void bch_writeback_add(struct cached_dev *dc)
4526 + {
4527 + if (!atomic_read(&dc->has_dirty) &&
4528 + !atomic_xchg(&dc->has_dirty, 1)) {
4529 +- refcount_inc(&dc->count);
4530 +-
4531 + if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
4532 + SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
4533 + /* XXX: should do this synchronously */
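
The bcache hunks above coordinate a self-rearming delayed work with its canceller through two flag bits and paired memory barriers: BCACHE_DEV_WB_RUNNING gates whether update_writeback_rate() may rearm itself, while BCACHE_DEV_RATE_DW_RUNNING marks the window in which the work is executing, so the cancel side (not shown in this hunk) can poll it before calling cancel_delayed_work_sync(). A minimal kernel-style sketch of the same pattern, under that assumption; my_dev, MY_WB_RUNNING and MY_DW_RUNNING are illustrative names, not the bcache code:

#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/sched.h>

enum { MY_WB_RUNNING, MY_DW_RUNNING };

struct my_dev {				/* illustrative, not struct cached_dev */
	unsigned long flags;
	unsigned long period;		/* rearm interval in jiffies */
	struct delayed_work dwork;
};

static void my_work_fn(struct work_struct *work)
{
	struct my_dev *d = container_of(to_delayed_work(work),
					struct my_dev, dwork);

	set_bit(MY_DW_RUNNING, &d->flags);	/* "I am mid-flight" */
	smp_mb();				/* pairs with the canceller's test */

	if (test_bit(MY_WB_RUNNING, &d->flags)) {
		/* ... do the periodic work ... */
		schedule_delayed_work(&d->dwork, d->period);
	}

	clear_bit(MY_DW_RUNNING, &d->flags);
	smp_mb();
}

static void my_stop(struct my_dev *d)
{
	clear_bit(MY_WB_RUNNING, &d->flags);	/* forbid rearming first */
	smp_mb();
	while (test_bit(MY_DW_RUNNING, &d->flags))
		schedule();			/* wait out an in-flight run */
	cancel_delayed_work_sync(&d->dwork);
}

The barrier pairing mirrors the comments in the hunk: the canceller's read of MY_DW_RUNNING must observe the work's set_bit(), and the work's test of MY_WB_RUNNING must observe the canceller's clear_bit().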
4534 +diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
4535 +index 4f015da78f28..4949b8d5a748 100644
4536 +--- a/drivers/misc/cxl/cxl.h
4537 ++++ b/drivers/misc/cxl/cxl.h
4538 +@@ -369,6 +369,9 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
4539 + #define CXL_PSL_TFC_An_AE (1ull << (63-30)) /* Restart PSL with address error */
4540 + #define CXL_PSL_TFC_An_R (1ull << (63-31)) /* Restart PSL transaction */
4541 +
4542 ++/****** CXL_PSL_DEBUG *****************************************************/
4543 ++#define CXL_PSL_DEBUG_CDC (1ull << (63-27)) /* Coherent Data cache support */
4544 ++
4545 + /****** CXL_XSL9_IERAT_ERAT - CAIA 2 **********************************/
4546 + #define CXL_XSL9_IERAT_MLPID (1ull << (63-0)) /* Match LPID */
4547 + #define CXL_XSL9_IERAT_MPID (1ull << (63-1)) /* Match PID */
4548 +@@ -669,6 +672,7 @@ struct cxl_native {
4549 + irq_hw_number_t err_hwirq;
4550 + unsigned int err_virq;
4551 + u64 ps_off;
4552 ++ bool no_data_cache; /* set if no data cache on the card */
4553 + const struct cxl_service_layer_ops *sl_ops;
4554 + };
4555 +
4556 +diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
4557 +index 1b3d7c65ea3f..98f867fcef24 100644
4558 +--- a/drivers/misc/cxl/native.c
4559 ++++ b/drivers/misc/cxl/native.c
4560 +@@ -353,8 +353,17 @@ int cxl_data_cache_flush(struct cxl *adapter)
4561 + u64 reg;
4562 + unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
4563 +
4564 +- pr_devel("Flushing data cache\n");
4565 ++ /*
4566 ++	 * Do a datacache flush only if the datacache is available.
4567 ++	 * In case of PSL9D the datacache is absent, hence the flush
4568 ++	 * operation would time out.
4569 ++ */
4570 ++ if (adapter->native->no_data_cache) {
4571 ++ pr_devel("No PSL data cache. Ignoring cache flush req.\n");
4572 ++ return 0;
4573 ++ }
4574 +
4575 ++ pr_devel("Flushing data cache\n");
4576 + reg = cxl_p1_read(adapter, CXL_PSL_Control);
4577 + reg |= CXL_PSL_Control_Fr;
4578 + cxl_p1_write(adapter, CXL_PSL_Control, reg);
4579 +diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
4580 +index 758842f65a1b..61de57292e40 100644
4581 +--- a/drivers/misc/cxl/pci.c
4582 ++++ b/drivers/misc/cxl/pci.c
4583 +@@ -456,6 +456,7 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter,
4584 + u64 chipid;
4585 + u32 phb_index;
4586 + u64 capp_unit_id;
4587 ++ u64 psl_debug;
4588 + int rc;
4589 +
4590 + rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
4591 +@@ -506,6 +507,16 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter,
4592 + } else
4593 + cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0x4000000000000000ULL);
4594 +
4595 ++ /*
4596 ++	 * Check if the PSL has a data-cache. We need to flush the
4597 ++	 * adapter datacache when it is about to be removed.
4598 ++ */
4599 ++ psl_debug = cxl_p1_read(adapter, CXL_PSL9_DEBUG);
4600 ++ if (psl_debug & CXL_PSL_DEBUG_CDC) {
4601 ++ dev_dbg(&dev->dev, "No data-cache present\n");
4602 ++ adapter->native->no_data_cache = true;
4603 ++ }
4604 ++
4605 + return 0;
4606 + }
4607 +
4608 +@@ -1449,10 +1460,8 @@ int cxl_pci_reset(struct cxl *adapter)
4609 +
4610 + /*
4611 + * The adapter is about to be reset, so ignore errors.
4612 +- * Not supported on P9 DD1
4613 + */
4614 +- if ((cxl_is_power8()) || (!(cxl_is_power9_dd1())))
4615 +- cxl_data_cache_flush(adapter);
4616 ++ cxl_data_cache_flush(adapter);
4617 +
4618 + /* pcie_warm_reset requests a fundamental pci reset which includes a
4619 + * PERST assert/deassert. PERST triggers a loading of the image
4620 +@@ -1936,10 +1945,8 @@ static void cxl_pci_remove_adapter(struct cxl *adapter)
4621 +
4622 + /*
4623 + * Flush adapter datacache as it's about to be removed.
4624 +- * Not supported on P9 DD1.
4625 + */
4626 +- if ((cxl_is_power8()) || (!(cxl_is_power9_dd1())))
4627 +- cxl_data_cache_flush(adapter);
4628 ++ cxl_data_cache_flush(adapter);
4629 +
4630 + cxl_deconfigure_adapter(adapter);
4631 +
4632 +diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
4633 +index 9c6f639d8a57..81501ebd5b26 100644
4634 +--- a/drivers/mmc/core/block.c
4635 ++++ b/drivers/mmc/core/block.c
4636 +@@ -2492,7 +2492,7 @@ static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
4637 + break;
4638 + }
4639 +
4640 +- return 0;
4641 ++ return ret;
4642 + }
4643 +
4644 + #ifdef CONFIG_COMPAT
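
The mmc_rpmb_ioctl() one-liner above is the classic dropped-error pattern: each switch case computes ret, but the function returned a hard-coded 0, so failures never reached user space. A stand-alone illustration (do_a/do_b are hypothetical stand-ins for the per-command handlers):

#include <errno.h>

static long do_a(void) { return 0; }
static long do_b(void) { return -EINVAL; }	/* this error was being lost */

static long my_ioctl(unsigned int cmd)
{
	long ret = -EINVAL;

	switch (cmd) {
	case 1:
		ret = do_a();
		break;
	case 2:
		ret = do_b();
		break;
	}

	return ret;	/* the fix: propagate ret instead of returning 0 */
}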
4645 +diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
4646 +index 61666d269771..0cfbdb3ab68a 100644
4647 +--- a/drivers/mmc/host/sdhci-iproc.c
4648 ++++ b/drivers/mmc/host/sdhci-iproc.c
4649 +@@ -33,6 +33,8 @@ struct sdhci_iproc_host {
4650 + const struct sdhci_iproc_data *data;
4651 + u32 shadow_cmd;
4652 + u32 shadow_blk;
4653 ++ bool is_cmd_shadowed;
4654 ++ bool is_blk_shadowed;
4655 + };
4656 +
4657 + #define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)
4658 +@@ -48,8 +50,22 @@ static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg)
4659 +
4660 + static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg)
4661 + {
4662 +- u32 val = sdhci_iproc_readl(host, (reg & ~3));
4663 +- u16 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
4664 ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4665 ++ struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host);
4666 ++ u32 val;
4667 ++ u16 word;
4668 ++
4669 ++ if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) {
4670 ++ /* Get the saved transfer mode */
4671 ++ val = iproc_host->shadow_cmd;
4672 ++ } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
4673 ++ iproc_host->is_blk_shadowed) {
4674 ++ /* Get the saved block info */
4675 ++ val = iproc_host->shadow_blk;
4676 ++ } else {
4677 ++ val = sdhci_iproc_readl(host, (reg & ~3));
4678 ++ }
4679 ++ word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
4680 + return word;
4681 + }
4682 +
4683 +@@ -105,13 +121,15 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
4684 +
4685 + if (reg == SDHCI_COMMAND) {
4686 + /* Write the block now as we are issuing a command */
4687 +- if (iproc_host->shadow_blk != 0) {
4688 ++ if (iproc_host->is_blk_shadowed) {
4689 + sdhci_iproc_writel(host, iproc_host->shadow_blk,
4690 + SDHCI_BLOCK_SIZE);
4691 +- iproc_host->shadow_blk = 0;
4692 ++ iproc_host->is_blk_shadowed = false;
4693 + }
4694 + oldval = iproc_host->shadow_cmd;
4695 +- } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
4696 ++ iproc_host->is_cmd_shadowed = false;
4697 ++ } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
4698 ++ iproc_host->is_blk_shadowed) {
4699 + /* Block size and count are stored in shadow reg */
4700 + oldval = iproc_host->shadow_blk;
4701 + } else {
4702 +@@ -123,9 +141,11 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
4703 + if (reg == SDHCI_TRANSFER_MODE) {
4704 + /* Save the transfer mode until the command is issued */
4705 + iproc_host->shadow_cmd = newval;
4706 ++ iproc_host->is_cmd_shadowed = true;
4707 + } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
4708 + /* Save the block info until the command is issued */
4709 + iproc_host->shadow_blk = newval;
4710 ++ iproc_host->is_blk_shadowed = true;
4711 + } else {
4712 + /* Command or other regular 32-bit write */
4713 + sdhci_iproc_writel(host, newval, reg & ~3);
4714 +@@ -166,7 +186,7 @@ static const struct sdhci_ops sdhci_iproc_32only_ops = {
4715 +
4716 + static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = {
4717 + .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
4718 +- .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
4719 ++ .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON,
4720 + .ops = &sdhci_iproc_32only_ops,
4721 + };
4722 +
4723 +@@ -206,7 +226,6 @@ static const struct sdhci_iproc_data iproc_data = {
4724 + .caps1 = SDHCI_DRIVER_TYPE_C |
4725 + SDHCI_DRIVER_TYPE_D |
4726 + SDHCI_SUPPORT_DDR50,
4727 +- .mmc_caps = MMC_CAP_1_8V_DDR,
4728 + };
4729 +
4730 + static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
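
The sdhci-iproc hunks deal with a controller that only tolerates 32-bit register accesses: 16-bit writes to the transfer-mode and block registers are buffered ("shadowed") until the adjacent command write flushes them in one 32-bit store. The fix replaces the old shadow_blk != 0 sentinel, which misfired when the shadowed value was legitimately zero, with explicit booleans, and teaches the 16-bit read path to return pending shadowed data so a read-modify-write cannot see stale hardware state. A self-contained sketch of the idea; struct shadow_reg and the function names are illustrative, not the sdhci API:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical 32-bit-only register window with a shadowed 16-bit half. */
struct shadow_reg {
	uint32_t hw;		/* stands in for the real 32-bit register */
	uint32_t shadow;
	bool     shadowed;	/* explicit flag: 0 is a valid shadow value */
};

static uint16_t read16(struct shadow_reg *r, unsigned int byte_off)
{
	/* Serve pending writes from the shadow, not stale hardware state. */
	uint32_t v = r->shadowed ? r->shadow : r->hw;

	return (v >> (8 * byte_off)) & 0xffff;
}

static void write16(struct shadow_reg *r, uint16_t val, unsigned int byte_off)
{
	uint32_t old = r->shadowed ? r->shadow : r->hw;
	uint32_t mask = 0xffffu << (8 * byte_off);

	r->shadow = (old & ~mask) | ((uint32_t)val << (8 * byte_off));
	r->shadowed = true;	/* flushed to r->hw on the "command" write */
}

static void flush(struct shadow_reg *r)
{
	if (r->shadowed) {
		r->hw = r->shadow;	/* single 32-bit write to hardware */
		r->shadowed = false;
	}
}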
4731 +diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
4732 +index 8eef9fb6b1fe..ad8195b0d161 100644
4733 +--- a/drivers/net/ethernet/broadcom/bgmac.c
4734 ++++ b/drivers/net/ethernet/broadcom/bgmac.c
4735 +@@ -533,7 +533,8 @@ static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
4736 + int i;
4737 +
4738 + for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
4739 +- int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN;
4740 ++ u32 ctl1 = le32_to_cpu(dma_desc[i].ctl1);
4741 ++ unsigned int len = ctl1 & BGMAC_DESC_CTL1_LEN;
4742 +
4743 + slot = &ring->slots[i];
4744 + dev_kfree_skb(slot->skb);
4745 +diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
4746 +index 4040d846da8e..40d02fec2747 100644
4747 +--- a/drivers/net/ethernet/broadcom/bgmac.h
4748 ++++ b/drivers/net/ethernet/broadcom/bgmac.h
4749 +@@ -479,9 +479,9 @@ struct bgmac_rx_header {
4750 + struct bgmac {
4751 + union {
4752 + struct {
4753 +- void *base;
4754 +- void *idm_base;
4755 +- void *nicpm_base;
4756 ++ void __iomem *base;
4757 ++ void __iomem *idm_base;
4758 ++ void __iomem *nicpm_base;
4759 + } plat;
4760 + struct {
4761 + struct bcma_device *core;
4762 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
4763 +index 9442605f4fd4..0b71d3b44933 100644
4764 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
4765 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
4766 +@@ -2552,16 +2552,20 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
4767 + return -EOPNOTSUPP;
4768 +
4769 + rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
4770 +- if (!rc)
4771 ++ if (!rc) {
4772 + netdev_info(dev, "Reset request successful. Reload driver to complete reset\n");
4773 ++ *flags = 0;
4774 ++ }
4775 + } else if (*flags == ETH_RESET_AP) {
4776 + /* This feature is not supported in older firmware versions */
4777 + if (bp->hwrm_spec_code < 0x10803)
4778 + return -EOPNOTSUPP;
4779 +
4780 + rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP);
4781 +- if (!rc)
4782 ++ if (!rc) {
4783 + netdev_info(dev, "Reset Application Processor request successful.\n");
4784 ++ *flags = 0;
4785 ++ }
4786 + } else {
4787 + rc = -EINVAL;
4788 + }
4789 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
4790 +index 65c2cee35766..9d8aa96044d3 100644
4791 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
4792 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
4793 +@@ -992,8 +992,10 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
4794 +
4795 + /* Check if there's another flow using the same tunnel decap.
4796 + * If not, add this tunnel to the table and resolve the other
4797 +- * tunnel header fileds
4798 ++ * tunnel header fields. Ignore src_port in the tunnel_key,
4799 ++ * since it is not required for decap filters.
4800 + */
4801 ++ decap_key->tp_src = 0;
4802 + decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
4803 + &tc_info->decap_ht_params,
4804 + decap_key);
4805 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
4806 +index 61022b5f6743..57dcb957f27c 100644
4807 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
4808 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
4809 +@@ -833,8 +833,6 @@ static int setup_fw_sge_queues(struct adapter *adap)
4810 +
4811 + err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
4812 + adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
4813 +- if (err)
4814 +- t4_free_sge_resources(adap);
4815 + return err;
4816 + }
4817 +
4818 +@@ -5474,6 +5472,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4819 + if (err)
4820 + goto out_free_dev;
4821 +
4822 ++ err = setup_fw_sge_queues(adapter);
4823 ++ if (err) {
4824 ++ dev_err(adapter->pdev_dev,
4825 ++ "FW sge queue allocation failed, err %d", err);
4826 ++ goto out_free_dev;
4827 ++ }
4828 ++
4829 + /*
4830 + * The card is now ready to go. If any errors occur during device
4831 + * registration we do not fail the whole card but rather proceed only
4832 +@@ -5522,10 +5527,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4833 + cxgb4_ptp_init(adapter);
4834 +
4835 + print_adapter_info(adapter);
4836 +- setup_fw_sge_queues(adapter);
4837 + return 0;
4838 +
4839 + out_free_dev:
4840 ++ t4_free_sge_resources(adapter);
4841 + free_some_resources(adapter);
4842 + if (adapter->flags & USING_MSIX)
4843 + free_msix_info(adapter);
4844 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
4845 +index 6b5fea4532f3..2d827140a475 100644
4846 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
4847 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
4848 +@@ -342,6 +342,7 @@ static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
4849 + {
4850 + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
4851 +
4852 ++ adap->sge.uld_rxq_info[uld_type] = NULL;
4853 + kfree(rxq_info->rspq_id);
4854 + kfree(rxq_info->uldrxq);
4855 + kfree(rxq_info);
4856 +diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
4857 +index f202ba72a811..b91109d967fa 100644
4858 +--- a/drivers/net/ethernet/cisco/enic/enic_main.c
4859 ++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
4860 +@@ -1898,6 +1898,8 @@ static int enic_open(struct net_device *netdev)
4861 + }
4862 +
4863 + for (i = 0; i < enic->rq_count; i++) {
4864 ++ /* enable rq before updating rq desc */
4865 ++ vnic_rq_enable(&enic->rq[i]);
4866 + vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
4867 + /* Need at least one buffer on ring to get going */
4868 + if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
4869 +@@ -1909,8 +1911,6 @@ static int enic_open(struct net_device *netdev)
4870 +
4871 + for (i = 0; i < enic->wq_count; i++)
4872 + vnic_wq_enable(&enic->wq[i]);
4873 +- for (i = 0; i < enic->rq_count; i++)
4874 +- vnic_rq_enable(&enic->rq[i]);
4875 +
4876 + if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
4877 + enic_dev_add_station_addr(enic);
4878 +@@ -1936,8 +1936,12 @@ static int enic_open(struct net_device *netdev)
4879 + return 0;
4880 +
4881 + err_out_free_rq:
4882 +- for (i = 0; i < enic->rq_count; i++)
4883 ++ for (i = 0; i < enic->rq_count; i++) {
4884 ++ err = vnic_rq_disable(&enic->rq[i]);
4885 ++ if (err)
4886 ++ return err;
4887 + vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
4888 ++ }
4889 + enic_dev_notify_unset(enic);
4890 + err_out_free_intr:
4891 + enic_unset_affinity_hint(enic);
4892 +diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
4893 +index e4ec32a9ca15..3615e5f148dd 100644
4894 +--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
4895 ++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
4896 +@@ -1916,8 +1916,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
4897 + goto csum_failed;
4898 + }
4899 +
4900 ++ /* SGT[0] is used by the linear part */
4901 + sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
4902 +- qm_sg_entry_set_len(&sgt[0], skb_headlen(skb));
4903 ++ frag_len = skb_headlen(skb);
4904 ++ qm_sg_entry_set_len(&sgt[0], frag_len);
4905 + sgt[0].bpid = FSL_DPAA_BPID_INV;
4906 + sgt[0].offset = 0;
4907 + addr = dma_map_single(dev, skb->data,
4908 +@@ -1930,9 +1932,9 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
4909 + qm_sg_entry_set64(&sgt[0], addr);
4910 +
4911 + /* populate the rest of SGT entries */
4912 +- frag = &skb_shinfo(skb)->frags[0];
4913 +- frag_len = frag->size;
4914 +- for (i = 1; i <= nr_frags; i++, frag++) {
4915 ++ for (i = 0; i < nr_frags; i++) {
4916 ++ frag = &skb_shinfo(skb)->frags[i];
4917 ++ frag_len = frag->size;
4918 + WARN_ON(!skb_frag_page(frag));
4919 + addr = skb_frag_dma_map(dev, frag, 0,
4920 + frag_len, dma_dir);
4921 +@@ -1942,15 +1944,16 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
4922 + goto sg_map_failed;
4923 + }
4924 +
4925 +- qm_sg_entry_set_len(&sgt[i], frag_len);
4926 +- sgt[i].bpid = FSL_DPAA_BPID_INV;
4927 +- sgt[i].offset = 0;
4928 ++ qm_sg_entry_set_len(&sgt[i + 1], frag_len);
4929 ++ sgt[i + 1].bpid = FSL_DPAA_BPID_INV;
4930 ++ sgt[i + 1].offset = 0;
4931 +
4932 + /* keep the offset in the address */
4933 +- qm_sg_entry_set64(&sgt[i], addr);
4934 +- frag_len = frag->size;
4935 ++ qm_sg_entry_set64(&sgt[i + 1], addr);
4936 + }
4937 +- qm_sg_entry_set_f(&sgt[i - 1], frag_len);
4938 ++
4939 ++ /* Set the final bit in the last used entry of the SGT */
4940 ++ qm_sg_entry_set_f(&sgt[nr_frags], frag_len);
4941 +
4942 + qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
4943 +
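
The skb_to_sg_fd() rework above pins down the scatter-gather indexing: SGT[0] always carries the linear part, fragment i lands in SGT[i + 1], and the Final bit goes on SGT[nr_frags], the last entry actually used (which is SGT[0] itself for a linear-only skb). A small sketch of that layout; struct sg_ent and fill_sgt() are illustrative plain-C stand-ins, not the qm_sg_entry API:

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

struct sg_ent { uint64_t addr; uint32_t len; bool final; };

/* Hypothetical packet: one linear buffer plus nr_frags fragments. */
static void fill_sgt(struct sg_ent *sgt,
		     uint64_t lin_addr, uint32_t lin_len,
		     const uint64_t *frag_addr, const uint32_t *frag_len,
		     size_t nr_frags)
{
	size_t i;

	sgt[0].addr = lin_addr;		/* SGT[0] is the linear part */
	sgt[0].len  = lin_len;
	sgt[0].final = false;

	for (i = 0; i < nr_frags; i++) {
		sgt[i + 1].addr = frag_addr[i];	/* frag i -> SGT[i + 1] */
		sgt[i + 1].len  = frag_len[i];
		sgt[i + 1].final = false;
	}

	/* Final bit on the last used entry: SGT[nr_frags]. */
	sgt[nr_frags].final = true;
}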
4944 +diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
4945 +index faea674094b9..85306d1b2acf 100644
4946 +--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
4947 ++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
4948 +@@ -211,7 +211,7 @@ static int dpaa_set_pauseparam(struct net_device *net_dev,
4949 + if (epause->rx_pause)
4950 + newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
4951 + if (epause->tx_pause)
4952 +- newadv |= ADVERTISED_Asym_Pause;
4953 ++ newadv ^= ADVERTISED_Asym_Pause;
4954 +
4955 + oldadv = phydev->advertising &
4956 + (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
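
The dpaa_ethtool one-character change (|= to ^=) is easy to misread. The pause advertisement bits do not map one-to-one onto rx/tx pause: symmetric pause advertises only Pause, rx-only advertises Pause | Asym_Pause, and tx-only advertises Asym_Pause alone. Starting from the rx_pause result and then toggling Asym_Pause for tx_pause yields exactly that table; with |=, the rx+tx case wrongly kept Asym_Pause set. A checkable sketch (ADV_* are stand-ins for the ADVERTISED_* flags):

#include <assert.h>
#include <stdbool.h>

#define ADV_PAUSE	(1u << 0)	/* stand-in for ADVERTISED_Pause */
#define ADV_ASYM_PAUSE	(1u << 1)	/* stand-in for ADVERTISED_Asym_Pause */

static unsigned int pause_to_adv(bool rx_pause, bool tx_pause)
{
	unsigned int adv = 0;

	if (rx_pause)
		adv = ADV_PAUSE | ADV_ASYM_PAUSE;
	if (tx_pause)
		adv ^= ADV_ASYM_PAUSE;	/* toggle, don't OR */

	return adv;
}

int main(void)
{
	assert(pause_to_adv(true,  true)  == ADV_PAUSE);
	assert(pause_to_adv(true,  false) == (ADV_PAUSE | ADV_ASYM_PAUSE));
	assert(pause_to_adv(false, true)  == ADV_ASYM_PAUSE);
	assert(pause_to_adv(false, false) == 0);
	return 0;
}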
4957 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
4958 +index 601b6295d3f8..9f6a6a1640d6 100644
4959 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
4960 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
4961 +@@ -747,7 +747,7 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
4962 + {
4963 + /* Config bd buffer end */
4964 + hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
4965 +- HNS3_TXD_BDTYPE_M, 0);
4966 ++ HNS3_TXD_BDTYPE_S, 0);
4967 + hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
4968 + hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
4969 + hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
4970 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
4971 +index b034c7f24eda..a1e53c671944 100644
4972 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
4973 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
4974 +@@ -698,7 +698,7 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev)
4975 +
4976 + if (!h->ae_algo || !h->ae_algo->ops ||
4977 + !h->ae_algo->ops->get_rss_key_size)
4978 +- return -EOPNOTSUPP;
4979 ++ return 0;
4980 +
4981 + return h->ae_algo->ops->get_rss_key_size(h);
4982 + }
4983 +@@ -709,7 +709,7 @@ static u32 hns3_get_rss_indir_size(struct net_device *netdev)
4984 +
4985 + if (!h->ae_algo || !h->ae_algo->ops ||
4986 + !h->ae_algo->ops->get_rss_indir_size)
4987 +- return -EOPNOTSUPP;
4988 ++ return 0;
4989 +
4990 + return h->ae_algo->ops->get_rss_indir_size(h);
4991 + }
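
The hns3 ethtool fixes look cosmetic but are not: hns3_get_rss_key_size() and hns3_get_rss_indir_size() return u32, so returning -EOPNOTSUPP handed callers a huge positive size rather than an error; 0 is the only honest "unsupported" answer from an unsigned-size getter. A small demonstration of the conversion (the ops table and names are hypothetical):

#include <stdio.h>

#define EOPNOTSUPP 95	/* local stand-in; value matches Linux asm-generic */

struct ops { unsigned int (*get_key_size)(void); };	/* hypothetical */

static unsigned int rss_key_size(const struct ops *o)
{
	if (!o || !o->get_key_size)
		return 0;	/* not -EOPNOTSUPP: the return type is unsigned */

	return o->get_key_size();
}

int main(void)
{
	/* What the old code handed to callers expecting a size: */
	printf("%u\n", (unsigned int)-EOPNOTSUPP);	/* 4294967201 */
	printf("%u\n", rss_key_size(NULL));		/* 0: "no RSS support" */
	return 0;
}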
4992 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
4993 +index 1b3cc8bb0705..fd8e6937ee00 100644
4994 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
4995 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
4996 +@@ -812,8 +812,6 @@ static void release_resources(struct ibmvnic_adapter *adapter)
4997 + release_tx_pools(adapter);
4998 + release_rx_pools(adapter);
4999 +
5000 +- release_stats_token(adapter);
5001 +- release_stats_buffers(adapter);
5002 + release_error_buffers(adapter);
5003 +
5004 + if (adapter->napi) {
5005 +@@ -953,14 +951,6 @@ static int init_resources(struct ibmvnic_adapter *adapter)
5006 + if (rc)
5007 + return rc;
5008 +
5009 +- rc = init_stats_buffers(adapter);
5010 +- if (rc)
5011 +- return rc;
5012 +-
5013 +- rc = init_stats_token(adapter);
5014 +- if (rc)
5015 +- return rc;
5016 +-
5017 + adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
5018 + if (!adapter->vpd)
5019 + return -ENOMEM;
5020 +@@ -1699,12 +1689,14 @@ static int do_reset(struct ibmvnic_adapter *adapter,
5021 + rc = reset_rx_pools(adapter);
5022 + if (rc)
5023 + return rc;
5024 +-
5025 +- if (reset_state == VNIC_CLOSED)
5026 +- return 0;
5027 + }
5028 + }
5029 +
5030 ++ adapter->state = VNIC_CLOSED;
5031 ++
5032 ++ if (reset_state == VNIC_CLOSED)
5033 ++ return 0;
5034 ++
5035 + rc = __ibmvnic_open(netdev);
5036 + if (rc) {
5037 + if (list_empty(&adapter->rwi_list))
5038 +@@ -2266,6 +2258,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
5039 + }
5040 +
5041 + memset(scrq->msgs, 0, 4 * PAGE_SIZE);
5042 ++ atomic_set(&scrq->used, 0);
5043 + scrq->cur = 0;
5044 +
5045 + rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
5046 +@@ -4387,6 +4380,14 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
5047 + release_crq_queue(adapter);
5048 + }
5049 +
5050 ++ rc = init_stats_buffers(adapter);
5051 ++ if (rc)
5052 ++ return rc;
5053 ++
5054 ++ rc = init_stats_token(adapter);
5055 ++ if (rc)
5056 ++ return rc;
5057 ++
5058 + return rc;
5059 + }
5060 +
5061 +@@ -4494,6 +4495,9 @@ static int ibmvnic_remove(struct vio_dev *dev)
5062 + release_sub_crqs(adapter);
5063 + release_crq_queue(adapter);
5064 +
5065 ++ release_stats_token(adapter);
5066 ++ release_stats_buffers(adapter);
5067 ++
5068 + adapter->state = VNIC_REMOVED;
5069 +
5070 + mutex_unlock(&adapter->reset_lock);
5071 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
5072 +index e31adbc75f9c..e50d703d7353 100644
5073 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
5074 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
5075 +@@ -9215,6 +9215,17 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
5076 + }
5077 + i40e_get_oem_version(&pf->hw);
5078 +
5079 ++ if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
5080 ++ ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
5081 ++ hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
5082 ++ /* The following delay is necessary for 4.33 firmware and older
5083 ++		 * to recover after EMP reset. 200 ms should suffice, but we
5084 ++		 * use 300 ms here to be sure that the FW is ready to operate
5085 ++		 * after the reset.
5086 ++ */
5087 ++ mdelay(300);
5088 ++ }
5089 ++
5090 + /* re-verify the eeprom if we just had an EMP reset */
5091 + if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
5092 + i40e_verify_eeprom(pf);
5093 +@@ -14216,7 +14227,13 @@ static int __maybe_unused i40e_suspend(struct device *dev)
5094 + if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
5095 + i40e_enable_mc_magic_wake(pf);
5096 +
5097 +- i40e_prep_for_reset(pf, false);
5098 ++	/* Since we're going to destroy queues during
5099 ++	 * i40e_clear_interrupt_scheme(), we should hold the RTNL lock
5100 ++	 * for this whole section.
5101 ++ */
5102 ++ rtnl_lock();
5103 ++
5104 ++ i40e_prep_for_reset(pf, true);
5105 +
5106 + wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
5107 + wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
5108 +@@ -14228,6 +14245,8 @@ static int __maybe_unused i40e_suspend(struct device *dev)
5109 + */
5110 + i40e_clear_interrupt_scheme(pf);
5111 +
5112 ++ rtnl_unlock();
5113 ++
5114 + return 0;
5115 + }
5116 +
5117 +@@ -14245,6 +14264,11 @@ static int __maybe_unused i40e_resume(struct device *dev)
5118 + if (!test_bit(__I40E_SUSPENDED, pf->state))
5119 + return 0;
5120 +
5121 ++ /* We need to hold the RTNL lock prior to restoring interrupt schemes,
5122 ++ * since we're going to be restoring queues
5123 ++ */
5124 ++ rtnl_lock();
5125 ++
5126 + /* We cleared the interrupt scheme when we suspended, so we need to
5127 + * restore it now to resume device functionality.
5128 + */
5129 +@@ -14255,7 +14279,9 @@ static int __maybe_unused i40e_resume(struct device *dev)
5130 + }
5131 +
5132 + clear_bit(__I40E_DOWN, pf->state);
5133 +- i40e_reset_and_rebuild(pf, false, false);
5134 ++ i40e_reset_and_rebuild(pf, false, true);
5135 ++
5136 ++ rtnl_unlock();
5137 +
5138 + /* Clear suspended state last after everything is recovered */
5139 + clear_bit(__I40E_SUSPENDED, pf->state);
5140 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
5141 +index 9fc063af233c..85369423452d 100644
5142 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
5143 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
5144 +@@ -7711,7 +7711,8 @@ static void ixgbe_service_task(struct work_struct *work)
5145 +
5146 + if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
5147 + ixgbe_ptp_overflow_check(adapter);
5148 +- ixgbe_ptp_rx_hang(adapter);
5149 ++ if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)
5150 ++ ixgbe_ptp_rx_hang(adapter);
5151 + ixgbe_ptp_tx_hang(adapter);
5152 + }
5153 +
5154 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
5155 +index e9a1fbcc4adf..3efe45bc2471 100644
5156 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
5157 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
5158 +@@ -1802,7 +1802,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
5159 +
5160 + cmd->checksum_disabled = 1;
5161 + cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
5162 +- cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
5163 ++ cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
5164 +
5165 + cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
5166 + if (cmd->cmdif_rev > CMD_IF_REV) {
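
The mlx5 command-interface change (1 << max_reg_cmds becoming 1UL << max_reg_cmds) guards the shift itself: max_reg_cmds can reach 31, and shifting a signed int constant into its sign bit is undefined behaviour in C, in practice yielding a negative value that sign-extends into wider fields. A toy demonstration of why the unsigned-long constant matters:

#include <stdio.h>

int main(void)
{
	int n = 31;	/* max_reg_cmds can legitimately reach 31 */

	/*
	 * (1 << n) - 1 with n == 31 shifts into the sign bit of a signed
	 * int: undefined behaviour, and typically a negative value that
	 * sign-extends when stored into a wider field. The 1UL constant
	 * keeps the whole expression unsigned and well-defined.
	 */
	unsigned long mask = (1UL << n) - 1;

	printf("mask = %#lx\n", mask);	/* prints 0x7fffffff */
	return 0;
}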
5167 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5168 +index 9b4827d36e3e..1ae61514b6a9 100644
5169 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5170 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5171 +@@ -153,26 +153,6 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
5172 + mutex_unlock(&priv->state_lock);
5173 + }
5174 +
5175 +-static void mlx5e_tx_timeout_work(struct work_struct *work)
5176 +-{
5177 +- struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
5178 +- tx_timeout_work);
5179 +- int err;
5180 +-
5181 +- rtnl_lock();
5182 +- mutex_lock(&priv->state_lock);
5183 +- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
5184 +- goto unlock;
5185 +- mlx5e_close_locked(priv->netdev);
5186 +- err = mlx5e_open_locked(priv->netdev);
5187 +- if (err)
5188 +- netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
5189 +- err);
5190 +-unlock:
5191 +- mutex_unlock(&priv->state_lock);
5192 +- rtnl_unlock();
5193 +-}
5194 +-
5195 + void mlx5e_update_stats(struct mlx5e_priv *priv)
5196 + {
5197 + int i;
5198 +@@ -3632,13 +3612,19 @@ static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
5199 + return true;
5200 + }
5201 +
5202 +-static void mlx5e_tx_timeout(struct net_device *dev)
5203 ++static void mlx5e_tx_timeout_work(struct work_struct *work)
5204 + {
5205 +- struct mlx5e_priv *priv = netdev_priv(dev);
5206 ++ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
5207 ++ tx_timeout_work);
5208 ++ struct net_device *dev = priv->netdev;
5209 + bool reopen_channels = false;
5210 +- int i;
5211 ++ int i, err;
5212 +
5213 +- netdev_err(dev, "TX timeout detected\n");
5214 ++ rtnl_lock();
5215 ++ mutex_lock(&priv->state_lock);
5216 ++
5217 ++ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
5218 ++ goto unlock;
5219 +
5220 + for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
5221 + struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i);
5222 +@@ -3646,7 +3632,9 @@ static void mlx5e_tx_timeout(struct net_device *dev)
5223 +
5224 + if (!netif_xmit_stopped(dev_queue))
5225 + continue;
5226 +- netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
5227 ++
5228 ++ netdev_err(dev,
5229 ++ "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
5230 + i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
5231 + jiffies_to_usecs(jiffies - dev_queue->trans_start));
5232 +
5233 +@@ -3659,8 +3647,27 @@ static void mlx5e_tx_timeout(struct net_device *dev)
5234 + }
5235 + }
5236 +
5237 +- if (reopen_channels && test_bit(MLX5E_STATE_OPENED, &priv->state))
5238 +- schedule_work(&priv->tx_timeout_work);
5239 ++ if (!reopen_channels)
5240 ++ goto unlock;
5241 ++
5242 ++ mlx5e_close_locked(dev);
5243 ++ err = mlx5e_open_locked(dev);
5244 ++ if (err)
5245 ++ netdev_err(priv->netdev,
5246 ++ "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
5247 ++ err);
5248 ++
5249 ++unlock:
5250 ++ mutex_unlock(&priv->state_lock);
5251 ++ rtnl_unlock();
5252 ++}
5253 ++
5254 ++static void mlx5e_tx_timeout(struct net_device *dev)
5255 ++{
5256 ++ struct mlx5e_priv *priv = netdev_priv(dev);
5257 ++
5258 ++ netdev_err(dev, "TX timeout detected\n");
5259 ++ queue_work(priv->wq, &priv->tx_timeout_work);
5260 + }
5261 +
5262 + static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
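
The mlx5e restructuring above follows from calling context: ndo_tx_timeout() runs from the netdev watchdog in atomic context, where rtnl_lock() and the driver's state mutex may not be taken, so the handler now only logs and queues work, and the workqueue callback does the locking, queue diagnosis and channel reopen. A kernel-style sketch of the split; struct my_priv and the function names are illustrative, not the mlx5e code:

#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <linux/rtnetlink.h>

/* Hypothetical private struct; assumes tx_timeout_work was set up with
 * INIT_WORK() and wq was created at probe time. */
struct my_priv {
	struct net_device *netdev;
	struct work_struct tx_timeout_work;
	struct workqueue_struct *wq;
};

static void my_tx_timeout_work(struct work_struct *work)
{
	struct my_priv *priv = container_of(work, struct my_priv,
					    tx_timeout_work);

	rtnl_lock();			/* sleeping locks are fine here */
	/* ... inspect queues, close and reopen channels as needed ... */
	rtnl_unlock();
}

/* ndo_tx_timeout runs in atomic context: only log and defer. */
static void my_tx_timeout(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	netdev_err(dev, "TX timeout detected\n");
	queue_work(priv->wq, &priv->tx_timeout_work);
}

Queuing on the driver's private workqueue rather than the system one also keeps the recovery ordered with the driver's other deferred work.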
5263 +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
5264 +index c4949183eef3..3881de91015e 100644
5265 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
5266 ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
5267 +@@ -307,6 +307,8 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
5268 + if (data[IFLA_VLAN_ID]) {
5269 + mux_id = nla_get_u16(data[IFLA_VLAN_ID]);
5270 + ep = rmnet_get_endpoint(port, priv->mux_id);
5271 ++ if (!ep)
5272 ++ return -ENODEV;
5273 +
5274 + hlist_del_init_rcu(&ep->hlnode);
5275 + hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
5276 +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
5277 +index 14c839bb09e7..7c9235c9d081 100644
5278 +--- a/drivers/net/ethernet/renesas/sh_eth.c
5279 ++++ b/drivers/net/ethernet/renesas/sh_eth.c
5280 +@@ -763,6 +763,7 @@ static struct sh_eth_cpu_data sh7757_data = {
5281 + .rpadir = 1,
5282 + .rpadir_value = 2 << 16,
5283 + .rtrate = 1,
5284 ++ .dual_port = 1,
5285 + };
5286 +
5287 + #define SH_GIGA_ETH_BASE 0xfee00000UL
5288 +@@ -841,6 +842,7 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
5289 + .no_trimd = 1,
5290 + .no_ade = 1,
5291 + .tsu = 1,
5292 ++ .dual_port = 1,
5293 + };
5294 +
5295 + /* SH7734 */
5296 +@@ -911,6 +913,7 @@ static struct sh_eth_cpu_data sh7763_data = {
5297 + .tsu = 1,
5298 + .irq_flags = IRQF_SHARED,
5299 + .magic = 1,
5300 ++ .dual_port = 1,
5301 + };
5302 +
5303 + static struct sh_eth_cpu_data sh7619_data = {
5304 +@@ -943,6 +946,7 @@ static struct sh_eth_cpu_data sh771x_data = {
5305 + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
5306 + EESIPR_PREIP | EESIPR_CERFIP,
5307 + .tsu = 1,
5308 ++ .dual_port = 1,
5309 + };
5310 +
5311 + static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
5312 +@@ -2932,7 +2936,7 @@ static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
5313 + /* SuperH's TSU register init function */
5314 + static void sh_eth_tsu_init(struct sh_eth_private *mdp)
5315 + {
5316 +- if (sh_eth_is_rz_fast_ether(mdp)) {
5317 ++ if (!mdp->cd->dual_port) {
5318 + sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
5319 + sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
5320 + TSU_FWSLC); /* Enable POST registers */
5321 +diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
5322 +index e5fe70134690..fdd6d71c03d1 100644
5323 +--- a/drivers/net/ethernet/renesas/sh_eth.h
5324 ++++ b/drivers/net/ethernet/renesas/sh_eth.h
5325 +@@ -509,6 +509,7 @@ struct sh_eth_cpu_data {
5326 + unsigned rmiimode:1; /* EtherC has RMIIMODE register */
5327 + unsigned rtrate:1; /* EtherC has RTRATE register */
5328 + unsigned magic:1; /* EtherC has ECMR.MPDE and ECSR.MPD */
5329 ++ unsigned dual_port:1; /* Dual EtherC/E-DMAC */
5330 + };
5331 +
5332 + struct sh_eth_private {
5333 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
5334 +index 3ea343b45d93..8044563453f9 100644
5335 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
5336 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
5337 +@@ -1843,6 +1843,11 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
5338 + if (unlikely(status & tx_dma_own))
5339 + break;
5340 +
5341 ++ /* Make sure descriptor fields are read after reading
5342 ++ * the own bit.
5343 ++ */
5344 ++ dma_rmb();
5345 ++
5346 + /* Just consider the last segment and ...*/
5347 + if (likely(!(status & tx_not_ls))) {
5348 + /* ... verify the status error condition */
5349 +@@ -2430,7 +2435,7 @@ static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
5350 + continue;
5351 +
5352 + packet = priv->plat->rx_queues_cfg[queue].pkt_route;
5353 +- priv->hw->mac->rx_queue_prio(priv->hw, packet, queue);
5354 ++ priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
5355 + }
5356 + }
5357 +
5358 +@@ -2980,8 +2985,15 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
5359 + tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
5360 +
5361 + /* If context desc is used to change MSS */
5362 +- if (mss_desc)
5363 ++ if (mss_desc) {
5364 ++		/* Make sure that the first descriptor has been completely
5365 ++		 * written, including its own bit. This is because the MSS
5366 ++		 * descriptor actually comes before the first descriptor, so
5367 ++		 * we need to make sure that the MSS's own bit is written last.
5368 ++ */
5369 ++ dma_wmb();
5370 + priv->hw->desc->set_tx_owner(mss_desc);
5371 ++ }
5372 +
5373 + /* The own bit must be the latest setting done when prepare the
5374 + * descriptor and then barrier is needed to make sure that
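
Both stmmac hunks are about descriptor ownership ordering on a DMA ring: dma_rmb() after testing the own bit keeps the CPU from reading descriptor fields the device may still be writing, and dma_wmb() before flipping an own bit keeps the device from observing ownership before the fields are complete. A sketch of the paired pattern; struct desc and OWN_BIT are illustrative, not the stmmac descriptor layout:

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

struct desc { u32 status; u32 len; u32 addr; };	/* illustrative */

#define OWN_BIT 0x80000000u

/* CPU reclaiming a descriptor the device may still own. */
static bool reclaim(struct desc *d)
{
	if (READ_ONCE(d->status) & OWN_BIT)
		return false;			/* device still owns it */

	dma_rmb();	/* order field reads after the own-bit read */
	/* ... d->len and d->addr are now safe to read ... */
	return true;
}

/* CPU handing a descriptor to the device. */
static void publish(struct desc *d, u32 len, u32 addr)
{
	d->len = len;
	d->addr = addr;

	dma_wmb();	/* make fields visible before ownership flips */
	WRITE_ONCE(d->status, OWN_BIT);
}

The MSS case in stmmac_tso_xmit() is the same producer pattern one level up: the context descriptor sits before the first data descriptor, so its own bit has to be the very last store.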
5375 +diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
5376 +index 7472172823f3..11209e494502 100644
5377 +--- a/drivers/net/hyperv/netvsc.c
5378 ++++ b/drivers/net/hyperv/netvsc.c
5379 +@@ -1078,10 +1078,14 @@ static int netvsc_receive(struct net_device *ndev,
5380 + void *data = recv_buf
5381 + + vmxferpage_packet->ranges[i].byte_offset;
5382 + u32 buflen = vmxferpage_packet->ranges[i].byte_count;
5383 ++ int ret;
5384 +
5385 + /* Pass it to the upper layer */
5386 +- status = rndis_filter_receive(ndev, net_device,
5387 +- channel, data, buflen);
5388 ++ ret = rndis_filter_receive(ndev, net_device,
5389 ++ channel, data, buflen);
5390 ++
5391 ++ if (unlikely(ret != NVSP_STAT_SUCCESS))
5392 ++ status = NVSP_STAT_FAIL;
5393 + }
5394 +
5395 + enq_receive_complete(ndev, net_device, q_idx,
5396 +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
5397 +index 4774766fe20d..2a7752c113df 100644
5398 +--- a/drivers/net/hyperv/netvsc_drv.c
5399 ++++ b/drivers/net/hyperv/netvsc_drv.c
5400 +@@ -831,7 +831,7 @@ int netvsc_recv_callback(struct net_device *net,
5401 + u64_stats_update_end(&rx_stats->syncp);
5402 +
5403 + napi_gro_receive(&nvchan->napi, skb);
5404 +- return 0;
5405 ++ return NVSP_STAT_SUCCESS;
5406 + }
5407 +
5408 + static void netvsc_get_drvinfo(struct net_device *net,
5409 +diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
5410 +index 95846f0321f3..33138e4f0b5a 100644
5411 +--- a/drivers/net/hyperv/rndis_filter.c
5412 ++++ b/drivers/net/hyperv/rndis_filter.c
5413 +@@ -434,10 +434,10 @@ int rndis_filter_receive(struct net_device *ndev,
5414 + "unhandled rndis message (type %u len %u)\n",
5415 + rndis_msg->ndis_msg_type,
5416 + rndis_msg->msg_len);
5417 +- break;
5418 ++ return NVSP_STAT_FAIL;
5419 + }
5420 +
5421 +- return 0;
5422 ++ return NVSP_STAT_SUCCESS;
5423 + }
5424 +
5425 + static int rndis_filter_query_device(struct rndis_device *dev,
5426 +diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
5427 +index 377af43b81b3..58299fb666ed 100644
5428 +--- a/drivers/net/ieee802154/ca8210.c
5429 ++++ b/drivers/net/ieee802154/ca8210.c
5430 +@@ -2493,13 +2493,14 @@ static ssize_t ca8210_test_int_user_write(
5431 + struct ca8210_priv *priv = filp->private_data;
5432 + u8 command[CA8210_SPI_BUF_SIZE];
5433 +
5434 +- if (len > CA8210_SPI_BUF_SIZE) {
5435 ++ memset(command, SPI_IDLE, 6);
5436 ++ if (len > CA8210_SPI_BUF_SIZE || len < 2) {
5437 + dev_warn(
5438 + &priv->spi->dev,
5439 +- "userspace requested erroneously long write (%zu)\n",
5440 ++ "userspace requested erroneous write length (%zu)\n",
5441 + len
5442 + );
5443 +- return -EMSGSIZE;
5444 ++ return -EBADE;
5445 + }
5446 +
5447 + ret = copy_from_user(command, in_buf, len);
5448 +@@ -2511,6 +2512,13 @@ static ssize_t ca8210_test_int_user_write(
5449 + );
5450 + return -EIO;
5451 + }
5452 ++ if (len != command[1] + 2) {
5453 ++ dev_err(
5454 ++ &priv->spi->dev,
5455 ++ "write len does not match packet length field\n"
5456 ++ );
5457 ++ return -EBADE;
5458 ++ }
5459 +
5460 + ret = ca8210_test_check_upstream(command, priv->spi);
5461 + if (ret == 0) {
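
The ca8210 debugfs write path now validates three things before trusting user input: the write must cover at least the 2-byte header, must fit the buffer, and must agree with the packet's own declared length (command[1] + 2), so a short write cannot pass off a longer claimed payload downstream. The checks condense to the following sketch (BUF_SIZE and the helper are illustrative):

#include <stddef.h>
#include <stdint.h>

#define BUF_SIZE 256	/* stands in for CA8210_SPI_BUF_SIZE */

/* Returns 0 if a user-supplied packet is self-consistent, -1 otherwise. */
static int check_user_packet(const uint8_t *buf, size_t len)
{
	if (len < 2 || len > BUF_SIZE)
		return -1;	/* header must fit, buffer must not overflow */

	/* byte 1 is the payload length; the total must match exactly */
	if (len != (size_t)buf[1] + 2)
		return -1;

	return 0;
}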
5462 +diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
5463 +index 654f42d00092..a6c87793d899 100644
5464 +--- a/drivers/net/phy/dp83640.c
5465 ++++ b/drivers/net/phy/dp83640.c
5466 +@@ -1207,6 +1207,23 @@ static void dp83640_remove(struct phy_device *phydev)
5467 + kfree(dp83640);
5468 + }
5469 +
5470 ++static int dp83640_soft_reset(struct phy_device *phydev)
5471 ++{
5472 ++ int ret;
5473 ++
5474 ++ ret = genphy_soft_reset(phydev);
5475 ++ if (ret < 0)
5476 ++ return ret;
5477 ++
5478 ++ /* From DP83640 datasheet: "Software driver code must wait 3 us
5479 ++ * following a software reset before allowing further serial MII
5480 ++ * operations with the DP83640."
5481 ++ */
5482 ++ udelay(10); /* Taking udelay inaccuracy into account */
5483 ++
5484 ++ return 0;
5485 ++}
5486 ++
5487 + static int dp83640_config_init(struct phy_device *phydev)
5488 + {
5489 + struct dp83640_private *dp83640 = phydev->priv;
5490 +@@ -1501,6 +1518,7 @@ static struct phy_driver dp83640_driver = {
5491 + .flags = PHY_HAS_INTERRUPT,
5492 + .probe = dp83640_probe,
5493 + .remove = dp83640_remove,
5494 ++ .soft_reset = dp83640_soft_reset,
5495 + .config_init = dp83640_config_init,
5496 + .ack_interrupt = dp83640_ack_interrupt,
5497 + .config_intr = dp83640_config_intr,
5498 +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
5499 +index 32cf21716f19..145bb7cbf5b2 100644
5500 +--- a/drivers/net/usb/lan78xx.c
5501 ++++ b/drivers/net/usb/lan78xx.c
5502 +@@ -2083,10 +2083,6 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
5503 +
5504 + dev->fc_autoneg = phydev->autoneg;
5505 +
5506 +- phy_start(phydev);
5507 +-
5508 +- netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
5509 +-
5510 + return 0;
5511 +
5512 + error:
5513 +@@ -2523,9 +2519,9 @@ static int lan78xx_open(struct net_device *net)
5514 + if (ret < 0)
5515 + goto done;
5516 +
5517 +- ret = lan78xx_phy_init(dev);
5518 +- if (ret < 0)
5519 +- goto done;
5520 ++ phy_start(net->phydev);
5521 ++
5522 ++ netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
5523 +
5524 + /* for Link Check */
5525 + if (dev->urb_intr) {
5526 +@@ -2586,13 +2582,8 @@ static int lan78xx_stop(struct net_device *net)
5527 + if (timer_pending(&dev->stat_monitor))
5528 + del_timer_sync(&dev->stat_monitor);
5529 +
5530 +- phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
5531 +- phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
5532 +-
5533 +- phy_stop(net->phydev);
5534 +- phy_disconnect(net->phydev);
5535 +-
5536 +- net->phydev = NULL;
5537 ++ if (net->phydev)
5538 ++ phy_stop(net->phydev);
5539 +
5540 + clear_bit(EVENT_DEV_OPEN, &dev->flags);
5541 + netif_stop_queue(net);
5542 +@@ -3507,8 +3498,13 @@ static void lan78xx_disconnect(struct usb_interface *intf)
5543 + return;
5544 +
5545 + udev = interface_to_usbdev(intf);
5546 +-
5547 + net = dev->net;
5548 ++
5549 ++ phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
5550 ++ phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
5551 ++
5552 ++ phy_disconnect(net->phydev);
5553 ++
5554 + unregister_netdev(net);
5555 +
5556 + cancel_delayed_work_sync(&dev->wq);
5557 +@@ -3664,8 +3660,14 @@ static int lan78xx_probe(struct usb_interface *intf,
5558 + pm_runtime_set_autosuspend_delay(&udev->dev,
5559 + DEFAULT_AUTOSUSPEND_DELAY);
5560 +
5561 ++ ret = lan78xx_phy_init(dev);
5562 ++ if (ret < 0)
5563 ++ goto out4;
5564 ++
5565 + return 0;
5566 +
5567 ++out4:
5568 ++ unregister_netdev(netdev);
5569 + out3:
5570 + lan78xx_unbind(dev, intf);
5571 + out2:
5572 +@@ -4013,7 +4015,7 @@ static int lan78xx_reset_resume(struct usb_interface *intf)
5573 +
5574 + lan78xx_reset(dev);
5575 +
5576 +- lan78xx_phy_init(dev);
5577 ++ phy_start(dev->net->phydev);
5578 +
5579 + return lan78xx_resume(intf);
5580 + }
5581 +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
5582 +index aa21b2225679..16b0c7db431b 100644
5583 +--- a/drivers/net/virtio_net.c
5584 ++++ b/drivers/net/virtio_net.c
5585 +@@ -2874,8 +2874,8 @@ static int virtnet_probe(struct virtio_device *vdev)
5586 +
5587 + /* Assume link up if device can't report link status,
5588 + otherwise get link status from config. */
5589 ++ netif_carrier_off(dev);
5590 + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
5591 +- netif_carrier_off(dev);
5592 + schedule_work(&vi->config_work);
5593 + } else {
5594 + vi->status = VIRTIO_NET_S_LINK_UP;
5595 +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
5596 +index 800a86e2d671..2d7ef7460780 100644
5597 +--- a/drivers/net/wireless/ath/ath10k/mac.c
5598 ++++ b/drivers/net/wireless/ath/ath10k/mac.c
5599 +@@ -7084,10 +7084,20 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
5600 + {
5601 + struct ath10k *ar = hw->priv;
5602 + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
5603 ++ struct ath10k_vif *arvif = (void *)vif->drv_priv;
5604 ++ struct ath10k_peer *peer;
5605 + u32 bw, smps;
5606 +
5607 + spin_lock_bh(&ar->data_lock);
5608 +
5609 ++ peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
5610 ++ if (!peer) {
5611 ++ spin_unlock_bh(&ar->data_lock);
5612 ++ ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n",
5613 ++ sta->addr, arvif->vdev_id);
5614 ++ return;
5615 ++ }
5616 ++
5617 + ath10k_dbg(ar, ATH10K_DBG_MAC,
5618 + "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
5619 + sta->addr, changed, sta->bandwidth, sta->rx_nss,
5620 +@@ -7873,6 +7883,7 @@ static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
5621 + .max_interfaces = 8,
5622 + .num_different_channels = 1,
5623 + .beacon_int_infra_match = true,
5624 ++ .beacon_int_min_gcd = 1,
5625 + #ifdef CONFIG_ATH10K_DFS_CERTIFIED
5626 + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
5627 + BIT(NL80211_CHAN_WIDTH_20) |
5628 +@@ -7996,6 +8007,7 @@ static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
5629 + .max_interfaces = 16,
5630 + .num_different_channels = 1,
5631 + .beacon_int_infra_match = true,
5632 ++ .beacon_int_min_gcd = 1,
5633 + #ifdef CONFIG_ATH10K_DFS_CERTIFIED
5634 + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
5635 + BIT(NL80211_CHAN_WIDTH_20) |
5636 +diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
5637 +index 5e77fe1f5b0d..a41bcbda1d9e 100644
5638 +--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
5639 ++++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
5640 +@@ -479,14 +479,16 @@ ath_cmn_is_fft_buf_full(struct ath_spec_scan_priv *spec_priv)
5641 + {
5642 + int i = 0;
5643 + int ret = 0;
5644 ++ struct rchan_buf *buf;
5645 + struct rchan *rc = spec_priv->rfs_chan_spec_scan;
5646 +
5647 +- for_each_online_cpu(i)
5648 +- ret += relay_buf_full(*per_cpu_ptr(rc->buf, i));
5649 +-
5650 +- i = num_online_cpus();
5651 ++ for_each_possible_cpu(i) {
5652 ++ if ((buf = *per_cpu_ptr(rc->buf, i))) {
5653 ++ ret += relay_buf_full(buf);
5654 ++ }
5655 ++ }
5656 +
5657 +- if (ret == i)
5658 ++ if (ret)
5659 + return 1;
5660 + else
5661 + return 0;
5662 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
5663 +index 55d1274c6092..fb5745660509 100644
5664 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
5665 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
5666 +@@ -234,13 +234,15 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
5667 + struct iwl_mvm_sta *mvmsta;
5668 + struct iwl_lq_sta_rs_fw *lq_sta;
5669 +
5670 ++ rcu_read_lock();
5671 ++
5672 + notif = (void *)pkt->data;
5673 + mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, notif->sta_id);
5674 +
5675 + if (!mvmsta) {
5676 + IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n",
5677 + notif->sta_id);
5678 +- return;
5679 ++ goto out;
5680 + }
5681 +
5682 + lq_sta = &mvmsta->lq_sta.rs_fw;
5683 +@@ -251,6 +253,8 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
5684 + IWL_DEBUG_RATE(mvm, "new rate_n_flags: 0x%X\n",
5685 + lq_sta->last_rate_n_flags);
5686 + }
5687 ++out:
5688 ++ rcu_read_unlock();
5689 + }
5690 +
5691 + void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
5692 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
5693 +index d65e1db7c097..70f8b8eb6117 100644
5694 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
5695 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
5696 +@@ -800,12 +800,19 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
5697 + .scd_queue = queue,
5698 + .action = SCD_CFG_DISABLE_QUEUE,
5699 + };
5700 +- bool remove_mac_queue = true;
5701 ++ bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
5702 + int ret;
5703 +
5704 ++ if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
5705 ++ return -EINVAL;
5706 ++
5707 + if (iwl_mvm_has_new_tx_api(mvm)) {
5708 + spin_lock_bh(&mvm->queue_info_lock);
5709 +- mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac80211_queue);
5710 ++
5711 ++ if (remove_mac_queue)
5712 ++ mvm->hw_queue_to_mac80211[queue] &=
5713 ++ ~BIT(mac80211_queue);
5714 ++
5715 + spin_unlock_bh(&mvm->queue_info_lock);
5716 +
5717 + iwl_trans_txq_free(mvm->trans, queue);
5718 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
5719 +index 205043b470b2..7d4e308ee6a7 100644
5720 +--- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
5721 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
5722 +@@ -336,6 +336,17 @@ mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
5723 + int idx = key->keyidx;
5724 + int ret;
5725 +
5726 ++ /* fall back to sw encryption for unsupported ciphers */
5727 ++ switch (key->cipher) {
5728 ++ case WLAN_CIPHER_SUITE_WEP40:
5729 ++ case WLAN_CIPHER_SUITE_WEP104:
5730 ++ case WLAN_CIPHER_SUITE_TKIP:
5731 ++ case WLAN_CIPHER_SUITE_CCMP:
5732 ++ break;
5733 ++ default:
5734 ++ return -EOPNOTSUPP;
5735 ++ }
5736 ++
5737 + /*
5738 + * The hardware does not support per-STA RX GTK, fall back
5739 + * to software mode for these.
5740 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
5741 +index 534e4bf9a34c..e46eafc4c436 100644
5742 +--- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
5743 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
5744 +@@ -36,9 +36,12 @@ void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
5745 +
5746 + msta = (struct mt76x2_sta *) control->sta->drv_priv;
5747 + wcid = &msta->wcid;
5748 ++ /* sw encrypted frames */
5749 ++ if (!info->control.hw_key && wcid->hw_key_idx != -1)
5750 ++ control->sta = NULL;
5751 + }
5752 +
5753 +- if (vif || (!info->control.hw_key && wcid->hw_key_idx != -1)) {
5754 ++ if (vif && !control->sta) {
5755 + struct mt76x2_vif *mvif;
5756 +
5757 + mvif = (struct mt76x2_vif *) vif->drv_priv;
5758 +diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
5759 +index b0cf41195051..96fc3c84d7d2 100644
5760 +--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
5761 ++++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
5762 +@@ -636,11 +636,14 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr,
5763 + u32 *read_buf, u16 size)
5764 + {
5765 + u32 addr_on_bus, *data;
5766 +- u32 align[2] = {};
5767 + u16 ms_addr;
5768 + int status;
5769 +
5770 +- data = PTR_ALIGN(&align[0], 8);
5771 ++ data = kzalloc(RSI_MASTER_REG_BUF_SIZE, GFP_KERNEL);
5772 ++ if (!data)
5773 ++ return -ENOMEM;
5774 ++
5775 ++ data = PTR_ALIGN(data, 8);
5776 +
5777 + ms_addr = (addr >> 16);
5778 + status = rsi_sdio_master_access_msword(adapter, ms_addr);
5779 +@@ -648,7 +651,7 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr,
5780 + rsi_dbg(ERR_ZONE,
5781 + "%s: Unable to set ms word to common reg\n",
5782 + __func__);
5783 +- return status;
5784 ++ goto err;
5785 + }
5786 + addr &= 0xFFFF;
5787 +
5788 +@@ -666,7 +669,7 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr,
5789 + (u8 *)data, 4);
5790 + if (status < 0) {
5791 + rsi_dbg(ERR_ZONE, "%s: AHB register read failed\n", __func__);
5792 +- return status;
5793 ++ goto err;
5794 + }
5795 + if (size == 2) {
5796 + if ((addr & 0x3) == 0)
5797 +@@ -688,17 +691,23 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr,
5798 + *read_buf = *data;
5799 + }
5800 +
5801 +- return 0;
5802 ++err:
5803 ++ kfree(data);
5804 ++ return status;
5805 + }
5806 +
5807 + static int rsi_sdio_master_reg_write(struct rsi_hw *adapter,
5808 + unsigned long addr,
5809 + unsigned long data, u16 size)
5810 + {
5811 +- unsigned long data1[2], *data_aligned;
5812 ++ unsigned long *data_aligned;
5813 + int status;
5814 +
5815 +- data_aligned = PTR_ALIGN(&data1[0], 8);
5816 ++ data_aligned = kzalloc(RSI_MASTER_REG_BUF_SIZE, GFP_KERNEL);
5817 ++ if (!data_aligned)
5818 ++ return -ENOMEM;
5819 ++
5820 ++ data_aligned = PTR_ALIGN(data_aligned, 8);
5821 +
5822 + if (size == 2) {
5823 + *data_aligned = ((data << 16) | (data & 0xFFFF));
5824 +@@ -717,6 +726,7 @@ static int rsi_sdio_master_reg_write(struct rsi_hw *adapter,
5825 + rsi_dbg(ERR_ZONE,
5826 + "%s: Unable to set ms word to common reg\n",
5827 + __func__);
5828 ++ kfree(data_aligned);
5829 + return -EIO;
5830 + }
5831 + addr = addr & 0xFFFF;
5832 +@@ -726,12 +736,12 @@ static int rsi_sdio_master_reg_write(struct rsi_hw *adapter,
5833 + (adapter,
5834 + (addr | RSI_SD_REQUEST_MASTER),
5835 + (u8 *)data_aligned, size);
5836 +- if (status < 0) {
5837 ++ if (status < 0)
5838 + rsi_dbg(ERR_ZONE,
5839 + "%s: Unable to do AHB reg write\n", __func__);
5840 +- return status;
5841 +- }
5842 +- return 0;
5843 ++
5844 ++ kfree(data_aligned);
5845 ++ return status;
5846 + }
5847 +
5848 + /**
5849 +diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h
5850 +index 49c549ba6682..34242d84bd7b 100644
5851 +--- a/drivers/net/wireless/rsi/rsi_sdio.h
5852 ++++ b/drivers/net/wireless/rsi/rsi_sdio.h
5853 +@@ -46,6 +46,8 @@ enum sdio_interrupt_type {
5854 + #define PKT_BUFF_AVAILABLE 1
5855 + #define FW_ASSERT_IND 2
5856 +
5857 ++#define RSI_MASTER_REG_BUF_SIZE 12
5858 ++
5859 + #define RSI_DEVICE_BUFFER_STATUS_REGISTER 0xf3
5860 + #define RSI_FN1_INT_REGISTER 0xf9
5861 + #define RSI_INT_ENABLE_REGISTER 0x04
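
The rsi_91x_sdio changes swap on-stack buffers for kzalloc()'d ones in the master register accessors. Buffers handed to an SDIO host controller may be DMA-mapped, and stack memory is not DMA-safe; with CONFIG_VMAP_STACK it need not even be physically contiguous. The 12-byte RSI_MASTER_REG_BUF_SIZE presumably leaves slack for the 8-byte PTR_ALIGN. A sketch of the allocate/align/free shape; note it keeps the original pointer for kfree(), which the patch itself can skip only because kmalloc memory is already at least 8-byte aligned (struct my_hw and my_bus_read are illustrative):

#include <linux/kernel.h>
#include <linux/slab.h>

struct my_hw;					/* stands in for struct rsi_hw */
int my_bus_read(struct my_hw *hw, u32 addr, u8 *buf, u16 len);	/* hypothetical */

static int reg_read(struct my_hw *hw, u32 addr, u32 *out)
{
	u32 *buf, *data;
	int status;

	buf = kzalloc(12, GFP_KERNEL);		/* heap, hence DMA-safe */
	if (!buf)
		return -ENOMEM;

	data = PTR_ALIGN(buf, 8);		/* 8-byte-aligned view */

	status = my_bus_read(hw, addr, (u8 *)data, 4);
	if (!status)
		*out = *data;

	kfree(buf);				/* free what kzalloc returned */
	return status;
}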
5862 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
5863 +index f81773570dfd..f5259912f049 100644
5864 +--- a/drivers/nvme/host/core.c
5865 ++++ b/drivers/nvme/host/core.c
5866 +@@ -379,6 +379,15 @@ static void nvme_put_ns(struct nvme_ns *ns)
5867 + kref_put(&ns->kref, nvme_free_ns);
5868 + }
5869 +
5870 ++static inline void nvme_clear_nvme_request(struct request *req)
5871 ++{
5872 ++ if (!(req->rq_flags & RQF_DONTPREP)) {
5873 ++ nvme_req(req)->retries = 0;
5874 ++ nvme_req(req)->flags = 0;
5875 ++ req->rq_flags |= RQF_DONTPREP;
5876 ++ }
5877 ++}
5878 ++
5879 + struct request *nvme_alloc_request(struct request_queue *q,
5880 + struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
5881 + {
5882 +@@ -395,6 +404,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
5883 + return req;
5884 +
5885 + req->cmd_flags |= REQ_FAILFAST_DRIVER;
5886 ++ nvme_clear_nvme_request(req);
5887 + nvme_req(req)->cmd = cmd;
5888 +
5889 + return req;
5890 +@@ -611,11 +621,7 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
5891 + {
5892 + blk_status_t ret = BLK_STS_OK;
5893 +
5894 +- if (!(req->rq_flags & RQF_DONTPREP)) {
5895 +- nvme_req(req)->retries = 0;
5896 +- nvme_req(req)->flags = 0;
5897 +- req->rq_flags |= RQF_DONTPREP;
5898 +- }
5899 ++ nvme_clear_nvme_request(req);
5900 +
5901 + switch (req_op(req)) {
5902 + case REQ_OP_DRV_IN:
5903 +@@ -745,6 +751,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
5904 + return PTR_ERR(req);
5905 +
5906 + req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
5907 ++ nvme_req(req)->flags |= NVME_REQ_USERCMD;
5908 +
5909 + if (ubuffer && bufflen) {
5910 + ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
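
The nvme core hunk hoists the RQF_DONTPREP-guarded reset of retries/flags into nvme_clear_nvme_request() and calls it from nvme_alloc_request() as well, so even requests that never reach nvme_setup_cmd() start clean; nvme_submit_user_cmd() then marks user-originated requests with NVME_REQ_USERCMD, which the fabrics ready-check later in this patch consults. The guard pattern itself, in miniature (struct req and the flag value are illustrative):

#define RQF_DONTPREP (1u << 0)	/* "already prepared" marker */

struct req {			/* illustrative, not struct request */
	unsigned int rq_flags;
	int retries;
	unsigned int flags;
};

static void clear_req(struct req *rq)
{
	if (!(rq->rq_flags & RQF_DONTPREP)) {
		rq->retries = 0;	/* first prep only */
		rq->flags = 0;
		rq->rq_flags |= RQF_DONTPREP;
	}
}

A requeued request keeps RQF_DONTPREP, so a second call is a no-op and the retry count survives the requeue.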
5911 +diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
5912 +index 8f0f34d06d46..124c458806df 100644
5913 +--- a/drivers/nvme/host/fabrics.c
5914 ++++ b/drivers/nvme/host/fabrics.c
5915 +@@ -536,6 +536,85 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
5916 + return NULL;
5917 + }
5918 +
5919 ++blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl, struct request *rq,
5920 ++ bool queue_live, bool is_connected)
5921 ++{
5922 ++ struct nvme_command *cmd = nvme_req(rq)->cmd;
5923 ++
5924 ++ if (likely(ctrl->state == NVME_CTRL_LIVE && is_connected))
5925 ++ return BLK_STS_OK;
5926 ++
5927 ++ switch (ctrl->state) {
5928 ++ case NVME_CTRL_DELETING:
5929 ++ goto reject_io;
5930 ++
5931 ++ case NVME_CTRL_NEW:
5932 ++ case NVME_CTRL_CONNECTING:
5933 ++ if (!is_connected)
5934 ++ /*
5935 ++ * This is the case of starting a new
5936 ++ * association but connectivity was lost
5937 ++ * before it was fully created. We need to
5938 ++ * error the commands used to initialize the
5939 ++ * controller so the reconnect can go into a
5940 ++ * retry attempt. The commands should all be
5941 ++ * marked REQ_FAILFAST_DRIVER, which will hit
5942 ++ * the reject path below. Anything else will
5943 ++ * be queued while the state settles.
5944 ++ */
5945 ++ goto reject_or_queue_io;
5946 ++
5947 ++ if ((queue_live &&
5948 ++ !(nvme_req(rq)->flags & NVME_REQ_USERCMD)) ||
5949 ++ (!queue_live && blk_rq_is_passthrough(rq) &&
5950 ++ cmd->common.opcode == nvme_fabrics_command &&
5951 ++ cmd->fabrics.fctype == nvme_fabrics_type_connect))
5952 ++ /*
5953 ++			 * If the queue is live, allow only commands that
5954 ++			 * are internally generated to pass through. These
5955 ++ * are commands on the admin queue to initialize
5956 ++ * the controller. This will reject any ioctl
5957 ++ * admin cmds received while initializing.
5958 ++ *
5959 ++ * If the queue is not live, allow only a
5960 ++ * connect command. This will reject any ioctl
5961 ++ * admin cmd as well as initialization commands
5962 ++ * if the controller reverted the queue to non-live.
5963 ++ */
5964 ++ return BLK_STS_OK;
5965 ++
5966 ++ /*
5967 ++ * fall-thru to the reject_or_queue_io clause
5968 ++ */
5969 ++ break;
5970 ++
5971 ++ /* these cases fall-thru
5972 ++ * case NVME_CTRL_LIVE:
5973 ++ * case NVME_CTRL_RESETTING:
5974 ++ */
5975 ++ default:
5976 ++ break;
5977 ++ }
5978 ++
5979 ++reject_or_queue_io:
5980 ++ /*
5981 ++ * Any other new io is something we're not in a state to send
5982 ++ * to the device. Default action is to busy it and retry it
5983 ++ * after the controller state is recovered. However, anything
5984 ++ * marked for failfast or nvme multipath is immediately failed.
5985 ++ * Note: commands used to initialize the controller will be
5986 ++ * marked for failfast.
5987 ++ * Note: nvme cli/ioctl commands are marked for failfast.
5988 ++ */
5989 ++ if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
5990 ++ return BLK_STS_RESOURCE;
5991 ++
5992 ++reject_io:
5993 ++ nvme_req(rq)->status = NVME_SC_ABORT_REQ;
5994 ++ return BLK_STS_IOERR;
5995 ++}
5996 ++EXPORT_SYMBOL_GPL(nvmf_check_if_ready);
5997 ++
5998 + static const match_table_t opt_tokens = {
5999 + { NVMF_OPT_TRANSPORT, "transport=%s" },
6000 + { NVMF_OPT_TRADDR, "traddr=%s" },
6001 +@@ -608,8 +687,10 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
6002 + opts->discovery_nqn =
6003 + !(strcmp(opts->subsysnqn,
6004 + NVME_DISC_SUBSYS_NAME));
6005 +- if (opts->discovery_nqn)
6006 ++ if (opts->discovery_nqn) {
6007 ++ opts->kato = 0;
6008 + opts->nr_io_queues = 0;
6009 ++ }
6010 + break;
6011 + case NVMF_OPT_TRADDR:
6012 + p = match_strdup(args);
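
The new nvmf_check_if_ready() collapses the per-transport readiness checks
into one decision table: a live, connected controller passes everything; a
deleting controller rejects permanently; a new or connecting controller
admits only internally generated initialization traffic on a live queue, or
a Connect command on a queue that is not yet live; everything else is busied
for retry or failed fast. A condensed user-space model of that table (return
values are simplified to strings, and the REQ_NVME_MPATH check is folded into
the failfast flag; this is an illustration, not the kernel function):

#include <stdbool.h>
#include <stdio.h>

enum ctrl_state { CTRL_NEW, CTRL_LIVE, CTRL_RESETTING,
		  CTRL_CONNECTING, CTRL_DELETING };

static const char *check_if_ready(enum ctrl_state state, bool is_connected,
				  bool queue_live, bool is_connect_cmd,
				  bool is_user_cmd, bool failfast)
{
	if (state == CTRL_LIVE && is_connected)
		return "OK";				/* the fast path */
	if (state == CTRL_DELETING)
		return "IOERR";				/* reject permanently */
	if ((state == CTRL_NEW || state == CTRL_CONNECTING) && is_connected) {
		if (queue_live && !is_user_cmd)
			return "OK";	/* internal initialization traffic */
		if (!queue_live && is_connect_cmd)
			return "OK";	/* only Connect can revive the queue */
	}
	return failfast ? "IOERR" : "RESOURCE";		/* busy, retry later */
}

int main(void)
{
	/* a user admin command arriving while still connecting is rejected */
	puts(check_if_ready(CTRL_CONNECTING, true, true, false, true, true));
	return 0;
}

Each transport now feeds its own notion of queue liveness and connectivity
into the same table, as the fc.c, rdma.c and loop.c hunks below show.
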
6013 +diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
6014 +index a3145d90c1d2..ef46c915b7b5 100644
6015 +--- a/drivers/nvme/host/fabrics.h
6016 ++++ b/drivers/nvme/host/fabrics.h
6017 +@@ -157,36 +157,7 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
6018 + void nvmf_free_options(struct nvmf_ctrl_options *opts);
6019 + int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
6020 + bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
6021 +-
6022 +-static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
6023 +- struct request *rq)
6024 +-{
6025 +- struct nvme_command *cmd = nvme_req(rq)->cmd;
6026 +-
6027 +- /*
6028 +- * We cannot accept any other command until the connect command has
6029 +- * completed, so only allow connect to pass.
6030 +- */
6031 +- if (!blk_rq_is_passthrough(rq) ||
6032 +- cmd->common.opcode != nvme_fabrics_command ||
6033 +- cmd->fabrics.fctype != nvme_fabrics_type_connect) {
6034 +- /*
6035 +- * Connecting state means transport disruption or initial
6036 +- * establishment, which can take a long time and even might
6037 +- * fail permanently, fail fast to give upper layers a chance
6038 +- * to failover.
6039 +- * Deleting state means that the ctrl will never accept commands
6040 +- * again, fail it permanently.
6041 +- */
6042 +- if (ctrl->state == NVME_CTRL_CONNECTING ||
6043 +- ctrl->state == NVME_CTRL_DELETING) {
6044 +- nvme_req(rq)->status = NVME_SC_ABORT_REQ;
6045 +- return BLK_STS_IOERR;
6046 +- }
6047 +- return BLK_STS_RESOURCE; /* try again later */
6048 +- }
6049 +-
6050 +- return BLK_STS_OK;
6051 +-}
6052 ++blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl,
6053 ++ struct request *rq, bool queue_live, bool is_connected);
6054 +
6055 + #endif /* _NVME_FABRICS_H */
6056 +diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
6057 +index 1dc1387b7134..6044f891c3ce 100644
6058 +--- a/drivers/nvme/host/fc.c
6059 ++++ b/drivers/nvme/host/fc.c
6060 +@@ -2191,7 +2191,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
6061 + struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
6062 + struct nvme_command *sqe = &cmdiu->sqe;
6063 + u32 csn;
6064 +- int ret;
6065 ++ int ret, opstate;
6066 +
6067 + /*
6068 + * before attempting to send the io, check to see if we believe
6069 +@@ -2269,6 +2269,9 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
6070 + queue->lldd_handle, &op->fcp_req);
6071 +
6072 + if (ret) {
6073 ++ opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
6074 ++ __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
6075 ++
6076 + if (!(op->flags & FCOP_FLAGS_AEN))
6077 + nvme_fc_unmap_data(ctrl, op->rq, op);
6078 +
6079 +@@ -2284,14 +2287,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
6080 + return BLK_STS_OK;
6081 + }
6082 +
6083 +-static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue,
6084 +- struct request *rq)
6085 +-{
6086 +- if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags)))
6087 +- return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
6088 +- return BLK_STS_OK;
6089 +-}
6090 +-
6091 + static blk_status_t
6092 + nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
6093 + const struct blk_mq_queue_data *bd)
6094 +@@ -2307,7 +2302,9 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
6095 + u32 data_len;
6096 + blk_status_t ret;
6097 +
6098 +- ret = nvme_fc_is_ready(queue, rq);
6099 ++ ret = nvmf_check_if_ready(&queue->ctrl->ctrl, rq,
6100 ++ test_bit(NVME_FC_Q_LIVE, &queue->flags),
6101 ++ ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE);
6102 + if (unlikely(ret))
6103 + return ret;
6104 +
6105 +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
6106 +index 013380641ddf..0133f3d2ce94 100644
6107 +--- a/drivers/nvme/host/nvme.h
6108 ++++ b/drivers/nvme/host/nvme.h
6109 +@@ -109,6 +109,7 @@ struct nvme_request {
6110 +
6111 + enum {
6112 + NVME_REQ_CANCELLED = (1 << 0),
6113 ++ NVME_REQ_USERCMD = (1 << 1),
6114 + };
6115 +
6116 + static inline struct nvme_request *nvme_req(struct request *req)
6117 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
6118 +index f6648610d153..dba797b57d73 100644
6119 +--- a/drivers/nvme/host/pci.c
6120 ++++ b/drivers/nvme/host/pci.c
6121 +@@ -2470,10 +2470,13 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
6122 + } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
6123 + /*
6124 + * Samsung SSD 960 EVO drops off the PCIe bus after system
6125 +- * suspend on a Ryzen board, ASUS PRIME B350M-A.
6126 ++ * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as
6127 +	 * within a few minutes after bootup on a Coffee Lake board -
6128 ++ * ASUS PRIME Z370-A
6129 + */
6130 + if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
6131 +- dmi_match(DMI_BOARD_NAME, "PRIME B350M-A"))
6132 ++ (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
6133 ++ dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
6134 + return NVME_QUIRK_NO_APST;
6135 + }
6136 +
6137 +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
6138 +index 4d84a73ee12d..02dd232951b9 100644
6139 +--- a/drivers/nvme/host/rdma.c
6140 ++++ b/drivers/nvme/host/rdma.c
6141 +@@ -1594,17 +1594,6 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
6142 + return BLK_EH_HANDLED;
6143 + }
6144 +
6145 +-/*
6146 +- * We cannot accept any other command until the Connect command has completed.
6147 +- */
6148 +-static inline blk_status_t
6149 +-nvme_rdma_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
6150 +-{
6151 +- if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags)))
6152 +- return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
6153 +- return BLK_STS_OK;
6154 +-}
6155 +-
6156 + static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
6157 + const struct blk_mq_queue_data *bd)
6158 + {
6159 +@@ -1620,7 +1609,8 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
6160 +
6161 + WARN_ON_ONCE(rq->tag < 0);
6162 +
6163 +- ret = nvme_rdma_is_ready(queue, rq);
6164 ++ ret = nvmf_check_if_ready(&queue->ctrl->ctrl, rq,
6165 ++ test_bit(NVME_RDMA_Q_LIVE, &queue->flags), true);
6166 + if (unlikely(ret))
6167 + return ret;
6168 +
6169 +diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
6170 +index 861d1509b22b..e10987f87603 100644
6171 +--- a/drivers/nvme/target/loop.c
6172 ++++ b/drivers/nvme/target/loop.c
6173 +@@ -149,14 +149,6 @@ nvme_loop_timeout(struct request *rq, bool reserved)
6174 + return BLK_EH_HANDLED;
6175 + }
6176 +
6177 +-static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue,
6178 +- struct request *rq)
6179 +-{
6180 +- if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags)))
6181 +- return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
6182 +- return BLK_STS_OK;
6183 +-}
6184 +-
6185 + static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
6186 + const struct blk_mq_queue_data *bd)
6187 + {
6188 +@@ -166,7 +158,8 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
6189 + struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
6190 + blk_status_t ret;
6191 +
6192 +- ret = nvme_loop_is_ready(queue, req);
6193 ++ ret = nvmf_check_if_ready(&queue->ctrl->ctrl, req,
6194 ++ test_bit(NVME_LOOP_Q_LIVE, &queue->flags), true);
6195 + if (unlikely(ret))
6196 + return ret;
6197 +
6198 +diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
6199 +index 41b740aed3a3..69bd98421eb1 100644
6200 +--- a/drivers/parisc/lba_pci.c
6201 ++++ b/drivers/parisc/lba_pci.c
6202 +@@ -1403,9 +1403,27 @@ lba_hw_init(struct lba_device *d)
6203 + WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG);
6204 + }
6205 +
6206 +- /* Set HF mode as the default (vs. -1 mode). */
6207 ++
6208 ++ /*
6209 ++ * Hard Fail vs. Soft Fail on PCI "Master Abort".
6210 ++ *
6211 ++ * "Master Abort" means the MMIO transaction timed out - usually due to
6212 ++ * the device not responding to an MMIO read. We would like HF to be
6213 ++	 * enabled to find driver problems, though it means the system will
6214 ++	 * crash with an HPMC.
6215 ++	 *
6216 ++	 * In Soft Fail mode "~0L" is returned as a result of a timeout on the
6217 ++	 * PCI bus. This matches how PCI buses on x86 and most other
6218 ++	 * architectures behave. In order to increase compatibility with
6219 ++	 * existing (x86) PCI hardware and existing Linux drivers, we enable
6220 ++	 * Soft Fail mode on PA-RISC now too.
6221 ++ */
6222 + stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);
6223 ++#if defined(ENABLE_HARDFAIL)
6224 + WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
6225 ++#else
6226 ++ WRITE_REG32(stat & ~HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
6227 ++#endif
6228 +
6229 + /*
6230 + ** Writing a zero to STAT_CTL.rf (bit 0) will clear reset signal
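
With Soft Fail selected, a master abort completes the MMIO read with all
bits set instead of raising an HPMC, which lets drivers use the conventional
all-ones probe that PCI code relies on elsewhere. A small illustration of
that convention (a generic PCI idiom, not code from this driver):

#include <stdint.h>
#include <stdio.h>

/* In Soft Fail mode a timed-out MMIO read returns ~0. PCI drivers
 * conventionally treat an all-ones read of a register that can never
 * legitimately be all-ones, such as the vendor/device ID, as "device
 * gone". Illustrative only. */
static uint32_t mmio_read32(const volatile uint32_t *reg)
{
	return *reg;
}

static int device_present(const volatile uint32_t *id_reg)
{
	return mmio_read32(id_reg) != 0xffffffffu;
}

int main(void)
{
	uint32_t fake_id = 0xffffffffu;	/* simulate a master abort */

	printf("present: %d\n", device_present(&fake_id));	/* prints 0 */
	return 0;
}
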
6231 +diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
6232 +index eede34e5ada2..98da1e137071 100644
6233 +--- a/drivers/pci/pci-driver.c
6234 ++++ b/drivers/pci/pci-driver.c
6235 +@@ -1225,11 +1225,14 @@ static int pci_pm_runtime_suspend(struct device *dev)
6236 + int error;
6237 +
6238 + /*
6239 +- * If pci_dev->driver is not set (unbound), the device should
6240 +- * always remain in D0 regardless of the runtime PM status
6241 ++ * If pci_dev->driver is not set (unbound), we leave the device in D0,
6242 ++ * but it may go to D3cold when the bridge above it runtime suspends.
6243 ++ * Save its config space in case that happens.
6244 + */
6245 +- if (!pci_dev->driver)
6246 ++ if (!pci_dev->driver) {
6247 ++ pci_save_state(pci_dev);
6248 + return 0;
6249 ++ }
6250 +
6251 + if (!pm || !pm->runtime_suspend)
6252 + return -ENOSYS;
6253 +@@ -1277,16 +1280,18 @@ static int pci_pm_runtime_resume(struct device *dev)
6254 + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
6255 +
6256 + /*
6257 +- * If pci_dev->driver is not set (unbound), the device should
6258 +- * always remain in D0 regardless of the runtime PM status
6259 ++ * Restoring config space is necessary even if the device is not bound
6260 ++ * to a driver because although we left it in D0, it may have gone to
6261 ++ * D3cold when the bridge above it runtime suspended.
6262 + */
6263 ++ pci_restore_standard_config(pci_dev);
6264 ++
6265 + if (!pci_dev->driver)
6266 + return 0;
6267 +
6268 + if (!pm || !pm->runtime_resume)
6269 + return -ENOSYS;
6270 +
6271 +- pci_restore_standard_config(pci_dev);
6272 + pci_fixup_device(pci_fixup_resume_early, pci_dev);
6273 + pci_enable_wake(pci_dev, PCI_D0, false);
6274 + pci_fixup_device(pci_fixup_resume, pci_dev);
6275 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
6276 +index 81241f981ad7..88598dbdc1b0 100644
6277 +--- a/drivers/pci/quirks.c
6278 ++++ b/drivers/pci/quirks.c
6279 +@@ -3903,6 +3903,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
6280 + /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
6281 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
6282 + quirk_dma_func1_alias);
6283 ++/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */
6284 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
6285 ++ quirk_dma_func1_alias);
6286 + /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */
6287 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
6288 + quirk_dma_func1_alias);
6289 +diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
6290 +index c3b615c94b4b..8c8caec3a72c 100644
6291 +--- a/drivers/pcmcia/cs.c
6292 ++++ b/drivers/pcmcia/cs.c
6293 +@@ -452,17 +452,20 @@ static int socket_insert(struct pcmcia_socket *skt)
6294 +
6295 + static int socket_suspend(struct pcmcia_socket *skt)
6296 + {
6297 +- if (skt->state & SOCKET_SUSPEND)
6298 ++ if ((skt->state & SOCKET_SUSPEND) && !(skt->state & SOCKET_IN_RESUME))
6299 + return -EBUSY;
6300 +
6301 + mutex_lock(&skt->ops_mutex);
6302 +- skt->suspended_state = skt->state;
6303 ++ /* store state on first suspend, but not after spurious wakeups */
6304 ++ if (!(skt->state & SOCKET_IN_RESUME))
6305 ++ skt->suspended_state = skt->state;
6306 +
6307 + skt->socket = dead_socket;
6308 + skt->ops->set_socket(skt, &skt->socket);
6309 + if (skt->ops->suspend)
6310 + skt->ops->suspend(skt);
6311 + skt->state |= SOCKET_SUSPEND;
6312 ++ skt->state &= ~SOCKET_IN_RESUME;
6313 + mutex_unlock(&skt->ops_mutex);
6314 + return 0;
6315 + }
6316 +@@ -475,6 +478,7 @@ static int socket_early_resume(struct pcmcia_socket *skt)
6317 + skt->ops->set_socket(skt, &skt->socket);
6318 + if (skt->state & SOCKET_PRESENT)
6319 + skt->resume_status = socket_setup(skt, resume_delay);
6320 ++ skt->state |= SOCKET_IN_RESUME;
6321 + mutex_unlock(&skt->ops_mutex);
6322 + return 0;
6323 + }
6324 +@@ -484,7 +488,7 @@ static int socket_late_resume(struct pcmcia_socket *skt)
6325 + int ret = 0;
6326 +
6327 + mutex_lock(&skt->ops_mutex);
6328 +- skt->state &= ~SOCKET_SUSPEND;
6329 ++ skt->state &= ~(SOCKET_SUSPEND | SOCKET_IN_RESUME);
6330 + mutex_unlock(&skt->ops_mutex);
6331 +
6332 + if (!(skt->state & SOCKET_PRESENT)) {
6333 +diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
6334 +index 6765beadea95..03ec43802909 100644
6335 +--- a/drivers/pcmcia/cs_internal.h
6336 ++++ b/drivers/pcmcia/cs_internal.h
6337 +@@ -70,6 +70,7 @@ struct pccard_resource_ops {
6338 + /* Flags in socket state */
6339 + #define SOCKET_PRESENT 0x0008
6340 + #define SOCKET_INUSE 0x0010
6341 ++#define SOCKET_IN_RESUME 0x0040
6342 + #define SOCKET_SUSPEND 0x0080
6343 + #define SOCKET_WIN_REQ(i) (0x0100<<(i))
6344 + #define SOCKET_CARDBUS 0x8000
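
The interplay of the flags is easiest to read as a tiny state machine:
suspend snapshots the socket state only when no resume is in flight, early
resume marks one in flight, and late resume clears both markers, so a
spurious wakeup cannot overwrite the saved pre-suspend state. A toy model
using the flag values above (the dead_socket bookkeeping is omitted):

#include <stdio.h>

#define SOCKET_PRESENT   0x0008
#define SOCKET_IN_RESUME 0x0040
#define SOCKET_SUSPEND   0x0080

static unsigned int state, suspended_state;

static void demo_suspend(void)
{
	if (!(state & SOCKET_IN_RESUME))
		suspended_state = state;	/* only on the first suspend */
	state |= SOCKET_SUSPEND;
	state &= ~SOCKET_IN_RESUME;
}

static void demo_early_resume(void) { state |= SOCKET_IN_RESUME; }
static void demo_late_resume(void)  { state &= ~(SOCKET_SUSPEND | SOCKET_IN_RESUME); }

int main(void)
{
	state = SOCKET_PRESENT;
	demo_suspend();		/* saves SOCKET_PRESENT */
	demo_early_resume();	/* spurious wakeup ... */
	demo_suspend();		/* ... suspends again: snapshot kept */
	demo_late_resume();
	printf("saved state: 0x%04x\n", suspended_state);	/* 0x0008 */
	return 0;
}
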
6345 +diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
6346 +index e17f0351ccc2..2526971f9929 100644
6347 +--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
6348 ++++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
6349 +@@ -751,8 +751,6 @@ static int qcom_qmp_phy_poweroff(struct phy *phy)
6350 + struct qmp_phy *qphy = phy_get_drvdata(phy);
6351 + struct qcom_qmp *qmp = qphy->qmp;
6352 +
6353 +- clk_disable_unprepare(qphy->pipe_clk);
6354 +-
6355 + regulator_bulk_disable(qmp->cfg->num_vregs, qmp->vregs);
6356 +
6357 + return 0;
6358 +@@ -936,6 +934,8 @@ static int qcom_qmp_phy_exit(struct phy *phy)
6359 + const struct qmp_phy_cfg *cfg = qmp->cfg;
6360 + int i = cfg->num_clks;
6361 +
6362 ++ clk_disable_unprepare(qphy->pipe_clk);
6363 ++
6364 + /* PHY reset */
6365 + qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
6366 +
6367 +diff --git a/drivers/phy/rockchip/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c
6368 +index f1b24f18e9b2..b0d10934413f 100644
6369 +--- a/drivers/phy/rockchip/phy-rockchip-emmc.c
6370 ++++ b/drivers/phy/rockchip/phy-rockchip-emmc.c
6371 +@@ -76,6 +76,10 @@
6372 + #define PHYCTRL_OTAPDLYSEL_MASK 0xf
6373 + #define PHYCTRL_OTAPDLYSEL_SHIFT 0x7
6374 +
6375 ++#define PHYCTRL_IS_CALDONE(x) \
6376 ++ ((((x) >> PHYCTRL_CALDONE_SHIFT) & \
6377 ++ PHYCTRL_CALDONE_MASK) == PHYCTRL_CALDONE_DONE)
6378 ++
6379 + struct rockchip_emmc_phy {
6380 + unsigned int reg_offset;
6381 + struct regmap *reg_base;
6382 +@@ -90,6 +94,7 @@ static int rockchip_emmc_phy_power(struct phy *phy, bool on_off)
6383 + unsigned int freqsel = PHYCTRL_FREQSEL_200M;
6384 + unsigned long rate;
6385 + unsigned long timeout;
6386 ++ int ret;
6387 +
6388 + /*
6389 + * Keep phyctrl_pdb and phyctrl_endll low to allow
6390 +@@ -160,17 +165,19 @@ static int rockchip_emmc_phy_power(struct phy *phy, bool on_off)
6391 + PHYCTRL_PDB_SHIFT));
6392 +
6393 + /*
6394 +- * According to the user manual, it asks driver to
6395 +- * wait 5us for calpad busy trimming
6396 ++	 * According to the user manual, the driver should wait 5us for
6397 ++	 * calpad busy trimming. However, this value is documented to be
6398 ++	 * PVT (process, voltage and temperature) dependent, and failure
6399 ++	 * cases have been seen which indicate we should be more tolerant
6400 ++	 * of calpad busy trimming.
6401 + */
6402 +- udelay(5);
6403 +- regmap_read(rk_phy->reg_base,
6404 +- rk_phy->reg_offset + GRF_EMMCPHY_STATUS,
6405 +- &caldone);
6406 +- caldone = (caldone >> PHYCTRL_CALDONE_SHIFT) & PHYCTRL_CALDONE_MASK;
6407 +- if (caldone != PHYCTRL_CALDONE_DONE) {
6408 +- pr_err("rockchip_emmc_phy_power: caldone timeout.\n");
6409 +- return -ETIMEDOUT;
6410 ++ ret = regmap_read_poll_timeout(rk_phy->reg_base,
6411 ++ rk_phy->reg_offset + GRF_EMMCPHY_STATUS,
6412 ++ caldone, PHYCTRL_IS_CALDONE(caldone),
6413 ++ 0, 50);
6414 ++ if (ret) {
6415 ++ pr_err("%s: caldone failed, ret=%d\n", __func__, ret);
6416 ++ return ret;
6417 + }
6418 +
6419 + /* Set the frequency of the DLL operation */
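
regmap_read_poll_timeout() expands to a read-check-sleep loop that exits on
the condition, on a read error, or at the deadline, whichever comes first.
A user-space analogue of that loop, with a hypothetical read_status()
standing in for the register read and poll counts standing in for
microseconds:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int read_status(uint32_t *val)
{
	static int ticks;

	*val = (++ticks >= 3);	/* "caldone" goes high on the third poll */
	return 0;
}

static int poll_timeout(bool (*done)(uint32_t), int max_polls)
{
	uint32_t val;

	while (max_polls--) {
		int ret = read_status(&val);

		if (ret)
			return ret;	/* propagate the read error */
		if (done(val))
			return 0;
	}
	return -1;	/* would be -ETIMEDOUT in the kernel */
}

static bool is_caldone(uint32_t v) { return v != 0; }

int main(void)
{
	printf("poll result: %d\n", poll_timeout(is_caldone, 50));
	return 0;
}
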
6420 +diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
6421 +index 1ff6c3573493..b601039d6c69 100644
6422 +--- a/drivers/pinctrl/devicetree.c
6423 ++++ b/drivers/pinctrl/devicetree.c
6424 +@@ -122,8 +122,10 @@ static int dt_to_map_one_config(struct pinctrl *p,
6425 + /* OK let's just assume this will appear later then */
6426 + return -EPROBE_DEFER;
6427 + }
6428 +- if (!pctldev)
6429 +- pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
6430 ++ /* If we're creating a hog we can use the passed pctldev */
6431 ++ if (pctldev && (np_pctldev == p->dev->of_node))
6432 ++ break;
6433 ++ pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
6434 + if (pctldev)
6435 + break;
6436 + /* Do not defer probing of hogs (circular loop) */
6437 +diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
6438 +index 644c5beb05cb..e86d23279ac1 100644
6439 +--- a/drivers/pinctrl/pinctrl-mcp23s08.c
6440 ++++ b/drivers/pinctrl/pinctrl-mcp23s08.c
6441 +@@ -771,6 +771,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
6442 + {
6443 + int status, ret;
6444 + bool mirror = false;
6445 ++ struct regmap_config *one_regmap_config = NULL;
6446 +
6447 + mutex_init(&mcp->lock);
6448 +
6449 +@@ -791,22 +792,36 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
6450 + switch (type) {
6451 + #ifdef CONFIG_SPI_MASTER
6452 + case MCP_TYPE_S08:
6453 +- mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
6454 +- &mcp23x08_regmap);
6455 +- mcp->reg_shift = 0;
6456 +- mcp->chip.ngpio = 8;
6457 +- mcp->chip.label = "mcp23s08";
6458 +- break;
6459 +-
6460 + case MCP_TYPE_S17:
6461 ++ switch (type) {
6462 ++ case MCP_TYPE_S08:
6463 ++ one_regmap_config =
6464 ++ devm_kmemdup(dev, &mcp23x08_regmap,
6465 ++ sizeof(struct regmap_config), GFP_KERNEL);
6466 ++ mcp->reg_shift = 0;
6467 ++ mcp->chip.ngpio = 8;
6468 ++ mcp->chip.label = "mcp23s08";
6469 ++ break;
6470 ++ case MCP_TYPE_S17:
6471 ++ one_regmap_config =
6472 ++ devm_kmemdup(dev, &mcp23x17_regmap,
6473 ++ sizeof(struct regmap_config), GFP_KERNEL);
6474 ++ mcp->reg_shift = 1;
6475 ++ mcp->chip.ngpio = 16;
6476 ++ mcp->chip.label = "mcp23s17";
6477 ++ break;
6478 ++ }
6479 ++ if (!one_regmap_config)
6480 ++ return -ENOMEM;
6481 ++
6482 ++ one_regmap_config->name = devm_kasprintf(dev, GFP_KERNEL, "%d", (addr & ~0x40) >> 1);
6483 + mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
6484 +- &mcp23x17_regmap);
6485 +- mcp->reg_shift = 1;
6486 +- mcp->chip.ngpio = 16;
6487 +- mcp->chip.label = "mcp23s17";
6488 ++ one_regmap_config);
6489 + break;
6490 +
6491 + case MCP_TYPE_S18:
6492 ++ if (!one_regmap_config)
6493 ++ return -ENOMEM;
6494 + mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
6495 + &mcp23x17_regmap);
6496 + mcp->reg_shift = 1;
6497 +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
6498 +index 495432f3341b..95e5c5ea40af 100644
6499 +--- a/drivers/pinctrl/qcom/pinctrl-msm.c
6500 ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
6501 +@@ -818,7 +818,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
6502 + return -EINVAL;
6503 +
6504 + chip = &pctrl->chip;
6505 +- chip->base = 0;
6506 ++ chip->base = -1;
6507 + chip->ngpio = ngpio;
6508 + chip->label = dev_name(pctrl->dev);
6509 + chip->parent = pctrl->dev;
6510 +diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
6511 +index e5807d1ce0dc..74ee48303156 100644
6512 +--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
6513 ++++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
6514 +@@ -1,7 +1,7 @@
6515 + /*
6516 + * R8A7796 processor support - PFC hardware block.
6517 + *
6518 +- * Copyright (C) 2016 Renesas Electronics Corp.
6519 ++ * Copyright (C) 2016-2017 Renesas Electronics Corp.
6520 + *
6521 + * This file is based on the drivers/pinctrl/sh-pfc/pfc-r8a7795.c
6522 + *
6523 +@@ -477,7 +477,7 @@ FM(IP16_31_28) IP16_31_28 FM(IP17_31_28) IP17_31_28
6524 + #define MOD_SEL1_26 FM(SEL_TIMER_TMU_0) FM(SEL_TIMER_TMU_1)
6525 + #define MOD_SEL1_25_24 FM(SEL_SSP1_1_0) FM(SEL_SSP1_1_1) FM(SEL_SSP1_1_2) FM(SEL_SSP1_1_3)
6526 + #define MOD_SEL1_23_22_21 FM(SEL_SSP1_0_0) FM(SEL_SSP1_0_1) FM(SEL_SSP1_0_2) FM(SEL_SSP1_0_3) FM(SEL_SSP1_0_4) F_(0, 0) F_(0, 0) F_(0, 0)
6527 +-#define MOD_SEL1_20 FM(SEL_SSI_0) FM(SEL_SSI_1)
6528 ++#define MOD_SEL1_20 FM(SEL_SSI1_0) FM(SEL_SSI1_1)
6529 + #define MOD_SEL1_19 FM(SEL_SPEED_PULSE_0) FM(SEL_SPEED_PULSE_1)
6530 + #define MOD_SEL1_18_17 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1) FM(SEL_SIMCARD_2) FM(SEL_SIMCARD_3)
6531 + #define MOD_SEL1_16 FM(SEL_SDHI2_0) FM(SEL_SDHI2_1)
6532 +@@ -1218,7 +1218,7 @@ static const u16 pinmux_data[] = {
6533 + PINMUX_IPSR_GPSR(IP13_11_8, HSCK0),
6534 + PINMUX_IPSR_MSEL(IP13_11_8, MSIOF1_SCK_D, SEL_MSIOF1_3),
6535 + PINMUX_IPSR_MSEL(IP13_11_8, AUDIO_CLKB_A, SEL_ADG_B_0),
6536 +- PINMUX_IPSR_MSEL(IP13_11_8, SSI_SDATA1_B, SEL_SSI_1),
6537 ++ PINMUX_IPSR_MSEL(IP13_11_8, SSI_SDATA1_B, SEL_SSI1_1),
6538 + PINMUX_IPSR_MSEL(IP13_11_8, TS_SCK0_D, SEL_TSIF0_3),
6539 + PINMUX_IPSR_MSEL(IP13_11_8, STP_ISCLK_0_D, SEL_SSP1_0_3),
6540 + PINMUX_IPSR_MSEL(IP13_11_8, RIF0_CLK_C, SEL_DRIF0_2),
6541 +@@ -1226,14 +1226,14 @@ static const u16 pinmux_data[] = {
6542 +
6543 + PINMUX_IPSR_GPSR(IP13_15_12, HRX0),
6544 + PINMUX_IPSR_MSEL(IP13_15_12, MSIOF1_RXD_D, SEL_MSIOF1_3),
6545 +- PINMUX_IPSR_MSEL(IP13_15_12, SSI_SDATA2_B, SEL_SSI_1),
6546 ++ PINMUX_IPSR_MSEL(IP13_15_12, SSI_SDATA2_B, SEL_SSI2_1),
6547 + PINMUX_IPSR_MSEL(IP13_15_12, TS_SDEN0_D, SEL_TSIF0_3),
6548 + PINMUX_IPSR_MSEL(IP13_15_12, STP_ISEN_0_D, SEL_SSP1_0_3),
6549 + PINMUX_IPSR_MSEL(IP13_15_12, RIF0_D0_C, SEL_DRIF0_2),
6550 +
6551 + PINMUX_IPSR_GPSR(IP13_19_16, HTX0),
6552 + PINMUX_IPSR_MSEL(IP13_19_16, MSIOF1_TXD_D, SEL_MSIOF1_3),
6553 +- PINMUX_IPSR_MSEL(IP13_19_16, SSI_SDATA9_B, SEL_SSI_1),
6554 ++ PINMUX_IPSR_MSEL(IP13_19_16, SSI_SDATA9_B, SEL_SSI9_1),
6555 + PINMUX_IPSR_MSEL(IP13_19_16, TS_SDAT0_D, SEL_TSIF0_3),
6556 + PINMUX_IPSR_MSEL(IP13_19_16, STP_ISD_0_D, SEL_SSP1_0_3),
6557 + PINMUX_IPSR_MSEL(IP13_19_16, RIF0_D1_C, SEL_DRIF0_2),
6558 +@@ -1241,7 +1241,7 @@ static const u16 pinmux_data[] = {
6559 + PINMUX_IPSR_GPSR(IP13_23_20, HCTS0_N),
6560 + PINMUX_IPSR_MSEL(IP13_23_20, RX2_B, SEL_SCIF2_1),
6561 + PINMUX_IPSR_MSEL(IP13_23_20, MSIOF1_SYNC_D, SEL_MSIOF1_3),
6562 +- PINMUX_IPSR_MSEL(IP13_23_20, SSI_SCK9_A, SEL_SSI_0),
6563 ++ PINMUX_IPSR_MSEL(IP13_23_20, SSI_SCK9_A, SEL_SSI9_0),
6564 + PINMUX_IPSR_MSEL(IP13_23_20, TS_SPSYNC0_D, SEL_TSIF0_3),
6565 + PINMUX_IPSR_MSEL(IP13_23_20, STP_ISSYNC_0_D, SEL_SSP1_0_3),
6566 + PINMUX_IPSR_MSEL(IP13_23_20, RIF0_SYNC_C, SEL_DRIF0_2),
6567 +@@ -1250,7 +1250,7 @@ static const u16 pinmux_data[] = {
6568 + PINMUX_IPSR_GPSR(IP13_27_24, HRTS0_N),
6569 + PINMUX_IPSR_MSEL(IP13_27_24, TX2_B, SEL_SCIF2_1),
6570 + PINMUX_IPSR_MSEL(IP13_27_24, MSIOF1_SS1_D, SEL_MSIOF1_3),
6571 +- PINMUX_IPSR_MSEL(IP13_27_24, SSI_WS9_A, SEL_SSI_0),
6572 ++ PINMUX_IPSR_MSEL(IP13_27_24, SSI_WS9_A, SEL_SSI9_0),
6573 + PINMUX_IPSR_MSEL(IP13_27_24, STP_IVCXO27_0_D, SEL_SSP1_0_3),
6574 + PINMUX_IPSR_MSEL(IP13_27_24, BPFCLK_A, SEL_FM_0),
6575 + PINMUX_IPSR_GPSR(IP13_27_24, AUDIO_CLKOUT2_A),
6576 +@@ -1265,7 +1265,7 @@ static const u16 pinmux_data[] = {
6577 + PINMUX_IPSR_MSEL(IP14_3_0, RX5_A, SEL_SCIF5_0),
6578 + PINMUX_IPSR_MSEL(IP14_3_0, NFWP_N_A, SEL_NDF_0),
6579 + PINMUX_IPSR_MSEL(IP14_3_0, AUDIO_CLKA_C, SEL_ADG_A_2),
6580 +- PINMUX_IPSR_MSEL(IP14_3_0, SSI_SCK2_A, SEL_SSI_0),
6581 ++ PINMUX_IPSR_MSEL(IP14_3_0, SSI_SCK2_A, SEL_SSI2_0),
6582 + PINMUX_IPSR_MSEL(IP14_3_0, STP_IVCXO27_0_C, SEL_SSP1_0_2),
6583 + PINMUX_IPSR_GPSR(IP14_3_0, AUDIO_CLKOUT3_A),
6584 + PINMUX_IPSR_MSEL(IP14_3_0, TCLK1_B, SEL_TIMER_TMU_1),
6585 +@@ -1274,7 +1274,7 @@ static const u16 pinmux_data[] = {
6586 + PINMUX_IPSR_MSEL(IP14_7_4, TX5_A, SEL_SCIF5_0),
6587 + PINMUX_IPSR_MSEL(IP14_7_4, MSIOF1_SS2_D, SEL_MSIOF1_3),
6588 + PINMUX_IPSR_MSEL(IP14_7_4, AUDIO_CLKC_A, SEL_ADG_C_0),
6589 +- PINMUX_IPSR_MSEL(IP14_7_4, SSI_WS2_A, SEL_SSI_0),
6590 ++ PINMUX_IPSR_MSEL(IP14_7_4, SSI_WS2_A, SEL_SSI2_0),
6591 + PINMUX_IPSR_MSEL(IP14_7_4, STP_OPWM_0_D, SEL_SSP1_0_3),
6592 + PINMUX_IPSR_GPSR(IP14_7_4, AUDIO_CLKOUT_D),
6593 + PINMUX_IPSR_MSEL(IP14_7_4, SPEEDIN_B, SEL_SPEED_PULSE_1),
6594 +@@ -1302,10 +1302,10 @@ static const u16 pinmux_data[] = {
6595 + PINMUX_IPSR_MSEL(IP14_31_28, MSIOF1_SS2_F, SEL_MSIOF1_5),
6596 +
6597 + /* IPSR15 */
6598 +- PINMUX_IPSR_MSEL(IP15_3_0, SSI_SDATA1_A, SEL_SSI_0),
6599 ++ PINMUX_IPSR_MSEL(IP15_3_0, SSI_SDATA1_A, SEL_SSI1_0),
6600 +
6601 +- PINMUX_IPSR_MSEL(IP15_7_4, SSI_SDATA2_A, SEL_SSI_0),
6602 +- PINMUX_IPSR_MSEL(IP15_7_4, SSI_SCK1_B, SEL_SSI_1),
6603 ++ PINMUX_IPSR_MSEL(IP15_7_4, SSI_SDATA2_A, SEL_SSI2_0),
6604 ++ PINMUX_IPSR_MSEL(IP15_7_4, SSI_SCK1_B, SEL_SSI1_1),
6605 +
6606 + PINMUX_IPSR_GPSR(IP15_11_8, SSI_SCK349),
6607 + PINMUX_IPSR_MSEL(IP15_11_8, MSIOF1_SS1_A, SEL_MSIOF1_0),
6608 +@@ -1391,11 +1391,11 @@ static const u16 pinmux_data[] = {
6609 + PINMUX_IPSR_MSEL(IP16_27_24, RIF1_D1_A, SEL_DRIF1_0),
6610 + PINMUX_IPSR_MSEL(IP16_27_24, RIF3_D1_A, SEL_DRIF3_0),
6611 +
6612 +- PINMUX_IPSR_MSEL(IP16_31_28, SSI_SDATA9_A, SEL_SSI_0),
6613 ++ PINMUX_IPSR_MSEL(IP16_31_28, SSI_SDATA9_A, SEL_SSI9_0),
6614 + PINMUX_IPSR_MSEL(IP16_31_28, HSCK2_B, SEL_HSCIF2_1),
6615 + PINMUX_IPSR_MSEL(IP16_31_28, MSIOF1_SS1_C, SEL_MSIOF1_2),
6616 + PINMUX_IPSR_MSEL(IP16_31_28, HSCK1_A, SEL_HSCIF1_0),
6617 +- PINMUX_IPSR_MSEL(IP16_31_28, SSI_WS1_B, SEL_SSI_1),
6618 ++ PINMUX_IPSR_MSEL(IP16_31_28, SSI_WS1_B, SEL_SSI1_1),
6619 + PINMUX_IPSR_GPSR(IP16_31_28, SCK1),
6620 + PINMUX_IPSR_MSEL(IP16_31_28, STP_IVCXO27_1_A, SEL_SSP1_1_0),
6621 + PINMUX_IPSR_MSEL(IP16_31_28, SCK5_A, SEL_SCIF5_0),
6622 +@@ -1427,7 +1427,7 @@ static const u16 pinmux_data[] = {
6623 +
6624 + PINMUX_IPSR_GPSR(IP17_19_16, USB1_PWEN),
6625 + PINMUX_IPSR_MSEL(IP17_19_16, SIM0_CLK_C, SEL_SIMCARD_2),
6626 +- PINMUX_IPSR_MSEL(IP17_19_16, SSI_SCK1_A, SEL_SSI_0),
6627 ++ PINMUX_IPSR_MSEL(IP17_19_16, SSI_SCK1_A, SEL_SSI1_0),
6628 + PINMUX_IPSR_MSEL(IP17_19_16, TS_SCK0_E, SEL_TSIF0_4),
6629 + PINMUX_IPSR_MSEL(IP17_19_16, STP_ISCLK_0_E, SEL_SSP1_0_4),
6630 + PINMUX_IPSR_MSEL(IP17_19_16, FMCLK_B, SEL_FM_1),
6631 +@@ -1437,7 +1437,7 @@ static const u16 pinmux_data[] = {
6632 +
6633 + PINMUX_IPSR_GPSR(IP17_23_20, USB1_OVC),
6634 + PINMUX_IPSR_MSEL(IP17_23_20, MSIOF1_SS2_C, SEL_MSIOF1_2),
6635 +- PINMUX_IPSR_MSEL(IP17_23_20, SSI_WS1_A, SEL_SSI_0),
6636 ++ PINMUX_IPSR_MSEL(IP17_23_20, SSI_WS1_A, SEL_SSI1_0),
6637 + PINMUX_IPSR_MSEL(IP17_23_20, TS_SDAT0_E, SEL_TSIF0_4),
6638 + PINMUX_IPSR_MSEL(IP17_23_20, STP_ISD_0_E, SEL_SSP1_0_4),
6639 + PINMUX_IPSR_MSEL(IP17_23_20, FMIN_B, SEL_FM_1),
6640 +@@ -1447,7 +1447,7 @@ static const u16 pinmux_data[] = {
6641 +
6642 + PINMUX_IPSR_GPSR(IP17_27_24, USB30_PWEN),
6643 + PINMUX_IPSR_GPSR(IP17_27_24, AUDIO_CLKOUT_B),
6644 +- PINMUX_IPSR_MSEL(IP17_27_24, SSI_SCK2_B, SEL_SSI_1),
6645 ++ PINMUX_IPSR_MSEL(IP17_27_24, SSI_SCK2_B, SEL_SSI2_1),
6646 + PINMUX_IPSR_MSEL(IP17_27_24, TS_SDEN1_D, SEL_TSIF1_3),
6647 + PINMUX_IPSR_MSEL(IP17_27_24, STP_ISEN_1_D, SEL_SSP1_1_3),
6648 + PINMUX_IPSR_MSEL(IP17_27_24, STP_OPWM_0_E, SEL_SSP1_0_4),
6649 +@@ -1459,7 +1459,7 @@ static const u16 pinmux_data[] = {
6650 +
6651 + PINMUX_IPSR_GPSR(IP17_31_28, USB30_OVC),
6652 + PINMUX_IPSR_GPSR(IP17_31_28, AUDIO_CLKOUT1_B),
6653 +- PINMUX_IPSR_MSEL(IP17_31_28, SSI_WS2_B, SEL_SSI_1),
6654 ++ PINMUX_IPSR_MSEL(IP17_31_28, SSI_WS2_B, SEL_SSI2_1),
6655 + PINMUX_IPSR_MSEL(IP17_31_28, TS_SPSYNC1_D, SEL_TSIF1_3),
6656 + PINMUX_IPSR_MSEL(IP17_31_28, STP_ISSYNC_1_D, SEL_SSP1_1_3),
6657 + PINMUX_IPSR_MSEL(IP17_31_28, STP_IVCXO27_0_E, SEL_SSP1_0_4),
6658 +@@ -1470,7 +1470,7 @@ static const u16 pinmux_data[] = {
6659 + /* IPSR18 */
6660 + PINMUX_IPSR_GPSR(IP18_3_0, GP6_30),
6661 + PINMUX_IPSR_GPSR(IP18_3_0, AUDIO_CLKOUT2_B),
6662 +- PINMUX_IPSR_MSEL(IP18_3_0, SSI_SCK9_B, SEL_SSI_1),
6663 ++ PINMUX_IPSR_MSEL(IP18_3_0, SSI_SCK9_B, SEL_SSI9_1),
6664 + PINMUX_IPSR_MSEL(IP18_3_0, TS_SDEN0_E, SEL_TSIF0_4),
6665 + PINMUX_IPSR_MSEL(IP18_3_0, STP_ISEN_0_E, SEL_SSP1_0_4),
6666 + PINMUX_IPSR_MSEL(IP18_3_0, RIF2_D0_B, SEL_DRIF2_1),
6667 +@@ -1480,7 +1480,7 @@ static const u16 pinmux_data[] = {
6668 +
6669 + PINMUX_IPSR_GPSR(IP18_7_4, GP6_31),
6670 + PINMUX_IPSR_GPSR(IP18_7_4, AUDIO_CLKOUT3_B),
6671 +- PINMUX_IPSR_MSEL(IP18_7_4, SSI_WS9_B, SEL_SSI_1),
6672 ++ PINMUX_IPSR_MSEL(IP18_7_4, SSI_WS9_B, SEL_SSI9_1),
6673 + PINMUX_IPSR_MSEL(IP18_7_4, TS_SPSYNC0_E, SEL_TSIF0_4),
6674 + PINMUX_IPSR_MSEL(IP18_7_4, STP_ISSYNC_0_E, SEL_SSP1_0_4),
6675 + PINMUX_IPSR_MSEL(IP18_7_4, RIF2_D1_B, SEL_DRIF2_1),
6676 +diff --git a/drivers/platform/x86/dell-smbios-base.c b/drivers/platform/x86/dell-smbios-base.c
6677 +index 2485c80a9fdd..33fb2a20458a 100644
6678 +--- a/drivers/platform/x86/dell-smbios-base.c
6679 ++++ b/drivers/platform/x86/dell-smbios-base.c
6680 +@@ -514,7 +514,7 @@ static int build_tokens_sysfs(struct platform_device *dev)
6681 + continue;
6682 +
6683 + loop_fail_create_value:
6684 +- kfree(value_name);
6685 ++ kfree(location_name);
6686 + goto out_unwind_strings;
6687 + }
6688 + smbios_attribute_group.attrs = token_attrs;
6689 +@@ -525,7 +525,7 @@ static int build_tokens_sysfs(struct platform_device *dev)
6690 + return 0;
6691 +
6692 + out_unwind_strings:
6693 +- for (i = i-1; i > 0; i--) {
6694 ++ while (i--) {
6695 + kfree(token_location_attrs[i].attr.name);
6696 + kfree(token_value_attrs[i].attr.name);
6697 + }
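
The unwind change matters because "for (i = i-1; i > 0; i--)" never visits
index 0 and so leaks the first pair of attribute names, while "while (i--)"
frees exactly the entries 0..i-1 that were created before the failure. A
minimal demonstration of the idiom:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	enum { N = 5 };
	char *slot[N] = { 0 };
	int i, fail = 3;

	for (i = 0; i < N; i++) {
		if (i == fail)
			goto unwind;	/* simulate an allocation failing */
		slot[i] = malloc(16);
	}
	return 0;

unwind:
	while (i--) {			/* frees 2, 1, 0 -- nothing leaks */
		printf("freeing slot %d\n", i);
		free(slot[i]);
	}
	return 1;
}
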
6698 +diff --git a/drivers/power/supply/ltc2941-battery-gauge.c b/drivers/power/supply/ltc2941-battery-gauge.c
6699 +index 4cfa3f0cd689..cc7c516bb417 100644
6700 +--- a/drivers/power/supply/ltc2941-battery-gauge.c
6701 ++++ b/drivers/power/supply/ltc2941-battery-gauge.c
6702 +@@ -317,15 +317,15 @@ static int ltc294x_get_temperature(const struct ltc294x_info *info, int *val)
6703 +
6704 + if (info->id == LTC2942_ID) {
6705 + reg = LTC2942_REG_TEMPERATURE_MSB;
6706 +- value = 60000; /* Full-scale is 600 Kelvin */
6707 ++ value = 6000; /* Full-scale is 600 Kelvin */
6708 + } else {
6709 + reg = LTC2943_REG_TEMPERATURE_MSB;
6710 +- value = 51000; /* Full-scale is 510 Kelvin */
6711 ++ value = 5100; /* Full-scale is 510 Kelvin */
6712 + }
6713 + ret = ltc294x_read_regs(info->client, reg, &datar[0], 2);
6714 + value *= (datar[0] << 8) | datar[1];
6715 +- /* Convert to centidegrees */
6716 +- *val = value / 0xFFFF - 27215;
6717 ++ /* Convert to tenths of degree Celsius */
6718 ++ *val = value / 0xFFFF - 2722;
6719 + return ret;
6720 + }
6721 +
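
The fixed conversion works in tenths of kelvin end to end: the full scale
becomes 6000 (600 K), and 2722 is the driver's kelvin-to-Celsius offset in
the same units. A worked example with an illustrative mid-scale reading:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t datar[2] = { 0x80, 0x00 };	/* sample raw MSB/LSB */
	uint32_t value = 6000;			/* 600 K full scale, tenths */
	int val;

	value *= (datar[0] << 8) | datar[1];
	val = value / 0xFFFF - 2722;		/* tenths of degree Celsius */
	printf("%d.%d degC\n", val / 10, val % 10);	/* prints 27.8 degC */
	return 0;
}
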
6722 +diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
6723 +index 35dde81b1c9b..1a568df383db 100644
6724 +--- a/drivers/power/supply/max17042_battery.c
6725 ++++ b/drivers/power/supply/max17042_battery.c
6726 +@@ -1053,6 +1053,7 @@ static int max17042_probe(struct i2c_client *client,
6727 +
6728 + i2c_set_clientdata(client, chip);
6729 + psy_cfg.drv_data = chip;
6730 ++ psy_cfg.of_node = dev->of_node;
6731 +
6732 + /* When current is not measured,
6733 + * CURRENT_NOW and CURRENT_AVG properties should be invisible. */
6734 +diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
6735 +index 0fce06acfaec..a2eb50719c7b 100644
6736 +--- a/drivers/regulator/gpio-regulator.c
6737 ++++ b/drivers/regulator/gpio-regulator.c
6738 +@@ -271,8 +271,7 @@ static int gpio_regulator_probe(struct platform_device *pdev)
6739 + drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL);
6740 + if (drvdata->desc.name == NULL) {
6741 + dev_err(&pdev->dev, "Failed to allocate supply name\n");
6742 +- ret = -ENOMEM;
6743 +- goto err;
6744 ++ return -ENOMEM;
6745 + }
6746 +
6747 + if (config->nr_gpios != 0) {
6748 +@@ -292,7 +291,7 @@ static int gpio_regulator_probe(struct platform_device *pdev)
6749 + dev_err(&pdev->dev,
6750 + "Could not obtain regulator setting GPIOs: %d\n",
6751 + ret);
6752 +- goto err_memstate;
6753 ++ goto err_memgpio;
6754 + }
6755 + }
6756 +
6757 +@@ -303,7 +302,7 @@ static int gpio_regulator_probe(struct platform_device *pdev)
6758 + if (drvdata->states == NULL) {
6759 + dev_err(&pdev->dev, "Failed to allocate state data\n");
6760 + ret = -ENOMEM;
6761 +- goto err_memgpio;
6762 ++ goto err_stategpio;
6763 + }
6764 + drvdata->nr_states = config->nr_states;
6765 +
6766 +@@ -324,7 +323,7 @@ static int gpio_regulator_probe(struct platform_device *pdev)
6767 + default:
6768 + dev_err(&pdev->dev, "No regulator type set\n");
6769 + ret = -EINVAL;
6770 +- goto err_memgpio;
6771 ++ goto err_memstate;
6772 + }
6773 +
6774 + /* build initial state from gpio init data. */
6775 +@@ -361,22 +360,21 @@ static int gpio_regulator_probe(struct platform_device *pdev)
6776 + if (IS_ERR(drvdata->dev)) {
6777 + ret = PTR_ERR(drvdata->dev);
6778 + dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
6779 +- goto err_stategpio;
6780 ++ goto err_memstate;
6781 + }
6782 +
6783 + platform_set_drvdata(pdev, drvdata);
6784 +
6785 + return 0;
6786 +
6787 +-err_stategpio:
6788 +- gpio_free_array(drvdata->gpios, drvdata->nr_gpios);
6789 + err_memstate:
6790 + kfree(drvdata->states);
6791 ++err_stategpio:
6792 ++ gpio_free_array(drvdata->gpios, drvdata->nr_gpios);
6793 + err_memgpio:
6794 + kfree(drvdata->gpios);
6795 + err_name:
6796 + kfree(drvdata->desc.name);
6797 +-err:
6798 + return ret;
6799 + }
6800 +
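
The relabeling restores the rule that error labels undo acquisitions in
exactly the reverse order they were made, so a jump from any failure point
frees precisely what had already succeeded. A skeleton of the idiom with
illustrative placeholder resources:

#include <stdlib.h>

static int probe_demo(void)
{
	char *name, *gpios, *states;
	int ret;

	name = malloc(8);
	if (!name)
		return -1;		/* nothing acquired yet */

	gpios = malloc(8);
	if (!gpios) {
		ret = -1;
		goto err_name;
	}

	states = malloc(8);
	if (!states) {
		ret = -1;
		goto err_gpios;
	}

	/* success: resources stay live for the device's lifetime */
	return 0;

err_gpios:
	free(gpios);
err_name:
	free(name);
	return ret;
}

int main(void) { return probe_demo(); }
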
6801 +diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
6802 +index 092ed6efb3ec..f47264fa1940 100644
6803 +--- a/drivers/regulator/of_regulator.c
6804 ++++ b/drivers/regulator/of_regulator.c
6805 +@@ -321,6 +321,7 @@ int of_regulator_match(struct device *dev, struct device_node *node,
6806 + dev_err(dev,
6807 + "failed to parse DT for regulator %s\n",
6808 + child->name);
6809 ++ of_node_put(child);
6810 + return -EINVAL;
6811 + }
6812 + match->of_node = of_node_get(child);
6813 +diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
6814 +index 633268e9d550..05bcbce2013a 100644
6815 +--- a/drivers/remoteproc/imx_rproc.c
6816 ++++ b/drivers/remoteproc/imx_rproc.c
6817 +@@ -339,8 +339,10 @@ static int imx_rproc_probe(struct platform_device *pdev)
6818 + }
6819 +
6820 + dcfg = of_device_get_match_data(dev);
6821 +- if (!dcfg)
6822 +- return -EINVAL;
6823 ++ if (!dcfg) {
6824 ++ ret = -EINVAL;
6825 ++ goto err_put_rproc;
6826 ++ }
6827 +
6828 + priv = rproc->priv;
6829 + priv->rproc = rproc;
6830 +diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
6831 +index e96b85579f21..3c800642134e 100644
6832 +--- a/drivers/s390/cio/vfio_ccw_fsm.c
6833 ++++ b/drivers/s390/cio/vfio_ccw_fsm.c
6834 +@@ -129,6 +129,11 @@ static void fsm_io_request(struct vfio_ccw_private *private,
6835 + if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
6836 + orb = (union orb *)io_region->orb_area;
6837 +
6838 ++ /* Don't try to build a cp if transport mode is specified. */
6839 ++ if (orb->tm.b) {
6840 ++ io_region->ret_code = -EOPNOTSUPP;
6841 ++ goto err_out;
6842 ++ }
6843 + io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
6844 + orb);
6845 + if (io_region->ret_code)
6846 +diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
6847 +index 9be34d37c356..3f3cb72e0c0c 100644
6848 +--- a/drivers/scsi/sr.c
6849 ++++ b/drivers/scsi/sr.c
6850 +@@ -525,6 +525,8 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode)
6851 + struct scsi_cd *cd;
6852 + int ret = -ENXIO;
6853 +
6854 ++ check_disk_change(bdev);
6855 ++
6856 + mutex_lock(&sr_mutex);
6857 + cd = scsi_cd_get(bdev->bd_disk);
6858 + if (cd) {
6859 +@@ -585,18 +587,28 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
6860 + static unsigned int sr_block_check_events(struct gendisk *disk,
6861 + unsigned int clearing)
6862 + {
6863 +- struct scsi_cd *cd = scsi_cd(disk);
6864 ++ unsigned int ret = 0;
6865 ++ struct scsi_cd *cd;
6866 +
6867 +- if (atomic_read(&cd->device->disk_events_disable_depth))
6868 ++ cd = scsi_cd_get(disk);
6869 ++ if (!cd)
6870 + return 0;
6871 +
6872 +- return cdrom_check_events(&cd->cdi, clearing);
6873 ++ if (!atomic_read(&cd->device->disk_events_disable_depth))
6874 ++ ret = cdrom_check_events(&cd->cdi, clearing);
6875 ++
6876 ++ scsi_cd_put(cd);
6877 ++ return ret;
6878 + }
6879 +
6880 + static int sr_block_revalidate_disk(struct gendisk *disk)
6881 + {
6882 +- struct scsi_cd *cd = scsi_cd(disk);
6883 + struct scsi_sense_hdr sshdr;
6884 ++ struct scsi_cd *cd;
6885 ++
6886 ++ cd = scsi_cd_get(disk);
6887 ++ if (!cd)
6888 ++ return -ENXIO;
6889 +
6890 + /* if the unit is not ready, nothing more to do */
6891 + if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
6892 +@@ -605,6 +617,7 @@ static int sr_block_revalidate_disk(struct gendisk *disk)
6893 + sr_cd_check(&cd->cdi);
6894 + get_sectorsize(cd);
6895 + out:
6896 ++ scsi_cd_put(cd);
6897 + return 0;
6898 + }
6899 +
6900 +diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
6901 +index 2a21f2d48592..35fab1e18adc 100644
6902 +--- a/drivers/scsi/sr_ioctl.c
6903 ++++ b/drivers/scsi/sr_ioctl.c
6904 +@@ -188,9 +188,13 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
6905 + struct scsi_device *SDev;
6906 + struct scsi_sense_hdr sshdr;
6907 + int result, err = 0, retries = 0;
6908 ++ unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE], *senseptr = NULL;
6909 +
6910 + SDev = cd->device;
6911 +
6912 ++ if (cgc->sense)
6913 ++ senseptr = sense_buffer;
6914 ++
6915 + retry:
6916 + if (!scsi_block_when_processing_errors(SDev)) {
6917 + err = -ENODEV;
6918 +@@ -198,10 +202,12 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
6919 + }
6920 +
6921 + result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
6922 +- cgc->buffer, cgc->buflen,
6923 +- (unsigned char *)cgc->sense, &sshdr,
6924 ++ cgc->buffer, cgc->buflen, senseptr, &sshdr,
6925 + cgc->timeout, IOCTL_RETRIES, 0, 0, NULL);
6926 +
6927 ++ if (cgc->sense)
6928 ++ memcpy(cgc->sense, sense_buffer, sizeof(*cgc->sense));
6929 ++
6930 + /* Minimal error checking. Ignore cases we know about, and report the rest. */
6931 + if (driver_byte(result) != 0) {
6932 + switch (sshdr.sense_key) {
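
The buffer handling above follows a pattern worth spelling out: the lower
layer is always handed a scratch buffer of the full sense length, and only
afterwards is the prefix that the caller's structure can hold copied back.
A self-contained sketch of the pattern (the sizes are stand-ins, not the
SCSI definitions):

#include <stdio.h>
#include <string.h>

#define FULL_SENSE_LEN 96	/* stands in for SCSI_SENSE_BUFFERSIZE */

struct small_sense {		/* stands in for the caller's struct */
	unsigned char data[18];
};

static void lower_layer_execute(unsigned char *sense, size_t len)
{
	memset(sense, 0x70, len);	/* pretend the device filled it */
}

int main(void)
{
	unsigned char sense_buffer[FULL_SENSE_LEN];
	struct small_sense caller = { 0 };

	lower_layer_execute(sense_buffer, sizeof(sense_buffer));
	memcpy(&caller, sense_buffer, sizeof(caller));	/* bounded copy */
	printf("first sense byte: 0x%02x\n", caller.data[0]);
	return 0;
}
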
6933 +diff --git a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
6934 +index 2bdeebc48901..2625ef06c10e 100644
6935 +--- a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
6936 ++++ b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
6937 +@@ -224,7 +224,11 @@ static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
6938 +
6939 + static void meson_gx_pwrc_vpu_shutdown(struct platform_device *pdev)
6940 + {
6941 +- meson_gx_pwrc_vpu_power_off(&vpu_hdmi_pd.genpd);
6942 ++ bool powered_off;
6943 ++
6944 ++ powered_off = meson_gx_pwrc_vpu_get_power(&vpu_hdmi_pd);
6945 ++ if (!powered_off)
6946 ++ meson_gx_pwrc_vpu_power_off(&vpu_hdmi_pd.genpd);
6947 + }
6948 +
6949 + static const struct of_device_id meson_gx_pwrc_vpu_match_table[] = {
6950 +diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
6951 +index d008e5b82db4..df3ccb30bc2d 100644
6952 +--- a/drivers/soc/qcom/wcnss_ctrl.c
6953 ++++ b/drivers/soc/qcom/wcnss_ctrl.c
6954 +@@ -249,7 +249,7 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
6955 + /* Increment for next fragment */
6956 + req->seq++;
6957 +
6958 +- data += req->hdr.len;
6959 ++ data += NV_FRAGMENT_SIZE;
6960 + left -= NV_FRAGMENT_SIZE;
6961 + } while (left > 0);
6962 +
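
The fix keeps the source cursor and the remaining-byte counter moving by the
same stride; advancing by the request header's length instead let the two
drift apart, so later fragments were read from the wrong offset. A condensed
model of the loop with illustrative sizes:

#include <stdio.h>

#define FRAG_SIZE 8

int main(void)
{
	const char blob[24] = "ABCDEFGHIJKLMNOPQRSTUVW";
	const char *data = blob;
	int left = sizeof(blob);

	do {
		int frag = left > FRAG_SIZE ? FRAG_SIZE : left;

		printf("send %.*s\n", frag, data);
		data += FRAG_SIZE;	/* same stride as the counter */
		left -= FRAG_SIZE;
	} while (left > 0);
	return 0;
}
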
6963 +diff --git a/drivers/soc/renesas/r8a77970-sysc.c b/drivers/soc/renesas/r8a77970-sysc.c
6964 +index 8c614164718e..caf894f193ed 100644
6965 +--- a/drivers/soc/renesas/r8a77970-sysc.c
6966 ++++ b/drivers/soc/renesas/r8a77970-sysc.c
6967 +@@ -25,12 +25,12 @@ static const struct rcar_sysc_area r8a77970_areas[] __initconst = {
6968 + PD_CPU_NOCR },
6969 + { "cr7", 0x240, 0, R8A77970_PD_CR7, R8A77970_PD_ALWAYS_ON },
6970 + { "a3ir", 0x180, 0, R8A77970_PD_A3IR, R8A77970_PD_ALWAYS_ON },
6971 +- { "a2ir0", 0x400, 0, R8A77970_PD_A2IR0, R8A77970_PD_ALWAYS_ON },
6972 +- { "a2ir1", 0x400, 1, R8A77970_PD_A2IR1, R8A77970_PD_A2IR0 },
6973 +- { "a2ir2", 0x400, 2, R8A77970_PD_A2IR2, R8A77970_PD_A2IR0 },
6974 +- { "a2ir3", 0x400, 3, R8A77970_PD_A2IR3, R8A77970_PD_A2IR0 },
6975 +- { "a2sc0", 0x400, 4, R8A77970_PD_A2SC0, R8A77970_PD_ALWAYS_ON },
6976 +- { "a2sc1", 0x400, 5, R8A77970_PD_A2SC1, R8A77970_PD_A2SC0 },
6977 ++ { "a2ir0", 0x400, 0, R8A77970_PD_A2IR0, R8A77970_PD_A3IR },
6978 ++ { "a2ir1", 0x400, 1, R8A77970_PD_A2IR1, R8A77970_PD_A3IR },
6979 ++ { "a2ir2", 0x400, 2, R8A77970_PD_A2IR2, R8A77970_PD_A3IR },
6980 ++ { "a2ir3", 0x400, 3, R8A77970_PD_A2IR3, R8A77970_PD_A3IR },
6981 ++ { "a2sc0", 0x400, 4, R8A77970_PD_A2SC0, R8A77970_PD_A3IR },
6982 ++ { "a2sc1", 0x400, 5, R8A77970_PD_A2SC1, R8A77970_PD_A3IR },
6983 + };
6984 +
6985 + const struct rcar_sysc_info r8a77970_sysc_info __initconst = {
6986 +diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
6987 +index ff01f865a173..6573152ce893 100644
6988 +--- a/drivers/spi/spi-bcm-qspi.c
6989 ++++ b/drivers/spi/spi-bcm-qspi.c
6990 +@@ -1255,7 +1255,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
6991 + qspi->base[MSPI] = devm_ioremap_resource(dev, res);
6992 + if (IS_ERR(qspi->base[MSPI])) {
6993 + ret = PTR_ERR(qspi->base[MSPI]);
6994 +- goto qspi_probe_err;
6995 ++ goto qspi_resource_err;
6996 + }
6997 + } else {
6998 + goto qspi_resource_err;
6999 +@@ -1266,7 +1266,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
7000 + qspi->base[BSPI] = devm_ioremap_resource(dev, res);
7001 + if (IS_ERR(qspi->base[BSPI])) {
7002 + ret = PTR_ERR(qspi->base[BSPI]);
7003 +- goto qspi_probe_err;
7004 ++ goto qspi_resource_err;
7005 + }
7006 + qspi->bspi_mode = true;
7007 + } else {
7008 +diff --git a/drivers/watchdog/asm9260_wdt.c b/drivers/watchdog/asm9260_wdt.c
7009 +index 7dd0da644a7f..2cf56b459d84 100644
7010 +--- a/drivers/watchdog/asm9260_wdt.c
7011 ++++ b/drivers/watchdog/asm9260_wdt.c
7012 +@@ -292,14 +292,14 @@ static int asm9260_wdt_probe(struct platform_device *pdev)
7013 + if (IS_ERR(priv->iobase))
7014 + return PTR_ERR(priv->iobase);
7015 +
7016 +- ret = asm9260_wdt_get_dt_clks(priv);
7017 +- if (ret)
7018 +- return ret;
7019 +-
7020 + priv->rst = devm_reset_control_get_exclusive(&pdev->dev, "wdt_rst");
7021 + if (IS_ERR(priv->rst))
7022 + return PTR_ERR(priv->rst);
7023 +
7024 ++ ret = asm9260_wdt_get_dt_clks(priv);
7025 ++ if (ret)
7026 ++ return ret;
7027 ++
7028 + wdd = &priv->wdd;
7029 + wdd->info = &asm9260_wdt_ident;
7030 + wdd->ops = &asm9260_wdt_ops;
7031 +diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
7032 +index ca5b91e2eb92..a5b8eb21201f 100644
7033 +--- a/drivers/watchdog/aspeed_wdt.c
7034 ++++ b/drivers/watchdog/aspeed_wdt.c
7035 +@@ -46,6 +46,7 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
7036 + #define WDT_RELOAD_VALUE 0x04
7037 + #define WDT_RESTART 0x08
7038 + #define WDT_CTRL 0x0C
7039 ++#define WDT_CTRL_BOOT_SECONDARY BIT(7)
7040 + #define WDT_CTRL_RESET_MODE_SOC (0x00 << 5)
7041 + #define WDT_CTRL_RESET_MODE_FULL_CHIP (0x01 << 5)
7042 + #define WDT_CTRL_RESET_MODE_ARM_CPU (0x10 << 5)
7043 +@@ -158,6 +159,7 @@ static int aspeed_wdt_restart(struct watchdog_device *wdd,
7044 + {
7045 + struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
7046 +
7047 ++ wdt->ctrl &= ~WDT_CTRL_BOOT_SECONDARY;
7048 + aspeed_wdt_enable(wdt, 128 * WDT_RATE_1MHZ / 1000);
7049 +
7050 + mdelay(1000);
7051 +@@ -232,16 +234,21 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
7052 + wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC | WDT_CTRL_RESET_SYSTEM;
7053 + } else {
7054 + if (!strcmp(reset_type, "cpu"))
7055 +- wdt->ctrl |= WDT_CTRL_RESET_MODE_ARM_CPU;
7056 ++ wdt->ctrl |= WDT_CTRL_RESET_MODE_ARM_CPU |
7057 ++ WDT_CTRL_RESET_SYSTEM;
7058 + else if (!strcmp(reset_type, "soc"))
7059 +- wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC;
7060 ++ wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC |
7061 ++ WDT_CTRL_RESET_SYSTEM;
7062 + else if (!strcmp(reset_type, "system"))
7063 +- wdt->ctrl |= WDT_CTRL_RESET_SYSTEM;
7064 ++ wdt->ctrl |= WDT_CTRL_RESET_MODE_FULL_CHIP |
7065 ++ WDT_CTRL_RESET_SYSTEM;
7066 + else if (strcmp(reset_type, "none"))
7067 + return -EINVAL;
7068 + }
7069 + if (of_property_read_bool(np, "aspeed,external-signal"))
7070 + wdt->ctrl |= WDT_CTRL_WDT_EXT;
7071 ++ if (of_property_read_bool(np, "aspeed,alt-boot"))
7072 ++ wdt->ctrl |= WDT_CTRL_BOOT_SECONDARY;
7073 +
7074 + if (readl(wdt->base + WDT_CTRL) & WDT_CTRL_ENABLE) {
7075 + /*
7076 +diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
7077 +index 3e4c592c239f..6c6594261cb7 100644
7078 +--- a/drivers/watchdog/davinci_wdt.c
7079 ++++ b/drivers/watchdog/davinci_wdt.c
7080 +@@ -236,15 +236,22 @@ static int davinci_wdt_probe(struct platform_device *pdev)
7081 +
7082 + wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
7083 + davinci_wdt->base = devm_ioremap_resource(dev, wdt_mem);
7084 +- if (IS_ERR(davinci_wdt->base))
7085 +- return PTR_ERR(davinci_wdt->base);
7086 ++ if (IS_ERR(davinci_wdt->base)) {
7087 ++ ret = PTR_ERR(davinci_wdt->base);
7088 ++ goto err_clk_disable;
7089 ++ }
7090 +
7091 + ret = watchdog_register_device(wdd);
7092 +- if (ret < 0) {
7093 +- clk_disable_unprepare(davinci_wdt->clk);
7094 ++ if (ret) {
7095 + dev_err(dev, "cannot register watchdog device\n");
7096 ++ goto err_clk_disable;
7097 + }
7098 +
7099 ++ return 0;
7100 ++
7101 ++err_clk_disable:
7102 ++ clk_disable_unprepare(davinci_wdt->clk);
7103 ++
7104 + return ret;
7105 + }
7106 +
7107 +diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
7108 +index c2f4ff516230..918357bccf5e 100644
7109 +--- a/drivers/watchdog/dw_wdt.c
7110 ++++ b/drivers/watchdog/dw_wdt.c
7111 +@@ -34,6 +34,7 @@
7112 +
7113 + #define WDOG_CONTROL_REG_OFFSET 0x00
7114 + #define WDOG_CONTROL_REG_WDT_EN_MASK 0x01
7115 ++#define WDOG_CONTROL_REG_RESP_MODE_MASK 0x02
7116 + #define WDOG_TIMEOUT_RANGE_REG_OFFSET 0x04
7117 + #define WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT 4
7118 + #define WDOG_CURRENT_COUNT_REG_OFFSET 0x08
7119 +@@ -121,14 +122,23 @@ static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s)
7120 + return 0;
7121 + }
7122 +
7123 ++static void dw_wdt_arm_system_reset(struct dw_wdt *dw_wdt)
7124 ++{
7125 ++ u32 val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
7126 ++
7127 ++ /* Disable interrupt mode; always perform system reset. */
7128 ++ val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK;
7129 ++ /* Enable watchdog. */
7130 ++ val |= WDOG_CONTROL_REG_WDT_EN_MASK;
7131 ++ writel(val, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
7132 ++}
7133 ++
7134 + static int dw_wdt_start(struct watchdog_device *wdd)
7135 + {
7136 + struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
7137 +
7138 + dw_wdt_set_timeout(wdd, wdd->timeout);
7139 +-
7140 +- writel(WDOG_CONTROL_REG_WDT_EN_MASK,
7141 +- dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
7142 ++ dw_wdt_arm_system_reset(dw_wdt);
7143 +
7144 + return 0;
7145 + }
7146 +@@ -152,16 +162,13 @@ static int dw_wdt_restart(struct watchdog_device *wdd,
7147 + unsigned long action, void *data)
7148 + {
7149 + struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
7150 +- u32 val;
7151 +
7152 + writel(0, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
7153 +- val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
7154 +- if (val & WDOG_CONTROL_REG_WDT_EN_MASK)
7155 ++ if (dw_wdt_is_enabled(dw_wdt))
7156 + writel(WDOG_COUNTER_RESTART_KICK_VALUE,
7157 + dw_wdt->regs + WDOG_COUNTER_RESTART_REG_OFFSET);
7158 + else
7159 +- writel(WDOG_CONTROL_REG_WDT_EN_MASK,
7160 +- dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
7161 ++ dw_wdt_arm_system_reset(dw_wdt);
7162 +
7163 + /* wait for reset to assert... */
7164 + mdelay(500);
7165 +diff --git a/drivers/watchdog/sprd_wdt.c b/drivers/watchdog/sprd_wdt.c
7166 +index a8b280ff33e0..b4d484a42b70 100644
7167 +--- a/drivers/watchdog/sprd_wdt.c
7168 ++++ b/drivers/watchdog/sprd_wdt.c
7169 +@@ -154,8 +154,10 @@ static int sprd_wdt_enable(struct sprd_wdt *wdt)
7170 + if (ret)
7171 + return ret;
7172 + ret = clk_prepare_enable(wdt->rtc_enable);
7173 +- if (ret)
7174 ++ if (ret) {
7175 ++ clk_disable_unprepare(wdt->enable);
7176 + return ret;
7177 ++ }
7178 +
7179 + sprd_wdt_unlock(wdt->base);
7180 + val = readl_relaxed(wdt->base + SPRD_WDT_CTRL);
7181 +diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
7182 +index 5bb72d3f8337..3530a196d959 100644
7183 +--- a/drivers/xen/swiotlb-xen.c
7184 ++++ b/drivers/xen/swiotlb-xen.c
7185 +@@ -365,7 +365,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
7186 + * physical address */
7187 + phys = xen_bus_to_phys(dev_addr);
7188 +
7189 +- if (((dev_addr + size - 1 > dma_mask)) ||
7190 ++ if (((dev_addr + size - 1 <= dma_mask)) ||
7191 + range_straddles_page_boundary(phys, size))
7192 + xen_destroy_contiguous_region(phys, order);
7193 +
7194 +diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
7195 +index 23e391d3ec01..22863f5f2474 100644
7196 +--- a/drivers/xen/xen-acpi-processor.c
7197 ++++ b/drivers/xen/xen-acpi-processor.c
7198 +@@ -362,9 +362,9 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
7199 + }
7200 + /* There are more ACPI Processor objects than in x2APIC or MADT.
7201 +	 * This can happen with incorrect ACPI SSDT declarations. */
7202 +- if (acpi_id > nr_acpi_bits) {
7203 +- pr_debug("We only have %u, trying to set %u\n",
7204 +- nr_acpi_bits, acpi_id);
7205 ++ if (acpi_id >= nr_acpi_bits) {
7206 ++ pr_debug("max acpi id %u, trying to set %u\n",
7207 ++ nr_acpi_bits - 1, acpi_id);
7208 + return AE_OK;
7209 + }
7210 + /* OK, There is a ACPI Processor object */
7211 +diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
7212 +index cc1b1ac57d61..47728477297e 100644
7213 +--- a/drivers/zorro/zorro.c
7214 ++++ b/drivers/zorro/zorro.c
7215 +@@ -16,6 +16,7 @@
7216 + #include <linux/bitops.h>
7217 + #include <linux/string.h>
7218 + #include <linux/platform_device.h>
7219 ++#include <linux/dma-mapping.h>
7220 + #include <linux/slab.h>
7221 +
7222 + #include <asm/byteorder.h>
7223 +@@ -185,6 +186,17 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
7224 + z->dev.parent = &bus->dev;
7225 + z->dev.bus = &zorro_bus_type;
7226 + z->dev.id = i;
7227 ++ switch (z->rom.er_Type & ERT_TYPEMASK) {
7228 ++ case ERT_ZORROIII:
7229 ++ z->dev.coherent_dma_mask = DMA_BIT_MASK(32);
7230 ++ break;
7231 ++
7232 ++ case ERT_ZORROII:
7233 ++ default:
7234 ++ z->dev.coherent_dma_mask = DMA_BIT_MASK(24);
7235 ++ break;
7236 ++ }
7237 ++ z->dev.dma_mask = &z->dev.coherent_dma_mask;
7238 + }
7239 +
7240 + /* ... then register them */
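
Zorro II is a 24-bit bus while Zorro III addresses a full 32 bits, hence the
two masks. DMA_BIT_MASK(n) is simply the largest address reachable with n
address bits, as a quick check shows:

#include <inttypes.h>
#include <stdio.h>

/* DMA_BIT_MASK(n) as defined in linux/dma-mapping.h */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	printf("Zorro II : 0x%" PRIx64 "\n", (uint64_t)DMA_BIT_MASK(24));
	printf("Zorro III: 0x%" PRIx64 "\n", (uint64_t)DMA_BIT_MASK(32));
	return 0;
}
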
7241 +diff --git a/fs/affs/namei.c b/fs/affs/namei.c
7242 +index d8aa0ae3d037..1ed0fa4c4d48 100644
7243 +--- a/fs/affs/namei.c
7244 ++++ b/fs/affs/namei.c
7245 +@@ -206,9 +206,10 @@ affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
7246 +
7247 + affs_lock_dir(dir);
7248 + bh = affs_find_entry(dir, dentry);
7249 +- affs_unlock_dir(dir);
7250 +- if (IS_ERR(bh))
7251 ++ if (IS_ERR(bh)) {
7252 ++ affs_unlock_dir(dir);
7253 + return ERR_CAST(bh);
7254 ++ }
7255 + if (bh) {
7256 + u32 ino = bh->b_blocknr;
7257 +
7258 +@@ -222,10 +223,13 @@ affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
7259 + }
7260 + affs_brelse(bh);
7261 + inode = affs_iget(sb, ino);
7262 +- if (IS_ERR(inode))
7263 ++ if (IS_ERR(inode)) {
7264 ++ affs_unlock_dir(dir);
7265 + return ERR_CAST(inode);
7266 ++ }
7267 + }
7268 + d_add(dentry, inode);
7269 ++ affs_unlock_dir(dir);
7270 + return NULL;
7271 + }
7272 +
7273 +diff --git a/fs/aio.c b/fs/aio.c
7274 +index 6bcd3fb5265a..63c0437ab135 100644
7275 +--- a/fs/aio.c
7276 ++++ b/fs/aio.c
7277 +@@ -1087,8 +1087,8 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
7278 +
7279 + ctx = rcu_dereference(table->table[id]);
7280 + if (ctx && ctx->user_id == ctx_id) {
7281 +- percpu_ref_get(&ctx->users);
7282 +- ret = ctx;
7283 ++ if (percpu_ref_tryget_live(&ctx->users))
7284 ++ ret = ctx;
7285 + }
7286 + out:
7287 + rcu_read_unlock();
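
The lookup now refuses to hand out an ioctx whose refcount is already being
killed: a plain get blindly increments and can resurrect a dying context,
while tryget_live fails once shutdown has begun. A toy model of the
distinction (a flag-plus-counter stand-in, not the real percpu_ref):

#include <stdbool.h>
#include <stdio.h>

struct toy_ref {
	int count;
	bool dying;
};

static void toy_get(struct toy_ref *r) { r->count++; }

static bool toy_tryget_live(struct toy_ref *r)
{
	if (r->dying)
		return false;
	r->count++;
	return true;
}

int main(void)
{
	struct toy_ref ref = { .count = 1, .dying = true };	/* being killed */

	toy_get(&ref);	/* old lookup path: resurrects a dying object */
	printf("tryget_live on dying ref: %d\n", toy_tryget_live(&ref));
	return 0;
}
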
7288 +diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
7289 +index 7efbc4d1128b..f5247ad86970 100644
7290 +--- a/fs/btrfs/dev-replace.c
7291 ++++ b/fs/btrfs/dev-replace.c
7292 +@@ -307,7 +307,7 @@ void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info)
7293 +
7294 + static char* btrfs_dev_name(struct btrfs_device *device)
7295 + {
7296 +- if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
7297 ++ if (!device || test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
7298 + return "<missing disk>";
7299 + else
7300 + return rcu_str_deref(device->name);
7301 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
7302 +index fea78d138073..02e39a7f22ec 100644
7303 +--- a/fs/btrfs/disk-io.c
7304 ++++ b/fs/btrfs/disk-io.c
7305 +@@ -1108,7 +1108,7 @@ static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
7306 + if (!writers)
7307 + return ERR_PTR(-ENOMEM);
7308 +
7309 +- ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL);
7310 ++ ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
7311 + if (ret < 0) {
7312 + kfree(writers);
7313 + return ERR_PTR(ret);
7314 +@@ -3735,7 +3735,8 @@ void close_ctree(struct btrfs_fs_info *fs_info)
7315 + btrfs_err(fs_info, "commit super ret %d", ret);
7316 + }
7317 +
7318 +- if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
7319 ++ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) ||
7320 ++ test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state))
7321 + btrfs_error_commit_super(fs_info);
7322 +
7323 + kthread_stop(fs_info->transaction_kthread);
7324 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
7325 +index 53ddfafa440b..b45b840c2217 100644
7326 +--- a/fs/btrfs/extent-tree.c
7327 ++++ b/fs/btrfs/extent-tree.c
7328 +@@ -4657,6 +4657,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
7329 + if (wait_for_alloc) {
7330 + mutex_unlock(&fs_info->chunk_mutex);
7331 + wait_for_alloc = 0;
7332 ++ cond_resched();
7333 + goto again;
7334 + }
7335 +
7336 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
7337 +index f370bdc126b8..8b031f40a2f5 100644
7338 +--- a/fs/btrfs/inode.c
7339 ++++ b/fs/btrfs/inode.c
7340 +@@ -6632,8 +6632,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
7341 + goto out_unlock_inode;
7342 + } else {
7343 + btrfs_update_inode(trans, root, inode);
7344 +- unlock_new_inode(inode);
7345 +- d_instantiate(dentry, inode);
7346 ++ d_instantiate_new(dentry, inode);
7347 + }
7348 +
7349 + out_unlock:
7350 +@@ -6709,8 +6708,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
7351 + goto out_unlock_inode;
7352 +
7353 + BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
7354 +- unlock_new_inode(inode);
7355 +- d_instantiate(dentry, inode);
7356 ++ d_instantiate_new(dentry, inode);
7357 +
7358 + out_unlock:
7359 + btrfs_end_transaction(trans);
7360 +@@ -6855,12 +6853,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
7361 + if (err)
7362 + goto out_fail_inode;
7363 +
7364 +- d_instantiate(dentry, inode);
7365 +- /*
7366 +- * mkdir is special. We're unlocking after we call d_instantiate
7367 +- * to avoid a race with nfsd calling d_instantiate.
7368 +- */
7369 +- unlock_new_inode(inode);
7370 ++ d_instantiate_new(dentry, inode);
7371 + drop_on_err = 0;
7372 +
7373 + out_fail:
7374 +@@ -9238,7 +9231,8 @@ static int btrfs_truncate(struct inode *inode)
7375 + BTRFS_EXTENT_DATA_KEY);
7376 + trans->block_rsv = &fs_info->trans_block_rsv;
7377 + if (ret != -ENOSPC && ret != -EAGAIN) {
7378 +- err = ret;
7379 ++ if (ret < 0)
7380 ++ err = ret;
7381 + break;
7382 + }
7383 +
7384 +@@ -10372,8 +10366,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7385 + goto out_unlock_inode;
7386 + }
7387 +
7388 +- unlock_new_inode(inode);
7389 +- d_instantiate(dentry, inode);
7390 ++ d_instantiate_new(dentry, inode);
7391 +
7392 + out_unlock:
7393 + btrfs_end_transaction(trans);
7394 +diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
7395 +index 90204b166643..160eb2fba726 100644
7396 +--- a/fs/btrfs/tests/qgroup-tests.c
7397 ++++ b/fs/btrfs/tests/qgroup-tests.c
7398 +@@ -63,7 +63,7 @@ static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr,
7399 + btrfs_set_extent_generation(leaf, item, 1);
7400 + btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_TREE_BLOCK);
7401 + block_info = (struct btrfs_tree_block_info *)(item + 1);
7402 +- btrfs_set_tree_block_level(leaf, block_info, 1);
7403 ++ btrfs_set_tree_block_level(leaf, block_info, 0);
7404 + iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7405 + if (parent > 0) {
7406 + btrfs_set_extent_inline_ref_type(leaf, iref,
7407 +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
7408 +index 04f07144b45c..c070ce7fecc6 100644
7409 +--- a/fs/btrfs/transaction.c
7410 ++++ b/fs/btrfs/transaction.c
7411 +@@ -319,7 +319,7 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans,
7412 + if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
7413 + root->last_trans < trans->transid) || force) {
7414 + WARN_ON(root == fs_info->extent_root);
7415 +- WARN_ON(root->commit_root != root->node);
7416 ++ WARN_ON(!force && root->commit_root != root->node);
7417 +
7418 + /*
7419 + * see below for IN_TRANS_SETUP usage rules
7420 +@@ -1365,6 +1365,14 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
7421 + if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
7422 + return 0;
7423 +
7424 ++ /*
7425 ++	 * Ensure dirty @src will be committed. Otherwise, after the coming
7426 ++	 * commit_fs_roots() and switch_commit_roots(), any dirty but not
7427 ++	 * recorded root will never be updated again, causing an outdated root
7428 ++	 * item.
7429 ++ */
7430 ++ record_root_in_trans(trans, src, 1);
7431 ++
7432 + /*
7433 + * We are going to commit transaction, see btrfs_commit_transaction()
7434 + * comment for reason locking tree_log_mutex
7435 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
7436 +index ac6ea1503cd6..eb53c21b223a 100644
7437 +--- a/fs/btrfs/tree-log.c
7438 ++++ b/fs/btrfs/tree-log.c
7439 +@@ -2356,8 +2356,10 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
7440 + nritems = btrfs_header_nritems(path->nodes[0]);
7441 + if (path->slots[0] >= nritems) {
7442 + ret = btrfs_next_leaf(root, path);
7443 +- if (ret)
7444 ++ if (ret == 1)
7445 + break;
7446 ++ else if (ret < 0)
7447 ++ goto out;
7448 + }
7449 + btrfs_item_key_to_cpu(path->nodes[0], &found_key,
7450 + path->slots[0]);
7451 +@@ -2461,13 +2463,41 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
7452 + if (ret)
7453 + break;
7454 +
7455 +- /* for regular files, make sure corresponding
7456 +- * orphan item exist. extents past the new EOF
7457 +- * will be truncated later by orphan cleanup.
7458 ++ /*
7459 ++ * Before replaying extents, truncate the inode to its
7460 ++ * size. We need to do it now and not after log replay
7461 ++ * because before an fsync we can have prealloc extents
7462 ++ * added beyond the inode's i_size. If we did it after,
7463 ++ * through orphan cleanup for example, we would drop
7464 ++ * those prealloc extents just after replaying them.
7465 + */
7466 + if (S_ISREG(mode)) {
7467 +- ret = insert_orphan_item(wc->trans, root,
7468 +- key.objectid);
7469 ++ struct inode *inode;
7470 ++ u64 from;
7471 ++
7472 ++ inode = read_one_inode(root, key.objectid);
7473 ++ if (!inode) {
7474 ++ ret = -EIO;
7475 ++ break;
7476 ++ }
7477 ++ from = ALIGN(i_size_read(inode),
7478 ++ root->fs_info->sectorsize);
7479 ++ ret = btrfs_drop_extents(wc->trans, root, inode,
7480 ++ from, (u64)-1, 1);
7481 ++ /*
7482 ++ * If the nlink count is zero here, the iput
7483 ++ * will free the inode. We bump it to make
7484 ++ * sure it doesn't get freed until the link
7485 ++ * count fixup is done.
7486 ++ */
7487 ++ if (!ret) {
7488 ++ if (inode->i_nlink == 0)
7489 ++ inc_nlink(inode);
7490 ++ /* Update link count and nbytes. */
7491 ++ ret = btrfs_update_inode(wc->trans,
7492 ++ root, inode);
7493 ++ }
7494 ++ iput(inode);
7495 + if (ret)
7496 + break;
7497 + }
7498 +@@ -3518,8 +3548,11 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
7499 + * from this directory and from this transaction
7500 + */
7501 + ret = btrfs_next_leaf(root, path);
7502 +- if (ret == 1) {
7503 +- last_offset = (u64)-1;
7504 ++ if (ret) {
7505 ++ if (ret == 1)
7506 ++ last_offset = (u64)-1;
7507 ++ else
7508 ++ err = ret;
7509 + goto done;
7510 + }
7511 + btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
7512 +@@ -3972,6 +4005,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
7513 + ASSERT(ret == 0);
7514 + src = src_path->nodes[0];
7515 + i = 0;
7516 ++ need_find_last_extent = true;
7517 + }
7518 +
7519 + btrfs_item_key_to_cpu(src, &key, i);
7520 +@@ -4321,6 +4355,31 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
7521 + num++;
7522 + }
7523 +
7524 ++ /*
7525 ++ * Add all prealloc extents beyond the inode's i_size to make sure we
7526 ++ * don't lose them after doing a fast fsync and replaying the log.
7527 ++ */
7528 ++ if (inode->flags & BTRFS_INODE_PREALLOC) {
7529 ++ struct rb_node *node;
7530 ++
7531 ++ for (node = rb_last(&tree->map); node; node = rb_prev(node)) {
7532 ++ em = rb_entry(node, struct extent_map, rb_node);
7533 ++ if (em->start < i_size_read(&inode->vfs_inode))
7534 ++ break;
7535 ++ if (!list_empty(&em->list))
7536 ++ continue;
7537 ++ /* Same as above loop. */
7538 ++ if (++num > 32768) {
7539 ++ list_del_init(&tree->modified_extents);
7540 ++ ret = -EFBIG;
7541 ++ goto process;
7542 ++ }
7543 ++ refcount_inc(&em->refs);
7544 ++ set_bit(EXTENT_FLAG_LOGGING, &em->flags);
7545 ++ list_add_tail(&em->list, &extents);
7546 ++ }
7547 ++ }
7548 ++
7549 + list_sort(NULL, &extents, extent_cmp);
7550 + btrfs_get_logged_extents(inode, logged_list, logged_start, logged_end);
7551 + /*
7552 +diff --git a/fs/dcache.c b/fs/dcache.c
7553 +index 8945e6cabd93..06463b780e57 100644
7554 +--- a/fs/dcache.c
7555 ++++ b/fs/dcache.c
7556 +@@ -1865,6 +1865,28 @@ void d_instantiate(struct dentry *entry, struct inode * inode)
7557 + }
7558 + EXPORT_SYMBOL(d_instantiate);
7559 +
7560 ++/*
7561 ++ * This should be equivalent to d_instantiate() + unlock_new_inode(),
7562 ++ * with lockdep-related part of unlock_new_inode() done before
7563 ++ * anything else. Use that instead of open-coding d_instantiate()/
7564 ++ * unlock_new_inode() combinations.
7565 ++ */
7566 ++void d_instantiate_new(struct dentry *entry, struct inode *inode)
7567 ++{
7568 ++ BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
7569 ++ BUG_ON(!inode);
7570 ++ lockdep_annotate_inode_mutex_key(inode);
7571 ++ security_d_instantiate(entry, inode);
7572 ++ spin_lock(&inode->i_lock);
7573 ++ __d_instantiate(entry, inode);
7574 ++ WARN_ON(!(inode->i_state & I_NEW));
7575 ++ inode->i_state &= ~I_NEW;
7576 ++ smp_mb();
7577 ++ wake_up_bit(&inode->i_state, __I_NEW);
7578 ++ spin_unlock(&inode->i_lock);
7579 ++}
7580 ++EXPORT_SYMBOL(d_instantiate_new);
7581 ++
7582 + /**
7583 + * d_instantiate_no_diralias - instantiate a non-aliased dentry
7584 + * @entry: dentry to complete
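
With d_instantiate_new() exported above, the many per-filesystem hunks that follow all reduce to the same mechanical conversion. A minimal sketch of the caller side, assuming a kernel build context (foo_create() and foo_new_inode() are hypothetical):

    #include <linux/dcache.h>
    #include <linux/err.h>
    #include <linux/fs.h>

    /* hypothetical; returns a locked, I_NEW inode or ERR_PTR() */
    struct inode *foo_new_inode(struct inode *dir, umode_t mode);

    static int foo_create(struct inode *dir, struct dentry *dentry,
                          umode_t mode)
    {
            struct inode *inode = foo_new_inode(dir, mode);

            if (IS_ERR(inode))
                    return PTR_ERR(inode);
            /*
             * Replaces the racy open-coded pair
             *   d_instantiate(dentry, inode);
             *   unlock_new_inode(inode);
             * (in either order) with one helper that does the lockdep
             * annotation first.
             */
            d_instantiate_new(dentry, inode);
            return 0;
    }
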
7585 +diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
7586 +index 847904aa63a9..7bba8f2693b2 100644
7587 +--- a/fs/ecryptfs/inode.c
7588 ++++ b/fs/ecryptfs/inode.c
7589 +@@ -283,8 +283,7 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
7590 + iget_failed(ecryptfs_inode);
7591 + goto out;
7592 + }
7593 +- unlock_new_inode(ecryptfs_inode);
7594 +- d_instantiate(ecryptfs_dentry, ecryptfs_inode);
7595 ++ d_instantiate_new(ecryptfs_dentry, ecryptfs_inode);
7596 + out:
7597 + return rc;
7598 + }
7599 +diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
7600 +index e078075dc66f..aa6ec191cac0 100644
7601 +--- a/fs/ext2/namei.c
7602 ++++ b/fs/ext2/namei.c
7603 +@@ -41,8 +41,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
7604 + {
7605 + int err = ext2_add_link(dentry, inode);
7606 + if (!err) {
7607 +- unlock_new_inode(inode);
7608 +- d_instantiate(dentry, inode);
7609 ++ d_instantiate_new(dentry, inode);
7610 + return 0;
7611 + }
7612 + inode_dec_link_count(inode);
7613 +@@ -269,8 +268,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
7614 + if (err)
7615 + goto out_fail;
7616 +
7617 +- unlock_new_inode(inode);
7618 +- d_instantiate(dentry, inode);
7619 ++ d_instantiate_new(dentry, inode);
7620 + out:
7621 + return err;
7622 +
7623 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
7624 +index b1f21e3a0763..4a09063ce1d2 100644
7625 +--- a/fs/ext4/namei.c
7626 ++++ b/fs/ext4/namei.c
7627 +@@ -2411,8 +2411,7 @@ static int ext4_add_nondir(handle_t *handle,
7628 + int err = ext4_add_entry(handle, dentry, inode);
7629 + if (!err) {
7630 + ext4_mark_inode_dirty(handle, inode);
7631 +- unlock_new_inode(inode);
7632 +- d_instantiate(dentry, inode);
7633 ++ d_instantiate_new(dentry, inode);
7634 + return 0;
7635 + }
7636 + drop_nlink(inode);
7637 +@@ -2651,8 +2650,7 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
7638 + err = ext4_mark_inode_dirty(handle, dir);
7639 + if (err)
7640 + goto out_clear_inode;
7641 +- unlock_new_inode(inode);
7642 +- d_instantiate(dentry, inode);
7643 ++ d_instantiate_new(dentry, inode);
7644 + if (IS_DIRSYNC(dir))
7645 + ext4_handle_sync(handle);
7646 +
7647 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
7648 +index b8dace7abe09..4c4ff4b3593c 100644
7649 +--- a/fs/ext4/super.c
7650 ++++ b/fs/ext4/super.c
7651 +@@ -3663,6 +3663,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
7652 + ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
7653 + "using the ext4 subsystem");
7654 + else {
7655 ++ /*
7656 ++	 * If we're probing, be silent if this looks like
7657 ++	 * it's actually an ext[34] filesystem.
7658 ++ */
7659 ++ if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
7660 ++ goto failed_mount;
7661 + ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
7662 + "to feature incompatibilities");
7663 + goto failed_mount;
7664 +@@ -3674,6 +3680,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
7665 + ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
7666 + "using the ext4 subsystem");
7667 + else {
7668 ++ /*
7669 ++	 * If we're probing, be silent if this looks like
7670 ++	 * it's actually an ext4 filesystem.
7671 ++ */
7672 ++ if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
7673 ++ goto failed_mount;
7674 + ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
7675 + "to feature incompatibilities");
7676 + goto failed_mount;
7677 +diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
7678 +index 512dca8abc7d..e77271c2144d 100644
7679 +--- a/fs/f2fs/checkpoint.c
7680 ++++ b/fs/f2fs/checkpoint.c
7681 +@@ -1136,6 +1136,8 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
7682 +
7683 + if (cpc->reason & CP_TRIMMED)
7684 + __set_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
7685 ++ else
7686 ++ __clear_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
7687 +
7688 + if (cpc->reason & CP_UMOUNT)
7689 + __set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
7690 +@@ -1162,6 +1164,39 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
7691 + spin_unlock_irqrestore(&sbi->cp_lock, flags);
7692 + }
7693 +
7694 ++static void commit_checkpoint(struct f2fs_sb_info *sbi,
7695 ++ void *src, block_t blk_addr)
7696 ++{
7697 ++ struct writeback_control wbc = {
7698 ++ .for_reclaim = 0,
7699 ++ };
7700 ++
7701 ++ /*
7702 ++	 * Calling pagevec_lookup_tag() and lock_page() again would
7703 ++	 * take some extra time, so update_meta_pages and
7704 ++	 * sync_meta_pages are combined in this function.
7705 ++ */
7706 ++ struct page *page = grab_meta_page(sbi, blk_addr);
7707 ++ int err;
7708 ++
7709 ++ memcpy(page_address(page), src, PAGE_SIZE);
7710 ++ set_page_dirty(page);
7711 ++
7712 ++ f2fs_wait_on_page_writeback(page, META, true);
7713 ++ f2fs_bug_on(sbi, PageWriteback(page));
7714 ++ if (unlikely(!clear_page_dirty_for_io(page)))
7715 ++ f2fs_bug_on(sbi, 1);
7716 ++
7717 ++ /* writeout cp pack 2 page */
7718 ++ err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO);
7719 ++ f2fs_bug_on(sbi, err);
7720 ++
7721 ++ f2fs_put_page(page, 0);
7722 ++
7723 ++ /* submit checkpoint (with barrier if NOBARRIER is not set) */
7724 ++ f2fs_submit_merged_write(sbi, META_FLUSH);
7725 ++}
7726 ++
7727 + static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
7728 + {
7729 + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
7730 +@@ -1264,16 +1299,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
7731 + }
7732 + }
7733 +
7734 +- /* need to wait for end_io results */
7735 +- wait_on_all_pages_writeback(sbi);
7736 +- if (unlikely(f2fs_cp_error(sbi)))
7737 +- return -EIO;
7738 +-
7739 +- /* flush all device cache */
7740 +- err = f2fs_flush_device_cache(sbi);
7741 +- if (err)
7742 +- return err;
7743 +-
7744 + /* write out checkpoint buffer at block 0 */
7745 + update_meta_page(sbi, ckpt, start_blk++);
7746 +
7747 +@@ -1301,26 +1326,26 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
7748 + start_blk += NR_CURSEG_NODE_TYPE;
7749 + }
7750 +
7751 +- /* writeout checkpoint block */
7752 +- update_meta_page(sbi, ckpt, start_blk);
7753 ++ /* update user_block_counts */
7754 ++ sbi->last_valid_block_count = sbi->total_valid_block_count;
7755 ++ percpu_counter_set(&sbi->alloc_valid_block_count, 0);
7756 +
7757 +- /* wait for previous submitted node/meta pages writeback */
7758 ++ /* Here, we have one bio having CP pack except cp pack 2 page */
7759 ++ sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
7760 ++
7761 ++ /* wait for previous submitted meta pages writeback */
7762 + wait_on_all_pages_writeback(sbi);
7763 +
7764 + if (unlikely(f2fs_cp_error(sbi)))
7765 + return -EIO;
7766 +
7767 +- filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LLONG_MAX);
7768 +- filemap_fdatawait_range(META_MAPPING(sbi), 0, LLONG_MAX);
7769 +-
7770 +- /* update user_block_counts */
7771 +- sbi->last_valid_block_count = sbi->total_valid_block_count;
7772 +- percpu_counter_set(&sbi->alloc_valid_block_count, 0);
7773 +-
7774 +- /* Here, we only have one bio having CP pack */
7775 +- sync_meta_pages(sbi, META_FLUSH, LONG_MAX, FS_CP_META_IO);
7776 ++ /* flush all device cache */
7777 ++ err = f2fs_flush_device_cache(sbi);
7778 ++ if (err)
7779 ++ return err;
7780 +
7781 +- /* wait for previous submitted meta pages writeback */
7782 ++ /* barrier and flush checkpoint cp pack 2 page if it can */
7783 ++ commit_checkpoint(sbi, ckpt, start_blk);
7784 + wait_on_all_pages_writeback(sbi);
7785 +
7786 + release_ino_entry(sbi, false);
7787 +diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
7788 +index ff2352a0ed15..aff6c2ed1c02 100644
7789 +--- a/fs/f2fs/extent_cache.c
7790 ++++ b/fs/f2fs/extent_cache.c
7791 +@@ -706,6 +706,9 @@ void f2fs_drop_extent_tree(struct inode *inode)
7792 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
7793 + struct extent_tree *et = F2FS_I(inode)->extent_tree;
7794 +
7795 ++ if (!f2fs_may_extent_tree(inode))
7796 ++ return;
7797 ++
7798 + set_inode_flag(inode, FI_NO_EXTENT);
7799 +
7800 + write_lock(&et->lock);
7801 +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
7802 +index 672a542e5464..c59b7888d356 100644
7803 +--- a/fs/f2fs/file.c
7804 ++++ b/fs/f2fs/file.c
7805 +@@ -1348,8 +1348,12 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
7806 + }
7807 +
7808 + out:
7809 +- if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
7810 +- f2fs_i_size_write(inode, new_size);
7811 ++ if (new_size > i_size_read(inode)) {
7812 ++ if (mode & FALLOC_FL_KEEP_SIZE)
7813 ++ file_set_keep_isize(inode);
7814 ++ else
7815 ++ f2fs_i_size_write(inode, new_size);
7816 ++ }
7817 + out_sem:
7818 + up_write(&F2FS_I(inode)->i_mmap_sem);
7819 +
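
The zero-range hunk above makes FALLOC_FL_KEEP_SIZE behave as documented: the zeroed range may extend past EOF, but the visible file size must not grow. A small userspace illustration of that contract (hedged: FALLOC_FL_ZERO_RANGE support varies by filesystem):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
            struct stat st;
            int fd = open("testfile", O_CREAT | O_RDWR | O_TRUNC, 0600);

            if (fd < 0) { perror("open"); return 1; }
            if (ftruncate(fd, 4096)) { perror("ftruncate"); return 1; }

            /* Zero a range extending past EOF, but keep the reported size. */
            if (fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
                          0, 8192))
                    perror("fallocate");    /* not all filesystems support it */

            fstat(fd, &st);
            printf("st_size = %lld (expected 4096)\n", (long long)st.st_size);
            close(fd);
            unlink("testfile");
            return 0;
    }
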
7820 +diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
7821 +index b68e7b03959f..860c9dd4bb42 100644
7822 +--- a/fs/f2fs/namei.c
7823 ++++ b/fs/f2fs/namei.c
7824 +@@ -218,8 +218,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
7825 +
7826 + alloc_nid_done(sbi, ino);
7827 +
7828 +- d_instantiate(dentry, inode);
7829 +- unlock_new_inode(inode);
7830 ++ d_instantiate_new(dentry, inode);
7831 +
7832 + if (IS_DIRSYNC(dir))
7833 + f2fs_sync_fs(sbi->sb, 1);
7834 +@@ -526,8 +525,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
7835 + err = page_symlink(inode, disk_link.name, disk_link.len);
7836 +
7837 + err_out:
7838 +- d_instantiate(dentry, inode);
7839 +- unlock_new_inode(inode);
7840 ++ d_instantiate_new(dentry, inode);
7841 +
7842 + /*
7843 + * Let's flush symlink data in order to avoid broken symlink as much as
7844 +@@ -590,8 +588,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
7845 +
7846 + alloc_nid_done(sbi, inode->i_ino);
7847 +
7848 +- d_instantiate(dentry, inode);
7849 +- unlock_new_inode(inode);
7850 ++ d_instantiate_new(dentry, inode);
7851 +
7852 + if (IS_DIRSYNC(dir))
7853 + f2fs_sync_fs(sbi->sb, 1);
7854 +@@ -642,8 +639,7 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
7855 +
7856 + alloc_nid_done(sbi, inode->i_ino);
7857 +
7858 +- d_instantiate(dentry, inode);
7859 +- unlock_new_inode(inode);
7860 ++ d_instantiate_new(dentry, inode);
7861 +
7862 + if (IS_DIRSYNC(dir))
7863 + f2fs_sync_fs(sbi->sb, 1);
7864 +diff --git a/fs/fscache/page.c b/fs/fscache/page.c
7865 +index 961029e04027..da2fb58f2ecb 100644
7866 +--- a/fs/fscache/page.c
7867 ++++ b/fs/fscache/page.c
7868 +@@ -776,6 +776,7 @@ static void fscache_write_op(struct fscache_operation *_op)
7869 +
7870 + _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
7871 +
7872 ++again:
7873 + spin_lock(&object->lock);
7874 + cookie = object->cookie;
7875 +
7876 +@@ -816,10 +817,6 @@ static void fscache_write_op(struct fscache_operation *_op)
7877 + goto superseded;
7878 + page = results[0];
7879 + _debug("gang %d [%lx]", n, page->index);
7880 +- if (page->index >= op->store_limit) {
7881 +- fscache_stat(&fscache_n_store_pages_over_limit);
7882 +- goto superseded;
7883 +- }
7884 +
7885 + radix_tree_tag_set(&cookie->stores, page->index,
7886 + FSCACHE_COOKIE_STORING_TAG);
7887 +@@ -829,6 +826,9 @@ static void fscache_write_op(struct fscache_operation *_op)
7888 + spin_unlock(&cookie->stores_lock);
7889 + spin_unlock(&object->lock);
7890 +
7891 ++ if (page->index >= op->store_limit)
7892 ++ goto discard_page;
7893 ++
7894 + fscache_stat(&fscache_n_store_pages);
7895 + fscache_stat(&fscache_n_cop_write_page);
7896 + ret = object->cache->ops->write_page(op, page);
7897 +@@ -844,6 +844,11 @@ static void fscache_write_op(struct fscache_operation *_op)
7898 + _leave("");
7899 + return;
7900 +
7901 ++discard_page:
7902 ++ fscache_stat(&fscache_n_store_pages_over_limit);
7903 ++ fscache_end_page_write(object, page);
7904 ++ goto again;
7905 ++
7906 + superseded:
7907 + /* this writer is going away and there aren't any more things to
7908 + * write */
7909 +diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
7910 +index 51f940e76c5e..de28800691c6 100644
7911 +--- a/fs/gfs2/bmap.c
7912 ++++ b/fs/gfs2/bmap.c
7913 +@@ -1344,6 +1344,7 @@ static inline bool walk_done(struct gfs2_sbd *sdp,
7914 + static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
7915 + {
7916 + struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
7917 ++ u64 maxsize = sdp->sd_heightsize[ip->i_height];
7918 + struct metapath mp = {};
7919 + struct buffer_head *dibh, *bh;
7920 + struct gfs2_holder rd_gh;
7921 +@@ -1359,6 +1360,14 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
7922 + u64 prev_bnr = 0;
7923 + __be64 *start, *end;
7924 +
7925 ++ if (offset >= maxsize) {
7926 ++ /*
7927 ++ * The starting point lies beyond the allocated meta-data;
7928 ++	 * there are no blocks to deallocate.
7929 ++ */
7930 ++ return 0;
7931 ++ }
7932 ++
7933 + /*
7934 + * The start position of the hole is defined by lblock, start_list, and
7935 + * start_aligned. The end position of the hole is defined by lend,
7936 +@@ -1372,7 +1381,6 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
7937 + */
7938 +
7939 + if (length) {
7940 +- u64 maxsize = sdp->sd_heightsize[ip->i_height];
7941 + u64 end_offset = offset + length;
7942 + u64 lend;
7943 +
7944 +diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
7945 +index 4f88e201b3f0..2edd3a9a7b79 100644
7946 +--- a/fs/gfs2/file.c
7947 ++++ b/fs/gfs2/file.c
7948 +@@ -809,7 +809,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
7949 + struct gfs2_inode *ip = GFS2_I(inode);
7950 + struct gfs2_alloc_parms ap = { .aflags = 0, };
7951 + unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
7952 +- loff_t bytes, max_bytes, max_blks = UINT_MAX;
7953 ++ loff_t bytes, max_bytes, max_blks;
7954 + int error;
7955 + const loff_t pos = offset;
7956 + const loff_t count = len;
7957 +@@ -861,7 +861,8 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
7958 + return error;
7959 + /* ap.allowed tells us how many blocks quota will allow
7960 + * us to write. Check if this reduces max_blks */
7961 +- if (ap.allowed && ap.allowed < max_blks)
7962 ++ max_blks = UINT_MAX;
7963 ++ if (ap.allowed)
7964 + max_blks = ap.allowed;
7965 +
7966 + error = gfs2_inplace_reserve(ip, &ap);
7967 +diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
7968 +index 5e47c935a515..836f29480be6 100644
7969 +--- a/fs/gfs2/quota.h
7970 ++++ b/fs/gfs2/quota.h
7971 +@@ -45,6 +45,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
7972 + {
7973 + struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
7974 + int ret;
7975 ++
7976 ++ ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
7977 + if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
7978 + return 0;
7979 + ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
7980 +diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
7981 +index 0a754f38462e..e5a6deb38e1e 100644
7982 +--- a/fs/jffs2/dir.c
7983 ++++ b/fs/jffs2/dir.c
7984 +@@ -209,8 +209,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry,
7985 + __func__, inode->i_ino, inode->i_mode, inode->i_nlink,
7986 + f->inocache->pino_nlink, inode->i_mapping->nrpages);
7987 +
7988 +- unlock_new_inode(inode);
7989 +- d_instantiate(dentry, inode);
7990 ++ d_instantiate_new(dentry, inode);
7991 + return 0;
7992 +
7993 + fail:
7994 +@@ -430,8 +429,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
7995 + mutex_unlock(&dir_f->sem);
7996 + jffs2_complete_reservation(c);
7997 +
7998 +- unlock_new_inode(inode);
7999 +- d_instantiate(dentry, inode);
8000 ++ d_instantiate_new(dentry, inode);
8001 + return 0;
8002 +
8003 + fail:
8004 +@@ -575,8 +573,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, umode_t mode
8005 + mutex_unlock(&dir_f->sem);
8006 + jffs2_complete_reservation(c);
8007 +
8008 +- unlock_new_inode(inode);
8009 +- d_instantiate(dentry, inode);
8010 ++ d_instantiate_new(dentry, inode);
8011 + return 0;
8012 +
8013 + fail:
8014 +@@ -747,8 +744,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode
8015 + mutex_unlock(&dir_f->sem);
8016 + jffs2_complete_reservation(c);
8017 +
8018 +- unlock_new_inode(inode);
8019 +- d_instantiate(dentry, inode);
8020 ++ d_instantiate_new(dentry, inode);
8021 + return 0;
8022 +
8023 + fail:
8024 +diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
8025 +index b41596d71858..56c3fcbfe80e 100644
8026 +--- a/fs/jfs/namei.c
8027 ++++ b/fs/jfs/namei.c
8028 +@@ -178,8 +178,7 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode,
8029 + unlock_new_inode(ip);
8030 + iput(ip);
8031 + } else {
8032 +- unlock_new_inode(ip);
8033 +- d_instantiate(dentry, ip);
8034 ++ d_instantiate_new(dentry, ip);
8035 + }
8036 +
8037 + out2:
8038 +@@ -313,8 +312,7 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
8039 + unlock_new_inode(ip);
8040 + iput(ip);
8041 + } else {
8042 +- unlock_new_inode(ip);
8043 +- d_instantiate(dentry, ip);
8044 ++ d_instantiate_new(dentry, ip);
8045 + }
8046 +
8047 + out2:
8048 +@@ -1059,8 +1057,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
8049 + unlock_new_inode(ip);
8050 + iput(ip);
8051 + } else {
8052 +- unlock_new_inode(ip);
8053 +- d_instantiate(dentry, ip);
8054 ++ d_instantiate_new(dentry, ip);
8055 + }
8056 +
8057 + out2:
8058 +@@ -1447,8 +1444,7 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry,
8059 + unlock_new_inode(ip);
8060 + iput(ip);
8061 + } else {
8062 +- unlock_new_inode(ip);
8063 +- d_instantiate(dentry, ip);
8064 ++ d_instantiate_new(dentry, ip);
8065 + }
8066 +
8067 + out1:
8068 +diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
8069 +index 1a2894aa0194..dd52d3f82e8d 100644
8070 +--- a/fs/nilfs2/namei.c
8071 ++++ b/fs/nilfs2/namei.c
8072 +@@ -46,8 +46,7 @@ static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode)
8073 + int err = nilfs_add_link(dentry, inode);
8074 +
8075 + if (!err) {
8076 +- d_instantiate(dentry, inode);
8077 +- unlock_new_inode(inode);
8078 ++ d_instantiate_new(dentry, inode);
8079 + return 0;
8080 + }
8081 + inode_dec_link_count(inode);
8082 +@@ -243,8 +242,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
8083 + goto out_fail;
8084 +
8085 + nilfs_mark_inode_dirty(inode);
8086 +- d_instantiate(dentry, inode);
8087 +- unlock_new_inode(inode);
8088 ++ d_instantiate_new(dentry, inode);
8089 + out:
8090 + if (!err)
8091 + err = nilfs_transaction_commit(dir->i_sb);
8092 +diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
8093 +index e1fea149f50b..25b76f0d082b 100644
8094 +--- a/fs/ocfs2/dlm/dlmdomain.c
8095 ++++ b/fs/ocfs2/dlm/dlmdomain.c
8096 +@@ -675,20 +675,6 @@ static void dlm_leave_domain(struct dlm_ctxt *dlm)
8097 + spin_unlock(&dlm->spinlock);
8098 + }
8099 +
8100 +-int dlm_shutting_down(struct dlm_ctxt *dlm)
8101 +-{
8102 +- int ret = 0;
8103 +-
8104 +- spin_lock(&dlm_domain_lock);
8105 +-
8106 +- if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
8107 +- ret = 1;
8108 +-
8109 +- spin_unlock(&dlm_domain_lock);
8110 +-
8111 +- return ret;
8112 +-}
8113 +-
8114 + void dlm_unregister_domain(struct dlm_ctxt *dlm)
8115 + {
8116 + int leave = 0;
8117 +diff --git a/fs/ocfs2/dlm/dlmdomain.h b/fs/ocfs2/dlm/dlmdomain.h
8118 +index fd6122a38dbd..8a9281411c18 100644
8119 +--- a/fs/ocfs2/dlm/dlmdomain.h
8120 ++++ b/fs/ocfs2/dlm/dlmdomain.h
8121 +@@ -28,7 +28,30 @@
8122 + extern spinlock_t dlm_domain_lock;
8123 + extern struct list_head dlm_domains;
8124 +
8125 +-int dlm_shutting_down(struct dlm_ctxt *dlm);
8126 ++static inline int dlm_joined(struct dlm_ctxt *dlm)
8127 ++{
8128 ++ int ret = 0;
8129 ++
8130 ++ spin_lock(&dlm_domain_lock);
8131 ++ if (dlm->dlm_state == DLM_CTXT_JOINED)
8132 ++ ret = 1;
8133 ++ spin_unlock(&dlm_domain_lock);
8134 ++
8135 ++ return ret;
8136 ++}
8137 ++
8138 ++static inline int dlm_shutting_down(struct dlm_ctxt *dlm)
8139 ++{
8140 ++ int ret = 0;
8141 ++
8142 ++ spin_lock(&dlm_domain_lock);
8143 ++ if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
8144 ++ ret = 1;
8145 ++ spin_unlock(&dlm_domain_lock);
8146 ++
8147 ++ return ret;
8148 ++}
8149 ++
8150 + void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
8151 + int node_num);
8152 +
8153 +diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
8154 +index ec8f75813beb..505ab4281f36 100644
8155 +--- a/fs/ocfs2/dlm/dlmrecovery.c
8156 ++++ b/fs/ocfs2/dlm/dlmrecovery.c
8157 +@@ -1378,6 +1378,15 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
8158 + if (!dlm_grab(dlm))
8159 + return -EINVAL;
8160 +
8161 ++ if (!dlm_joined(dlm)) {
8162 ++ mlog(ML_ERROR, "Domain %s not joined! "
8163 ++ "lockres %.*s, master %u\n",
8164 ++ dlm->name, mres->lockname_len,
8165 ++ mres->lockname, mres->master);
8166 ++ dlm_put(dlm);
8167 ++ return -EINVAL;
8168 ++ }
8169 ++
8170 + BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
8171 +
8172 + real_master = mres->master;
8173 +diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
8174 +index 6e3134e6d98a..1b5707c44c3f 100644
8175 +--- a/fs/orangefs/namei.c
8176 ++++ b/fs/orangefs/namei.c
8177 +@@ -75,8 +75,7 @@ static int orangefs_create(struct inode *dir,
8178 + get_khandle_from_ino(inode),
8179 + dentry);
8180 +
8181 +- d_instantiate(dentry, inode);
8182 +- unlock_new_inode(inode);
8183 ++ d_instantiate_new(dentry, inode);
8184 + orangefs_set_timeout(dentry);
8185 + ORANGEFS_I(inode)->getattr_time = jiffies - 1;
8186 + ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS;
8187 +@@ -332,8 +331,7 @@ static int orangefs_symlink(struct inode *dir,
8188 + "Assigned symlink inode new number of %pU\n",
8189 + get_khandle_from_ino(inode));
8190 +
8191 +- d_instantiate(dentry, inode);
8192 +- unlock_new_inode(inode);
8193 ++ d_instantiate_new(dentry, inode);
8194 + orangefs_set_timeout(dentry);
8195 + ORANGEFS_I(inode)->getattr_time = jiffies - 1;
8196 + ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS;
8197 +@@ -402,8 +400,7 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
8198 + "Assigned dir inode new number of %pU\n",
8199 + get_khandle_from_ino(inode));
8200 +
8201 +- d_instantiate(dentry, inode);
8202 +- unlock_new_inode(inode);
8203 ++ d_instantiate_new(dentry, inode);
8204 + orangefs_set_timeout(dentry);
8205 + ORANGEFS_I(inode)->getattr_time = jiffies - 1;
8206 + ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS;
8207 +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
8208 +index c41ab261397d..7da10e595297 100644
8209 +--- a/fs/proc/proc_sysctl.c
8210 ++++ b/fs/proc/proc_sysctl.c
8211 +@@ -707,7 +707,10 @@ static bool proc_sys_link_fill_cache(struct file *file,
8212 + struct ctl_table *table)
8213 + {
8214 + bool ret = true;
8215 ++
8216 + head = sysctl_head_grab(head);
8217 ++ if (IS_ERR(head))
8218 ++ return false;
8219 +
8220 + if (S_ISLNK(table->mode)) {
8221 + /* It is not an error if we can not follow the link ignore it */
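
The proc fix above adds the missing IS_ERR() check after sysctl_head_grab(). A minimal sketch of the ERR_PTR discipline involved, assuming a kernel build context (struct thing and grab_thing() are hypothetical):

    #include <linux/err.h>

    struct thing;
    struct thing *grab_thing(void);    /* hypothetical; may return ERR_PTR() */

    static long use_thing(void)
    {
            struct thing *t = grab_thing();

            /* Check before any dereference; propagate the encoded -errno. */
            if (IS_ERR(t))
                    return PTR_ERR(t);
            /* ... t is a valid pointer here ... */
            return 0;
    }
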
8222 +diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
8223 +index bd39a998843d..5089dac02660 100644
8224 +--- a/fs/reiserfs/namei.c
8225 ++++ b/fs/reiserfs/namei.c
8226 +@@ -687,8 +687,7 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mod
8227 + reiserfs_update_inode_transaction(inode);
8228 + reiserfs_update_inode_transaction(dir);
8229 +
8230 +- unlock_new_inode(inode);
8231 +- d_instantiate(dentry, inode);
8232 ++ d_instantiate_new(dentry, inode);
8233 + retval = journal_end(&th);
8234 +
8235 + out_failed:
8236 +@@ -771,8 +770,7 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode
8237 + goto out_failed;
8238 + }
8239 +
8240 +- unlock_new_inode(inode);
8241 +- d_instantiate(dentry, inode);
8242 ++ d_instantiate_new(dentry, inode);
8243 + retval = journal_end(&th);
8244 +
8245 + out_failed:
8246 +@@ -871,8 +869,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
8247 + /* the above add_entry did not update dir's stat data */
8248 + reiserfs_update_sd(&th, dir);
8249 +
8250 +- unlock_new_inode(inode);
8251 +- d_instantiate(dentry, inode);
8252 ++ d_instantiate_new(dentry, inode);
8253 + retval = journal_end(&th);
8254 + out_failed:
8255 + reiserfs_write_unlock(dir->i_sb);
8256 +@@ -1187,8 +1184,7 @@ static int reiserfs_symlink(struct inode *parent_dir,
8257 + goto out_failed;
8258 + }
8259 +
8260 +- unlock_new_inode(inode);
8261 +- d_instantiate(dentry, inode);
8262 ++ d_instantiate_new(dentry, inode);
8263 + retval = journal_end(&th);
8264 + out_failed:
8265 + reiserfs_write_unlock(parent_dir->i_sb);
8266 +diff --git a/fs/super.c b/fs/super.c
8267 +index afbf4d220c27..f25717f9b691 100644
8268 +--- a/fs/super.c
8269 ++++ b/fs/super.c
8270 +@@ -120,13 +120,23 @@ static unsigned long super_cache_count(struct shrinker *shrink,
8271 + sb = container_of(shrink, struct super_block, s_shrink);
8272 +
8273 + /*
8274 +- * Don't call trylock_super as it is a potential
8275 +- * scalability bottleneck. The counts could get updated
8276 +- * between super_cache_count and super_cache_scan anyway.
8277 +- * Call to super_cache_count with shrinker_rwsem held
8278 +- * ensures the safety of call to list_lru_shrink_count() and
8279 +- * s_op->nr_cached_objects().
8280 ++ * We don't call trylock_super() here as it is a scalability bottleneck,
8281 ++ * so we're exposed to partial setup state. The shrinker rwsem does not
8282 ++ * protect filesystem operations backing list_lru_shrink_count() or
8283 ++ * s_op->nr_cached_objects(). Counts can change between
8284 ++ * super_cache_count and super_cache_scan, so we really don't need locks
8285 ++ * here.
8286 ++ *
8287 ++ * However, if we are currently mounting the superblock, the underlying
8288 ++ * filesystem might be in a state of partial construction and hence it
8289 ++ * is dangerous to access it. trylock_super() uses a SB_BORN check to
8290 ++ * avoid this situation, so do the same here. The memory barrier is
8291 ++ * matched with the one in mount_fs() as we don't hold locks here.
8292 + */
8293 ++ if (!(sb->s_flags & SB_BORN))
8294 ++ return 0;
8295 ++ smp_rmb();
8296 ++
8297 + if (sb->s_op && sb->s_op->nr_cached_objects)
8298 + total_objects = sb->s_op->nr_cached_objects(sb, sc);
8299 +
8300 +@@ -1226,6 +1236,14 @@ mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
8301 + sb = root->d_sb;
8302 + BUG_ON(!sb);
8303 + WARN_ON(!sb->s_bdi);
8304 ++
8305 ++ /*
8306 ++ * Write barrier is for super_cache_count(). We place it before setting
8307 ++ * SB_BORN as the data dependency between the two functions is the
8308 ++ * superblock structure contents that we just set up, not the SB_BORN
8309 ++ * flag.
8310 ++ */
8311 ++ smp_wmb();
8312 + sb->s_flags |= SB_BORN;
8313 +
8314 + error = security_sb_kern_mount(sb, flags, secdata);
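
The two super.c hunks pair an smp_wmb() in mount_fs() with an SB_BORN check plus smp_rmb() in super_cache_count(). A minimal, generic sketch of that publish/consume pattern, assuming a kernel build context (struct widget and both functions are hypothetical, not from the patch):

    #include <asm/barrier.h>

    struct widget {
            int data;
            unsigned long flags;
    #define W_BORN 1UL
    };

    /* Writer: finish all setup, then publish the flag. */
    static void widget_publish(struct widget *w)
    {
            w->data = 42;           /* the contents being published */
            smp_wmb();              /* order setup before the flag store */
            w->flags |= W_BORN;
    }

    /* Lockless reader: test the flag, pairing with the writer's smp_wmb(). */
    static int widget_peek(struct widget *w)
    {
            if (!(w->flags & W_BORN))
                    return 0;
            smp_rmb();              /* order the flag load before data loads */
            return w->data;
    }
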
8315 +diff --git a/fs/udf/namei.c b/fs/udf/namei.c
8316 +index 0458dd47e105..c586026508db 100644
8317 +--- a/fs/udf/namei.c
8318 ++++ b/fs/udf/namei.c
8319 +@@ -622,8 +622,7 @@ static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
8320 + if (fibh.sbh != fibh.ebh)
8321 + brelse(fibh.ebh);
8322 + brelse(fibh.sbh);
8323 +- unlock_new_inode(inode);
8324 +- d_instantiate(dentry, inode);
8325 ++ d_instantiate_new(dentry, inode);
8326 +
8327 + return 0;
8328 + }
8329 +@@ -733,8 +732,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
8330 + inc_nlink(dir);
8331 + dir->i_ctime = dir->i_mtime = current_time(dir);
8332 + mark_inode_dirty(dir);
8333 +- unlock_new_inode(inode);
8334 +- d_instantiate(dentry, inode);
8335 ++ d_instantiate_new(dentry, inode);
8336 + if (fibh.sbh != fibh.ebh)
8337 + brelse(fibh.ebh);
8338 + brelse(fibh.sbh);
8339 +diff --git a/fs/udf/super.c b/fs/udf/super.c
8340 +index f73239a9a97d..8e5d6d29b6cf 100644
8341 +--- a/fs/udf/super.c
8342 ++++ b/fs/udf/super.c
8343 +@@ -2091,8 +2091,9 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
8344 + bool lvid_open = false;
8345 +
8346 + uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
8347 +- uopt.uid = INVALID_UID;
8348 +- uopt.gid = INVALID_GID;
8349 ++ /* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */
8350 ++ uopt.uid = make_kuid(current_user_ns(), overflowuid);
8351 ++ uopt.gid = make_kgid(current_user_ns(), overflowgid);
8352 + uopt.umask = 0;
8353 + uopt.fmode = UDF_INVALID_MODE;
8354 + uopt.dmode = UDF_INVALID_MODE;
8355 +diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
8356 +index 32545cd00ceb..d5f43ba76c59 100644
8357 +--- a/fs/ufs/namei.c
8358 ++++ b/fs/ufs/namei.c
8359 +@@ -39,8 +39,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
8360 + {
8361 + int err = ufs_add_link(dentry, inode);
8362 + if (!err) {
8363 +- unlock_new_inode(inode);
8364 +- d_instantiate(dentry, inode);
8365 ++ d_instantiate_new(dentry, inode);
8366 + return 0;
8367 + }
8368 + inode_dec_link_count(inode);
8369 +@@ -193,8 +192,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
8370 + if (err)
8371 + goto out_fail;
8372 +
8373 +- unlock_new_inode(inode);
8374 +- d_instantiate(dentry, inode);
8375 ++ d_instantiate_new(dentry, inode);
8376 + return 0;
8377 +
8378 + out_fail:
8379 +diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
8380 +index b2cde5426182..7b68e6c9a474 100644
8381 +--- a/fs/xfs/xfs_discard.c
8382 ++++ b/fs/xfs/xfs_discard.c
8383 +@@ -50,19 +50,19 @@ xfs_trim_extents(
8384 +
8385 + pag = xfs_perag_get(mp, agno);
8386 +
8387 +- error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
8388 +- if (error || !agbp)
8389 +- goto out_put_perag;
8390 +-
8391 +- cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
8392 +-
8393 + /*
8394 + * Force out the log. This means any transactions that might have freed
8395 +- * space before we took the AGF buffer lock are now on disk, and the
8396 ++ * space before we take the AGF buffer lock are now on disk, and the
8397 + * volatile disk cache is flushed.
8398 + */
8399 + xfs_log_force(mp, XFS_LOG_SYNC);
8400 +
8401 ++ error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
8402 ++ if (error || !agbp)
8403 ++ goto out_put_perag;
8404 ++
8405 ++ cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
8406 ++
8407 + /*
8408 + * Look up the longest btree in the AGF and start with it.
8409 + */
8410 +diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
8411 +index 848b463a0af5..a4c3b0a0a197 100644
8412 +--- a/include/drm/drm_vblank.h
8413 ++++ b/include/drm/drm_vblank.h
8414 +@@ -179,7 +179,7 @@ void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
8415 + void drm_crtc_vblank_off(struct drm_crtc *crtc);
8416 + void drm_crtc_vblank_reset(struct drm_crtc *crtc);
8417 + void drm_crtc_vblank_on(struct drm_crtc *crtc);
8418 +-u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
8419 ++u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
8420 +
8421 + bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
8422 + unsigned int pipe, int *max_error,
8423 +diff --git a/include/linux/dcache.h b/include/linux/dcache.h
8424 +index 82a99d366aec..9e9bc9f33c03 100644
8425 +--- a/include/linux/dcache.h
8426 ++++ b/include/linux/dcache.h
8427 +@@ -226,6 +226,7 @@ extern seqlock_t rename_lock;
8428 + * These are the low-level FS interfaces to the dcache..
8429 + */
8430 + extern void d_instantiate(struct dentry *, struct inode *);
8431 ++extern void d_instantiate_new(struct dentry *, struct inode *);
8432 + extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
8433 + extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *);
8434 + extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
8435 +diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
8436 +index 23159dd5be18..a1fd63871d17 100644
8437 +--- a/include/rdma/ib_umem.h
8438 ++++ b/include/rdma/ib_umem.h
8439 +@@ -48,7 +48,6 @@ struct ib_umem {
8440 + int writable;
8441 + int hugetlb;
8442 + struct work_struct work;
8443 +- struct pid *pid;
8444 + struct mm_struct *mm;
8445 + unsigned long diff;
8446 + struct ib_umem_odp *odp_data;
8447 +diff --git a/ipc/shm.c b/ipc/shm.c
8448 +index f68420b1ad93..61b477e48e9b 100644
8449 +--- a/ipc/shm.c
8450 ++++ b/ipc/shm.c
8451 +@@ -1320,14 +1320,17 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
8452 +
8453 + if (addr) {
8454 + if (addr & (shmlba - 1)) {
8455 +- /*
8456 +- * Round down to the nearest multiple of shmlba.
8457 +- * For sane do_mmap_pgoff() parameters, avoid
8458 +- * round downs that trigger nil-page and MAP_FIXED.
8459 +- */
8460 +- if ((shmflg & SHM_RND) && addr >= shmlba)
8461 +- addr &= ~(shmlba - 1);
8462 +- else
8463 ++ if (shmflg & SHM_RND) {
8464 ++ addr &= ~(shmlba - 1); /* round down */
8465 ++
8466 ++ /*
8467 ++	 * Ensure that the round-down does not yield a nil
8468 ++	 * address when remapping; a nil result can happen
8469 ++	 * when addr < shmlba.
8470 ++ */
8471 ++ if (!addr && (shmflg & SHM_REMAP))
8472 ++ goto out;
8473 ++ } else
8474 + #ifndef __ARCH_FORCE_SHMLBA
8475 + if (addr & ~PAGE_MASK)
8476 + #endif
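
The do_shmat() hunk always rounds an unaligned hint down under SHM_RND, but rejects the combination of a nil rounded address and SHM_REMAP. A small userspace demonstration of the edge case (hedged: the EINVAL outcome assumes a kernel carrying this fix):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main(void)
    {
            int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);

            if (id < 0) { perror("shmget"); return 1; }

            /*
             * addr < SHMLBA, so SHM_RND rounds the hint down to NULL;
             * combined with SHM_REMAP this is refused with EINVAL
             * instead of attempting to remap the nil page.
             */
            void *p = shmat(id, (void *)(SHMLBA - 1), SHM_RND | SHM_REMAP);
            if (p == (void *)-1)
                    perror("shmat");        /* expected: Invalid argument */
            else
                    shmdt(p);

            shmctl(id, IPC_RMID, NULL);
            return 0;
    }
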
8477 +diff --git a/kernel/audit.c b/kernel/audit.c
8478 +index 227db99b0f19..bc169f2a4766 100644
8479 +--- a/kernel/audit.c
8480 ++++ b/kernel/audit.c
8481 +@@ -1058,6 +1058,8 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
8482 + return;
8483 +
8484 + ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE);
8485 ++ if (!ab)
8486 ++ return;
8487 + audit_log_task_info(ab, current);
8488 + audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
8489 + audit_feature_names[which], !!old_feature, !!new_feature,
8490 +diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
8491 +index dbb0781a0533..90327d7cfe24 100644
8492 +--- a/kernel/debug/kdb/kdb_main.c
8493 ++++ b/kernel/debug/kdb/kdb_main.c
8494 +@@ -1566,6 +1566,7 @@ static int kdb_md(int argc, const char **argv)
8495 + int symbolic = 0;
8496 + int valid = 0;
8497 + int phys = 0;
8498 ++ int raw = 0;
8499 +
8500 + kdbgetintenv("MDCOUNT", &mdcount);
8501 + kdbgetintenv("RADIX", &radix);
8502 +@@ -1575,9 +1576,10 @@ static int kdb_md(int argc, const char **argv)
8503 + repeat = mdcount * 16 / bytesperword;
8504 +
8505 + if (strcmp(argv[0], "mdr") == 0) {
8506 +- if (argc != 2)
8507 ++ if (argc == 2 || (argc == 0 && last_addr != 0))
8508 ++ valid = raw = 1;
8509 ++ else
8510 + return KDB_ARGCOUNT;
8511 +- valid = 1;
8512 + } else if (isdigit(argv[0][2])) {
8513 + bytesperword = (int)(argv[0][2] - '0');
8514 + if (bytesperword == 0) {
8515 +@@ -1613,7 +1615,10 @@ static int kdb_md(int argc, const char **argv)
8516 + radix = last_radix;
8517 + bytesperword = last_bytesperword;
8518 + repeat = last_repeat;
8519 +- mdcount = ((repeat * bytesperword) + 15) / 16;
8520 ++ if (raw)
8521 ++ mdcount = repeat;
8522 ++ else
8523 ++ mdcount = ((repeat * bytesperword) + 15) / 16;
8524 + }
8525 +
8526 + if (argc) {
8527 +@@ -1630,7 +1635,10 @@ static int kdb_md(int argc, const char **argv)
8528 + diag = kdbgetularg(argv[nextarg], &val);
8529 + if (!diag) {
8530 + mdcount = (int) val;
8531 +- repeat = mdcount * 16 / bytesperword;
8532 ++ if (raw)
8533 ++ repeat = mdcount;
8534 ++ else
8535 ++ repeat = mdcount * 16 / bytesperword;
8536 + }
8537 + }
8538 + if (argc >= nextarg+1) {
8539 +@@ -1640,8 +1648,15 @@ static int kdb_md(int argc, const char **argv)
8540 + }
8541 + }
8542 +
8543 +- if (strcmp(argv[0], "mdr") == 0)
8544 +- return kdb_mdr(addr, mdcount);
8545 ++ if (strcmp(argv[0], "mdr") == 0) {
8546 ++ int ret;
8547 ++ last_addr = addr;
8548 ++ ret = kdb_mdr(addr, mdcount);
8549 ++ last_addr += mdcount;
8550 ++ last_repeat = mdcount;
8551 ++	last_bytesperword = bytesperword; /* to make REPEAT happy */
8552 ++ return ret;
8553 ++ }
8554 +
8555 + switch (radix) {
8556 + case 10:
8557 +diff --git a/kernel/events/core.c b/kernel/events/core.c
8558 +index ca7298760c83..cc6a96303b7e 100644
8559 +--- a/kernel/events/core.c
8560 ++++ b/kernel/events/core.c
8561 +@@ -948,27 +948,39 @@ list_update_cgroup_event(struct perf_event *event,
8562 + if (!is_cgroup_event(event))
8563 + return;
8564 +
8565 +- if (add && ctx->nr_cgroups++)
8566 +- return;
8567 +- else if (!add && --ctx->nr_cgroups)
8568 +- return;
8569 + /*
8570 + * Because cgroup events are always per-cpu events,
8571 + * this will always be called from the right CPU.
8572 + */
8573 + cpuctx = __get_cpu_context(ctx);
8574 +- cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
8575 +- /* cpuctx->cgrp is NULL unless a cgroup event is active in this CPU .*/
8576 +- if (add) {
8577 ++
8578 ++ /*
8579 ++ * Since setting cpuctx->cgrp is conditional on the current @cgrp
8580 ++ * matching the event's cgroup, we must do this for every new event,
8581 ++ * because if the first would mismatch, the second would not try again
8582 ++ * and we would leave cpuctx->cgrp unset.
8583 ++ */
8584 ++ if (add && !cpuctx->cgrp) {
8585 + struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
8586 +
8587 +- list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
8588 + if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
8589 + cpuctx->cgrp = cgrp;
8590 +- } else {
8591 +- list_del(cpuctx_entry);
8592 +- cpuctx->cgrp = NULL;
8593 + }
8594 ++
8595 ++ if (add && ctx->nr_cgroups++)
8596 ++ return;
8597 ++ else if (!add && --ctx->nr_cgroups)
8598 ++ return;
8599 ++
8600 ++ /* no cgroup running */
8601 ++ if (!add)
8602 ++ cpuctx->cgrp = NULL;
8603 ++
8604 ++ cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
8605 ++ if (add)
8606 ++ list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
8607 ++ else
8608 ++ list_del(cpuctx_entry);
8609 + }
8610 +
8611 + #else /* !CONFIG_CGROUP_PERF */
8612 +@@ -2328,6 +2340,18 @@ static int __perf_install_in_context(void *info)
8613 + raw_spin_lock(&task_ctx->lock);
8614 + }
8615 +
8616 ++#ifdef CONFIG_CGROUP_PERF
8617 ++ if (is_cgroup_event(event)) {
8618 ++ /*
8619 ++ * If the current cgroup doesn't match the event's
8620 ++ * cgroup, we should not try to schedule it.
8621 ++ */
8622 ++ struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
8623 ++ reprogram = cgroup_is_descendant(cgrp->css.cgroup,
8624 ++ event->cgrp->css.cgroup);
8625 ++ }
8626 ++#endif
8627 ++
8628 + if (reprogram) {
8629 + ctx_sched_out(ctx, cpuctx, EVENT_TIME);
8630 + add_event_to_ctx(event, ctx);
8631 +@@ -5746,7 +5770,8 @@ static void perf_output_read_group(struct perf_output_handle *handle,
8632 + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
8633 + values[n++] = running;
8634 +
8635 +- if (leader != event)
8636 ++ if ((leader != event) &&
8637 ++ (leader->state == PERF_EVENT_STATE_ACTIVE))
8638 + leader->pmu->read(leader);
8639 +
8640 + values[n++] = perf_event_count(leader);
8641 +diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
8642 +index a37a3b4b6342..e0665549af59 100644
8643 +--- a/kernel/irq/affinity.c
8644 ++++ b/kernel/irq/affinity.c
8645 +@@ -108,7 +108,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
8646 + int affv = nvecs - affd->pre_vectors - affd->post_vectors;
8647 + int last_affv = affv + affd->pre_vectors;
8648 + nodemask_t nodemsk = NODE_MASK_NONE;
8649 +- struct cpumask *masks;
8650 ++ struct cpumask *masks = NULL;
8651 + cpumask_var_t nmsk, *node_to_possible_cpumask;
8652 +
8653 + /*
8654 +@@ -121,13 +121,13 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
8655 + if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
8656 + return NULL;
8657 +
8658 +- masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
8659 +- if (!masks)
8660 +- goto out;
8661 +-
8662 + node_to_possible_cpumask = alloc_node_to_possible_cpumask();
8663 + if (!node_to_possible_cpumask)
8664 +- goto out;
8665 ++ goto outcpumsk;
8666 ++
8667 ++ masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
8668 ++ if (!masks)
8669 ++ goto outnodemsk;
8670 +
8671 + /* Fill out vectors at the beginning that don't need affinity */
8672 + for (curvec = 0; curvec < affd->pre_vectors; curvec++)
8673 +@@ -192,8 +192,9 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
8674 + /* Fill out vectors at the end that don't need affinity */
8675 + for (; curvec < nvecs; curvec++)
8676 + cpumask_copy(masks + curvec, irq_default_affinity);
8677 ++outnodemsk:
8678 + free_node_to_possible_cpumask(node_to_possible_cpumask);
8679 +-out:
8680 ++outcpumsk:
8681 + free_cpumask_var(nmsk);
8682 + return masks;
8683 + }
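
The affinity fix reorders the allocations and renames the labels so each error path frees exactly what has been allocated so far, in reverse order of acquisition. A generic, runnable sketch of that goto-unwind convention (plain C; the names are hypothetical):

    #include <stdlib.h>

    int setup(void)
    {
            int ret = -1;
            char *a, *b;

            a = malloc(16);
            if (!a)
                    goto out;
            b = malloc(32);
            if (!b)
                    goto out_free_a;        /* only 'a' exists here */

            /* ... use a and b ... */
            ret = 0;

            free(b);
    out_free_a:
            free(a);
    out:
            return ret;
    }
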
8684 +diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
8685 +index fb88a028deec..1973e8d44250 100644
8686 +--- a/kernel/rcu/tree_plugin.h
8687 ++++ b/kernel/rcu/tree_plugin.h
8688 +@@ -560,8 +560,14 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
8689 + }
8690 + t = list_entry(rnp->gp_tasks->prev,
8691 + struct task_struct, rcu_node_entry);
8692 +- list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
8693 ++ list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
8694 ++ /*
8695 ++ * We could be printing a lot while holding a spinlock.
8696 ++ * Avoid triggering hard lockup.
8697 ++ */
8698 ++ touch_nmi_watchdog();
8699 + sched_show_task(t);
8700 ++ }
8701 + raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
8702 + }
8703 +
8704 +@@ -1677,6 +1683,12 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
8705 + char *ticks_title;
8706 + unsigned long ticks_value;
8707 +
8708 ++ /*
8709 ++ * We could be printing a lot while holding a spinlock. Avoid
8710 ++ * triggering hard lockup.
8711 ++ */
8712 ++ touch_nmi_watchdog();
8713 ++
8714 + if (rsp->gpnum == rdp->gpnum) {
8715 + ticks_title = "ticks this GP";
8716 + ticks_value = rdp->ticks_this_gp;
8717 +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
8718 +index aad49451584e..84bf1a24a55a 100644
8719 +--- a/kernel/sched/rt.c
8720 ++++ b/kernel/sched/rt.c
8721 +@@ -843,6 +843,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
8722 + continue;
8723 +
8724 + raw_spin_lock(&rq->lock);
8725 ++ update_rq_clock(rq);
8726 ++
8727 + if (rt_rq->rt_time) {
8728 + u64 runtime;
8729 +
8730 +diff --git a/kernel/sys.c b/kernel/sys.c
8731 +index 9afc4cb5acf5..de3143bbcd74 100644
8732 +--- a/kernel/sys.c
8733 ++++ b/kernel/sys.c
8734 +@@ -1401,6 +1401,7 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
8735 + if (resource >= RLIM_NLIMITS)
8736 + return -EINVAL;
8737 +
8738 ++ resource = array_index_nospec(resource, RLIM_NLIMITS);
8739 + task_lock(current->group_leader);
8740 + x = current->signal->rlim[resource];
8741 + task_unlock(current->group_leader);
8742 +@@ -1420,6 +1421,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
8743 + if (resource >= RLIM_NLIMITS)
8744 + return -EINVAL;
8745 +
8746 ++ resource = array_index_nospec(resource, RLIM_NLIMITS);
8747 + task_lock(current->group_leader);
8748 + r = current->signal->rlim[resource];
8749 + task_unlock(current->group_leader);
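
Both old_getrlimit hunks clamp the resource index with array_index_nospec() after the bounds check, closing the Spectre-v1 window between the check and the table access. A minimal sketch of that pattern, assuming a kernel build context (table_lookup() is hypothetical):

    #include <linux/errno.h>
    #include <linux/nospec.h>

    static int table_lookup(const int *table, unsigned int idx,
                            unsigned int size)
    {
            if (idx >= size)
                    return -EINVAL;
            /*
             * Clamp idx under speculation so a mispredicted bounds
             * check cannot be used to pull out-of-bounds data into
             * the cache.
             */
            idx = array_index_nospec(idx, size);
            return table[idx];
    }
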
8750 +diff --git a/lib/radix-tree.c b/lib/radix-tree.c
8751 +index a7705b0f139c..25f13dc22997 100644
8752 +--- a/lib/radix-tree.c
8753 ++++ b/lib/radix-tree.c
8754 +@@ -2034,10 +2034,12 @@ void *radix_tree_delete_item(struct radix_tree_root *root,
8755 + unsigned long index, void *item)
8756 + {
8757 + struct radix_tree_node *node = NULL;
8758 +- void __rcu **slot;
8759 ++ void __rcu **slot = NULL;
8760 + void *entry;
8761 +
8762 + entry = __radix_tree_lookup(root, index, &node, &slot);
8763 ++ if (!slot)
8764 ++ return NULL;
8765 + if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
8766 + get_slot_offset(node, slot))))
8767 + return NULL;
8768 +diff --git a/lib/test_kasan.c b/lib/test_kasan.c
8769 +index 98854a64b014..ec657105edbf 100644
8770 +--- a/lib/test_kasan.c
8771 ++++ b/lib/test_kasan.c
8772 +@@ -567,7 +567,15 @@ static noinline void __init kmem_cache_invalid_free(void)
8773 + return;
8774 + }
8775 +
8776 ++ /* Trigger invalid free, the object doesn't get freed */
8777 + kmem_cache_free(cache, p + 1);
8778 ++
8779 ++ /*
8780 ++ * Properly free the object to prevent the "Objects remaining in
8781 ++ * test_cache on __kmem_cache_shutdown" BUG failure.
8782 ++ */
8783 ++ kmem_cache_free(cache, p);
8784 ++
8785 + kmem_cache_destroy(cache);
8786 + }
8787 +
8788 +diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
8789 +index e13d911251e7..e9070890b28c 100644
8790 +--- a/mm/kasan/kasan.c
8791 ++++ b/mm/kasan/kasan.c
8792 +@@ -791,6 +791,40 @@ DEFINE_ASAN_SET_SHADOW(f5);
8793 + DEFINE_ASAN_SET_SHADOW(f8);
8794 +
8795 + #ifdef CONFIG_MEMORY_HOTPLUG
8796 ++static bool shadow_mapped(unsigned long addr)
8797 ++{
8798 ++ pgd_t *pgd = pgd_offset_k(addr);
8799 ++ p4d_t *p4d;
8800 ++ pud_t *pud;
8801 ++ pmd_t *pmd;
8802 ++ pte_t *pte;
8803 ++
8804 ++ if (pgd_none(*pgd))
8805 ++ return false;
8806 ++ p4d = p4d_offset(pgd, addr);
8807 ++ if (p4d_none(*p4d))
8808 ++ return false;
8809 ++ pud = pud_offset(p4d, addr);
8810 ++ if (pud_none(*pud))
8811 ++ return false;
8812 ++
8813 ++ /*
8814 ++ * We can't use pud_large() or pud_huge(), the first one is
8815 ++ * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse
8816 ++ * pud_bad(), if pud is bad then it's bad because it's huge.
8817 ++ */
8818 ++ if (pud_bad(*pud))
8819 ++ return true;
8820 ++ pmd = pmd_offset(pud, addr);
8821 ++ if (pmd_none(*pmd))
8822 ++ return false;
8823 ++
8824 ++ if (pmd_bad(*pmd))
8825 ++ return true;
8826 ++ pte = pte_offset_kernel(pmd, addr);
8827 ++ return !pte_none(*pte);
8828 ++}
8829 ++
8830 + static int __meminit kasan_mem_notifier(struct notifier_block *nb,
8831 + unsigned long action, void *data)
8832 + {
8833 +@@ -812,6 +846,14 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
8834 + case MEM_GOING_ONLINE: {
8835 + void *ret;
8836 +
8837 ++ /*
8838 ++	 * If shadow is mapped already then it must have been mapped
8839 ++	 * during boot. This can happen if we are onlining previously
8840 ++	 * offlined memory.
8841 ++ */
8842 ++ if (shadow_mapped(shadow_start))
8843 ++ return NOTIFY_OK;
8844 ++
8845 + ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
8846 + shadow_end, GFP_KERNEL,
8847 + PAGE_KERNEL, VM_NO_GUARD,
8848 +@@ -823,8 +865,26 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
8849 + kmemleak_ignore(ret);
8850 + return NOTIFY_OK;
8851 + }
8852 +- case MEM_OFFLINE:
8853 +- vfree((void *)shadow_start);
8854 ++ case MEM_CANCEL_ONLINE:
8855 ++ case MEM_OFFLINE: {
8856 ++ struct vm_struct *vm;
8857 ++
8858 ++ /*
8859 ++ * shadow_start was either mapped during boot by kasan_init()
8860 ++ * or during memory online by __vmalloc_node_range().
8861 ++ * In the latter case we can use vfree() to free shadow.
8862 ++ * Non-NULL result of the find_vm_area() will tell us if
8863 ++ * that was the second case.
8864 ++ *
8865 ++ * Currently it's not possible to free shadow mapped
8866 ++ * during boot by kasan_init(). It's because the code
8867 ++ * to do that hasn't been written yet. So we'll just
8868 ++ * leak the memory.
8869 ++ */
8870 ++ vm = find_vm_area((void *)shadow_start);
8871 ++ if (vm)
8872 ++ vfree((void *)shadow_start);
8873 ++ }
8874 + }
8875 +
8876 + return NOTIFY_OK;
8877 +@@ -837,5 +897,5 @@ static int __init kasan_memhotplug_init(void)
8878 + return 0;
8879 + }
8880 +
8881 +-module_init(kasan_memhotplug_init);
8882 ++core_initcall(kasan_memhotplug_init);
8883 + #endif
8884 +diff --git a/mm/ksm.c b/mm/ksm.c
8885 +index 2d6b35234926..d5f37b26e695 100644
8886 +--- a/mm/ksm.c
8887 ++++ b/mm/ksm.c
8888 +@@ -2089,8 +2089,22 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
8889 + tree_rmap_item =
8890 + unstable_tree_search_insert(rmap_item, page, &tree_page);
8891 + if (tree_rmap_item) {
8892 ++ bool split;
8893 ++
8894 + kpage = try_to_merge_two_pages(rmap_item, page,
8895 + tree_rmap_item, tree_page);
8896 ++ /*
8897 ++ * If both pages we tried to merge belong to the same compound
8898 ++ * page, then we actually ended up increasing the reference
8899 ++ * count of the same compound page twice, and split_huge_page
8900 ++ * failed.
8901 ++ * Here we set a flag if that happened, and we use it later to
8902 ++ * try split_huge_page again. Since we call put_page right
8903 ++ * afterwards, the reference count will be correct and
8904 ++ * split_huge_page should succeed.
8905 ++ */
8906 ++ split = PageTransCompound(page)
8907 ++ && compound_head(page) == compound_head(tree_page);
8908 + put_page(tree_page);
8909 + if (kpage) {
8910 + /*
8911 +@@ -2117,6 +2131,20 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
8912 + break_cow(tree_rmap_item);
8913 + break_cow(rmap_item);
8914 + }
8915 ++ } else if (split) {
8916 ++ /*
8917 ++ * We are here if we tried to merge two pages and
8918 ++ * failed because they both belonged to the same
8919 ++ * compound page. We will split the page now, but no
8920 ++ * merging will take place.
8921 ++ * We do not want to add the cost of a full lock; if
8922 ++ * the page is locked, it is better to skip it and
8923 ++ * perhaps try again later.
8924 ++ */
8925 ++ if (!trylock_page(page))
8926 ++ return;
8927 ++ split_huge_page(page);
8928 ++ unlock_page(page);
8929 + }
8930 + }
8931 + }
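The ksm.c hunk above deliberately uses trylock_page() so a contended
page is skipped instead of blocking the scanner. A userspace analogue
of that trylock-and-skip pattern, with invented names, is sketched
below: if the lock is busy, defer the work item on the assumption that
a later pass will pick it up.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t item_lock = PTHREAD_MUTEX_INITIALIZER;

    static void process_item(void)
    {
        if (pthread_mutex_trylock(&item_lock) != 0) {
            /* Lock is contended; skip now, retry on a later pass. */
            printf("busy, skipping\n");
            return;
        }
        printf("processing\n");
        pthread_mutex_unlock(&item_lock);
    }

    int main(void)
    {
        process_item();
        return 0;
    }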
8932 +diff --git a/mm/page_idle.c b/mm/page_idle.c
8933 +index 0a49374e6931..e412a63b2b74 100644
8934 +--- a/mm/page_idle.c
8935 ++++ b/mm/page_idle.c
8936 +@@ -65,11 +65,15 @@ static bool page_idle_clear_pte_refs_one(struct page *page,
8937 + while (page_vma_mapped_walk(&pvmw)) {
8938 + addr = pvmw.address;
8939 + if (pvmw.pte) {
8940 +- referenced = ptep_clear_young_notify(vma, addr,
8941 +- pvmw.pte);
8942 ++ /*
8943 ++ * For a PTE-mapped THP, if one subpage is referenced,
8944 ++ * the whole THP is treated as referenced.
8945 ++ */
8946 ++ if (ptep_clear_young_notify(vma, addr, pvmw.pte))
8947 ++ referenced = true;
8948 + } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
8949 +- referenced = pmdp_clear_young_notify(vma, addr,
8950 +- pvmw.pmd);
8951 ++ if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
8952 ++ referenced = true;
8953 + } else {
8954 + /* unexpected pmd-mapped page? */
8955 + WARN_ON_ONCE(1);
8956 +diff --git a/mm/slub.c b/mm/slub.c
8957 +index e381728a3751..8442b3c54870 100644
8958 +--- a/mm/slub.c
8959 ++++ b/mm/slub.c
8960 +@@ -1362,10 +1362,8 @@ static __always_inline void kfree_hook(void *x)
8961 + kasan_kfree_large(x, _RET_IP_);
8962 + }
8963 +
8964 +-static __always_inline void *slab_free_hook(struct kmem_cache *s, void *x)
8965 ++static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
8966 + {
8967 +- void *freeptr;
8968 +-
8969 + kmemleak_free_recursive(x, s->flags);
8970 +
8971 + /*
8972 +@@ -1385,17 +1383,12 @@ static __always_inline void *slab_free_hook(struct kmem_cache *s, void *x)
8973 + if (!(s->flags & SLAB_DEBUG_OBJECTS))
8974 + debug_check_no_obj_freed(x, s->object_size);
8975 +
8976 +- freeptr = get_freepointer(s, x);
8977 +- /*
8978 +- * kasan_slab_free() may put x into memory quarantine, delaying its
8979 +- * reuse. In this case the object's freelist pointer is changed.
8980 +- */
8981 +- kasan_slab_free(s, x, _RET_IP_);
8982 +- return freeptr;
8983 ++ /* KASAN might put x into memory quarantine, delaying its reuse */
8984 ++ return kasan_slab_free(s, x, _RET_IP_);
8985 + }
8986 +
8987 +-static inline void slab_free_freelist_hook(struct kmem_cache *s,
8988 +- void *head, void *tail)
8989 ++static inline bool slab_free_freelist_hook(struct kmem_cache *s,
8990 ++ void **head, void **tail)
8991 + {
8992 + /*
8993 + * Compiler cannot detect this function can be removed if slab_free_hook()
8994 +@@ -1406,13 +1399,33 @@ static inline void slab_free_freelist_hook(struct kmem_cache *s,
8995 + defined(CONFIG_DEBUG_OBJECTS_FREE) || \
8996 + defined(CONFIG_KASAN)
8997 +
8998 +- void *object = head;
8999 +- void *tail_obj = tail ? : head;
9000 +- void *freeptr;
9001 ++ void *object;
9002 ++ void *next = *head;
9003 ++ void *old_tail = *tail ? *tail : *head;
9004 ++
9005 ++ /* Head and tail of the reconstructed freelist */
9006 ++ *head = NULL;
9007 ++ *tail = NULL;
9008 +
9009 + do {
9010 +- freeptr = slab_free_hook(s, object);
9011 +- } while ((object != tail_obj) && (object = freeptr));
9012 ++ object = next;
9013 ++ next = get_freepointer(s, object);
9014 ++ /* If object's reuse doesn't have to be delayed */
9015 ++ if (!slab_free_hook(s, object)) {
9016 ++ /* Move object to the new freelist */
9017 ++ set_freepointer(s, object, *head);
9018 ++ *head = object;
9019 ++ if (!*tail)
9020 ++ *tail = object;
9021 ++ }
9022 ++ } while (object != old_tail);
9023 ++
9024 ++ if (*head == *tail)
9025 ++ *tail = NULL;
9026 ++
9027 ++ return *head != NULL;
9028 ++#else
9029 ++ return true;
9030 + #endif
9031 + }
9032 +
9033 +@@ -2965,14 +2978,12 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
9034 + void *head, void *tail, int cnt,
9035 + unsigned long addr)
9036 + {
9037 +- slab_free_freelist_hook(s, head, tail);
9038 + /*
9039 +- * slab_free_freelist_hook() could have put the items into quarantine.
9040 +- * If so, no need to free them.
9041 ++ * With KASAN enabled, slab_free_freelist_hook() modifies the
9042 ++ * freelist to remove objects whose reuse must be delayed.
9043 + */
9044 +- if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
9045 +- return;
9046 +- do_slab_free(s, page, head, tail, cnt, addr);
9047 ++ if (slab_free_freelist_hook(s, &head, &tail))
9048 ++ do_slab_free(s, page, head, tail, cnt, addr);
9049 + }
9050 +
9051 + #ifdef CONFIG_KASAN
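The slub.c rework above changes slab_free_freelist_hook() from a simple
walk into a pass that rebuilds the freelist, keeping only objects whose
free is not being delayed by the KASAN quarantine; slab_free() then
calls do_slab_free() only if anything survived. A standalone sketch of
that filter-and-rebuild idea, with invented names and a stand-in
predicate, is shown below.

    #include <stdbool.h>
    #include <stdio.h>

    struct node {
        int val;
        struct node *next;
    };

    /* Stand-in for "this object must be quarantined, delay its reuse". */
    static bool must_delay(const struct node *n)
    {
        return n->val % 2 != 0;    /* pretend odd values are quarantined */
    }

    /* Rebuild [*head .. *old tail], dropping delayed nodes; the
     * surviving list comes out in reverse order, as in the kernel
     * version. Returns true if anything is left to free. */
    static bool filter_list(struct node **head, struct node **tail)
    {
        struct node *object, *next = *head;
        struct node *old_tail = *tail ? *tail : *head;

        *head = NULL;
        *tail = NULL;

        do {
            object = next;
            next = object->next;
            if (!must_delay(object)) {
                object->next = *head;    /* push onto rebuilt list */
                *head = object;
                if (!*tail)
                    *tail = object;
            }
        } while (object != old_tail);

        return *head != NULL;
    }

    int main(void)
    {
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct node *head = &a, *tail = &c;

        if (filter_list(&head, &tail))
            for (struct node *n = head; n; n = n->next)
                printf("%d\n", n->val);    /* prints only 2 */
        return 0;
    }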
9052 +diff --git a/mm/swapfile.c b/mm/swapfile.c
9053 +index c7a33717d079..a134d1e86795 100644
9054 +--- a/mm/swapfile.c
9055 ++++ b/mm/swapfile.c
9056 +@@ -2961,6 +2961,10 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
9057 + maxpages = swp_offset(pte_to_swp_entry(
9058 + swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
9059 + last_page = swap_header->info.last_page;
9060 ++ if (!last_page) {
9061 ++ pr_warn("Empty swap-file\n");
9062 ++ return 0;
9063 ++ }
9064 + if (last_page > maxpages) {
9065 + pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
9066 + maxpages << (PAGE_SHIFT - 10),
9067 +diff --git a/mm/vmscan.c b/mm/vmscan.c
9068 +index f6a1587f9f31..a47621fa8496 100644
9069 +--- a/mm/vmscan.c
9070 ++++ b/mm/vmscan.c
9071 +@@ -3896,7 +3896,13 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
9072 + */
9073 + int page_evictable(struct page *page)
9074 + {
9075 +- return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
9076 ++ int ret;
9077 ++
9078 ++ /* Prevent address_space of inode and swap cache from being freed */
9079 ++ rcu_read_lock();
9080 ++ ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
9081 ++ rcu_read_unlock();
9082 ++ return ret;
9083 + }
9084 +
9085 + #ifdef CONFIG_SHMEM
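The vmscan.c hunk wraps the page_mapping() check in an RCU read-side
critical section so the address_space cannot be freed while it is
inspected. As a reminder of the pattern, here is a minimal kernel-style
sketch with invented names; updaters would pair it with
rcu_assign_pointer() plus synchronize_rcu() or kfree_rcu().

    #include <linux/rcupdate.h>

    struct config {
        int threshold;
    };

    static struct config __rcu *cur_config;

    static int read_threshold(void)
    {
        struct config *c;
        int val = 0;

        rcu_read_lock();
        c = rcu_dereference(cur_config);
        if (c)
            val = c->threshold;    /* c can't be freed until unlock */
        rcu_read_unlock();

        return val;
    }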
9086 +diff --git a/mm/z3fold.c b/mm/z3fold.c
9087 +index 36d31d3593e1..95c9e90f8fda 100644
9088 +--- a/mm/z3fold.c
9089 ++++ b/mm/z3fold.c
9090 +@@ -469,6 +469,8 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
9091 + spin_lock_init(&pool->lock);
9092 + spin_lock_init(&pool->stale_lock);
9093 + pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
9094 ++ if (!pool->unbuddied)
9095 ++ goto out_pool;
9096 + for_each_possible_cpu(cpu) {
9097 + struct list_head *unbuddied =
9098 + per_cpu_ptr(pool->unbuddied, cpu);
9099 +@@ -481,7 +483,7 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
9100 + pool->name = name;
9101 + pool->compact_wq = create_singlethread_workqueue(pool->name);
9102 + if (!pool->compact_wq)
9103 +- goto out;
9104 ++ goto out_unbuddied;
9105 + pool->release_wq = create_singlethread_workqueue(pool->name);
9106 + if (!pool->release_wq)
9107 + goto out_wq;
9108 +@@ -491,8 +493,11 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
9109 +
9110 + out_wq:
9111 + destroy_workqueue(pool->compact_wq);
9112 +-out:
9113 ++out_unbuddied:
9114 ++ free_percpu(pool->unbuddied);
9115 ++out_pool:
9116 + kfree(pool);
9117 ++out:
9118 + return NULL;
9119 + }
9120 +
9121 +diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
9122 +index 22dc1b9d6362..c070dfc0190a 100644
9123 +--- a/net/netlabel/netlabel_unlabeled.c
9124 ++++ b/net/netlabel/netlabel_unlabeled.c
9125 +@@ -1472,6 +1472,16 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb,
9126 + iface = rcu_dereference(netlbl_unlhsh_def);
9127 + if (iface == NULL || !iface->valid)
9128 + goto unlabel_getattr_nolabel;
9129 ++
9130 ++#if IS_ENABLED(CONFIG_IPV6)
9131 ++ /* When resolving a fallback label, check the sk_buff version as
9132 ++ * it is possible (e.g. SCTP) to have family = PF_INET6 while
9133 ++ * receiving ip_hdr(skb)->version = 4.
9134 ++ */
9135 ++ if (family == PF_INET6 && ip_hdr(skb)->version == 4)
9136 ++ family = PF_INET;
9137 ++#endif /* IPv6 */
9138 ++
9139 + switch (family) {
9140 + case PF_INET: {
9141 + struct iphdr *hdr4;
9142 +diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
9143 +index ad2ab1103189..67b6f2428d46 100644
9144 +--- a/net/rxrpc/call_event.c
9145 ++++ b/net/rxrpc/call_event.c
9146 +@@ -225,7 +225,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
9147 + ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
9148 + }
9149 +
9150 +- resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(oldest, now)));
9151 ++ resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
9152 + resend_at += jiffies + rxrpc_resend_timeout;
9153 + WRITE_ONCE(call->resend_at, resend_at);
9154 +
9155 +diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
9156 +index 6fc61400337f..34db634594c4 100644
9157 +--- a/net/rxrpc/input.c
9158 ++++ b/net/rxrpc/input.c
9159 +@@ -1240,16 +1240,19 @@ void rxrpc_data_ready(struct sock *udp_sk)
9160 + goto discard_unlock;
9161 +
9162 + if (sp->hdr.callNumber == chan->last_call) {
9163 +- /* For the previous service call, if completed successfully, we
9164 +- * discard all further packets.
9165 ++ if (chan->call ||
9166 ++ sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
9167 ++ goto discard_unlock;
9168 ++
9169 ++ /* For the previous service call, if completed
9170 ++ * successfully, we discard all further packets.
9171 + */
9172 + if (rxrpc_conn_is_service(conn) &&
9173 +- (chan->last_type == RXRPC_PACKET_TYPE_ACK ||
9174 +- sp->hdr.type == RXRPC_PACKET_TYPE_ABORT))
9175 ++ chan->last_type == RXRPC_PACKET_TYPE_ACK)
9176 + goto discard_unlock;
9177 +
9178 +- /* But otherwise we need to retransmit the final packet from
9179 +- * data cached in the connection record.
9180 ++ /* But otherwise we need to retransmit the final packet
9181 ++ * from data cached in the connection record.
9182 + */
9183 + rxrpc_post_packet_to_conn(conn, skb);
9184 + goto out_unlock;
9185 +diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
9186 +index 09f2a3e05221..7a94ce92ffdc 100644
9187 +--- a/net/rxrpc/sendmsg.c
9188 ++++ b/net/rxrpc/sendmsg.c
9189 +@@ -130,7 +130,9 @@ static inline void rxrpc_instant_resend(struct rxrpc_call *call, int ix)
9190 + spin_lock_bh(&call->lock);
9191 +
9192 + if (call->state < RXRPC_CALL_COMPLETE) {
9193 +- call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS;
9194 ++ call->rxtx_annotations[ix] =
9195 ++ (call->rxtx_annotations[ix] & RXRPC_TX_ANNO_LAST) |
9196 ++ RXRPC_TX_ANNO_RETRANS;
9197 + if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
9198 + rxrpc_queue_call(call);
9199 + }
9200 +diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
9201 +index 2a8957bd6d38..26df554f7588 100644
9202 +--- a/net/smc/smc_ib.c
9203 ++++ b/net/smc/smc_ib.c
9204 +@@ -23,6 +23,8 @@
9205 + #include "smc_wr.h"
9206 + #include "smc.h"
9207 +
9208 ++#define SMC_MAX_CQE 32766 /* max. # of completion queue elements */
9209 ++
9210 + #define SMC_QP_MIN_RNR_TIMER 5
9211 + #define SMC_QP_TIMEOUT 15 /* 4096 * 2 ** timeout usec */
9212 + #define SMC_QP_RETRY_CNT 7 /* 7: infinite */
9213 +@@ -438,9 +440,15 @@ int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
9214 + long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
9215 + {
9216 + struct ib_cq_init_attr cqattr = {
9217 +- .cqe = SMC_WR_MAX_CQE, .comp_vector = 0 };
9218 ++ .cqe = SMC_MAX_CQE, .comp_vector = 0 };
9219 ++ int cqe_size_order, smc_order;
9220 + long rc;
9221 +
9222 ++ /* ensure the calculated number of CQ entries fits the mlx5 CQ allocation */
9223 ++ cqe_size_order = cache_line_size() == 128 ? 7 : 6;
9224 ++ smc_order = MAX_ORDER - cqe_size_order - 1;
9225 ++ if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
9226 ++ cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
9227 + smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
9228 + smc_wr_tx_cq_handler, NULL,
9229 + smcibdev, &cqattr);
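As a worked example of the sizing logic above (assuming the common
x86-64 defaults of MAX_ORDER = 11 and PAGE_SIZE = 4096): with 64-byte
cache lines, cqe_size_order = 6 and smc_order = 11 - 6 - 1 = 4, so the
guard compares SMC_MAX_CQE + 2 = 32768 against (1 << 4) * 4096 = 65536
and leaves cqattr.cqe at 32766; with 128-byte cache lines, smc_order
drops to 3 and the right-hand side is 32768, so the cap still does not
trigger. The clamp only takes effect on configurations with a smaller
MAX_ORDER or page size.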
9230 +diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
9231 +index ef0c3494c9cb..210bec3c3ebe 100644
9232 +--- a/net/smc/smc_wr.h
9233 ++++ b/net/smc/smc_wr.h
9234 +@@ -19,7 +19,6 @@
9235 + #include "smc.h"
9236 + #include "smc_core.h"
9237 +
9238 +-#define SMC_WR_MAX_CQE 32768 /* max. # of completion queue elements */
9239 + #define SMC_WR_BUF_CNT 16 /* # of ctrl buffers per link */
9240 +
9241 + #define SMC_WR_TX_WAIT_FREE_SLOT_TIME (10 * HZ)
9242 +diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
9243 +index 35ef69312811..6a8f67714c83 100644
9244 +--- a/security/integrity/ima/Kconfig
9245 ++++ b/security/integrity/ima/Kconfig
9246 +@@ -10,6 +10,7 @@ config IMA
9247 + select CRYPTO_HASH_INFO
9248 + select TCG_TPM if HAS_IOMEM && !UML
9249 + select TCG_TIS if TCG_TPM && X86
9250 ++ select TCG_CRB if TCG_TPM && ACPI
9251 + select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES
9252 + help
9253 + The Trusted Computing Group(TCG) runtime Integrity
9254 +diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
9255 +index 205bc69361ea..4e085a17124f 100644
9256 +--- a/security/integrity/ima/ima_crypto.c
9257 ++++ b/security/integrity/ima/ima_crypto.c
9258 +@@ -73,6 +73,8 @@ int __init ima_init_crypto(void)
9259 + hash_algo_name[ima_hash_algo], rc);
9260 + return rc;
9261 + }
9262 ++ pr_info("Allocated hash algorithm: %s\n",
9263 ++ hash_algo_name[ima_hash_algo]);
9264 + return 0;
9265 + }
9266 +
9267 +diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
9268 +index 2cfb0c714967..c678d3801a61 100644
9269 +--- a/security/integrity/ima/ima_main.c
9270 ++++ b/security/integrity/ima/ima_main.c
9271 +@@ -16,6 +16,9 @@
9272 + * implements the IMA hooks: ima_bprm_check, ima_file_mmap,
9273 + * and ima_file_check.
9274 + */
9275 ++
9276 ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9277 ++
9278 + #include <linux/module.h>
9279 + #include <linux/file.h>
9280 + #include <linux/binfmts.h>
9281 +@@ -472,6 +475,16 @@ static int __init init_ima(void)
9282 + ima_init_template_list();
9283 + hash_setup(CONFIG_IMA_DEFAULT_HASH);
9284 + error = ima_init();
9285 ++
9286 ++ if (error && strcmp(hash_algo_name[ima_hash_algo],
9287 ++ CONFIG_IMA_DEFAULT_HASH) != 0) {
9288 ++ pr_info("Allocating %s failed, going to use default hash algorithm %s\n",
9289 ++ hash_algo_name[ima_hash_algo], CONFIG_IMA_DEFAULT_HASH);
9290 ++ hash_setup_done = 0;
9291 ++ hash_setup(CONFIG_IMA_DEFAULT_HASH);
9292 ++ error = ima_init();
9293 ++ }
9294 ++
9295 + if (!error) {
9296 + ima_initialized = 1;
9297 + ima_update_policy_flag();
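The init_ima() hunk retries initialization with the compile-time
default hash when the configured algorithm cannot be allocated. A
standalone sketch of that try-preferred-then-fall-back pattern, with
invented names, is shown below.

    #include <stdio.h>
    #include <string.h>

    #define DEFAULT_ALGO "sha256"

    /* Pretend only the default algorithm is actually available. */
    static int subsystem_init(const char *algo)
    {
        return strcmp(algo, DEFAULT_ALGO) == 0 ? 0 : -1;
    }

    int main(void)
    {
        const char *algo = "sha3-512";    /* configured preference */
        int err = subsystem_init(algo);

        if (err && strcmp(algo, DEFAULT_ALGO) != 0) {
            fprintf(stderr, "%s failed, falling back to %s\n",
                    algo, DEFAULT_ALGO);
            err = subsystem_init(DEFAULT_ALGO);
        }
        return err ? 1 : 0;
    }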
9298 +diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
9299 +index 915f5572c6ff..f3508e6db5f7 100644
9300 +--- a/security/integrity/ima/ima_policy.c
9301 ++++ b/security/integrity/ima/ima_policy.c
9302 +@@ -384,7 +384,7 @@ int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
9303 + action |= entry->action & IMA_DO_MASK;
9304 + if (entry->action & IMA_APPRAISE) {
9305 + action |= get_subaction(entry, func);
9306 +- action ^= IMA_HASH;
9307 ++ action &= ~IMA_HASH;
9308 + }
9309 +
9310 + if (entry->action & IMA_DO_MASK)
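The ima_policy.c one-liner replaces an XOR with an AND-NOT because the
two are only equivalent when the bit is already set: XOR toggles
IMA_HASH, setting it where it was clear, while AND-NOT unconditionally
clears it. The standalone demonstration below uses an invented flag
value.

    #include <stdio.h>

    #define FLAG_HASH 0x04u

    int main(void)
    {
        unsigned int action = 0x01u;    /* FLAG_HASH not set */

        printf("xor:     %#x\n", action ^ FLAG_HASH);   /* 0x5: bit set! */
        printf("and-not: %#x\n", action & ~FLAG_HASH);  /* 0x1: cleared */
        return 0;
    }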
9311 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
9312 +index 8644d864e3c1..3d40fd252780 100644
9313 +--- a/security/selinux/hooks.c
9314 ++++ b/security/selinux/hooks.c
9315 +@@ -1532,8 +1532,15 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
9316 + /* Called from d_instantiate or d_splice_alias. */
9317 + dentry = dget(opt_dentry);
9318 + } else {
9319 +- /* Called from selinux_complete_init, try to find a dentry. */
9320 ++ /*
9321 ++ * Called from selinux_complete_init, try to find a dentry.
9322 ++ * Some filesystems really want a connected one, so try
9323 ++ * that first. We could split SECURITY_FS_USE_XATTR in
9324 ++ * two, depending upon that...
9325 ++ */
9326 + dentry = d_find_alias(inode);
9327 ++ if (!dentry)
9328 ++ dentry = d_find_any_alias(inode);
9329 + }
9330 + if (!dentry) {
9331 + /*
9332 +@@ -1636,14 +1643,19 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
9333 + if ((sbsec->flags & SE_SBGENFS) && !S_ISLNK(inode->i_mode)) {
9334 + /* We must have a dentry to determine the label on
9335 + * procfs inodes */
9336 +- if (opt_dentry)
9337 ++ if (opt_dentry) {
9338 + /* Called from d_instantiate or
9339 + * d_splice_alias. */
9340 + dentry = dget(opt_dentry);
9341 +- else
9342 ++ } else {
9343 + /* Called from selinux_complete_init, try to
9344 +- * find a dentry. */
9345 ++ * find a dentry. Some filesystems really want
9346 ++ * a connected one, so try that first.
9347 ++ */
9348 + dentry = d_find_alias(inode);
9349 ++ if (!dentry)
9350 ++ dentry = d_find_any_alias(inode);
9351 ++ }
9352 + /*
9353 + * This can be hit on boot when a file is accessed
9354 + * before the policy is loaded. When we load policy we
9355 +diff --git a/sound/core/timer.c b/sound/core/timer.c
9356 +index dc87728c5b74..0ddcae495838 100644
9357 +--- a/sound/core/timer.c
9358 ++++ b/sound/core/timer.c
9359 +@@ -592,7 +592,7 @@ static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
9360 + else
9361 + timeri->flags |= SNDRV_TIMER_IFLG_PAUSED;
9362 + snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
9363 +- SNDRV_TIMER_EVENT_CONTINUE);
9364 ++ SNDRV_TIMER_EVENT_PAUSE);
9365 + unlock:
9366 + spin_unlock_irqrestore(&timer->lock, flags);
9367 + return result;
9368 +@@ -614,7 +614,7 @@ static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
9369 + list_del_init(&timeri->ack_list);
9370 + list_del_init(&timeri->active_list);
9371 + snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
9372 +- SNDRV_TIMER_EVENT_CONTINUE);
9373 ++ SNDRV_TIMER_EVENT_PAUSE);
9374 + spin_unlock(&timeri->timer->lock);
9375 + }
9376 + spin_unlock_irqrestore(&slave_active_lock, flags);
9377 +diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
9378 +index 8632301489fa..b67de2bb06a2 100644
9379 +--- a/sound/core/vmaster.c
9380 ++++ b/sound/core/vmaster.c
9381 +@@ -68,10 +68,13 @@ static int slave_update(struct link_slave *slave)
9382 + return -ENOMEM;
9383 + uctl->id = slave->slave.id;
9384 + err = slave->slave.get(&slave->slave, uctl);
9385 ++ if (err < 0)
9386 ++ goto error;
9387 + for (ch = 0; ch < slave->info.count; ch++)
9388 + slave->vals[ch] = uctl->value.integer.value[ch];
9389 ++ error:
9390 + kfree(uctl);
9391 +- return 0;
9392 ++ return err < 0 ? err : 0;
9393 + }
9394 +
9395 + /* get the slave ctl info and save the initial values */
9396 +diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
9397 +index 457a1521f32f..785f4e95148c 100644
9398 +--- a/tools/hv/hv_fcopy_daemon.c
9399 ++++ b/tools/hv/hv_fcopy_daemon.c
9400 +@@ -23,13 +23,14 @@
9401 + #include <unistd.h>
9402 + #include <errno.h>
9403 + #include <linux/hyperv.h>
9404 ++#include <linux/limits.h>
9405 + #include <syslog.h>
9406 + #include <sys/stat.h>
9407 + #include <fcntl.h>
9408 + #include <getopt.h>
9409 +
9410 + static int target_fd;
9411 +-static char target_fname[W_MAX_PATH];
9412 ++static char target_fname[PATH_MAX];
9413 + static unsigned long long filesize;
9414 +
9415 + static int hv_start_fcopy(struct hv_start_fcopy *smsg)
9416 +diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
9417 +index b2b4ebffab8c..34031a297f02 100644
9418 +--- a/tools/hv/hv_vss_daemon.c
9419 ++++ b/tools/hv/hv_vss_daemon.c
9420 +@@ -22,6 +22,7 @@
9421 + #include <sys/poll.h>
9422 + #include <sys/ioctl.h>
9423 + #include <sys/stat.h>
9424 ++#include <sys/sysmacros.h>
9425 + #include <fcntl.h>
9426 + #include <stdio.h>
9427 + #include <mntent.h>
9428 +diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
9429 +index 012328038594..b100e4d8f9fb 100644
9430 +--- a/tools/perf/Makefile.perf
9431 ++++ b/tools/perf/Makefile.perf
9432 +@@ -364,7 +364,8 @@ LIBS = -Wl,--whole-archive $(PERFLIBS) $(EXTRA_PERFLIBS) -Wl,--no-whole-archive
9433 +
9434 + ifeq ($(USE_CLANG), 1)
9435 + CLANGLIBS_LIST = AST Basic CodeGen Driver Frontend Lex Tooling Edit Sema Analysis Parse Serialization
9436 +- LIBCLANG = $(foreach l,$(CLANGLIBS_LIST),$(wildcard $(shell $(LLVM_CONFIG) --libdir)/libclang$(l).a))
9437 ++ CLANGLIBS_NOEXT_LIST = $(foreach l,$(CLANGLIBS_LIST),$(shell $(LLVM_CONFIG) --libdir)/libclang$(l))
9438 ++ LIBCLANG = $(foreach l,$(CLANGLIBS_NOEXT_LIST),$(wildcard $(l).a $(l).so))
9439 + LIBS += -Wl,--start-group $(LIBCLANG) -Wl,--end-group
9440 + endif
9441 +
9442 +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
9443 +index 54a4c152edb3..9204cdfed73d 100644
9444 +--- a/tools/perf/builtin-stat.c
9445 ++++ b/tools/perf/builtin-stat.c
9446 +@@ -2274,11 +2274,16 @@ static int add_default_attributes(void)
9447 + return 0;
9448 +
9449 + if (transaction_run) {
9450 ++ struct parse_events_error errinfo;
9451 ++
9452 + if (pmu_have_event("cpu", "cycles-ct") &&
9453 + pmu_have_event("cpu", "el-start"))
9454 +- err = parse_events(evsel_list, transaction_attrs, NULL);
9455 ++ err = parse_events(evsel_list, transaction_attrs,
9456 ++ &errinfo);
9457 + else
9458 +- err = parse_events(evsel_list, transaction_limited_attrs, NULL);
9459 ++ err = parse_events(evsel_list,
9460 ++ transaction_limited_attrs,
9461 ++ &errinfo);
9462 + if (err) {
9463 + fprintf(stderr, "Cannot set up transaction events\n");
9464 + return -1;
9465 +diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
9466 +index 35ac016fcb98..fd6e238b5cc8 100644
9467 +--- a/tools/perf/builtin-top.c
9468 ++++ b/tools/perf/builtin-top.c
9469 +@@ -1224,8 +1224,10 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset)
9470 +
9471 + static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
9472 + {
9473 +- if (!strcmp(var, "top.call-graph"))
9474 +- var = "call-graph.record-mode"; /* fall-through */
9475 ++ if (!strcmp(var, "top.call-graph")) {
9476 ++ var = "call-graph.record-mode";
9477 ++ return perf_default_config(var, value, cb);
9478 ++ }
9479 + if (!strcmp(var, "top.children")) {
9480 + symbol_conf.cumulate_callchain = perf_config_bool(var, value);
9481 + return 0;
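The builtin-top.c hunk fixes a subtle config-dispatch bug: the old code
rewrote the key and relied on a "/* fall-through */" comment, but
nothing afterwards dispatched on the rewritten key, so the alias was
silently dropped; the fix forwards it explicitly. A standalone
illustration of the bug class, with invented names, is shown below.

    #include <stdio.h>
    #include <string.h>

    static int default_config(const char *var, const char *value)
    {
        printf("handled %s=%s\n", var, value);
        return 0;
    }

    static int broken_config(const char *var, const char *value)
    {
        (void)value;
        if (!strcmp(var, "top.call-graph"))
            var = "call-graph.record-mode"; /* fall-through... to what? */
        if (!strcmp(var, "top.children"))
            return 0;
        return 0;    /* rewritten key silently dropped */
    }

    static int fixed_config(const char *var, const char *value)
    {
        if (!strcmp(var, "top.call-graph")) {
            var = "call-graph.record-mode";
            return default_config(var, value);    /* explicit dispatch */
        }
        if (!strcmp(var, "top.children"))
            return 0;
        return 0;
    }

    int main(void)
    {
        broken_config("top.call-graph", "dwarf");    /* prints nothing */
        fixed_config("top.call-graph", "dwarf");     /* prints "handled ..." */
        return 0;
    }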
9482 +diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
9483 +index 260418969120..2f008067d989 100644
9484 +--- a/tools/perf/tests/dwarf-unwind.c
9485 ++++ b/tools/perf/tests/dwarf-unwind.c
9486 +@@ -37,6 +37,19 @@ static int init_live_machine(struct machine *machine)
9487 + mmap_handler, machine, true, 500);
9488 + }
9489 +
9490 ++/*
9491 ++ * We need to keep these functions global, despite the
9492 ++ * fact that they are used only locally in this object,
9493 ++ * in order to keep them around even if the binary is
9494 ++ * stripped. If they are gone, the unwind check for
9495 ++ * the symbols fails.
9496 ++ */
9497 ++int test_dwarf_unwind__thread(struct thread *thread);
9498 ++int test_dwarf_unwind__compare(void *p1, void *p2);
9499 ++int test_dwarf_unwind__krava_3(struct thread *thread);
9500 ++int test_dwarf_unwind__krava_2(struct thread *thread);
9501 ++int test_dwarf_unwind__krava_1(struct thread *thread);
9502 ++
9503 + #define MAX_STACK 8
9504 +
9505 + static int unwind_entry(struct unwind_entry *entry, void *arg)
9506 +@@ -45,12 +58,12 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
9507 + char *symbol = entry->sym ? entry->sym->name : NULL;
9508 + static const char *funcs[MAX_STACK] = {
9509 + "test__arch_unwind_sample",
9510 +- "unwind_thread",
9511 +- "compare",
9512 ++ "test_dwarf_unwind__thread",
9513 ++ "test_dwarf_unwind__compare",
9514 + "bsearch",
9515 +- "krava_3",
9516 +- "krava_2",
9517 +- "krava_1",
9518 ++ "test_dwarf_unwind__krava_3",
9519 ++ "test_dwarf_unwind__krava_2",
9520 ++ "test_dwarf_unwind__krava_1",
9521 + "test__dwarf_unwind"
9522 + };
9523 + /*
9524 +@@ -77,7 +90,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
9525 + return strcmp((const char *) symbol, funcs[idx]);
9526 + }
9527 +
9528 +-static noinline int unwind_thread(struct thread *thread)
9529 ++noinline int test_dwarf_unwind__thread(struct thread *thread)
9530 + {
9531 + struct perf_sample sample;
9532 + unsigned long cnt = 0;
9533 +@@ -108,7 +121,7 @@ static noinline int unwind_thread(struct thread *thread)
9534 +
9535 + static int global_unwind_retval = -INT_MAX;
9536 +
9537 +-static noinline int compare(void *p1, void *p2)
9538 ++noinline int test_dwarf_unwind__compare(void *p1, void *p2)
9539 + {
9540 + /* Any possible value should be 'thread' */
9541 + struct thread *thread = *(struct thread **)p1;
9542 +@@ -117,17 +130,17 @@ static noinline int compare(void *p1, void *p2)
9543 + /* Call unwinder twice for both callchain orders. */
9544 + callchain_param.order = ORDER_CALLER;
9545 +
9546 +- global_unwind_retval = unwind_thread(thread);
9547 ++ global_unwind_retval = test_dwarf_unwind__thread(thread);
9548 + if (!global_unwind_retval) {
9549 + callchain_param.order = ORDER_CALLEE;
9550 +- global_unwind_retval = unwind_thread(thread);
9551 ++ global_unwind_retval = test_dwarf_unwind__thread(thread);
9552 + }
9553 + }
9554 +
9555 + return p1 - p2;
9556 + }
9557 +
9558 +-static noinline int krava_3(struct thread *thread)
9559 ++noinline int test_dwarf_unwind__krava_3(struct thread *thread)
9560 + {
9561 + struct thread *array[2] = {thread, thread};
9562 + void *fp = &bsearch;
9563 +@@ -141,18 +154,19 @@ static noinline int krava_3(struct thread *thread)
9564 + size_t, int (*)(void *, void *));
9565 +
9566 + _bsearch = fp;
9567 +- _bsearch(array, &thread, 2, sizeof(struct thread **), compare);
9568 ++ _bsearch(array, &thread, 2, sizeof(struct thread **),
9569 ++ test_dwarf_unwind__compare);
9570 + return global_unwind_retval;
9571 + }
9572 +
9573 +-static noinline int krava_2(struct thread *thread)
9574 ++noinline int test_dwarf_unwind__krava_2(struct thread *thread)
9575 + {
9576 +- return krava_3(thread);
9577 ++ return test_dwarf_unwind__krava_3(thread);
9578 + }
9579 +
9580 +-static noinline int krava_1(struct thread *thread)
9581 ++noinline int test_dwarf_unwind__krava_1(struct thread *thread)
9582 + {
9583 +- return krava_2(thread);
9584 ++ return test_dwarf_unwind__krava_2(thread);
9585 + }
9586 +
9587 + int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unused)
9588 +@@ -189,7 +203,7 @@ int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unu
9589 + goto out;
9590 + }
9591 +
9592 +- err = krava_1(thread);
9593 ++ err = test_dwarf_unwind__krava_1(thread);
9594 + thread__put(thread);
9595 +
9596 + out:
9597 +diff --git a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
9598 +index c446c894b297..8c4ab0b390c0 100755
9599 +--- a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
9600 ++++ b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
9601 +@@ -21,12 +21,12 @@ trace_libc_inet_pton_backtrace() {
9602 + expected[3]=".*packets transmitted.*"
9603 + expected[4]="rtt min.*"
9604 + expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)"
9605 +- expected[6]=".*inet_pton[[:space:]]\($libc\)$"
9606 ++ expected[6]=".*inet_pton[[:space:]]\($libc|inlined\)$"
9607 + case "$(uname -m)" in
9608 + s390x)
9609 + eventattr='call-graph=dwarf'
9610 +- expected[7]="gaih_inet[[:space:]]\(inlined\)$"
9611 +- expected[8]="__GI_getaddrinfo[[:space:]]\(inlined\)$"
9612 ++ expected[7]="gaih_inet.*[[:space:]]\($libc|inlined\)$"
9613 ++ expected[8]="__GI_getaddrinfo[[:space:]]\($libc|inlined\)$"
9614 + expected[9]="main[[:space:]]\(.*/bin/ping.*\)$"
9615 + expected[10]="__libc_start_main[[:space:]]\($libc\)$"
9616 + expected[11]="_start[[:space:]]\(.*/bin/ping.*\)$"
9617 +diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
9618 +index f6789fb029d6..884cad122acf 100644
9619 +--- a/tools/perf/tests/vmlinux-kallsyms.c
9620 ++++ b/tools/perf/tests/vmlinux-kallsyms.c
9621 +@@ -125,7 +125,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
9622 +
9623 + if (pair && UM(pair->start) == mem_start) {
9624 + next_pair:
9625 +- if (strcmp(sym->name, pair->name) == 0) {
9626 ++ if (arch__compare_symbol_names(sym->name, pair->name) == 0) {
9627 + /*
9628 + * kallsyms don't have the symbol end, so we
9629 + * set that by using the next symbol start - 1,
9630 +diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
9631 +index fbf927cf775d..6ff6839558b0 100644
9632 +--- a/tools/perf/ui/browsers/annotate.c
9633 ++++ b/tools/perf/ui/browsers/annotate.c
9634 +@@ -319,6 +319,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
9635 + struct map_symbol *ms = ab->b.priv;
9636 + struct symbol *sym = ms->sym;
9637 + u8 pcnt_width = annotate_browser__pcnt_width(ab);
9638 ++ int width = 0;
9639 +
9640 + /* PLT symbols contain external offsets */
9641 + if (strstr(sym->name, "@plt"))
9642 +@@ -365,13 +366,17 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
9643 + to = (u64)btarget->idx;
9644 + }
9645 +
9646 ++ if (ab->have_cycles)
9647 ++ width = IPC_WIDTH + CYCLES_WIDTH;
9648 ++
9649 + ui_browser__set_color(browser, HE_COLORSET_JUMP_ARROWS);
9650 +- __ui_browser__line_arrow(browser, pcnt_width + 2 + ab->addr_width,
9651 ++ __ui_browser__line_arrow(browser,
9652 ++ pcnt_width + 2 + ab->addr_width + width,
9653 + from, to);
9654 +
9655 + if (is_fused(ab, cursor)) {
9656 + ui_browser__mark_fused(browser,
9657 +- pcnt_width + 3 + ab->addr_width,
9658 ++ pcnt_width + 3 + ab->addr_width + width,
9659 + from - 1,
9660 + to > from ? true : false);
9661 + }
9662 +diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp
9663 +index 1bfc946e37dc..bf31ceab33bd 100644
9664 +--- a/tools/perf/util/c++/clang.cpp
9665 ++++ b/tools/perf/util/c++/clang.cpp
9666 +@@ -9,6 +9,7 @@
9667 + * Copyright (C) 2016 Huawei Inc.
9668 + */
9669 +
9670 ++#include "clang/Basic/Version.h"
9671 + #include "clang/CodeGen/CodeGenAction.h"
9672 + #include "clang/Frontend/CompilerInvocation.h"
9673 + #include "clang/Frontend/CompilerInstance.h"
9674 +@@ -58,7 +59,8 @@ createCompilerInvocation(llvm::opt::ArgStringList CFlags, StringRef& Path,
9675 +
9676 + FrontendOptions& Opts = CI->getFrontendOpts();
9677 + Opts.Inputs.clear();
9678 +- Opts.Inputs.emplace_back(Path, IK_C);
9679 ++ Opts.Inputs.emplace_back(Path,
9680 ++ FrontendOptions::getInputKindForExtension("c"));
9681 + return CI;
9682 + }
9683 +
9684 +@@ -71,10 +73,17 @@ getModuleFromSource(llvm::opt::ArgStringList CFlags,
9685 +
9686 + Clang.setVirtualFileSystem(&*VFS);
9687 +
9688 ++#if CLANG_VERSION_MAJOR < 4
9689 + IntrusiveRefCntPtr<CompilerInvocation> CI =
9690 + createCompilerInvocation(std::move(CFlags), Path,
9691 + Clang.getDiagnostics());
9692 + Clang.setInvocation(&*CI);
9693 ++#else
9694 ++ std::shared_ptr<CompilerInvocation> CI(
9695 ++ createCompilerInvocation(std::move(CFlags), Path,
9696 ++ Clang.getDiagnostics()));
9697 ++ Clang.setInvocation(CI);
9698 ++#endif
9699 +
9700 + std::unique_ptr<CodeGenAction> Act(new EmitLLVMOnlyAction(&*LLVMCtx));
9701 + if (!Clang.ExecuteAction(*Act))
9702 +diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
9703 +index b6140950301e..44a8456cea10 100644
9704 +--- a/tools/perf/util/hist.c
9705 ++++ b/tools/perf/util/hist.c
9706 +@@ -879,7 +879,7 @@ iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
9707 + * cumulated only one time to prevent entries more than 100%
9708 + * overhead.
9709 + */
9710 +- he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
9711 ++ he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
9712 + if (he_cache == NULL)
9713 + return -ENOMEM;
9714 +
9715 +@@ -1045,8 +1045,6 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
9716 + if (err)
9717 + return err;
9718 +
9719 +- iter->max_stack = max_stack_depth;
9720 +-
9721 + err = iter->ops->prepare_entry(iter, al);
9722 + if (err)
9723 + goto out;
9724 +diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
9725 +index 02721b579746..e869cad4d89f 100644
9726 +--- a/tools/perf/util/hist.h
9727 ++++ b/tools/perf/util/hist.h
9728 +@@ -107,7 +107,6 @@ struct hist_entry_iter {
9729 + int curr;
9730 +
9731 + bool hide_unresolved;
9732 +- int max_stack;
9733 +
9734 + struct perf_evsel *evsel;
9735 + struct perf_sample *sample;
9736 +diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
9737 +index 91531a7c8fbf..0bda6dfd5b96 100644
9738 +--- a/tools/perf/util/mmap.c
9739 ++++ b/tools/perf/util/mmap.c
9740 +@@ -344,5 +344,11 @@ int perf_mmap__push(struct perf_mmap *md, bool overwrite,
9741 + */
9742 + void perf_mmap__read_done(struct perf_mmap *map)
9743 + {
9744 ++ /*
9745 ++ * Check if event was unmapped due to a POLLHUP/POLLERR.
9746 ++ */
9747 ++ if (!refcount_read(&map->refcnt))
9748 ++ return;
9749 ++
9750 + map->prev = perf_mmap__read_head(map);
9751 + }
9752 +diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
9753 +index 6c645eb77d42..ee820fcc29b0 100644
9754 +--- a/tools/testing/radix-tree/idr-test.c
9755 ++++ b/tools/testing/radix-tree/idr-test.c
9756 +@@ -252,6 +252,13 @@ void idr_checks(void)
9757 + idr_remove(&idr, 3);
9758 + idr_remove(&idr, 0);
9759 +
9760 ++ assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == 0);
9761 ++ idr_remove(&idr, 1);
9762 ++ for (i = 1; i < RADIX_TREE_MAP_SIZE; i++)
9763 ++ assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == i);
9764 ++ idr_remove(&idr, 1 << 30);
9765 ++ idr_destroy(&idr);
9766 ++
9767 + for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) {
9768 + struct item *item = item_create(i, 0);
9769 + assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
9770 +diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
9771 +index 7442dfb73b7f..0fbe778efd5f 100644
9772 +--- a/tools/testing/selftests/Makefile
9773 ++++ b/tools/testing/selftests/Makefile
9774 +@@ -130,6 +130,7 @@ ifdef INSTALL_PATH
9775 + BUILD_TARGET=$$BUILD/$$TARGET; \
9776 + echo "echo ; echo Running tests in $$TARGET" >> $(ALL_SCRIPT); \
9777 + echo "echo ========================================" >> $(ALL_SCRIPT); \
9778 ++ echo "[ -w /dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \
9779 + echo "cd $$TARGET" >> $(ALL_SCRIPT); \
9780 + make -s --no-print-directory OUTPUT=$$BUILD_TARGET -C $$TARGET emit_tests >> $(ALL_SCRIPT); \
9781 + echo "cd \$$ROOT" >> $(ALL_SCRIPT); \
9782 +diff --git a/tools/testing/selftests/net/fib-onlink-tests.sh b/tools/testing/selftests/net/fib-onlink-tests.sh
9783 +new file mode 100644
9784 +index 000000000000..06b1d7cc12cc
9785 +--- /dev/null
9786 ++++ b/tools/testing/selftests/net/fib-onlink-tests.sh
9787 +@@ -0,0 +1,375 @@
9788 ++#!/bin/bash
9789 ++# SPDX-License-Identifier: GPL-2.0
9790 ++
9791 ++# IPv4 and IPv6 onlink tests
9792 ++
9793 ++PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
9794 ++
9795 ++# Network interfaces
9796 ++# - odd in current namespace; even in peer ns
9797 ++declare -A NETIFS
9798 ++# default VRF
9799 ++NETIFS[p1]=veth1
9800 ++NETIFS[p2]=veth2
9801 ++NETIFS[p3]=veth3
9802 ++NETIFS[p4]=veth4
9803 ++# VRF
9804 ++NETIFS[p5]=veth5
9805 ++NETIFS[p6]=veth6
9806 ++NETIFS[p7]=veth7
9807 ++NETIFS[p8]=veth8
9808 ++
9809 ++# /24 network
9810 ++declare -A V4ADDRS
9811 ++V4ADDRS[p1]=169.254.1.1
9812 ++V4ADDRS[p2]=169.254.1.2
9813 ++V4ADDRS[p3]=169.254.3.1
9814 ++V4ADDRS[p4]=169.254.3.2
9815 ++V4ADDRS[p5]=169.254.5.1
9816 ++V4ADDRS[p6]=169.254.5.2
9817 ++V4ADDRS[p7]=169.254.7.1
9818 ++V4ADDRS[p8]=169.254.7.2
9819 ++
9820 ++# /64 network
9821 ++declare -A V6ADDRS
9822 ++V6ADDRS[p1]=2001:db8:101::1
9823 ++V6ADDRS[p2]=2001:db8:101::2
9824 ++V6ADDRS[p3]=2001:db8:301::1
9825 ++V6ADDRS[p4]=2001:db8:301::2
9826 ++V6ADDRS[p5]=2001:db8:501::1
9827 ++V6ADDRS[p6]=2001:db8:501::2
9828 ++V6ADDRS[p7]=2001:db8:701::1
9829 ++V6ADDRS[p8]=2001:db8:701::2
9830 ++
9831 ++# Test networks:
9832 ++# [1] = default table
9833 ++# [2] = VRF
9834 ++#
9835 ++# /32 host routes
9836 ++declare -A TEST_NET4
9837 ++TEST_NET4[1]=169.254.101
9838 ++TEST_NET4[2]=169.254.102
9839 ++# /128 host routes
9840 ++declare -A TEST_NET6
9841 ++TEST_NET6[1]=2001:db8:101
9842 ++TEST_NET6[2]=2001:db8:102
9843 ++
9844 ++# connected gateway
9845 ++CONGW[1]=169.254.1.254
9846 ++CONGW[2]=169.254.5.254
9847 ++
9848 ++# recursive gateway
9849 ++RECGW4[1]=169.254.11.254
9850 ++RECGW4[2]=169.254.12.254
9851 ++RECGW6[1]=2001:db8:11::64
9852 ++RECGW6[2]=2001:db8:12::64
9853 ++
9854 ++# for v4 mapped to v6
9855 ++declare -A TEST_NET4IN6
9856 ++TEST_NET4IN6[1]=10.1.1.254
9857 ++TEST_NET4IN6[2]=10.2.1.254
9858 ++
9859 ++# mcast address
9860 ++MCAST6=ff02::1
9861 ++
9862 ++
9863 ++PEER_NS=bart
9864 ++PEER_CMD="ip netns exec ${PEER_NS}"
9865 ++VRF=lisa
9866 ++VRF_TABLE=1101
9867 ++PBR_TABLE=101
9868 ++
9869 ++################################################################################
9870 ++# utilities
9871 ++
9872 ++log_test()
9873 ++{
9874 ++ local rc=$1
9875 ++ local expected=$2
9876 ++ local msg="$3"
9877 ++
9878 ++ if [ ${rc} -eq ${expected} ]; then
9879 ++ nsuccess=$((nsuccess+1))
9880 ++ printf "\n TEST: %-50s [ OK ]\n" "${msg}"
9881 ++ else
9882 ++ nfail=$((nfail+1))
9883 ++ printf "\n TEST: %-50s [FAIL]\n" "${msg}"
9884 ++ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
9885 ++ echo
9886 ++ echo "hit enter to continue, 'q' to quit"
9887 ++ read a
9888 ++ [ "$a" = "q" ] && exit 1
9889 ++ fi
9890 ++ fi
9891 ++}
9892 ++
9893 ++log_section()
9894 ++{
9895 ++ echo
9896 ++ echo "######################################################################"
9897 ++ echo "TEST SECTION: $*"
9898 ++ echo "######################################################################"
9899 ++}
9900 ++
9901 ++log_subsection()
9902 ++{
9903 ++ echo
9904 ++ echo "#########################################"
9905 ++ echo "TEST SUBSECTION: $*"
9906 ++}
9907 ++
9908 ++run_cmd()
9909 ++{
9910 ++ echo
9911 ++ echo "COMMAND: $*"
9912 ++ eval $*
9913 ++}
9914 ++
9915 ++get_linklocal()
9916 ++{
9917 ++ local dev=$1
9918 ++ local pfx
9919 ++ local addr
9920 ++
9921 ++ addr=$(${pfx} ip -6 -br addr show dev ${dev} | \
9922 ++ awk '{
9923 ++ for (i = 3; i <= NF; ++i) {
9924 ++ if ($i ~ /^fe80/)
9925 ++ print $i
9926 ++ }
9927 ++ }'
9928 ++ )
9929 ++ addr=${addr/\/*}
9930 ++
9931 ++ [ -z "$addr" ] && return 1
9932 ++
9933 ++ echo $addr
9934 ++
9935 ++ return 0
9936 ++}
9937 ++
9938 ++################################################################################
9939 ++#
9940 ++
9941 ++setup()
9942 ++{
9943 ++ echo
9944 ++ echo "########################################"
9945 ++ echo "Configuring interfaces"
9946 ++
9947 ++ set -e
9948 ++
9949 ++ # create namespace
9950 ++ ip netns add ${PEER_NS}
9951 ++ ip -netns ${PEER_NS} li set lo up
9952 ++
9953 ++ # add vrf table
9954 ++ ip li add ${VRF} type vrf table ${VRF_TABLE}
9955 ++ ip li set ${VRF} up
9956 ++ ip ro add table ${VRF_TABLE} unreachable default
9957 ++ ip -6 ro add table ${VRF_TABLE} unreachable default
9958 ++
9959 ++ # create test interfaces
9960 ++ ip li add ${NETIFS[p1]} type veth peer name ${NETIFS[p2]}
9961 ++ ip li add ${NETIFS[p3]} type veth peer name ${NETIFS[p4]}
9962 ++ ip li add ${NETIFS[p5]} type veth peer name ${NETIFS[p6]}
9963 ++ ip li add ${NETIFS[p7]} type veth peer name ${NETIFS[p8]}
9964 ++
9965 ++ # enslave vrf interfaces
9966 ++ for n in 5 7; do
9967 ++ ip li set ${NETIFS[p${n}]} vrf ${VRF}
9968 ++ done
9969 ++
9970 ++ # add addresses
9971 ++ for n in 1 3 5 7; do
9972 ++ ip li set ${NETIFS[p${n}]} up
9973 ++ ip addr add ${V4ADDRS[p${n}]}/24 dev ${NETIFS[p${n}]}
9974 ++ ip addr add ${V6ADDRS[p${n}]}/64 dev ${NETIFS[p${n}]}
9975 ++ done
9976 ++
9977 ++ # move peer interfaces to namespace and add addresses
9978 ++ for n in 2 4 6 8; do
9979 ++ ip li set ${NETIFS[p${n}]} netns ${PEER_NS} up
9980 ++ ip -netns ${PEER_NS} addr add ${V4ADDRS[p${n}]}/24 dev ${NETIFS[p${n}]}
9981 ++ ip -netns ${PEER_NS} addr add ${V6ADDRS[p${n}]}/64 dev ${NETIFS[p${n}]}
9982 ++ done
9983 ++
9984 ++ set +e
9985 ++
9986 ++ # let DAD complete - assume default of 1 probe
9987 ++ sleep 1
9988 ++}
9989 ++
9990 ++cleanup()
9991 ++{
9992 ++ # make sure we start from a clean slate
9993 ++ ip netns del ${PEER_NS} 2>/dev/null
9994 ++ for n in 1 3 5 7; do
9995 ++ ip link del ${NETIFS[p${n}]} 2>/dev/null
9996 ++ done
9997 ++ ip link del ${VRF} 2>/dev/null
9998 ++ ip ro flush table ${VRF_TABLE}
9999 ++ ip -6 ro flush table ${VRF_TABLE}
10000 ++}
10001 ++
10002 ++################################################################################
10003 ++# IPv4 tests
10004 ++#
10005 ++
10006 ++run_ip()
10007 ++{
10008 ++ local table="$1"
10009 ++ local prefix="$2"
10010 ++ local gw="$3"
10011 ++ local dev="$4"
10012 ++ local exp_rc="$5"
10013 ++ local desc="$6"
10014 ++
10015 ++ # dev arg may be empty
10016 ++ [ -n "${dev}" ] && dev="dev ${dev}"
10017 ++
10018 ++ run_cmd ip ro add table "${table}" "${prefix}"/32 via "${gw}" "${dev}" onlink
10019 ++ log_test $? ${exp_rc} "${desc}"
10020 ++}
10021 ++
10022 ++valid_onlink_ipv4()
10023 ++{
10024 ++ # - unicast connected, unicast recursive
10025 ++ #
10026 ++ log_subsection "default VRF - main table"
10027 ++
10028 ++ run_ip 254 ${TEST_NET4[1]}.1 ${CONGW[1]} ${NETIFS[p1]} 0 "unicast connected"
10029 ++ run_ip 254 ${TEST_NET4[1]}.2 ${RECGW4[1]} ${NETIFS[p1]} 0 "unicast recursive"
10030 ++
10031 ++ log_subsection "VRF ${VRF}"
10032 ++
10033 ++ run_ip ${VRF_TABLE} ${TEST_NET4[2]}.1 ${CONGW[2]} ${NETIFS[p5]} 0 "unicast connected"
10034 ++ run_ip ${VRF_TABLE} ${TEST_NET4[2]}.2 ${RECGW4[2]} ${NETIFS[p5]} 0 "unicast recursive"
10035 ++
10036 ++ log_subsection "VRF device, PBR table"
10037 ++
10038 ++ run_ip ${PBR_TABLE} ${TEST_NET4[2]}.3 ${CONGW[2]} ${NETIFS[p5]} 0 "unicast connected"
10039 ++ run_ip ${PBR_TABLE} ${TEST_NET4[2]}.4 ${RECGW4[2]} ${NETIFS[p5]} 0 "unicast recursive"
10040 ++}
10041 ++
10042 ++invalid_onlink_ipv4()
10043 ++{
10044 ++ run_ip 254 ${TEST_NET4[1]}.11 ${V4ADDRS[p1]} ${NETIFS[p1]} 2 \
10045 ++ "Invalid gw - local unicast address"
10046 ++
10047 ++ run_ip ${VRF_TABLE} ${TEST_NET4[2]}.11 ${V4ADDRS[p5]} ${NETIFS[p5]} 2 \
10048 ++ "Invalid gw - local unicast address, VRF"
10049 ++
10050 ++ run_ip 254 ${TEST_NET4[1]}.101 ${V4ADDRS[p1]} "" 2 "No nexthop device given"
10051 ++
10052 ++ run_ip 254 ${TEST_NET4[1]}.102 ${V4ADDRS[p3]} ${NETIFS[p1]} 2 \
10053 ++ "Gateway resolves to wrong nexthop device"
10054 ++
10055 ++ run_ip ${VRF_TABLE} ${TEST_NET4[2]}.103 ${V4ADDRS[p7]} ${NETIFS[p5]} 2 \
10056 ++ "Gateway resolves to wrong nexthop device - VRF"
10057 ++}
10058 ++
10059 ++################################################################################
10060 ++# IPv6 tests
10061 ++#
10062 ++
10063 ++run_ip6()
10064 ++{
10065 ++ local table="$1"
10066 ++ local prefix="$2"
10067 ++ local gw="$3"
10068 ++ local dev="$4"
10069 ++ local exp_rc="$5"
10070 ++ local desc="$6"
10071 ++
10072 ++ # dev arg may be empty
10073 ++ [ -n "${dev}" ] && dev="dev ${dev}"
10074 ++
10075 ++ run_cmd ip -6 ro add table "${table}" "${prefix}"/128 via "${gw}" "${dev}" onlink
10076 ++ log_test $? ${exp_rc} "${desc}"
10077 ++}
10078 ++
10079 ++valid_onlink_ipv6()
10080 ++{
10081 ++ # - unicast connected, unicast recursive, v4-mapped
10082 ++ #
10083 ++ log_subsection "default VRF - main table"
10084 ++
10085 ++ run_ip6 254 ${TEST_NET6[1]}::1 ${V6ADDRS[p1]/::*}::64 ${NETIFS[p1]} 0 "unicast connected"
10086 ++ run_ip6 254 ${TEST_NET6[1]}::2 ${RECGW6[1]} ${NETIFS[p1]} 0 "unicast recursive"
10087 ++ run_ip6 254 ${TEST_NET6[1]}::3 ::ffff:${TEST_NET4IN6[1]} ${NETIFS[p1]} 0 "v4-mapped"
10088 ++
10089 ++ log_subsection "VRF ${VRF}"
10090 ++
10091 ++ run_ip6 ${VRF_TABLE} ${TEST_NET6[2]}::1 ${V6ADDRS[p5]/::*}::64 ${NETIFS[p5]} 0 "unicast connected"
10092 ++ run_ip6 ${VRF_TABLE} ${TEST_NET6[2]}::2 ${RECGW6[2]} ${NETIFS[p5]} 0 "unicast recursive"
10093 ++ run_ip6 ${VRF_TABLE} ${TEST_NET6[2]}::3 ::ffff:${TEST_NET4IN6[2]} ${NETIFS[p5]} 0 "v4-mapped"
10094 ++
10095 ++ log_subsection "VRF device, PBR table"
10096 ++
10097 ++ run_ip6 ${PBR_TABLE} ${TEST_NET6[2]}::4 ${V6ADDRS[p5]/::*}::64 ${NETIFS[p5]} 0 "unicast connected"
10098 ++ run_ip6 ${PBR_TABLE} ${TEST_NET6[2]}::5 ${RECGW6[2]} ${NETIFS[p5]} 0 "unicast recursive"
10099 ++ run_ip6 ${PBR_TABLE} ${TEST_NET6[2]}::6 ::ffff:${TEST_NET4IN6[2]} ${NETIFS[p5]} 0 "v4-mapped"
10100 ++}
10101 ++
10102 ++invalid_onlink_ipv6()
10103 ++{
10104 ++ local lladdr
10105 ++
10106 ++ lladdr=$(get_linklocal ${NETIFS[p1]}) || return 1
10107 ++
10108 ++ run_ip6 254 ${TEST_NET6[1]}::11 ${V6ADDRS[p1]} ${NETIFS[p1]} 2 \
10109 ++ "Invalid gw - local unicast address"
10110 ++ run_ip6 254 ${TEST_NET6[1]}::12 ${lladdr} ${NETIFS[p1]} 2 \
10111 ++ "Invalid gw - local linklocal address"
10112 ++ run_ip6 254 ${TEST_NET6[1]}::12 ${MCAST6} ${NETIFS[p1]} 2 \
10113 ++ "Invalid gw - multicast address"
10114 ++
10115 ++ lladdr=$(get_linklocal ${NETIFS[p5]}) || return 1
10116 ++ run_ip6 ${VRF_TABLE} ${TEST_NET6[2]}::11 ${V6ADDRS[p5]} ${NETIFS[p5]} 2 \
10117 ++ "Invalid gw - local unicast address, VRF"
10118 ++ run_ip6 ${VRF_TABLE} ${TEST_NET6[2]}::12 ${lladdr} ${NETIFS[p5]} 2 \
10119 ++ "Invalid gw - local linklocal address, VRF"
10120 ++ run_ip6 ${VRF_TABLE} ${TEST_NET6[2]}::12 ${MCAST6} ${NETIFS[p5]} 2 \
10121 ++ "Invalid gw - multicast address, VRF"
10122 ++
10123 ++ run_ip6 254 ${TEST_NET6[1]}::101 ${V6ADDRS[p1]} "" 2 \
10124 ++ "No nexthop device given"
10125 ++
10126 ++ # default VRF validation is done against LOCAL table
10127 ++ # run_ip6 254 ${TEST_NET6[1]}::102 ${V6ADDRS[p3]/::[0-9]/::64} ${NETIFS[p1]} 2 \
10128 ++ # "Gateway resolves to wrong nexthop device"
10129 ++
10130 ++ run_ip6 ${VRF_TABLE} ${TEST_NET6[2]}::103 ${V6ADDRS[p7]/::[0-9]/::64} ${NETIFS[p5]} 2 \
10131 ++ "Gateway resolves to wrong nexthop device - VRF"
10132 ++}
10133 ++
10134 ++run_onlink_tests()
10135 ++{
10136 ++ log_section "IPv4 onlink"
10137 ++ log_subsection "Valid onlink commands"
10138 ++ valid_onlink_ipv4
10139 ++ log_subsection "Invalid onlink commands"
10140 ++ invalid_onlink_ipv4
10141 ++
10142 ++ log_section "IPv6 onlink"
10143 ++ log_subsection "Valid onlink commands"
10144 ++ valid_onlink_ipv6
10145 ++ log_subsection "Invalid onlink commands"; invalid_onlink_ipv6
10146 ++}
10147 ++
10148 ++################################################################################
10149 ++# main
10150 ++
10151 ++nsuccess=0
10152 ++nfail=0
10153 ++
10154 ++cleanup
10155 ++setup
10156 ++run_onlink_tests
10157 ++cleanup
10158 ++
10159 ++if [ "$TESTS" != "none" ]; then
10160 ++ printf "\nTests passed: %3d\n" ${nsuccess}
10161 ++ printf "Tests failed: %3d\n" ${nfail}
10162 ++fi
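A brief usage note on the new selftest (inferred from the script
itself, not stated in the patch): it must run as root, since it creates
network namespaces, a VRF and veth pairs, and setting PAUSE_ON_FAIL=yes
in the environment makes it stop after each failing test so the state
can be inspected.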
10163 +diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
10164 +index 989f917068d1..d4346b16b2c1 100644
10165 +--- a/tools/testing/selftests/net/psock_fanout.c
10166 ++++ b/tools/testing/selftests/net/psock_fanout.c
10167 +@@ -128,6 +128,8 @@ static void sock_fanout_getopts(int fd, uint16_t *typeflags, uint16_t *group_id)
10168 +
10169 + static void sock_fanout_set_ebpf(int fd)
10170 + {
10171 ++ static char log_buf[65536];
10172 ++
10173 + const int len_off = __builtin_offsetof(struct __sk_buff, len);
10174 + struct bpf_insn prog[] = {
10175 + { BPF_ALU64 | BPF_MOV | BPF_X, 6, 1, 0, 0 },
10176 +@@ -140,7 +142,6 @@ static void sock_fanout_set_ebpf(int fd)
10177 + { BPF_ALU | BPF_MOV | BPF_K, 0, 0, 0, 0 },
10178 + { BPF_JMP | BPF_EXIT, 0, 0, 0, 0 }
10179 + };
10180 +- char log_buf[512];
10181 + union bpf_attr attr;
10182 + int pfd;
10183 +
10184 +diff --git a/tools/thermal/tmon/sysfs.c b/tools/thermal/tmon/sysfs.c
10185 +index 1c12536f2081..18f523557983 100644
10186 +--- a/tools/thermal/tmon/sysfs.c
10187 ++++ b/tools/thermal/tmon/sysfs.c
10188 +@@ -486,6 +486,7 @@ int zone_instance_to_index(int zone_inst)
10189 + int update_thermal_data()
10190 + {
10191 + int i;
10192 ++ int next_thermal_record = cur_thermal_record + 1;
10193 + char tz_name[256];
10194 + static unsigned long samples;
10195 +
10196 +@@ -495,9 +496,9 @@ int update_thermal_data()
10197 + }
10198 +
10199 + /* circular buffer for keeping historic data */
10200 +- if (cur_thermal_record >= NR_THERMAL_RECORDS)
10201 +- cur_thermal_record = 0;
10202 +- gettimeofday(&trec[cur_thermal_record].tv, NULL);
10203 ++ if (next_thermal_record >= NR_THERMAL_RECORDS)
10204 ++ next_thermal_record = 0;
10205 ++ gettimeofday(&trec[next_thermal_record].tv, NULL);
10206 + if (tmon_log) {
10207 + fprintf(tmon_log, "%lu ", ++samples);
10208 + fprintf(tmon_log, "%3.1f ", p_param.t_target);
10209 +@@ -507,11 +508,12 @@ int update_thermal_data()
10210 + snprintf(tz_name, 256, "%s/%s%d", THERMAL_SYSFS, TZONE,
10211 + ptdata.tzi[i].instance);
10212 + sysfs_get_ulong(tz_name, "temp",
10213 +- &trec[cur_thermal_record].temp[i]);
10214 ++ &trec[next_thermal_record].temp[i]);
10215 + if (tmon_log)
10216 + fprintf(tmon_log, "%lu ",
10217 +- trec[cur_thermal_record].temp[i]/1000);
10218 ++ trec[next_thermal_record].temp[i] / 1000);
10219 + }
10220 ++ cur_thermal_record = next_thermal_record;
10221 + for (i = 0; i < ptdata.nr_cooling_dev; i++) {
10222 + char cdev_name[256];
10223 + unsigned long val;
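The tmon hunks above change the sampling loop to fill the next ring
slot completely and only then advance cur_thermal_record, so consumers
never see a partially written record. A standalone sketch of that
fill-then-publish circular buffer pattern, with invented names, is
shown below.

    #include <stdio.h>

    #define NR_RECORDS 4

    struct record {
        long temp;
    };

    static struct record ring[NR_RECORDS];
    static int cur = -1;    /* index of last fully written slot */

    static void add_sample(long temp)
    {
        int next = cur + 1;

        if (next >= NR_RECORDS)
            next = 0;
        ring[next].temp = temp;    /* fill the slot first... */
        cur = next;                /* ...then publish it */
    }

    int main(void)
    {
        add_sample(42000);
        add_sample(43000);
        printf("latest: %ld\n", ring[cur].temp);    /* 43000 */
        return 0;
    }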
10224 +diff --git a/tools/thermal/tmon/tmon.c b/tools/thermal/tmon/tmon.c
10225 +index 9aa19652e8e8..b43138f8b862 100644
10226 +--- a/tools/thermal/tmon/tmon.c
10227 ++++ b/tools/thermal/tmon/tmon.c
10228 +@@ -336,7 +336,6 @@ int main(int argc, char **argv)
10229 + show_data_w();
10230 + show_cooling_device();
10231 + }
10232 +- cur_thermal_record++;
10233 + time_elapsed += ticktime;
10234 + controller_handler(trec[0].temp[target_tz_index] / 1000,
10235 + &yk);