
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.15 commit in: /
Date: Mon, 09 May 2022 10:57:13
Message-Id: 1652093805.3cf0d8dc86d4755756c79ce006f29f9fdad1b85c.mpagano@gentoo
1 commit: 3cf0d8dc86d4755756c79ce006f29f9fdad1b85c
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Mon May 9 10:56:45 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Mon May 9 10:56:45 2022 +0000
6 URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3cf0d8dc86d4755756c79ce006f29f9fdad1b85c
7
8 Linux patch 5.15.38
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1037_linux-5.15.38.patch | 6119 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 6123 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index cb4266b1..da0799e5 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -191,6 +191,10 @@ Patch: 1036_linux-5.15.37.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.15.37
23
24 +Patch: 1037_linux-5.15.38.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.15.38
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1037_linux-5.15.38.patch b/1037_linux-5.15.38.patch
33 new file mode 100644
34 index 00000000..d1228d57
35 --- /dev/null
36 +++ b/1037_linux-5.15.38.patch
37 @@ -0,0 +1,6119 @@
38 +diff --git a/Makefile b/Makefile
39 +index 50b1688a4ca2c..73b884c9baa40 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 15
46 +-SUBLEVEL = 37
47 ++SUBLEVEL = 38
48 + EXTRAVERSION =
49 + NAME = Trick or Treat
50 +
51 +diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts
52 +index 0d2fac98ce7d2..c8b80f156ec98 100644
53 +--- a/arch/arm/boot/dts/am3517-evm.dts
54 ++++ b/arch/arm/boot/dts/am3517-evm.dts
55 +@@ -161,6 +161,8 @@
56 +
57 + /* HS USB Host PHY on PORT 1 */
58 + hsusb1_phy: hsusb1_phy {
59 ++ pinctrl-names = "default";
60 ++ pinctrl-0 = <&hsusb1_rst_pins>;
61 + compatible = "usb-nop-xceiv";
62 + reset-gpios = <&gpio2 25 GPIO_ACTIVE_LOW>; /* gpio_57 */
63 + #phy-cells = <0>;
64 +@@ -168,7 +170,9 @@
65 + };
66 +
67 + &davinci_emac {
68 +- status = "okay";
69 ++ pinctrl-names = "default";
70 ++ pinctrl-0 = <&ethernet_pins>;
71 ++ status = "okay";
72 + };
73 +
74 + &davinci_mdio {
75 +@@ -193,6 +197,8 @@
76 + };
77 +
78 + &i2c2 {
79 ++ pinctrl-names = "default";
80 ++ pinctrl-0 = <&i2c2_pins>;
81 + clock-frequency = <400000>;
82 + /* User DIP swithes [1:8] / User LEDS [1:2] */
83 + tca6416: gpio@21 {
84 +@@ -205,6 +211,8 @@
85 + };
86 +
87 + &i2c3 {
88 ++ pinctrl-names = "default";
89 ++ pinctrl-0 = <&i2c3_pins>;
90 + clock-frequency = <400000>;
91 + };
92 +
93 +@@ -223,6 +231,8 @@
94 + };
95 +
96 + &usbhshost {
97 ++ pinctrl-names = "default";
98 ++ pinctrl-0 = <&hsusb1_pins>;
99 + port1-mode = "ehci-phy";
100 + };
101 +
102 +@@ -231,8 +241,35 @@
103 + };
104 +
105 + &omap3_pmx_core {
106 +- pinctrl-names = "default";
107 +- pinctrl-0 = <&hsusb1_rst_pins>;
108 ++
109 ++ ethernet_pins: pinmux_ethernet_pins {
110 ++ pinctrl-single,pins = <
111 ++ OMAP3_CORE1_IOPAD(0x21fe, PIN_INPUT | MUX_MODE0) /* rmii_mdio_data */
112 ++ OMAP3_CORE1_IOPAD(0x2200, MUX_MODE0) /* rmii_mdio_clk */
113 ++ OMAP3_CORE1_IOPAD(0x2202, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii_rxd0 */
114 ++ OMAP3_CORE1_IOPAD(0x2204, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii_rxd1 */
115 ++ OMAP3_CORE1_IOPAD(0x2206, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii_crs_dv */
116 ++ OMAP3_CORE1_IOPAD(0x2208, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* rmii_rxer */
117 ++ OMAP3_CORE1_IOPAD(0x220a, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* rmii_txd0 */
118 ++ OMAP3_CORE1_IOPAD(0x220c, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* rmii_txd1 */
119 ++ OMAP3_CORE1_IOPAD(0x220e, PIN_OUTPUT_PULLDOWN |MUX_MODE0) /* rmii_txen */
120 ++ OMAP3_CORE1_IOPAD(0x2210, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii_50mhz_clk */
121 ++ >;
122 ++ };
123 ++
124 ++ i2c2_pins: pinmux_i2c2_pins {
125 ++ pinctrl-single,pins = <
126 ++ OMAP3_CORE1_IOPAD(0x21be, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c2_scl */
127 ++ OMAP3_CORE1_IOPAD(0x21c0, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c2_sda */
128 ++ >;
129 ++ };
130 ++
131 ++ i2c3_pins: pinmux_i2c3_pins {
132 ++ pinctrl-single,pins = <
133 ++ OMAP3_CORE1_IOPAD(0x21c2, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c3_scl */
134 ++ OMAP3_CORE1_IOPAD(0x21c4, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c3_sda */
135 ++ >;
136 ++ };
137 +
138 + leds_pins: pinmux_leds_pins {
139 + pinctrl-single,pins = <
140 +@@ -300,8 +337,6 @@
141 + };
142 +
143 + &omap3_pmx_core2 {
144 +- pinctrl-names = "default";
145 +- pinctrl-0 = <&hsusb1_pins>;
146 +
147 + hsusb1_pins: pinmux_hsusb1_pins {
148 + pinctrl-single,pins = <
149 +diff --git a/arch/arm/boot/dts/am3517-som.dtsi b/arch/arm/boot/dts/am3517-som.dtsi
150 +index 8b669e2eafec4..f7b680f6c48ad 100644
151 +--- a/arch/arm/boot/dts/am3517-som.dtsi
152 ++++ b/arch/arm/boot/dts/am3517-som.dtsi
153 +@@ -69,6 +69,8 @@
154 + };
155 +
156 + &i2c1 {
157 ++ pinctrl-names = "default";
158 ++ pinctrl-0 = <&i2c1_pins>;
159 + clock-frequency = <400000>;
160 +
161 + s35390a: s35390a@30 {
162 +@@ -179,6 +181,13 @@
163 +
164 + &omap3_pmx_core {
165 +
166 ++ i2c1_pins: pinmux_i2c1_pins {
167 ++ pinctrl-single,pins = <
168 ++ OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c1_scl */
169 ++ OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c1_sda */
170 ++ >;
171 ++ };
172 ++
173 + wl12xx_buffer_pins: pinmux_wl12xx_buffer_pins {
174 + pinctrl-single,pins = <
175 + OMAP3_CORE1_IOPAD(0x2156, PIN_OUTPUT | MUX_MODE4) /* mmc1_dat7.gpio_129 */
176 +diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
177 +index d72c042f28507..a49c2966b41e2 100644
178 +--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
179 ++++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
180 +@@ -57,8 +57,8 @@
181 + };
182 +
183 + spi0: spi@f0004000 {
184 +- pinctrl-names = "default";
185 +- pinctrl-0 = <&pinctrl_spi0_cs>;
186 ++ pinctrl-names = "default", "cs";
187 ++ pinctrl-1 = <&pinctrl_spi0_cs>;
188 + cs-gpios = <&pioD 13 0>, <0>, <0>, <&pioD 16 0>;
189 + status = "okay";
190 + };
191 +@@ -171,8 +171,8 @@
192 + };
193 +
194 + spi1: spi@f8008000 {
195 +- pinctrl-names = "default";
196 +- pinctrl-0 = <&pinctrl_spi1_cs>;
197 ++ pinctrl-names = "default", "cs";
198 ++ pinctrl-1 = <&pinctrl_spi1_cs>;
199 + cs-gpios = <&pioC 25 0>;
200 + status = "okay";
201 + };
202 +diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
203 +index d241c24f0d836..e519d27479362 100644
204 +--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
205 ++++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
206 +@@ -81,8 +81,8 @@
207 + };
208 +
209 + spi1: spi@fc018000 {
210 +- pinctrl-names = "default";
211 +- pinctrl-0 = <&pinctrl_spi0_cs>;
212 ++ pinctrl-names = "default", "cs";
213 ++ pinctrl-1 = <&pinctrl_spi1_cs>;
214 + cs-gpios = <&pioB 21 0>;
215 + status = "okay";
216 + };
217 +@@ -140,7 +140,7 @@
218 + atmel,pins =
219 + <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
220 + };
221 +- pinctrl_spi0_cs: spi0_cs_default {
222 ++ pinctrl_spi1_cs: spi1_cs_default {
223 + atmel,pins =
224 + <AT91_PIOB 21 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
225 + };
226 +diff --git a/arch/arm/boot/dts/at91-sama7g5ek.dts b/arch/arm/boot/dts/at91-sama7g5ek.dts
227 +index f3d6aaa3a78dc..bac0e49cc5770 100644
228 +--- a/arch/arm/boot/dts/at91-sama7g5ek.dts
229 ++++ b/arch/arm/boot/dts/at91-sama7g5ek.dts
230 +@@ -403,7 +403,7 @@
231 + pinctrl_flx3_default: flx3_default {
232 + pinmux = <PIN_PD16__FLEXCOM3_IO0>,
233 + <PIN_PD17__FLEXCOM3_IO1>;
234 +- bias-disable;
235 ++ bias-pull-up;
236 + };
237 +
238 + pinctrl_flx4_default: flx4_default {
239 +diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
240 +index 87bb39060e8be..ca03685f0f086 100644
241 +--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
242 ++++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
243 +@@ -219,6 +219,12 @@
244 + wm8731: wm8731@1b {
245 + compatible = "wm8731";
246 + reg = <0x1b>;
247 ++
248 ++ /* PCK0 at 12MHz */
249 ++ clocks = <&pmc PMC_TYPE_SYSTEM 8>;
250 ++ clock-names = "mclk";
251 ++ assigned-clocks = <&pmc PMC_TYPE_SYSTEM 8>;
252 ++ assigned-clock-rates = <12000000>;
253 + };
254 + };
255 +
256 +diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
257 +index 0a11bacffc1f1..5733e3a4ea8e7 100644
258 +--- a/arch/arm/boot/dts/dra7-l4.dtsi
259 ++++ b/arch/arm/boot/dts/dra7-l4.dtsi
260 +@@ -4188,11 +4188,11 @@
261 + reg = <0x1d0010 0x4>;
262 + reg-names = "sysc";
263 + ti,sysc-midle = <SYSC_IDLE_FORCE>,
264 +- <SYSC_IDLE_NO>,
265 +- <SYSC_IDLE_SMART>;
266 ++ <SYSC_IDLE_NO>;
267 + ti,sysc-sidle = <SYSC_IDLE_FORCE>,
268 + <SYSC_IDLE_NO>,
269 + <SYSC_IDLE_SMART>;
270 ++ power-domains = <&prm_vpe>;
271 + clocks = <&vpe_clkctrl DRA7_VPE_VPE_CLKCTRL 0>;
272 + clock-names = "fck";
273 + #address-cells = <1>;
274 +diff --git a/arch/arm/boot/dts/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
275 +index 30fa349f9d054..a696873dc1abe 100644
276 +--- a/arch/arm/boot/dts/imx6qdl-apalis.dtsi
277 ++++ b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
278 +@@ -286,6 +286,8 @@
279 + codec: sgtl5000@a {
280 + compatible = "fsl,sgtl5000";
281 + reg = <0x0a>;
282 ++ pinctrl-names = "default";
283 ++ pinctrl-0 = <&pinctrl_sgtl5000>;
284 + clocks = <&clks IMX6QDL_CLK_CKO>;
285 + VDDA-supply = <&reg_module_3v3_audio>;
286 + VDDIO-supply = <&reg_module_3v3>;
287 +@@ -516,8 +518,6 @@
288 + MX6QDL_PAD_DISP0_DAT21__AUD4_TXD 0x130b0
289 + MX6QDL_PAD_DISP0_DAT22__AUD4_TXFS 0x130b0
290 + MX6QDL_PAD_DISP0_DAT23__AUD4_RXD 0x130b0
291 +- /* SGTL5000 sys_mclk */
292 +- MX6QDL_PAD_GPIO_5__CCM_CLKO1 0x130b0
293 + >;
294 + };
295 +
296 +@@ -810,6 +810,12 @@
297 + >;
298 + };
299 +
300 ++ pinctrl_sgtl5000: sgtl5000grp {
301 ++ fsl,pins = <
302 ++ MX6QDL_PAD_GPIO_5__CCM_CLKO1 0x130b0
303 ++ >;
304 ++ };
305 ++
306 + pinctrl_spdif: spdifgrp {
307 + fsl,pins = <
308 + MX6QDL_PAD_GPIO_16__SPDIF_IN 0x1b0b0
309 +diff --git a/arch/arm/boot/dts/imx6ull-colibri.dtsi b/arch/arm/boot/dts/imx6ull-colibri.dtsi
310 +index 0cdbf7b6e7285..b6fc879e9dbe6 100644
311 +--- a/arch/arm/boot/dts/imx6ull-colibri.dtsi
312 ++++ b/arch/arm/boot/dts/imx6ull-colibri.dtsi
313 +@@ -37,7 +37,7 @@
314 +
315 + reg_sd1_vmmc: regulator-sd1-vmmc {
316 + compatible = "regulator-gpio";
317 +- gpio = <&gpio5 9 GPIO_ACTIVE_HIGH>;
318 ++ gpios = <&gpio5 9 GPIO_ACTIVE_HIGH>;
319 + pinctrl-names = "default";
320 + pinctrl-0 = <&pinctrl_snvs_reg_sd>;
321 + regulator-always-on;
322 +diff --git a/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts
323 +index 2a0a98fe67f06..3240c67e0c392 100644
324 +--- a/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts
325 ++++ b/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts
326 +@@ -11,3 +11,18 @@
327 + model = "LogicPD Zoom OMAP35xx SOM-LV Development Kit";
328 + compatible = "logicpd,dm3730-som-lv-devkit", "ti,omap3430", "ti,omap3";
329 + };
330 ++
331 ++&omap3_pmx_core2 {
332 ++ pinctrl-names = "default";
333 ++ pinctrl-0 = <&hsusb2_2_pins>;
334 ++ hsusb2_2_pins: pinmux_hsusb2_2_pins {
335 ++ pinctrl-single,pins = <
336 ++ OMAP3430_CORE2_IOPAD(0x25f0, PIN_OUTPUT | MUX_MODE3) /* etk_d10.hsusb2_clk */
337 ++ OMAP3430_CORE2_IOPAD(0x25f2, PIN_OUTPUT | MUX_MODE3) /* etk_d11.hsusb2_stp */
338 ++ OMAP3430_CORE2_IOPAD(0x25f4, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d12.hsusb2_dir */
339 ++ OMAP3430_CORE2_IOPAD(0x25f6, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d13.hsusb2_nxt */
340 ++ OMAP3430_CORE2_IOPAD(0x25f8, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d14.hsusb2_data0 */
341 ++ OMAP3430_CORE2_IOPAD(0x25fa, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d15.hsusb2_data1 */
342 ++ >;
343 ++ };
344 ++};
345 +diff --git a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
346 +index a604d92221a4f..c757f0d7781c1 100644
347 +--- a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
348 ++++ b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
349 +@@ -11,3 +11,18 @@
350 + model = "LogicPD Zoom DM3730 SOM-LV Development Kit";
351 + compatible = "logicpd,dm3730-som-lv-devkit", "ti,omap3630", "ti,omap3";
352 + };
353 ++
354 ++&omap3_pmx_core2 {
355 ++ pinctrl-names = "default";
356 ++ pinctrl-0 = <&hsusb2_2_pins>;
357 ++ hsusb2_2_pins: pinmux_hsusb2_2_pins {
358 ++ pinctrl-single,pins = <
359 ++ OMAP3630_CORE2_IOPAD(0x25f0, PIN_OUTPUT | MUX_MODE3) /* etk_d10.hsusb2_clk */
360 ++ OMAP3630_CORE2_IOPAD(0x25f2, PIN_OUTPUT | MUX_MODE3) /* etk_d11.hsusb2_stp */
361 ++ OMAP3630_CORE2_IOPAD(0x25f4, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d12.hsusb2_dir */
362 ++ OMAP3630_CORE2_IOPAD(0x25f6, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d13.hsusb2_nxt */
363 ++ OMAP3630_CORE2_IOPAD(0x25f8, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d14.hsusb2_data0 */
364 ++ OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d15.hsusb2_data1 */
365 ++ >;
366 ++ };
367 ++};
368 +diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
369 +index b56524cc7fe27..55b619c99e24d 100644
370 +--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
371 ++++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
372 +@@ -265,21 +265,6 @@
373 + };
374 + };
375 +
376 +-&omap3_pmx_core2 {
377 +- pinctrl-names = "default";
378 +- pinctrl-0 = <&hsusb2_2_pins>;
379 +- hsusb2_2_pins: pinmux_hsusb2_2_pins {
380 +- pinctrl-single,pins = <
381 +- OMAP3630_CORE2_IOPAD(0x25f0, PIN_OUTPUT | MUX_MODE3) /* etk_d10.hsusb2_clk */
382 +- OMAP3630_CORE2_IOPAD(0x25f2, PIN_OUTPUT | MUX_MODE3) /* etk_d11.hsusb2_stp */
383 +- OMAP3630_CORE2_IOPAD(0x25f4, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d12.hsusb2_dir */
384 +- OMAP3630_CORE2_IOPAD(0x25f6, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d13.hsusb2_nxt */
385 +- OMAP3630_CORE2_IOPAD(0x25f8, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d14.hsusb2_data0 */
386 +- OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d15.hsusb2_data1 */
387 +- >;
388 +- };
389 +-};
390 +-
391 + &uart2 {
392 + interrupts-extended = <&intc 73 &omap3_pmx_core OMAP3_UART2_RX>;
393 + pinctrl-names = "default";
394 +diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
395 +index 23ab27fe4ee5d..3923b38e798d0 100644
396 +--- a/arch/arm/boot/dts/omap3-gta04.dtsi
397 ++++ b/arch/arm/boot/dts/omap3-gta04.dtsi
398 +@@ -31,6 +31,8 @@
399 + aliases {
400 + display0 = &lcd;
401 + display1 = &tv0;
402 ++ /delete-property/ mmc2;
403 ++ /delete-property/ mmc3;
404 + };
405 +
406 + ldo_3v3: fixedregulator {
407 +diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
408 +index 5a48abac6af49..4b554cc8fa58a 100644
409 +--- a/arch/arm/mach-exynos/Kconfig
410 ++++ b/arch/arm/mach-exynos/Kconfig
411 +@@ -18,7 +18,6 @@ menuconfig ARCH_EXYNOS
412 + select EXYNOS_PMU
413 + select EXYNOS_SROM
414 + select EXYNOS_PM_DOMAINS if PM_GENERIC_DOMAINS
415 +- select GPIOLIB
416 + select HAVE_ARM_ARCH_TIMER if ARCH_EXYNOS5
417 + select HAVE_ARM_SCU if SMP
418 + select HAVE_S3C2410_I2C if I2C
419 +diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
420 +index 5c3845730dbf5..0b80f8bcd3047 100644
421 +--- a/arch/arm/mach-omap2/omap4-common.c
422 ++++ b/arch/arm/mach-omap2/omap4-common.c
423 +@@ -314,10 +314,12 @@ void __init omap_gic_of_init(void)
424 +
425 + np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-gic");
426 + gic_dist_base_addr = of_iomap(np, 0);
427 ++ of_node_put(np);
428 + WARN_ON(!gic_dist_base_addr);
429 +
430 + np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-twd-timer");
431 + twd_base = of_iomap(np, 0);
432 ++ of_node_put(np);
433 + WARN_ON(!twd_base);
434 +
435 + skip_errata_init:
436 +diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi
437 +index d61f43052a344..8e9ad1e51d665 100644
438 +--- a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi
439 ++++ b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi
440 +@@ -11,26 +11,6 @@
441 + compatible = "operating-points-v2";
442 + opp-shared;
443 +
444 +- opp-100000000 {
445 +- opp-hz = /bits/ 64 <100000000>;
446 +- opp-microvolt = <731000>;
447 +- };
448 +-
449 +- opp-250000000 {
450 +- opp-hz = /bits/ 64 <250000000>;
451 +- opp-microvolt = <731000>;
452 +- };
453 +-
454 +- opp-500000000 {
455 +- opp-hz = /bits/ 64 <500000000>;
456 +- opp-microvolt = <731000>;
457 +- };
458 +-
459 +- opp-667000000 {
460 +- opp-hz = /bits/ 64 <667000000>;
461 +- opp-microvolt = <731000>;
462 +- };
463 +-
464 + opp-1000000000 {
465 + opp-hz = /bits/ 64 <1000000000>;
466 + opp-microvolt = <761000>;
467 +@@ -71,26 +51,6 @@
468 + compatible = "operating-points-v2";
469 + opp-shared;
470 +
471 +- opp-100000000 {
472 +- opp-hz = /bits/ 64 <100000000>;
473 +- opp-microvolt = <731000>;
474 +- };
475 +-
476 +- opp-250000000 {
477 +- opp-hz = /bits/ 64 <250000000>;
478 +- opp-microvolt = <731000>;
479 +- };
480 +-
481 +- opp-500000000 {
482 +- opp-hz = /bits/ 64 <500000000>;
483 +- opp-microvolt = <731000>;
484 +- };
485 +-
486 +- opp-667000000 {
487 +- opp-hz = /bits/ 64 <667000000>;
488 +- opp-microvolt = <731000>;
489 +- };
490 +-
491 + opp-1000000000 {
492 + opp-hz = /bits/ 64 <1000000000>;
493 + opp-microvolt = <731000>;
494 +diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi
495 +index 1e5d0ee5d541b..44c23c984034c 100644
496 +--- a/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi
497 ++++ b/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi
498 +@@ -11,26 +11,6 @@
499 + compatible = "operating-points-v2";
500 + opp-shared;
501 +
502 +- opp-100000000 {
503 +- opp-hz = /bits/ 64 <100000000>;
504 +- opp-microvolt = <731000>;
505 +- };
506 +-
507 +- opp-250000000 {
508 +- opp-hz = /bits/ 64 <250000000>;
509 +- opp-microvolt = <731000>;
510 +- };
511 +-
512 +- opp-500000000 {
513 +- opp-hz = /bits/ 64 <500000000>;
514 +- opp-microvolt = <731000>;
515 +- };
516 +-
517 +- opp-667000000 {
518 +- opp-hz = /bits/ 64 <667000000>;
519 +- opp-microvolt = <731000>;
520 +- };
521 +-
522 + opp-1000000000 {
523 + opp-hz = /bits/ 64 <1000000000>;
524 + opp-microvolt = <731000>;
525 +@@ -76,26 +56,6 @@
526 + compatible = "operating-points-v2";
527 + opp-shared;
528 +
529 +- opp-100000000 {
530 +- opp-hz = /bits/ 64 <100000000>;
531 +- opp-microvolt = <751000>;
532 +- };
533 +-
534 +- opp-250000000 {
535 +- opp-hz = /bits/ 64 <250000000>;
536 +- opp-microvolt = <751000>;
537 +- };
538 +-
539 +- opp-500000000 {
540 +- opp-hz = /bits/ 64 <500000000>;
541 +- opp-microvolt = <751000>;
542 +- };
543 +-
544 +- opp-667000000 {
545 +- opp-hz = /bits/ 64 <667000000>;
546 +- opp-microvolt = <751000>;
547 +- };
548 +-
549 + opp-1000000000 {
550 + opp-hz = /bits/ 64 <1000000000>;
551 + opp-microvolt = <771000>;
552 +diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
553 +index 5751c48620edf..cadba194b149b 100644
554 +--- a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
555 ++++ b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
556 +@@ -437,6 +437,7 @@
557 + "",
558 + "eMMC_RST#", /* BOOT_12 */
559 + "eMMC_DS", /* BOOT_13 */
560 ++ "", "",
561 + /* GPIOC */
562 + "SD_D0_B", /* GPIOC_0 */
563 + "SD_D1_B", /* GPIOC_1 */
564 +diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
565 +index 3d8b1f4f2001b..78bdbd2ccc9de 100644
566 +--- a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
567 ++++ b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
568 +@@ -95,26 +95,6 @@
569 + compatible = "operating-points-v2";
570 + opp-shared;
571 +
572 +- opp-100000000 {
573 +- opp-hz = /bits/ 64 <100000000>;
574 +- opp-microvolt = <730000>;
575 +- };
576 +-
577 +- opp-250000000 {
578 +- opp-hz = /bits/ 64 <250000000>;
579 +- opp-microvolt = <730000>;
580 +- };
581 +-
582 +- opp-500000000 {
583 +- opp-hz = /bits/ 64 <500000000>;
584 +- opp-microvolt = <730000>;
585 +- };
586 +-
587 +- opp-667000000 {
588 +- opp-hz = /bits/ 64 <666666666>;
589 +- opp-microvolt = <750000>;
590 +- };
591 +-
592 + opp-1000000000 {
593 + opp-hz = /bits/ 64 <1000000000>;
594 + opp-microvolt = <770000>;
595 +diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
596 +index 8e4a0ce99790b..7ea909a4c1d5e 100644
597 +--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
598 ++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
599 +@@ -103,12 +103,14 @@
600 +
601 + &usbotg1 {
602 + dr_mode = "otg";
603 ++ over-current-active-low;
604 + vbus-supply = <&reg_usb_otg1_vbus>;
605 + status = "okay";
606 + };
607 +
608 + &usbotg2 {
609 + dr_mode = "host";
610 ++ disable-over-current;
611 + status = "okay";
612 + };
613 +
614 +@@ -166,7 +168,7 @@
615 + fsl,pins = <
616 + MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
617 + MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0xd6
618 +- MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
619 ++ MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0xd6
620 + MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0xd6
621 + >;
622 + };
623 +diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
624 +index b7c91bdc21dd9..806ee21651d1f 100644
625 +--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
626 ++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
627 +@@ -139,12 +139,14 @@
628 +
629 + &usbotg1 {
630 + dr_mode = "otg";
631 ++ over-current-active-low;
632 + vbus-supply = <&reg_usb_otg1_vbus>;
633 + status = "okay";
634 + };
635 +
636 + &usbotg2 {
637 + dr_mode = "host";
638 ++ disable-over-current;
639 + vbus-supply = <&reg_usb_otg2_vbus>;
640 + status = "okay";
641 + };
642 +@@ -231,7 +233,7 @@
643 + fsl,pins = <
644 + MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
645 + MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0xd6
646 +- MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
647 ++ MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0xd6
648 + MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0xd6
649 + >;
650 + };
651 +diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
652 +index d2ffd62a3bd46..942fed2eed643 100644
653 +--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
654 ++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
655 +@@ -166,12 +166,14 @@
656 +
657 + &usbotg1 {
658 + dr_mode = "otg";
659 ++ over-current-active-low;
660 + vbus-supply = <&reg_usb_otg1_vbus>;
661 + status = "okay";
662 + };
663 +
664 + &usbotg2 {
665 + dr_mode = "host";
666 ++ disable-over-current;
667 + vbus-supply = <&reg_usb_otg2_vbus>;
668 + status = "okay";
669 + };
670 +@@ -280,7 +282,7 @@
671 + fsl,pins = <
672 + MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
673 + MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0xd6
674 +- MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
675 ++ MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0xd6
676 + MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0xd6
677 + >;
678 + };
679 +diff --git a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
680 +index 7dfee715a2c4d..d8ce217c60166 100644
681 +--- a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
682 ++++ b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
683 +@@ -59,6 +59,10 @@
684 + interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
685 + rohm,reset-snvs-powered;
686 +
687 ++ #clock-cells = <0>;
688 ++ clocks = <&osc_32k 0>;
689 ++ clock-output-names = "clk-32k-out";
690 ++
691 + regulators {
692 + buck1_reg: BUCK1 {
693 + regulator-name = "buck1";
694 +diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
695 +index da6c942fb7f9d..6d6cbd4c83b8f 100644
696 +--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
697 ++++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
698 +@@ -263,7 +263,7 @@
699 + ranges;
700 +
701 + sai2: sai@30020000 {
702 +- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
703 ++ compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
704 + reg = <0x30020000 0x10000>;
705 + interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
706 + clocks = <&clk IMX8MN_CLK_SAI2_IPG>,
707 +@@ -277,7 +277,7 @@
708 + };
709 +
710 + sai3: sai@30030000 {
711 +- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
712 ++ compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
713 + reg = <0x30030000 0x10000>;
714 + interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
715 + clocks = <&clk IMX8MN_CLK_SAI3_IPG>,
716 +@@ -291,7 +291,7 @@
717 + };
718 +
719 + sai5: sai@30050000 {
720 +- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
721 ++ compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
722 + reg = <0x30050000 0x10000>;
723 + interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
724 + clocks = <&clk IMX8MN_CLK_SAI5_IPG>,
725 +@@ -307,7 +307,7 @@
726 + };
727 +
728 + sai6: sai@30060000 {
729 +- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
730 ++ compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
731 + reg = <0x30060000 0x10000>;
732 + interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
733 + clocks = <&clk IMX8MN_CLK_SAI6_IPG>,
734 +@@ -364,7 +364,7 @@
735 + };
736 +
737 + sai7: sai@300b0000 {
738 +- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
739 ++ compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
740 + reg = <0x300b0000 0x10000>;
741 + interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
742 + clocks = <&clk IMX8MN_CLK_SAI7_IPG>,
743 +diff --git a/arch/arm64/boot/dts/freescale/imx8qm.dtsi b/arch/arm64/boot/dts/freescale/imx8qm.dtsi
744 +index aebbe2b84aa13..a143f38bc78bd 100644
745 +--- a/arch/arm64/boot/dts/freescale/imx8qm.dtsi
746 ++++ b/arch/arm64/boot/dts/freescale/imx8qm.dtsi
747 +@@ -155,7 +155,7 @@
748 + };
749 +
750 + clk: clock-controller {
751 +- compatible = "fsl,imx8qxp-clk", "fsl,scu-clk";
752 ++ compatible = "fsl,imx8qm-clk", "fsl,scu-clk";
753 + #clock-cells = <2>;
754 + };
755 +
756 +diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
757 +index 02d4719bf43a8..232e4549defe1 100644
758 +--- a/arch/powerpc/kernel/reloc_64.S
759 ++++ b/arch/powerpc/kernel/reloc_64.S
760 +@@ -8,8 +8,10 @@
761 + #include <asm/ppc_asm.h>
762 +
763 + RELA = 7
764 +-RELACOUNT = 0x6ffffff9
765 ++RELASZ = 8
766 ++RELAENT = 9
767 + R_PPC64_RELATIVE = 22
768 ++R_PPC64_UADDR64 = 43
769 +
770 + /*
771 + * r3 = desired final address of kernel
772 +@@ -25,29 +27,38 @@ _GLOBAL(relocate)
773 + add r9,r9,r12 /* r9 has runtime addr of .rela.dyn section */
774 + ld r10,(p_st - 0b)(r12)
775 + add r10,r10,r12 /* r10 has runtime addr of _stext */
776 ++ ld r13,(p_sym - 0b)(r12)
777 ++ add r13,r13,r12 /* r13 has runtime addr of .dynsym */
778 +
779 + /*
780 +- * Scan the dynamic section for the RELA and RELACOUNT entries.
781 ++ * Scan the dynamic section for the RELA, RELASZ and RELAENT entries.
782 + */
783 + li r7,0
784 + li r8,0
785 +-1: ld r6,0(r11) /* get tag */
786 ++.Ltags:
787 ++ ld r6,0(r11) /* get tag */
788 + cmpdi r6,0
789 +- beq 4f /* end of list */
790 ++ beq .Lend_of_list /* end of list */
791 + cmpdi r6,RELA
792 + bne 2f
793 + ld r7,8(r11) /* get RELA pointer in r7 */
794 +- b 3f
795 +-2: addis r6,r6,(-RELACOUNT)@ha
796 +- cmpdi r6,RELACOUNT@l
797 ++ b 4f
798 ++2: cmpdi r6,RELASZ
799 + bne 3f
800 +- ld r8,8(r11) /* get RELACOUNT value in r8 */
801 +-3: addi r11,r11,16
802 +- b 1b
803 +-4: cmpdi r7,0 /* check we have both RELA and RELACOUNT */
804 ++ ld r8,8(r11) /* get RELASZ value in r8 */
805 ++ b 4f
806 ++3: cmpdi r6,RELAENT
807 ++ bne 4f
808 ++ ld r12,8(r11) /* get RELAENT value in r12 */
809 ++4: addi r11,r11,16
810 ++ b .Ltags
811 ++.Lend_of_list:
812 ++ cmpdi r7,0 /* check we have RELA, RELASZ, RELAENT */
813 + cmpdi cr1,r8,0
814 +- beq 6f
815 +- beq cr1,6f
816 ++ beq .Lout
817 ++ beq cr1,.Lout
818 ++ cmpdi r12,0
819 ++ beq .Lout
820 +
821 + /*
822 + * Work out linktime address of _stext and hence the
823 +@@ -62,23 +73,39 @@ _GLOBAL(relocate)
824 +
825 + /*
826 + * Run through the list of relocations and process the
827 +- * R_PPC64_RELATIVE ones.
828 ++ * R_PPC64_RELATIVE and R_PPC64_UADDR64 ones.
829 + */
830 ++ divd r8,r8,r12 /* RELASZ / RELAENT */
831 + mtctr r8
832 +-5: ld r0,8(9) /* ELF64_R_TYPE(reloc->r_info) */
833 ++.Lrels: ld r0,8(r9) /* ELF64_R_TYPE(reloc->r_info) */
834 + cmpdi r0,R_PPC64_RELATIVE
835 +- bne 6f
836 ++ bne .Luaddr64
837 + ld r6,0(r9) /* reloc->r_offset */
838 + ld r0,16(r9) /* reloc->r_addend */
839 ++ b .Lstore
840 ++.Luaddr64:
841 ++ srdi r14,r0,32 /* ELF64_R_SYM(reloc->r_info) */
842 ++ clrldi r0,r0,32
843 ++ cmpdi r0,R_PPC64_UADDR64
844 ++ bne .Lnext
845 ++ ld r6,0(r9)
846 ++ ld r0,16(r9)
847 ++ mulli r14,r14,24 /* 24 == sizeof(elf64_sym) */
848 ++ add r14,r14,r13 /* elf64_sym[ELF64_R_SYM] */
849 ++ ld r14,8(r14)
850 ++ add r0,r0,r14
851 ++.Lstore:
852 + add r0,r0,r3
853 + stdx r0,r7,r6
854 +- addi r9,r9,24
855 +- bdnz 5b
856 +-
857 +-6: blr
858 ++.Lnext:
859 ++ add r9,r9,r12
860 ++ bdnz .Lrels
861 ++.Lout:
862 ++ blr
863 +
864 + .balign 8
865 + p_dyn: .8byte __dynamic_start - 0b
866 + p_rela: .8byte __rela_dyn_start - 0b
867 ++p_sym: .8byte __dynamic_symtab - 0b
868 + p_st: .8byte _stext - 0b
869 +
870 +diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
871 +index 40bdefe9caa73..1a63e37f336ab 100644
872 +--- a/arch/powerpc/kernel/vmlinux.lds.S
873 ++++ b/arch/powerpc/kernel/vmlinux.lds.S
874 +@@ -275,9 +275,7 @@ SECTIONS
875 + . = ALIGN(8);
876 + .dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
877 + {
878 +-#ifdef CONFIG_PPC32
879 + __dynamic_symtab = .;
880 +-#endif
881 + *(.dynsym)
882 + }
883 + .dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
884 +diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile
885 +index 2f46e31c76129..4f53d0b97539b 100644
886 +--- a/arch/powerpc/perf/Makefile
887 ++++ b/arch/powerpc/perf/Makefile
888 +@@ -3,11 +3,11 @@
889 + obj-y += callchain.o callchain_$(BITS).o perf_regs.o
890 + obj-$(CONFIG_COMPAT) += callchain_32.o
891 +
892 +-obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o bhrb.o
893 ++obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o
894 + obj64-$(CONFIG_PPC_PERF_CTRS) += ppc970-pmu.o power5-pmu.o \
895 + power5+-pmu.o power6-pmu.o power7-pmu.o \
896 + isa207-common.o power8-pmu.o power9-pmu.o \
897 +- generic-compat-pmu.o power10-pmu.o
898 ++ generic-compat-pmu.o power10-pmu.o bhrb.o
899 + obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
900 +
901 + obj-$(CONFIG_PPC_POWERNV) += imc-pmu.o
902 +diff --git a/arch/powerpc/tools/relocs_check.sh b/arch/powerpc/tools/relocs_check.sh
903 +index 014e00e74d2b6..63792af004170 100755
904 +--- a/arch/powerpc/tools/relocs_check.sh
905 ++++ b/arch/powerpc/tools/relocs_check.sh
906 +@@ -39,6 +39,7 @@ $objdump -R "$vmlinux" |
907 + # R_PPC_NONE
908 + grep -F -w -v 'R_PPC64_RELATIVE
909 + R_PPC64_NONE
910 ++R_PPC64_UADDR64
911 + R_PPC_ADDR16_LO
912 + R_PPC_ADDR16_HI
913 + R_PPC_ADDR16_HA
914 +@@ -54,9 +55,3 @@ fi
915 + num_bad=$(echo "$bad_relocs" | wc -l)
916 + echo "WARNING: $num_bad bad relocations"
917 + echo "$bad_relocs"
918 +-
919 +-# If we see this type of relocation it's an idication that
920 +-# we /may/ be using an old version of binutils.
921 +-if echo "$bad_relocs" | grep -q -F -w R_PPC64_UADDR64; then
922 +- echo "WARNING: You need at least binutils >= 2.19 to build a CONFIG_RELOCATABLE kernel"
923 +-fi
924 +diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
925 +index 0b552873a5778..765004b605132 100644
926 +--- a/arch/riscv/kernel/patch.c
927 ++++ b/arch/riscv/kernel/patch.c
928 +@@ -104,7 +104,7 @@ static int patch_text_cb(void *data)
929 + struct patch_insn *patch = data;
930 + int ret = 0;
931 +
932 +- if (atomic_inc_return(&patch->cpu_count) == 1) {
933 ++ if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
934 + ret =
935 + patch_text_nosync(patch->addr, &patch->insn,
936 + GET_INSN_LENGTH(patch->insn));
937 +diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
938 +index ab45a220fac47..fcbfe94903bb8 100644
939 +--- a/arch/x86/include/asm/microcode.h
940 ++++ b/arch/x86/include/asm/microcode.h
941 +@@ -132,10 +132,12 @@ extern void load_ucode_ap(void);
942 + void reload_early_microcode(void);
943 + extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
944 + extern bool initrd_gone;
945 ++void microcode_bsp_resume(void);
946 + #else
947 + static inline void __init load_ucode_bsp(void) { }
948 + static inline void load_ucode_ap(void) { }
949 + static inline void reload_early_microcode(void) { }
950 ++static inline void microcode_bsp_resume(void) { }
951 + static inline bool
952 + get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; }
953 + #endif
954 +diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
955 +index efb69be41ab18..150ebfb8c12ed 100644
956 +--- a/arch/x86/kernel/cpu/microcode/core.c
957 ++++ b/arch/x86/kernel/cpu/microcode/core.c
958 +@@ -775,9 +775,9 @@ static struct subsys_interface mc_cpu_interface = {
959 + };
960 +
961 + /**
962 +- * mc_bp_resume - Update boot CPU microcode during resume.
963 ++ * microcode_bsp_resume - Update boot CPU microcode during resume.
964 + */
965 +-static void mc_bp_resume(void)
966 ++void microcode_bsp_resume(void)
967 + {
968 + int cpu = smp_processor_id();
969 + struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
970 +@@ -789,7 +789,7 @@ static void mc_bp_resume(void)
971 + }
972 +
973 + static struct syscore_ops mc_syscore_ops = {
974 +- .resume = mc_bp_resume,
975 ++ .resume = microcode_bsp_resume,
976 + };
977 +
978 + static int mc_cpu_starting(unsigned int cpu)
979 +diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
980 +index 508c81e97ab10..f1c0befb62df5 100644
981 +--- a/arch/x86/lib/usercopy_64.c
982 ++++ b/arch/x86/lib/usercopy_64.c
983 +@@ -121,7 +121,7 @@ void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
984 +
985 + /* cache copy and flush to align dest */
986 + if (!IS_ALIGNED(dest, 8)) {
987 +- unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);
988 ++ size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest);
989 +
990 + memcpy((void *) dest, (void *) source, len);
991 + clean_cache_range((void *) dest, len);
992 +diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
993 +index 5debe4ac6f819..f153e9ab8c966 100644
994 +--- a/arch/x86/pci/xen.c
995 ++++ b/arch/x86/pci/xen.c
996 +@@ -472,7 +472,6 @@ static __init void xen_setup_pci_msi(void)
997 + xen_msi_ops.setup_msi_irqs = xen_setup_msi_irqs;
998 + }
999 + xen_msi_ops.teardown_msi_irqs = xen_pv_teardown_msi_irqs;
1000 +- pci_msi_ignore_mask = 1;
1001 + } else if (xen_hvm_domain()) {
1002 + xen_msi_ops.setup_msi_irqs = xen_hvm_setup_msi_irqs;
1003 + xen_msi_ops.teardown_msi_irqs = xen_teardown_msi_irqs;
1004 +@@ -486,6 +485,11 @@ static __init void xen_setup_pci_msi(void)
1005 + * in allocating the native domain and never use it.
1006 + */
1007 + x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain;
1008 ++ /*
1009 ++ * With XEN PIRQ/Eventchannels in use PCI/MSI[-X] masking is solely
1010 ++ * controlled by the hypervisor.
1011 ++ */
1012 ++ pci_msi_ignore_mask = 1;
1013 + }
1014 +
1015 + #else /* CONFIG_PCI_MSI */
1016 +diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
1017 +index 736008f2fcccd..732cb075d7072 100644
1018 +--- a/arch/x86/power/cpu.c
1019 ++++ b/arch/x86/power/cpu.c
1020 +@@ -25,6 +25,7 @@
1021 + #include <asm/cpu.h>
1022 + #include <asm/mmu_context.h>
1023 + #include <asm/cpu_device_id.h>
1024 ++#include <asm/microcode.h>
1025 +
1026 + #ifdef CONFIG_X86_32
1027 + __visible unsigned long saved_context_ebx;
1028 +@@ -262,11 +263,18 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
1029 + x86_platform.restore_sched_clock_state();
1030 + mtrr_bp_restore();
1031 + perf_restore_debug_store();
1032 +- msr_restore_context(ctxt);
1033 +
1034 + c = &cpu_data(smp_processor_id());
1035 + if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
1036 + init_ia32_feat_ctl(c);
1037 ++
1038 ++ microcode_bsp_resume();
1039 ++
1040 ++ /*
1041 ++ * This needs to happen after the microcode has been updated upon resume
1042 ++ * because some of the MSRs are "emulated" in microcode.
1043 ++ */
1044 ++ msr_restore_context(ctxt);
1045 + }
1046 +
1047 + /* Needed by apm.c */
1048 +diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
1049 +index 81d7c7e8f7e96..10b79d3c74e07 100644
1050 +--- a/arch/xtensa/platforms/iss/console.c
1051 ++++ b/arch/xtensa/platforms/iss/console.c
1052 +@@ -36,24 +36,19 @@ static void rs_poll(struct timer_list *);
1053 + static struct tty_driver *serial_driver;
1054 + static struct tty_port serial_port;
1055 + static DEFINE_TIMER(serial_timer, rs_poll);
1056 +-static DEFINE_SPINLOCK(timer_lock);
1057 +
1058 + static int rs_open(struct tty_struct *tty, struct file * filp)
1059 + {
1060 +- spin_lock_bh(&timer_lock);
1061 + if (tty->count == 1)
1062 + mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
1063 +- spin_unlock_bh(&timer_lock);
1064 +
1065 + return 0;
1066 + }
1067 +
1068 + static void rs_close(struct tty_struct *tty, struct file * filp)
1069 + {
1070 +- spin_lock_bh(&timer_lock);
1071 + if (tty->count == 1)
1072 + del_timer_sync(&serial_timer);
1073 +- spin_unlock_bh(&timer_lock);
1074 + }
1075 +
1076 +
1077 +@@ -73,8 +68,6 @@ static void rs_poll(struct timer_list *unused)
1078 + int rd = 1;
1079 + unsigned char c;
1080 +
1081 +- spin_lock(&timer_lock);
1082 +-
1083 + while (simc_poll(0)) {
1084 + rd = simc_read(0, &c, 1);
1085 + if (rd <= 0)
1086 +@@ -87,7 +80,6 @@ static void rs_poll(struct timer_list *unused)
1087 + tty_flip_buffer_push(port);
1088 + if (rd)
1089 + mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
1090 +- spin_unlock(&timer_lock);
1091 + }
1092 +
1093 +
1094 +diff --git a/block/blk-iocost.c b/block/blk-iocost.c
1095 +index eb7b0d6bd11f6..10851493940c3 100644
1096 +--- a/block/blk-iocost.c
1097 ++++ b/block/blk-iocost.c
1098 +@@ -2322,7 +2322,17 @@ static void ioc_timer_fn(struct timer_list *timer)
1099 + iocg->hweight_donating = hwa;
1100 + iocg->hweight_after_donation = new_hwi;
1101 + list_add(&iocg->surplus_list, &surpluses);
1102 +- } else {
1103 ++ } else if (!iocg->abs_vdebt) {
1104 ++ /*
1105 ++ * @iocg doesn't have enough to donate. Reset
1106 ++ * its inuse to active.
1107 ++ *
1108 ++ * Don't reset debtors as their inuse's are
1109 ++ * owned by debt handling. This shouldn't affect
1110 ++ * donation calculuation in any meaningful way
1111 ++ * as @iocg doesn't have a meaningful amount of
1112 ++ * share anyway.
1113 ++ */
1114 + TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
1115 + iocg->inuse, iocg->active,
1116 + iocg->hweight_inuse, new_hwi);
1117 +diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
1118 +index 1fd6a4a34c154..aedcb92491f2e 100644
1119 +--- a/drivers/acpi/processor_idle.c
1120 ++++ b/drivers/acpi/processor_idle.c
1121 +@@ -95,11 +95,6 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
1122 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
1123 + DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
1124 + (void *)1},
1125 +- /* T40 can not handle C3 idle state */
1126 +- { set_max_cstate, "IBM ThinkPad T40", {
1127 +- DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
1128 +- DMI_MATCH(DMI_PRODUCT_NAME, "23737CU")},
1129 +- (void *)2},
1130 + {},
1131 + };
1132 +
1133 +@@ -797,7 +792,8 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
1134 + if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
1135 + cx->type == ACPI_STATE_C3) {
1136 + state->enter_dead = acpi_idle_play_dead;
1137 +- drv->safe_state_index = count;
1138 ++ if (cx->type != ACPI_STATE_C3)
1139 ++ drv->safe_state_index = count;
1140 + }
1141 + /*
1142 + * Halt-induced C1 is not good for ->enter_s2idle, because it
1143 +diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
1144 +index 43407665918f3..ef4fc89f085d8 100644
1145 +--- a/drivers/base/arch_topology.c
1146 ++++ b/drivers/base/arch_topology.c
1147 +@@ -609,7 +609,7 @@ void update_siblings_masks(unsigned int cpuid)
1148 + for_each_online_cpu(cpu) {
1149 + cpu_topo = &cpu_topology[cpu];
1150 +
1151 +- if (cpuid_topo->llc_id == cpu_topo->llc_id) {
1152 ++ if (cpu_topo->llc_id != -1 && cpuid_topo->llc_id == cpu_topo->llc_id) {
1153 + cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
1154 + cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
1155 + }
1156 +diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
1157 +index d243526b23d86..0982642a7907e 100644
1158 +--- a/drivers/bus/mhi/pci_generic.c
1159 ++++ b/drivers/bus/mhi/pci_generic.c
1160 +@@ -1020,6 +1020,7 @@ static int __maybe_unused mhi_pci_freeze(struct device *dev)
1161 + * the intermediate restore kernel reinitializes MHI device with new
1162 + * context.
1163 + */
1164 ++ flush_work(&mhi_pdev->recovery_work);
1165 + if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
1166 + mhi_power_down(mhi_cntrl, true);
1167 + mhi_unprepare_after_power_down(mhi_cntrl);
1168 +@@ -1045,6 +1046,7 @@ static const struct dev_pm_ops mhi_pci_pm_ops = {
1169 + .resume = mhi_pci_resume,
1170 + .freeze = mhi_pci_freeze,
1171 + .thaw = mhi_pci_restore,
1172 ++ .poweroff = mhi_pci_freeze,
1173 + .restore = mhi_pci_restore,
1174 + #endif
1175 + };
1176 +diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
1177 +index 4566e730ef2b8..60b082fe2ed02 100644
1178 +--- a/drivers/bus/sunxi-rsb.c
1179 ++++ b/drivers/bus/sunxi-rsb.c
1180 +@@ -227,6 +227,8 @@ static struct sunxi_rsb_device *sunxi_rsb_device_create(struct sunxi_rsb *rsb,
1181 +
1182 + dev_dbg(&rdev->dev, "device %s registered\n", dev_name(&rdev->dev));
1183 +
1184 ++ return rdev;
1185 ++
1186 + err_device_add:
1187 + put_device(&rdev->dev);
1188 +
1189 +diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
1190 +index ebf22929ff328..00d46f3ae22fb 100644
1191 +--- a/drivers/bus/ti-sysc.c
1192 ++++ b/drivers/bus/ti-sysc.c
1193 +@@ -3162,13 +3162,27 @@ static int sysc_check_disabled_devices(struct sysc *ddata)
1194 + */
1195 + static int sysc_check_active_timer(struct sysc *ddata)
1196 + {
1197 ++ int error;
1198 ++
1199 + if (ddata->cap->type != TI_SYSC_OMAP2_TIMER &&
1200 + ddata->cap->type != TI_SYSC_OMAP4_TIMER)
1201 + return 0;
1202 +
1203 ++ /*
1204 ++ * Quirk for omap3 beagleboard revision A to B4 to use gpt12.
1205 ++ * Revision C and later are fixed with commit 23885389dbbb ("ARM:
1206 ++ * dts: Fix timer regression for beagleboard revision c"). This all
1207 ++ * can be dropped if we stop supporting old beagleboard revisions
1208 ++ * A to B4 at some point.
1209 ++ */
1210 ++ if (sysc_soc->soc == SOC_3430)
1211 ++ error = -ENXIO;
1212 ++ else
1213 ++ error = -EBUSY;
1214 ++
1215 + if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
1216 + (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
1217 +- return -ENXIO;
1218 ++ return error;
1219 +
1220 + return 0;
1221 + }
1222 +diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
1223 +index 542b31d6e96dd..636bcf2439ef2 100644
1224 +--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
1225 ++++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
1226 +@@ -109,6 +109,8 @@ static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)
1227 + spin_lock_init(&data->lock);
1228 +
1229 + r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1230 ++ if (!r)
1231 ++ return -EINVAL;
1232 + /* one clock/reset pair per word */
1233 + count = DIV_ROUND_UP((resource_size(r)), SUN9I_MMC_WIDTH);
1234 + data->membase = devm_ioremap_resource(&pdev->dev, r);
1235 +diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
1236 +index 35d93361fda1a..bb2f59fd0de43 100644
1237 +--- a/drivers/cpufreq/qcom-cpufreq-hw.c
1238 ++++ b/drivers/cpufreq/qcom-cpufreq-hw.c
1239 +@@ -24,12 +24,16 @@
1240 + #define CLK_HW_DIV 2
1241 + #define LUT_TURBO_IND 1
1242 +
1243 ++#define GT_IRQ_STATUS BIT(2)
1244 ++
1245 + #define HZ_PER_KHZ 1000
1246 +
1247 + struct qcom_cpufreq_soc_data {
1248 + u32 reg_enable;
1249 ++ u32 reg_domain_state;
1250 + u32 reg_freq_lut;
1251 + u32 reg_volt_lut;
1252 ++ u32 reg_intr_clr;
1253 + u32 reg_current_vote;
1254 + u32 reg_perf_state;
1255 + u8 lut_row_size;
1256 +@@ -266,28 +270,31 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
1257 + }
1258 + }
1259 +
1260 +-static unsigned int qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
1261 ++static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
1262 + {
1263 +- unsigned int val = readl_relaxed(data->base + data->soc_data->reg_current_vote);
1264 ++ unsigned int lval;
1265 ++
1266 ++ if (data->soc_data->reg_current_vote)
1267 ++ lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff;
1268 ++ else
1269 ++ lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff;
1270 +
1271 +- return (val & 0x3FF) * 19200;
1272 ++ return lval * xo_rate;
1273 + }
1274 +
1275 + static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
1276 + {
1277 + unsigned long max_capacity, capacity, freq_hz, throttled_freq;
1278 + struct cpufreq_policy *policy = data->policy;
1279 +- int cpu = cpumask_first(policy->cpus);
1280 ++ int cpu = cpumask_first(policy->related_cpus);
1281 + struct device *dev = get_cpu_device(cpu);
1282 + struct dev_pm_opp *opp;
1283 +- unsigned int freq;
1284 +
1285 + /*
1286 + * Get the h/w throttled frequency, normalize it using the
1287 + * registered opp table and use it to calculate thermal pressure.
1288 + */
1289 +- freq = qcom_lmh_get_throttle_freq(data);
1290 +- freq_hz = freq * HZ_PER_KHZ;
1291 ++ freq_hz = qcom_lmh_get_throttle_freq(data);
1292 +
1293 + opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
1294 + if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
1295 +@@ -345,6 +352,10 @@ static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
1296 + disable_irq_nosync(c_data->throttle_irq);
1297 + schedule_delayed_work(&c_data->throttle_work, 0);
1298 +
1299 ++ if (c_data->soc_data->reg_intr_clr)
1300 ++ writel_relaxed(GT_IRQ_STATUS,
1301 ++ c_data->base + c_data->soc_data->reg_intr_clr);
1302 ++
1303 + return IRQ_HANDLED;
1304 + }
1305 +
1306 +@@ -359,8 +370,10 @@ static const struct qcom_cpufreq_soc_data qcom_soc_data = {
1307 +
1308 + static const struct qcom_cpufreq_soc_data epss_soc_data = {
1309 + .reg_enable = 0x0,
1310 ++ .reg_domain_state = 0x20,
1311 + .reg_freq_lut = 0x100,
1312 + .reg_volt_lut = 0x200,
1313 ++ .reg_intr_clr = 0x308,
1314 + .reg_perf_state = 0x320,
1315 + .lut_row_size = 4,
1316 + };
1317 +diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
1318 +index 2deed8d8773fa..75e1bf3a08f7c 100644
1319 +--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
1320 ++++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
1321 +@@ -98,8 +98,10 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
1322 + return -ENOMEM;
1323 +
1324 + ret = sun50i_cpufreq_get_efuse(&speed);
1325 +- if (ret)
1326 ++ if (ret) {
1327 ++ kfree(opp_tables);
1328 + return ret;
1329 ++ }
1330 +
1331 + snprintf(name, MAX_NAME_LEN, "speed%d", speed);
1332 +
1333 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1334 +index 4f2e0cc8a51a8..442857f3bde77 100644
1335 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1336 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1337 +@@ -138,19 +138,33 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
1338 + }
1339 +
1340 + static void increment_queue_count(struct device_queue_manager *dqm,
1341 +- enum kfd_queue_type type)
1342 ++ struct qcm_process_device *qpd,
1343 ++ struct queue *q)
1344 + {
1345 + dqm->active_queue_count++;
1346 +- if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
1347 ++ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
1348 ++ q->properties.type == KFD_QUEUE_TYPE_DIQ)
1349 + dqm->active_cp_queue_count++;
1350 ++
1351 ++ if (q->properties.is_gws) {
1352 ++ dqm->gws_queue_count++;
1353 ++ qpd->mapped_gws_queue = true;
1354 ++ }
1355 + }
1356 +
1357 + static void decrement_queue_count(struct device_queue_manager *dqm,
1358 +- enum kfd_queue_type type)
1359 ++ struct qcm_process_device *qpd,
1360 ++ struct queue *q)
1361 + {
1362 + dqm->active_queue_count--;
1363 +- if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
1364 ++ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
1365 ++ q->properties.type == KFD_QUEUE_TYPE_DIQ)
1366 + dqm->active_cp_queue_count--;
1367 ++
1368 ++ if (q->properties.is_gws) {
1369 ++ dqm->gws_queue_count--;
1370 ++ qpd->mapped_gws_queue = false;
1371 ++ }
1372 + }
1373 +
1374 + static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
1375 +@@ -390,7 +404,7 @@ add_queue_to_list:
1376 + list_add(&q->list, &qpd->queues_list);
1377 + qpd->queue_count++;
1378 + if (q->properties.is_active)
1379 +- increment_queue_count(dqm, q->properties.type);
1380 ++ increment_queue_count(dqm, qpd, q);
1381 +
1382 + /*
1383 + * Unconditionally increment this counter, regardless of the queue's
1384 +@@ -515,13 +529,8 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
1385 + deallocate_vmid(dqm, qpd, q);
1386 + }
1387 + qpd->queue_count--;
1388 +- if (q->properties.is_active) {
1389 +- decrement_queue_count(dqm, q->properties.type);
1390 +- if (q->properties.is_gws) {
1391 +- dqm->gws_queue_count--;
1392 +- qpd->mapped_gws_queue = false;
1393 +- }
1394 +- }
1395 ++ if (q->properties.is_active)
1396 ++ decrement_queue_count(dqm, qpd, q);
1397 +
1398 + return retval;
1399 + }
1400 +@@ -613,12 +622,11 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
1401 + * dqm->active_queue_count to determine whether a new runlist must be
1402 + * uploaded.
1403 + */
1404 +- if (q->properties.is_active && !prev_active)
1405 +- increment_queue_count(dqm, q->properties.type);
1406 +- else if (!q->properties.is_active && prev_active)
1407 +- decrement_queue_count(dqm, q->properties.type);
1408 +-
1409 +- if (q->gws && !q->properties.is_gws) {
1410 ++ if (q->properties.is_active && !prev_active) {
1411 ++ increment_queue_count(dqm, &pdd->qpd, q);
1412 ++ } else if (!q->properties.is_active && prev_active) {
1413 ++ decrement_queue_count(dqm, &pdd->qpd, q);
1414 ++ } else if (q->gws && !q->properties.is_gws) {
1415 + if (q->properties.is_active) {
1416 + dqm->gws_queue_count++;
1417 + pdd->qpd.mapped_gws_queue = true;
1418 +@@ -680,11 +688,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
1419 + mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1420 + q->properties.type)];
1421 + q->properties.is_active = false;
1422 +- decrement_queue_count(dqm, q->properties.type);
1423 +- if (q->properties.is_gws) {
1424 +- dqm->gws_queue_count--;
1425 +- qpd->mapped_gws_queue = false;
1426 +- }
1427 ++ decrement_queue_count(dqm, qpd, q);
1428 +
1429 + if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
1430 + continue;
1431 +@@ -730,7 +734,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
1432 + continue;
1433 +
1434 + q->properties.is_active = false;
1435 +- decrement_queue_count(dqm, q->properties.type);
1436 ++ decrement_queue_count(dqm, qpd, q);
1437 + }
1438 + pdd->last_evict_timestamp = get_jiffies_64();
1439 + retval = execute_queues_cpsch(dqm,
1440 +@@ -801,11 +805,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
1441 + mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1442 + q->properties.type)];
1443 + q->properties.is_active = true;
1444 +- increment_queue_count(dqm, q->properties.type);
1445 +- if (q->properties.is_gws) {
1446 +- dqm->gws_queue_count++;
1447 +- qpd->mapped_gws_queue = true;
1448 +- }
1449 ++ increment_queue_count(dqm, qpd, q);
1450 +
1451 + if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
1452 + continue;
1453 +@@ -863,7 +863,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
1454 + continue;
1455 +
1456 + q->properties.is_active = true;
1457 +- increment_queue_count(dqm, q->properties.type);
1458 ++ increment_queue_count(dqm, &pdd->qpd, q);
1459 + }
1460 + retval = execute_queues_cpsch(dqm,
1461 + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1462 +@@ -1265,7 +1265,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
1463 + dqm->total_queue_count);
1464 +
1465 + list_add(&kq->list, &qpd->priv_queue_list);
1466 +- increment_queue_count(dqm, kq->queue->properties.type);
1467 ++ increment_queue_count(dqm, qpd, kq->queue);
1468 + qpd->is_debug = true;
1469 + execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1470 + dqm_unlock(dqm);
1471 +@@ -1279,7 +1279,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
1472 + {
1473 + dqm_lock(dqm);
1474 + list_del(&kq->list);
1475 +- decrement_queue_count(dqm, kq->queue->properties.type);
1476 ++ decrement_queue_count(dqm, qpd, kq->queue);
1477 + qpd->is_debug = false;
1478 + execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
1479 + /*
1480 +@@ -1346,7 +1346,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
1481 + qpd->queue_count++;
1482 +
1483 + if (q->properties.is_active) {
1484 +- increment_queue_count(dqm, q->properties.type);
1485 ++ increment_queue_count(dqm, qpd, q);
1486 +
1487 + execute_queues_cpsch(dqm,
1488 + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1489 +@@ -1548,15 +1548,11 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
1490 + list_del(&q->list);
1491 + qpd->queue_count--;
1492 + if (q->properties.is_active) {
1493 +- decrement_queue_count(dqm, q->properties.type);
1494 ++ decrement_queue_count(dqm, qpd, q);
1495 + retval = execute_queues_cpsch(dqm,
1496 + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1497 + if (retval == -ETIME)
1498 + qpd->reset_wavefronts = true;
1499 +- if (q->properties.is_gws) {
1500 +- dqm->gws_queue_count--;
1501 +- qpd->mapped_gws_queue = false;
1502 +- }
1503 + }
1504 +
1505 + /*
1506 +@@ -1747,7 +1743,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
1507 + /* Clean all kernel queues */
1508 + list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
1509 + list_del(&kq->list);
1510 +- decrement_queue_count(dqm, kq->queue->properties.type);
1511 ++ decrement_queue_count(dqm, qpd, kq->queue);
1512 + qpd->is_debug = false;
1513 + dqm->total_queue_count--;
1514 + filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
1515 +@@ -1760,13 +1756,8 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
1516 + else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1517 + deallocate_sdma_queue(dqm, q);
1518 +
1519 +- if (q->properties.is_active) {
1520 +- decrement_queue_count(dqm, q->properties.type);
1521 +- if (q->properties.is_gws) {
1522 +- dqm->gws_queue_count--;
1523 +- qpd->mapped_gws_queue = false;
1524 +- }
1525 +- }
1526 ++ if (q->properties.is_active)
1527 ++ decrement_queue_count(dqm, qpd, q);
1528 +
1529 + dqm->total_queue_count--;
1530 + }
1531 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
1532 +index fbbdf99761838..5b8274b8c3845 100644
1533 +--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
1534 ++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
1535 +@@ -1428,6 +1428,7 @@ static struct clock_source *dcn21_clock_source_create(
1536 + return &clk_src->base;
1537 + }
1538 +
1539 ++ kfree(clk_src);
1540 + BREAK_TO_DEBUGGER();
1541 + return NULL;
1542 + }
1543 +diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
1544 +index c82f8febe7303..e7b90863aa43d 100644
1545 +--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
1546 ++++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
1547 +@@ -96,6 +96,14 @@
1548 +
1549 + #define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_1 0x359
1550 +
1551 ++enum intel_dp_aux_backlight_modparam {
1552 ++ INTEL_DP_AUX_BACKLIGHT_AUTO = -1,
1553 ++ INTEL_DP_AUX_BACKLIGHT_OFF = 0,
1554 ++ INTEL_DP_AUX_BACKLIGHT_ON = 1,
1555 ++ INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2,
1556 ++ INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
1557 ++};
1558 ++
1559 + /* Intel EDP backlight callbacks */
1560 + static bool
1561 + intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
1562 +@@ -125,6 +133,24 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
1563 + return false;
1564 + }
1565 +
1566 ++ /*
1567 ++ * If we don't have HDR static metadata there is no way to
1568 ++ * runtime detect used range for nits based control. For now
1569 ++ * do not use Intel proprietary eDP backlight control if we
1570 ++ * don't have this data in panel EDID. In case we find panel
1571 ++ * which supports only nits based control, but doesn't provide
1572 ++ * HDR static metadata we need to start maintaining table of
1573 ++ * ranges for such panels.
1574 ++ */
1575 ++ if (i915->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
1576 ++ !(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
1577 ++ BIT(HDMI_STATIC_METADATA_TYPE1))) {
1578 ++ drm_info(&i915->drm,
1579 ++ "Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
1580 ++ INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL);
1581 ++ return false;
1582 ++ }
1583 ++
1584 + panel->backlight.edp.intel.sdr_uses_aux =
1585 + tcon_cap[2] & INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP;
1586 +
1587 +@@ -373,14 +399,6 @@ static const struct intel_panel_bl_funcs intel_dp_vesa_bl_funcs = {
1588 + .get = intel_dp_aux_vesa_get_backlight,
1589 + };
1590 +
1591 +-enum intel_dp_aux_backlight_modparam {
1592 +- INTEL_DP_AUX_BACKLIGHT_AUTO = -1,
1593 +- INTEL_DP_AUX_BACKLIGHT_OFF = 0,
1594 +- INTEL_DP_AUX_BACKLIGHT_ON = 1,
1595 +- INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2,
1596 +- INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
1597 +-};
1598 +-
1599 + int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
1600 + {
1601 + struct drm_device *dev = connector->base.dev;
1602 +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
1603 +index c65473fc90935..bb64e7baa1cc1 100644
1604 +--- a/drivers/gpu/drm/i915/i915_reg.h
1605 ++++ b/drivers/gpu/drm/i915/i915_reg.h
1606 +@@ -7458,7 +7458,7 @@ enum {
1607 + #define _SEL_FETCH_PLANE_BASE_6_A 0x70940
1608 + #define _SEL_FETCH_PLANE_BASE_7_A 0x70960
1609 + #define _SEL_FETCH_PLANE_BASE_CUR_A 0x70880
1610 +-#define _SEL_FETCH_PLANE_BASE_1_B 0x70990
1611 ++#define _SEL_FETCH_PLANE_BASE_1_B 0x71890
1612 +
1613 + #define _SEL_FETCH_PLANE_BASE_A(plane) _PICK(plane, \
1614 + _SEL_FETCH_PLANE_BASE_1_A, \
1615 +diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
1616 +index edb60ae0a9b75..faecc2935039b 100644
1617 +--- a/drivers/gpu/drm/sun4i/sun4i_frontend.c
1618 ++++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
1619 +@@ -222,13 +222,11 @@ void sun4i_frontend_update_buffer(struct sun4i_frontend *frontend,
1620 +
1621 + /* Set the physical address of the buffer in memory */
1622 + paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
1623 +- paddr -= PHYS_OFFSET;
1624 + DRM_DEBUG_DRIVER("Setting buffer #0 address to %pad\n", &paddr);
1625 + regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR0_REG, paddr);
1626 +
1627 + if (fb->format->num_planes > 1) {
1628 + paddr = drm_fb_cma_get_gem_addr(fb, state, swap ? 2 : 1);
1629 +- paddr -= PHYS_OFFSET;
1630 + DRM_DEBUG_DRIVER("Setting buffer #1 address to %pad\n", &paddr);
1631 + regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR1_REG,
1632 + paddr);
1633 +@@ -236,7 +234,6 @@ void sun4i_frontend_update_buffer(struct sun4i_frontend *frontend,
1634 +
1635 + if (fb->format->num_planes > 2) {
1636 + paddr = drm_fb_cma_get_gem_addr(fb, state, swap ? 1 : 2);
1637 +- paddr -= PHYS_OFFSET;
1638 + DRM_DEBUG_DRIVER("Setting buffer #2 address to %pad\n", &paddr);
1639 + regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR2_REG,
1640 + paddr);
1641 +diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
1642 +index e50718422411d..cafb8c7790154 100644
1643 +--- a/drivers/iio/dac/ad5446.c
1644 ++++ b/drivers/iio/dac/ad5446.c
1645 +@@ -178,7 +178,7 @@ static int ad5446_read_raw(struct iio_dev *indio_dev,
1646 +
1647 + switch (m) {
1648 + case IIO_CHAN_INFO_RAW:
1649 +- *val = st->cached_val;
1650 ++ *val = st->cached_val >> chan->scan_type.shift;
1651 + return IIO_VAL_INT;
1652 + case IIO_CHAN_INFO_SCALE:
1653 + *val = st->vref_mv;
1654 +diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
1655 +index 0405e92b9e8c3..987264410278c 100644
1656 +--- a/drivers/iio/dac/ad5592r-base.c
1657 ++++ b/drivers/iio/dac/ad5592r-base.c
1658 +@@ -523,7 +523,7 @@ static int ad5592r_alloc_channels(struct iio_dev *iio_dev)
1659 + if (!ret)
1660 + st->channel_modes[reg] = tmp;
1661 +
1662 +- fwnode_property_read_u32(child, "adi,off-state", &tmp);
1663 ++ ret = fwnode_property_read_u32(child, "adi,off-state", &tmp);
1664 + if (!ret)
1665 + st->channel_offstate[reg] = tmp;
1666 + }
1667 +diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
1668 +index 824b5124a5f55..01336105792ee 100644
1669 +--- a/drivers/iio/imu/bmi160/bmi160_core.c
1670 ++++ b/drivers/iio/imu/bmi160/bmi160_core.c
1671 +@@ -730,7 +730,7 @@ static int bmi160_chip_init(struct bmi160_data *data, bool use_spi)
1672 +
1673 + ret = regmap_write(data->regmap, BMI160_REG_CMD, BMI160_CMD_SOFTRESET);
1674 + if (ret)
1675 +- return ret;
1676 ++ goto disable_regulator;
1677 +
1678 + usleep_range(BMI160_SOFTRESET_USLEEP, BMI160_SOFTRESET_USLEEP + 1);
1679 +
1680 +@@ -741,29 +741,37 @@ static int bmi160_chip_init(struct bmi160_data *data, bool use_spi)
1681 + if (use_spi) {
1682 + ret = regmap_read(data->regmap, BMI160_REG_DUMMY, &val);
1683 + if (ret)
1684 +- return ret;
1685 ++ goto disable_regulator;
1686 + }
1687 +
1688 + ret = regmap_read(data->regmap, BMI160_REG_CHIP_ID, &val);
1689 + if (ret) {
1690 + dev_err(dev, "Error reading chip id\n");
1691 +- return ret;
1692 ++ goto disable_regulator;
1693 + }
1694 + if (val != BMI160_CHIP_ID_VAL) {
1695 + dev_err(dev, "Wrong chip id, got %x expected %x\n",
1696 + val, BMI160_CHIP_ID_VAL);
1697 +- return -ENODEV;
1698 ++ ret = -ENODEV;
1699 ++ goto disable_regulator;
1700 + }
1701 +
1702 + ret = bmi160_set_mode(data, BMI160_ACCEL, true);
1703 + if (ret)
1704 +- return ret;
1705 ++ goto disable_regulator;
1706 +
1707 + ret = bmi160_set_mode(data, BMI160_GYRO, true);
1708 + if (ret)
1709 +- return ret;
1710 ++ goto disable_accel;
1711 +
1712 + return 0;
1713 ++
1714 ++disable_accel:
1715 ++ bmi160_set_mode(data, BMI160_ACCEL, false);
1716 ++
1717 ++disable_regulator:
1718 ++ regulator_bulk_disable(ARRAY_SIZE(data->supplies), data->supplies);
1719 ++ return ret;
1720 + }
1721 +
1722 + static int bmi160_data_rdy_trigger_set_state(struct iio_trigger *trig,
1723 +diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
1724 +index 85b1934cec60e..53891010a91de 100644
1725 +--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
1726 ++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
1727 +@@ -18,12 +18,15 @@ static int inv_icm42600_i2c_bus_setup(struct inv_icm42600_state *st)
1728 + unsigned int mask, val;
1729 + int ret;
1730 +
1731 +- /* setup interface registers */
1732 +- ret = regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG6,
1733 +- INV_ICM42600_INTF_CONFIG6_MASK,
1734 +- INV_ICM42600_INTF_CONFIG6_I3C_EN);
1735 +- if (ret)
1736 +- return ret;
1737 ++ /*
1738 ++ * setup interface registers
1739 ++ * This register write to REG_INTF_CONFIG6 enables a spike filter that
1740 ++ * is impacting the line and can prevent the I2C ACK to be seen by the
1741 ++ * controller. So we don't test the return value.
1742 ++ */
1743 ++ regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG6,
1744 ++ INV_ICM42600_INTF_CONFIG6_MASK,
1745 ++ INV_ICM42600_INTF_CONFIG6_I3C_EN);
1746 +
1747 + ret = regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG4,
1748 + INV_ICM42600_INTF_CONFIG4_I3C_BUS_ONLY, 0);
1749 +diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
1750 +index 42b8a2680e3aa..1509fd0cbb50f 100644
1751 +--- a/drivers/iio/magnetometer/ak8975.c
1752 ++++ b/drivers/iio/magnetometer/ak8975.c
1753 +@@ -389,6 +389,7 @@ static int ak8975_power_on(const struct ak8975_data *data)
1754 + if (ret) {
1755 + dev_warn(&data->client->dev,
1756 + "Failed to enable specified Vid supply\n");
1757 ++ regulator_disable(data->vdd);
1758 + return ret;
1759 + }
1760 +
1761 +diff --git a/drivers/interconnect/qcom/sdx55.c b/drivers/interconnect/qcom/sdx55.c
1762 +index 03d604f84cc57..e3ac25a997b71 100644
1763 +--- a/drivers/interconnect/qcom/sdx55.c
1764 ++++ b/drivers/interconnect/qcom/sdx55.c
1765 +@@ -18,7 +18,6 @@
1766 + #include "icc-rpmh.h"
1767 + #include "sdx55.h"
1768 +
1769 +-DEFINE_QNODE(ipa_core_master, SDX55_MASTER_IPA_CORE, 1, 8, SDX55_SLAVE_IPA_CORE);
1770 + DEFINE_QNODE(llcc_mc, SDX55_MASTER_LLCC, 4, 4, SDX55_SLAVE_EBI_CH0);
1771 + DEFINE_QNODE(acm_tcu, SDX55_MASTER_TCU_0, 1, 8, SDX55_SLAVE_LLCC, SDX55_SLAVE_MEM_NOC_SNOC, SDX55_SLAVE_MEM_NOC_PCIE_SNOC);
1772 + DEFINE_QNODE(qnm_snoc_gc, SDX55_MASTER_SNOC_GC_MEM_NOC, 1, 8, SDX55_SLAVE_LLCC);
1773 +@@ -40,7 +39,6 @@ DEFINE_QNODE(xm_pcie, SDX55_MASTER_PCIE, 1, 8, SDX55_SLAVE_ANOC_SNOC);
1774 + DEFINE_QNODE(xm_qdss_etr, SDX55_MASTER_QDSS_ETR, 1, 8, SDX55_SLAVE_SNOC_CFG, SDX55_SLAVE_EMAC_CFG, SDX55_SLAVE_USB3, SDX55_SLAVE_AOSS, SDX55_SLAVE_SPMI_FETCHER, SDX55_SLAVE_QDSS_CFG, SDX55_SLAVE_PDM, SDX55_SLAVE_SNOC_MEM_NOC_GC, SDX55_SLAVE_TCSR, SDX55_SLAVE_CNOC_DDRSS, SDX55_SLAVE_SPMI_VGI_COEX, SDX55_SLAVE_QPIC, SDX55_SLAVE_OCIMEM, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_USB3_PHY_CFG, SDX55_SLAVE_AOP, SDX55_SLAVE_BLSP_1, SDX55_SLAVE_SDCC_1, SDX55_SLAVE_CNOC_MSS, SDX55_SLAVE_PCIE_PARF, SDX55_SLAVE_ECC_CFG, SDX55_SLAVE_AUDIO, SDX55_SLAVE_AOSS, SDX55_SLAVE_PRNG, SDX55_SLAVE_CRYPTO_0_CFG, SDX55_SLAVE_TCU, SDX55_SLAVE_CLK_CTL, SDX55_SLAVE_IMEM_CFG);
1775 + DEFINE_QNODE(xm_sdc1, SDX55_MASTER_SDCC_1, 1, 8, SDX55_SLAVE_AOSS, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_ANOC_SNOC, SDX55_SLAVE_AOP, SDX55_SLAVE_AUDIO);
1776 + DEFINE_QNODE(xm_usb3, SDX55_MASTER_USB3, 1, 8, SDX55_SLAVE_ANOC_SNOC);
1777 +-DEFINE_QNODE(ipa_core_slave, SDX55_SLAVE_IPA_CORE, 1, 8);
1778 + DEFINE_QNODE(ebi, SDX55_SLAVE_EBI_CH0, 1, 4);
1779 + DEFINE_QNODE(qns_llcc, SDX55_SLAVE_LLCC, 1, 16, SDX55_SLAVE_EBI_CH0);
1780 + DEFINE_QNODE(qns_memnoc_snoc, SDX55_SLAVE_MEM_NOC_SNOC, 1, 8, SDX55_MASTER_MEM_NOC_SNOC);
1781 +@@ -82,7 +80,6 @@ DEFINE_QNODE(xs_sys_tcu_cfg, SDX55_SLAVE_TCU, 1, 8);
1782 + DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
1783 + DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
1784 + DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
1785 +-DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave);
1786 + DEFINE_QBCM(bcm_pn0, "PN0", false, &qhm_snoc_cfg);
1787 + DEFINE_QBCM(bcm_sh3, "SH3", false, &xm_apps_rdwr);
1788 + DEFINE_QBCM(bcm_sh4, "SH4", false, &qns_memnoc_snoc, &qns_sys_pcie);
1789 +@@ -219,22 +216,6 @@ static const struct qcom_icc_desc sdx55_system_noc = {
1790 + .num_bcms = ARRAY_SIZE(system_noc_bcms),
1791 + };
1792 +
1793 +-static struct qcom_icc_bcm *ipa_virt_bcms[] = {
1794 +- &bcm_ip0,
1795 +-};
1796 +-
1797 +-static struct qcom_icc_node *ipa_virt_nodes[] = {
1798 +- [MASTER_IPA_CORE] = &ipa_core_master,
1799 +- [SLAVE_IPA_CORE] = &ipa_core_slave,
1800 +-};
1801 +-
1802 +-static const struct qcom_icc_desc sdx55_ipa_virt = {
1803 +- .nodes = ipa_virt_nodes,
1804 +- .num_nodes = ARRAY_SIZE(ipa_virt_nodes),
1805 +- .bcms = ipa_virt_bcms,
1806 +- .num_bcms = ARRAY_SIZE(ipa_virt_bcms),
1807 +-};
1808 +-
1809 + static const struct of_device_id qnoc_of_match[] = {
1810 + { .compatible = "qcom,sdx55-mc-virt",
1811 + .data = &sdx55_mc_virt},
1812 +@@ -242,8 +223,6 @@ static const struct of_device_id qnoc_of_match[] = {
1813 + .data = &sdx55_mem_noc},
1814 + { .compatible = "qcom,sdx55-system-noc",
1815 + .data = &sdx55_system_noc},
1816 +- { .compatible = "qcom,sdx55-ipa-virt",
1817 +- .data = &sdx55_ipa_virt},
1818 + { }
1819 + };
1820 + MODULE_DEVICE_TABLE(of, qnoc_of_match);
1821 +diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
1822 +index 2a4c1f94bfa07..3a416705f61cb 100644
1823 +--- a/drivers/memory/renesas-rpc-if.c
1824 ++++ b/drivers/memory/renesas-rpc-if.c
1825 +@@ -162,25 +162,39 @@ static const struct regmap_access_table rpcif_volatile_table = {
1826 +
1827 +
1828 + /*
1829 +- * Custom accessor functions to ensure SMRDR0 and SMWDR0 are always accessed
1830 +- * with proper width. Requires SMENR_SPIDE to be correctly set before!
1831 ++ * Custom accessor functions to ensure SM[RW]DR[01] are always accessed with
1832 ++ * proper width. Requires rpcif.xfer_size to be correctly set before!
1833 + */
1834 + static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val)
1835 + {
1836 + struct rpcif *rpc = context;
1837 +
1838 +- if (reg == RPCIF_SMRDR0 || reg == RPCIF_SMWDR0) {
1839 +- u32 spide = readl(rpc->base + RPCIF_SMENR) & RPCIF_SMENR_SPIDE(0xF);
1840 +-
1841 +- if (spide == 0x8) {
1842 ++ switch (reg) {
1843 ++ case RPCIF_SMRDR0:
1844 ++ case RPCIF_SMWDR0:
1845 ++ switch (rpc->xfer_size) {
1846 ++ case 1:
1847 + *val = readb(rpc->base + reg);
1848 + return 0;
1849 +- } else if (spide == 0xC) {
1850 ++
1851 ++ case 2:
1852 + *val = readw(rpc->base + reg);
1853 + return 0;
1854 +- } else if (spide != 0xF) {
1855 ++
1856 ++ case 4:
1857 ++ case 8:
1858 ++ *val = readl(rpc->base + reg);
1859 ++ return 0;
1860 ++
1861 ++ default:
1862 + return -EILSEQ;
1863 + }
1864 ++
1865 ++ case RPCIF_SMRDR1:
1866 ++ case RPCIF_SMWDR1:
1867 ++ if (rpc->xfer_size != 8)
1868 ++ return -EILSEQ;
1869 ++ break;
1870 + }
1871 +
1872 + *val = readl(rpc->base + reg);
1873 +@@ -192,18 +206,34 @@ static int rpcif_reg_write(void *context, unsigned int reg, unsigned int val)
1874 + {
1875 + struct rpcif *rpc = context;
1876 +
1877 +- if (reg == RPCIF_SMRDR0 || reg == RPCIF_SMWDR0) {
1878 +- u32 spide = readl(rpc->base + RPCIF_SMENR) & RPCIF_SMENR_SPIDE(0xF);
1879 +-
1880 +- if (spide == 0x8) {
1881 ++ switch (reg) {
1882 ++ case RPCIF_SMWDR0:
1883 ++ switch (rpc->xfer_size) {
1884 ++ case 1:
1885 + writeb(val, rpc->base + reg);
1886 + return 0;
1887 +- } else if (spide == 0xC) {
1888 ++
1889 ++ case 2:
1890 + writew(val, rpc->base + reg);
1891 + return 0;
1892 +- } else if (spide != 0xF) {
1893 ++
1894 ++ case 4:
1895 ++ case 8:
1896 ++ writel(val, rpc->base + reg);
1897 ++ return 0;
1898 ++
1899 ++ default:
1900 + return -EILSEQ;
1901 + }
1902 ++
1903 ++ case RPCIF_SMWDR1:
1904 ++ if (rpc->xfer_size != 8)
1905 ++ return -EILSEQ;
1906 ++ break;
1907 ++
1908 ++ case RPCIF_SMRDR0:
1909 ++ case RPCIF_SMRDR1:
1910 ++ return -EPERM;
1911 + }
1912 +
1913 + writel(val, rpc->base + reg);
1914 +@@ -442,6 +472,7 @@ int rpcif_manual_xfer(struct rpcif *rpc)
1915 +
1916 + smenr |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes));
1917 + regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
1918 ++ rpc->xfer_size = nbytes;
1919 +
1920 + memcpy(data, rpc->buffer + pos, nbytes);
1921 + if (nbytes == 8) {
1922 +@@ -506,6 +537,7 @@ int rpcif_manual_xfer(struct rpcif *rpc)
1923 + regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
1924 + regmap_write(rpc->regmap, RPCIF_SMCR,
1925 + rpc->smcr | RPCIF_SMCR_SPIE);
1926 ++ rpc->xfer_size = nbytes;
1927 + ret = wait_msg_xfer_end(rpc);
1928 + if (ret)
1929 + goto err_out;
1930 +diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
1931 +index 9193b812bc07e..403243859dce9 100644
1932 +--- a/drivers/misc/eeprom/at25.c
1933 ++++ b/drivers/misc/eeprom/at25.c
1934 +@@ -30,6 +30,8 @@
1935 + */
1936 +
1937 + #define FM25_SN_LEN 8 /* serial number length */
1938 ++#define EE_MAXADDRLEN 3 /* 24 bit addresses, up to 2 MBytes */
1939 ++
1940 + struct at25_data {
1941 + struct spi_device *spi;
1942 + struct mutex lock;
1943 +@@ -38,6 +40,7 @@ struct at25_data {
1944 + struct nvmem_config nvmem_config;
1945 + struct nvmem_device *nvmem;
1946 + u8 sernum[FM25_SN_LEN];
1947 ++ u8 command[EE_MAXADDRLEN + 1];
1948 + };
1949 +
1950 + #define AT25_WREN 0x06 /* latch the write enable */
1951 +@@ -60,8 +63,6 @@ struct at25_data {
1952 +
1953 + #define FM25_ID_LEN 9 /* ID length */
1954 +
1955 +-#define EE_MAXADDRLEN 3 /* 24 bit addresses, up to 2 MBytes */
1956 +-
1957 + /* Specs often allow 5 msec for a page write, sometimes 20 msec;
1958 + * it's important to recover from write timeouts.
1959 + */
1960 +@@ -76,7 +77,6 @@ static int at25_ee_read(void *priv, unsigned int offset,
1961 + {
1962 + struct at25_data *at25 = priv;
1963 + char *buf = val;
1964 +- u8 command[EE_MAXADDRLEN + 1];
1965 + u8 *cp;
1966 + ssize_t status;
1967 + struct spi_transfer t[2];
1968 +@@ -90,12 +90,15 @@ static int at25_ee_read(void *priv, unsigned int offset,
1969 + if (unlikely(!count))
1970 + return -EINVAL;
1971 +
1972 +- cp = command;
1973 ++ cp = at25->command;
1974 +
1975 + instr = AT25_READ;
1976 + if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR)
1977 + if (offset >= (1U << (at25->addrlen * 8)))
1978 + instr |= AT25_INSTR_BIT3;
1979 ++
1980 ++ mutex_lock(&at25->lock);
1981 ++
1982 + *cp++ = instr;
1983 +
1984 + /* 8/16/24-bit address is written MSB first */
1985 +@@ -114,7 +117,7 @@ static int at25_ee_read(void *priv, unsigned int offset,
1986 + spi_message_init(&m);
1987 + memset(t, 0, sizeof(t));
1988 +
1989 +- t[0].tx_buf = command;
1990 ++ t[0].tx_buf = at25->command;
1991 + t[0].len = at25->addrlen + 1;
1992 + spi_message_add_tail(&t[0], &m);
1993 +
1994 +@@ -122,8 +125,6 @@ static int at25_ee_read(void *priv, unsigned int offset,
1995 + t[1].len = count;
1996 + spi_message_add_tail(&t[1], &m);
1997 +
1998 +- mutex_lock(&at25->lock);
1999 +-
2000 + /* Read it all at once.
2001 + *
2002 + * REVISIT that's potentially a problem with large chips, if
2003 +@@ -151,7 +152,7 @@ static int fm25_aux_read(struct at25_data *at25, u8 *buf, uint8_t command,
2004 + spi_message_init(&m);
2005 + memset(t, 0, sizeof(t));
2006 +
2007 +- t[0].tx_buf = &command;
2008 ++ t[0].tx_buf = at25->command;
2009 + t[0].len = 1;
2010 + spi_message_add_tail(&t[0], &m);
2011 +
2012 +@@ -161,6 +162,8 @@ static int fm25_aux_read(struct at25_data *at25, u8 *buf, uint8_t command,
2013 +
2014 + mutex_lock(&at25->lock);
2015 +
2016 ++ at25->command[0] = command;
2017 ++
2018 + status = spi_sync(at25->spi, &m);
2019 + dev_dbg(&at25->spi->dev, "read %d aux bytes --> %d\n", len, status);
2020 +
2021 +diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c
2022 +index c437d97debb8a..ec9d1fb07006f 100644
2023 +--- a/drivers/mtd/nand/raw/mtk_ecc.c
2024 ++++ b/drivers/mtd/nand/raw/mtk_ecc.c
2025 +@@ -43,6 +43,7 @@
2026 +
2027 + struct mtk_ecc_caps {
2028 + u32 err_mask;
2029 ++ u32 err_shift;
2030 + const u8 *ecc_strength;
2031 + const u32 *ecc_regs;
2032 + u8 num_ecc_strength;
2033 +@@ -76,7 +77,7 @@ static const u8 ecc_strength_mt2712[] = {
2034 + };
2035 +
2036 + static const u8 ecc_strength_mt7622[] = {
2037 +- 4, 6, 8, 10, 12, 14, 16
2038 ++ 4, 6, 8, 10, 12
2039 + };
2040 +
2041 + enum mtk_ecc_regs {
2042 +@@ -221,7 +222,7 @@ void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
2043 + for (i = 0; i < sectors; i++) {
2044 + offset = (i >> 2) << 2;
2045 + err = readl(ecc->regs + ECC_DECENUM0 + offset);
2046 +- err = err >> ((i % 4) * 8);
2047 ++ err = err >> ((i % 4) * ecc->caps->err_shift);
2048 + err &= ecc->caps->err_mask;
2049 + if (err == ecc->caps->err_mask) {
2050 + /* uncorrectable errors */
2051 +@@ -449,6 +450,7 @@ EXPORT_SYMBOL(mtk_ecc_get_parity_bits);
2052 +
2053 + static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
2054 + .err_mask = 0x3f,
2055 ++ .err_shift = 8,
2056 + .ecc_strength = ecc_strength_mt2701,
2057 + .ecc_regs = mt2701_ecc_regs,
2058 + .num_ecc_strength = 20,
2059 +@@ -459,6 +461,7 @@ static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
2060 +
2061 + static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
2062 + .err_mask = 0x7f,
2063 ++ .err_shift = 8,
2064 + .ecc_strength = ecc_strength_mt2712,
2065 + .ecc_regs = mt2712_ecc_regs,
2066 + .num_ecc_strength = 23,
2067 +@@ -468,10 +471,11 @@ static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
2068 + };
2069 +
2070 + static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = {
2071 +- .err_mask = 0x3f,
2072 ++ .err_mask = 0x1f,
2073 ++ .err_shift = 5,
2074 + .ecc_strength = ecc_strength_mt7622,
2075 + .ecc_regs = mt7622_ecc_regs,
2076 +- .num_ecc_strength = 7,
2077 ++ .num_ecc_strength = 5,
2078 + .ecc_mode_shift = 4,
2079 + .parity_bits = 13,
2080 + .pg_irq_sel = 0,
2081 +diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
2082 +index 0f41a9a421575..e972bee60e7c8 100644
2083 +--- a/drivers/mtd/nand/raw/qcom_nandc.c
2084 ++++ b/drivers/mtd/nand/raw/qcom_nandc.c
2085 +@@ -2641,10 +2641,23 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
2086 + ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
2087 +
2088 + mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
2089 ++ /* Free the initially allocated BAM transaction for reading the ONFI params */
2090 ++ if (nandc->props->is_bam)
2091 ++ free_bam_transaction(nandc);
2092 +
2093 + nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
2094 + cwperpage);
2095 +
2096 ++ /* Now allocate the BAM transaction based on updated max_cwperpage */
2097 ++ if (nandc->props->is_bam) {
2098 ++ nandc->bam_txn = alloc_bam_transaction(nandc);
2099 ++ if (!nandc->bam_txn) {
2100 ++ dev_err(nandc->dev,
2101 ++ "failed to allocate bam transaction\n");
2102 ++ return -ENOMEM;
2103 ++ }
2104 ++ }
2105 ++
2106 + /*
2107 + * DATA_UD_BYTES varies based on whether the read/write command protects
2108 + * spare data with ECC too. We protect spare data by default, so we set
2109 +@@ -2945,17 +2958,6 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
2110 + if (ret)
2111 + return ret;
2112 +
2113 +- if (nandc->props->is_bam) {
2114 +- free_bam_transaction(nandc);
2115 +- nandc->bam_txn = alloc_bam_transaction(nandc);
2116 +- if (!nandc->bam_txn) {
2117 +- dev_err(nandc->dev,
2118 +- "failed to allocate bam transaction\n");
2119 +- nand_cleanup(chip);
2120 +- return -ENOMEM;
2121 +- }
2122 +- }
2123 +-
2124 + ret = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
2125 + if (ret)
2126 + nand_cleanup(chip);
2127 +diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
2128 +index 13df4bdf792af..8f89e2d3d817f 100644
2129 +--- a/drivers/mtd/nand/raw/sh_flctl.c
2130 ++++ b/drivers/mtd/nand/raw/sh_flctl.c
2131 +@@ -384,7 +384,8 @@ static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
2132 + dma_addr_t dma_addr;
2133 + dma_cookie_t cookie;
2134 + uint32_t reg;
2135 +- int ret;
2136 ++ int ret = 0;
2137 ++ unsigned long time_left;
2138 +
2139 + if (dir == DMA_FROM_DEVICE) {
2140 + chan = flctl->chan_fifo0_rx;
2141 +@@ -425,13 +426,14 @@ static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
2142 + goto out;
2143 + }
2144 +
2145 +- ret =
2146 ++ time_left =
2147 + wait_for_completion_timeout(&flctl->dma_complete,
2148 + msecs_to_jiffies(3000));
2149 +
2150 +- if (ret <= 0) {
2151 ++ if (time_left == 0) {
2152 + dmaengine_terminate_all(chan);
2153 + dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
2154 ++ ret = -ETIMEDOUT;
2155 + }
2156 +
2157 + out:
2158 +@@ -441,7 +443,7 @@ out:
2159 +
2160 + dma_unmap_single(chan->device->dev, dma_addr, len, dir);
2161 +
2162 +- /* ret > 0 is success */
2163 ++ /* ret == 0 is success */
2164 + return ret;
2165 + }
2166 +
2167 +@@ -465,7 +467,7 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
2168 +
2169 + /* initiate DMA transfer */
2170 + if (flctl->chan_fifo0_rx && rlen >= 32 &&
2171 +- flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE) > 0)
2172 ++ !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE))
2173 + goto convert; /* DMA success */
2174 +
2175 + /* do polling transfer */
2176 +@@ -524,7 +526,7 @@ static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
2177 +
2178 + /* initiate DMA transfer */
2179 + if (flctl->chan_fifo0_tx && rlen >= 32 &&
2180 +- flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE) > 0)
2181 ++ !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE))
2182 + return; /* DMA success */
2183 +
2184 + /* do polling transfer */
2185 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2186 +index 46c3301a5e07d..2e75b7e8f70b3 100644
2187 +--- a/drivers/net/bonding/bond_main.c
2188 ++++ b/drivers/net/bonding/bond_main.c
2189 +@@ -3817,14 +3817,19 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const v
2190 + return true;
2191 + }
2192 +
2193 +-static u32 bond_ip_hash(u32 hash, struct flow_keys *flow)
2194 ++static u32 bond_ip_hash(u32 hash, struct flow_keys *flow, int xmit_policy)
2195 + {
2196 + hash ^= (__force u32)flow_get_u32_dst(flow) ^
2197 + (__force u32)flow_get_u32_src(flow);
2198 + hash ^= (hash >> 16);
2199 + hash ^= (hash >> 8);
2200 ++
2201 + /* discard lowest hash bit to deal with the common even ports pattern */
2202 +- return hash >> 1;
2203 ++ if (xmit_policy == BOND_XMIT_POLICY_LAYER34 ||
2204 ++ xmit_policy == BOND_XMIT_POLICY_ENCAP34)
2205 ++ return hash >> 1;
2206 ++
2207 ++ return hash;
2208 + }
2209 +
2210 + /* Generate hash based on xmit policy. If @skb is given it is used to linearize
2211 +@@ -3854,7 +3859,7 @@ static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const voi
2212 + memcpy(&hash, &flow.ports.ports, sizeof(hash));
2213 + }
2214 +
2215 +- return bond_ip_hash(hash, &flow);
2216 ++ return bond_ip_hash(hash, &flow, bond->params.xmit_policy);
2217 + }
2218 +
2219 + /**
2220 +@@ -5012,7 +5017,7 @@ static u32 bond_sk_hash_l34(struct sock *sk)
2221 + /* L4 */
2222 + memcpy(&hash, &flow.ports.ports, sizeof(hash));
2223 + /* L3 */
2224 +- return bond_ip_hash(hash, &flow);
2225 ++ return bond_ip_hash(hash, &flow, BOND_XMIT_POLICY_LAYER34);
2226 + }
2227 +
2228 + static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
2229 +diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
2230 +index 9e006a25b636c..8a8f392813d82 100644
2231 +--- a/drivers/net/dsa/lantiq_gswip.c
2232 ++++ b/drivers/net/dsa/lantiq_gswip.c
2233 +@@ -1663,9 +1663,6 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
2234 + break;
2235 + case PHY_INTERFACE_MODE_RMII:
2236 + miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
2237 +-
2238 +- /* Configure the RMII clock as output: */
2239 +- miicfg |= GSWIP_MII_CFG_RMII_CLK;
2240 + break;
2241 + case PHY_INTERFACE_MODE_RGMII:
2242 + case PHY_INTERFACE_MODE_RGMII_ID:
2243 +diff --git a/drivers/net/dsa/mv88e6xxx/port_hidden.c b/drivers/net/dsa/mv88e6xxx/port_hidden.c
2244 +index b49d05f0e1179..7a9f9ff6dedf3 100644
2245 +--- a/drivers/net/dsa/mv88e6xxx/port_hidden.c
2246 ++++ b/drivers/net/dsa/mv88e6xxx/port_hidden.c
2247 +@@ -40,8 +40,9 @@ int mv88e6xxx_port_hidden_wait(struct mv88e6xxx_chip *chip)
2248 + {
2249 + int bit = __bf_shf(MV88E6XXX_PORT_RESERVED_1A_BUSY);
2250 +
2251 +- return mv88e6xxx_wait_bit(chip, MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
2252 +- MV88E6XXX_PORT_RESERVED_1A, bit, 0);
2253 ++ return mv88e6xxx_port_wait_bit(chip,
2254 ++ MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
2255 ++ MV88E6XXX_PORT_RESERVED_1A, bit, 0);
2256 + }
2257 +
2258 + int mv88e6xxx_port_hidden_read(struct mv88e6xxx_chip *chip, int block, int port,
2259 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
2260 +index bdd4e420f869f..553f3de939574 100644
2261 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
2262 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
2263 +@@ -14158,10 +14158,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
2264 +
2265 + /* Stop Tx */
2266 + bnx2x_tx_disable(bp);
2267 +- /* Delete all NAPI objects */
2268 +- bnx2x_del_all_napi(bp);
2269 +- if (CNIC_LOADED(bp))
2270 +- bnx2x_del_all_napi_cnic(bp);
2271 + netdev_reset_tc(bp->dev);
2272 +
2273 + del_timer_sync(&bp->timer);
2274 +@@ -14266,6 +14262,11 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
2275 + bnx2x_drain_tx_queues(bp);
2276 + bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
2277 + bnx2x_netif_stop(bp, 1);
2278 ++ bnx2x_del_all_napi(bp);
2279 ++
2280 ++ if (CNIC_LOADED(bp))
2281 ++ bnx2x_del_all_napi_cnic(bp);
2282 ++
2283 + bnx2x_free_irq(bp);
2284 +
2285 + /* Report UNLOAD_DONE to MCP */
2286 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
2287 +index b4f99dd284e53..8bcc39b1575c2 100644
2288 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
2289 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
2290 +@@ -1991,6 +1991,11 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
2291 + return skb;
2292 + }
2293 +
2294 ++static void bcmgenet_hide_tsb(struct sk_buff *skb)
2295 ++{
2296 ++ __skb_pull(skb, sizeof(struct status_64));
2297 ++}
2298 ++
2299 + static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
2300 + {
2301 + struct bcmgenet_priv *priv = netdev_priv(dev);
2302 +@@ -2097,6 +2102,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
2303 + }
2304 +
2305 + GENET_CB(skb)->last_cb = tx_cb_ptr;
2306 ++
2307 ++ bcmgenet_hide_tsb(skb);
2308 + skb_tx_timestamp(skb);
2309 +
2310 + /* Decrement total BD count and advance our write pointer */
2311 +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
2312 +index a3e87e10ee6bd..67eb9b671244b 100644
2313 +--- a/drivers/net/ethernet/freescale/fec_main.c
2314 ++++ b/drivers/net/ethernet/freescale/fec_main.c
2315 +@@ -3726,7 +3726,7 @@ static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
2316 + ARRAY_SIZE(out_val));
2317 + if (ret) {
2318 + dev_dbg(&fep->pdev->dev, "no stop mode property\n");
2319 +- return ret;
2320 ++ goto out;
2321 + }
2322 +
2323 + fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
2324 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2325 +index 16cbd146ad064..818a028703c65 100644
2326 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2327 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2328 +@@ -5092,6 +5092,13 @@ static void hns3_state_init(struct hnae3_handle *handle)
2329 + set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
2330 + }
2331 +
2332 ++static void hns3_state_uninit(struct hnae3_handle *handle)
2333 ++{
2334 ++ struct hns3_nic_priv *priv = handle->priv;
2335 ++
2336 ++ clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
2337 ++}
2338 ++
2339 + static int hns3_client_init(struct hnae3_handle *handle)
2340 + {
2341 + struct pci_dev *pdev = handle->pdev;
2342 +@@ -5209,7 +5216,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
2343 + return ret;
2344 +
2345 + out_reg_netdev_fail:
2346 ++ hns3_state_uninit(handle);
2347 + hns3_dbg_uninit(handle);
2348 ++ hns3_client_stop(handle);
2349 + out_client_start:
2350 + hns3_free_rx_cpu_rmap(netdev);
2351 + hns3_nic_uninit_irq(priv);
2352 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
2353 +index 65d78ee4d65a0..4a5b11b6fed3f 100644
2354 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
2355 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
2356 +@@ -93,6 +93,13 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
2357 + enum hclge_cmd_status status;
2358 + struct hclge_desc desc;
2359 +
2360 ++ if (msg_len > HCLGE_MBX_MAX_MSG_SIZE) {
2361 ++ dev_err(&hdev->pdev->dev,
2362 ++ "msg data length(=%u) exceeds maximum(=%u)\n",
2363 ++ msg_len, HCLGE_MBX_MAX_MSG_SIZE);
2364 ++ return -EMSGSIZE;
2365 ++ }
2366 ++
2367 + resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
2368 +
2369 + hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
2370 +@@ -175,7 +182,7 @@ static int hclge_get_ring_chain_from_mbx(
2371 + ring_num = req->msg.ring_num;
2372 +
2373 + if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
2374 +- return -ENOMEM;
2375 ++ return -EINVAL;
2376 +
2377 + for (i = 0; i < ring_num; i++) {
2378 + if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
2379 +@@ -586,9 +593,9 @@ static int hclge_set_vf_mtu(struct hclge_vport *vport,
2380 + return hclge_set_vport_mtu(vport, mtu);
2381 + }
2382 +
2383 +-static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
2384 +- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
2385 +- struct hclge_respond_to_vf_msg *resp_msg)
2386 ++static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
2387 ++ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
2388 ++ struct hclge_respond_to_vf_msg *resp_msg)
2389 + {
2390 + struct hnae3_handle *handle = &vport->nic;
2391 + struct hclge_dev *hdev = vport->back;
2392 +@@ -598,17 +605,18 @@ static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
2393 + if (queue_id >= handle->kinfo.num_tqps) {
2394 + dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
2395 + queue_id, mbx_req->mbx_src_vfid);
2396 +- return;
2397 ++ return -EINVAL;
2398 + }
2399 +
2400 + qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
2401 + memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
2402 + resp_msg->len = sizeof(qid_in_pf);
2403 ++ return 0;
2404 + }
2405 +
2406 +-static void hclge_get_rss_key(struct hclge_vport *vport,
2407 +- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
2408 +- struct hclge_respond_to_vf_msg *resp_msg)
2409 ++static int hclge_get_rss_key(struct hclge_vport *vport,
2410 ++ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
2411 ++ struct hclge_respond_to_vf_msg *resp_msg)
2412 + {
2413 + #define HCLGE_RSS_MBX_RESP_LEN 8
2414 + struct hclge_dev *hdev = vport->back;
2415 +@@ -624,13 +632,14 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
2416 + dev_warn(&hdev->pdev->dev,
2417 + "failed to get the rss hash key, the index(%u) invalid !\n",
2418 + index);
2419 +- return;
2420 ++ return -EINVAL;
2421 + }
2422 +
2423 + memcpy(resp_msg->data,
2424 + &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
2425 + HCLGE_RSS_MBX_RESP_LEN);
2426 + resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
2427 ++ return 0;
2428 + }
2429 +
2430 + static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
2431 +@@ -805,10 +814,10 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
2432 + "VF fail(%d) to set mtu\n", ret);
2433 + break;
2434 + case HCLGE_MBX_GET_QID_IN_PF:
2435 +- hclge_get_queue_id_in_pf(vport, req, &resp_msg);
2436 ++ ret = hclge_get_queue_id_in_pf(vport, req, &resp_msg);
2437 + break;
2438 + case HCLGE_MBX_GET_RSS_KEY:
2439 +- hclge_get_rss_key(vport, req, &resp_msg);
2440 ++ ret = hclge_get_rss_key(vport, req, &resp_msg);
2441 + break;
2442 + case HCLGE_MBX_GET_LINK_MODE:
2443 + hclge_get_link_mode(vport, req);
2444 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
2445 +index c809e8fe648f9..b262aa84b6a24 100644
2446 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
2447 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
2448 +@@ -2961,13 +2961,8 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
2449 + {
2450 + struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2451 +
2452 +- if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2453 +- ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2454 +- ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2455 +- } else {
2456 +- ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2457 +- ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2458 +- }
2459 ++ ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2460 ++ ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2461 + ring->rx_mini_max_pending = 0;
2462 + ring->rx_jumbo_max_pending = 0;
2463 + ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2464 +@@ -2980,23 +2975,21 @@ static int ibmvnic_set_ringparam(struct net_device *netdev,
2465 + struct ethtool_ringparam *ring)
2466 + {
2467 + struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2468 +- int ret;
2469 +
2470 +- ret = 0;
2471 ++ if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
2472 ++ ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
2473 ++ netdev_err(netdev, "Invalid request.\n");
2474 ++ netdev_err(netdev, "Max tx buffers = %llu\n",
2475 ++ adapter->max_rx_add_entries_per_subcrq);
2476 ++ netdev_err(netdev, "Max rx buffers = %llu\n",
2477 ++ adapter->max_tx_entries_per_subcrq);
2478 ++ return -EINVAL;
2479 ++ }
2480 ++
2481 + adapter->desired.rx_entries = ring->rx_pending;
2482 + adapter->desired.tx_entries = ring->tx_pending;
2483 +
2484 +- ret = wait_for_reset(adapter);
2485 +-
2486 +- if (!ret &&
2487 +- (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2488 +- adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2489 +- netdev_info(netdev,
2490 +- "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2491 +- ring->rx_pending, ring->tx_pending,
2492 +- adapter->req_rx_add_entries_per_subcrq,
2493 +- adapter->req_tx_entries_per_subcrq);
2494 +- return ret;
2495 ++ return wait_for_reset(adapter);
2496 + }
2497 +
2498 + static void ibmvnic_get_channels(struct net_device *netdev,
2499 +@@ -3004,14 +2997,8 @@ static void ibmvnic_get_channels(struct net_device *netdev,
2500 + {
2501 + struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2502 +
2503 +- if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2504 +- channels->max_rx = adapter->max_rx_queues;
2505 +- channels->max_tx = adapter->max_tx_queues;
2506 +- } else {
2507 +- channels->max_rx = IBMVNIC_MAX_QUEUES;
2508 +- channels->max_tx = IBMVNIC_MAX_QUEUES;
2509 +- }
2510 +-
2511 ++ channels->max_rx = adapter->max_rx_queues;
2512 ++ channels->max_tx = adapter->max_tx_queues;
2513 + channels->max_other = 0;
2514 + channels->max_combined = 0;
2515 + channels->rx_count = adapter->req_rx_queues;
2516 +@@ -3024,22 +3011,11 @@ static int ibmvnic_set_channels(struct net_device *netdev,
2517 + struct ethtool_channels *channels)
2518 + {
2519 + struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2520 +- int ret;
2521 +
2522 +- ret = 0;
2523 + adapter->desired.rx_queues = channels->rx_count;
2524 + adapter->desired.tx_queues = channels->tx_count;
2525 +
2526 +- ret = wait_for_reset(adapter);
2527 +-
2528 +- if (!ret &&
2529 +- (adapter->req_rx_queues != channels->rx_count ||
2530 +- adapter->req_tx_queues != channels->tx_count))
2531 +- netdev_info(netdev,
2532 +- "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2533 +- channels->rx_count, channels->tx_count,
2534 +- adapter->req_rx_queues, adapter->req_tx_queues);
2535 +- return ret;
2536 ++ return wait_for_reset(adapter);
2537 + }
2538 +
2539 + static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2540 +@@ -3047,43 +3023,32 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2541 + struct ibmvnic_adapter *adapter = netdev_priv(dev);
2542 + int i;
2543 +
2544 +- switch (stringset) {
2545 +- case ETH_SS_STATS:
2546 +- for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
2547 +- i++, data += ETH_GSTRING_LEN)
2548 +- memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2549 ++ if (stringset != ETH_SS_STATS)
2550 ++ return;
2551 +
2552 +- for (i = 0; i < adapter->req_tx_queues; i++) {
2553 +- snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2554 +- data += ETH_GSTRING_LEN;
2555 ++ for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
2556 ++ memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2557 +
2558 +- snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2559 +- data += ETH_GSTRING_LEN;
2560 ++ for (i = 0; i < adapter->req_tx_queues; i++) {
2561 ++ snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2562 ++ data += ETH_GSTRING_LEN;
2563 +
2564 +- snprintf(data, ETH_GSTRING_LEN,
2565 +- "tx%d_dropped_packets", i);
2566 +- data += ETH_GSTRING_LEN;
2567 +- }
2568 ++ snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2569 ++ data += ETH_GSTRING_LEN;
2570 +
2571 +- for (i = 0; i < adapter->req_rx_queues; i++) {
2572 +- snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2573 +- data += ETH_GSTRING_LEN;
2574 ++ snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
2575 ++ data += ETH_GSTRING_LEN;
2576 ++ }
2577 +
2578 +- snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2579 +- data += ETH_GSTRING_LEN;
2580 ++ for (i = 0; i < adapter->req_rx_queues; i++) {
2581 ++ snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2582 ++ data += ETH_GSTRING_LEN;
2583 +
2584 +- snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2585 +- data += ETH_GSTRING_LEN;
2586 +- }
2587 +- break;
2588 ++ snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2589 ++ data += ETH_GSTRING_LEN;
2590 +
2591 +- case ETH_SS_PRIV_FLAGS:
2592 +- for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
2593 +- strcpy(data + i * ETH_GSTRING_LEN,
2594 +- ibmvnic_priv_flags[i]);
2595 +- break;
2596 +- default:
2597 +- return;
2598 ++ snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2599 ++ data += ETH_GSTRING_LEN;
2600 + }
2601 + }
2602 +
2603 +@@ -3096,8 +3061,6 @@ static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2604 + return ARRAY_SIZE(ibmvnic_stats) +
2605 + adapter->req_tx_queues * NUM_TX_STATS +
2606 + adapter->req_rx_queues * NUM_RX_STATS;
2607 +- case ETH_SS_PRIV_FLAGS:
2608 +- return ARRAY_SIZE(ibmvnic_priv_flags);
2609 + default:
2610 + return -EOPNOTSUPP;
2611 + }
2612 +@@ -3150,26 +3113,6 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2613 + }
2614 + }
2615 +
2616 +-static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
2617 +-{
2618 +- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2619 +-
2620 +- return adapter->priv_flags;
2621 +-}
2622 +-
2623 +-static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
2624 +-{
2625 +- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2626 +- bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
2627 +-
2628 +- if (which_maxes)
2629 +- adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
2630 +- else
2631 +- adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
2632 +-
2633 +- return 0;
2634 +-}
2635 +-
2636 + static const struct ethtool_ops ibmvnic_ethtool_ops = {
2637 + .get_drvinfo = ibmvnic_get_drvinfo,
2638 + .get_msglevel = ibmvnic_get_msglevel,
2639 +@@ -3183,8 +3126,6 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
2640 + .get_sset_count = ibmvnic_get_sset_count,
2641 + .get_ethtool_stats = ibmvnic_get_ethtool_stats,
2642 + .get_link_ksettings = ibmvnic_get_link_ksettings,
2643 +- .get_priv_flags = ibmvnic_get_priv_flags,
2644 +- .set_priv_flags = ibmvnic_set_priv_flags,
2645 + };
2646 +
2647 + /* Routines for managing CRQs/sCRQs */
2648 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
2649 +index ef395fd3b1e6f..1a9ed9202654f 100644
2650 +--- a/drivers/net/ethernet/ibm/ibmvnic.h
2651 ++++ b/drivers/net/ethernet/ibm/ibmvnic.h
2652 +@@ -43,11 +43,6 @@
2653 +
2654 + #define IBMVNIC_RESET_DELAY 100
2655 +
2656 +-static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
2657 +-#define IBMVNIC_USE_SERVER_MAXES 0x1
2658 +- "use-server-maxes"
2659 +-};
2660 +-
2661 + struct ibmvnic_login_buffer {
2662 + __be32 len;
2663 + __be32 version;
2664 +@@ -885,7 +880,6 @@ struct ibmvnic_adapter {
2665 + struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
2666 + dma_addr_t ip_offload_ctrl_tok;
2667 + u32 msg_enable;
2668 +- u32 priv_flags;
2669 +
2670 + /* Vital Product Data (VPD) */
2671 + struct ibmvnic_vpd *vpd;
2672 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
2673 +index e596e1a9fc757..69d11ff7677d6 100644
2674 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
2675 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
2676 +@@ -903,7 +903,8 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
2677 + /* Tx IPsec offload doesn't seem to work on this
2678 + * device, so block these requests for now.
2679 + */
2680 +- if (!(sam->flags & XFRM_OFFLOAD_INBOUND)) {
2681 ++ sam->flags = sam->flags & ~XFRM_OFFLOAD_IPV6;
2682 ++ if (sam->flags != XFRM_OFFLOAD_INBOUND) {
2683 + err = -EOPNOTSUPP;
2684 + goto err_out;
2685 + }
2686 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
2687 +index ac9e6c7a33b55..6b447d8f0bd8a 100644
2688 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
2689 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
2690 +@@ -65,8 +65,9 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
2691 + struct phy_device *phy_dev = ndev->phydev;
2692 + u32 val;
2693 +
2694 +- writew(SGMII_ADAPTER_DISABLE,
2695 +- sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
2696 ++ if (sgmii_adapter_base)
2697 ++ writew(SGMII_ADAPTER_DISABLE,
2698 ++ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
2699 +
2700 + if (splitter_base) {
2701 + val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG);
2702 +@@ -88,10 +89,11 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
2703 + writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
2704 + }
2705 +
2706 +- writew(SGMII_ADAPTER_ENABLE,
2707 +- sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
2708 +- if (phy_dev)
2709 ++ if (phy_dev && sgmii_adapter_base) {
2710 ++ writew(SGMII_ADAPTER_ENABLE,
2711 ++ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
2712 + tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed);
2713 ++ }
2714 + }
2715 +
2716 + static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev)
2717 +diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
2718 +index 7661dbb31162b..50e4bea46d673 100644
2719 +--- a/drivers/net/hippi/rrunner.c
2720 ++++ b/drivers/net/hippi/rrunner.c
2721 +@@ -1353,7 +1353,9 @@ static int rr_close(struct net_device *dev)
2722 +
2723 + rrpriv->fw_running = 0;
2724 +
2725 ++ spin_unlock_irqrestore(&rrpriv->lock, flags);
2726 + del_timer_sync(&rrpriv->timer);
2727 ++ spin_lock_irqsave(&rrpriv->lock, flags);
2728 +
2729 + writel(0, &regs->TxPi);
2730 + writel(0, &regs->IpRxPi);
2731 +diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
2732 +index bd310e8d5e43d..df33637c5269a 100644
2733 +--- a/drivers/net/phy/marvell10g.c
2734 ++++ b/drivers/net/phy/marvell10g.c
2735 +@@ -789,7 +789,7 @@ static int mv3310_read_status_copper(struct phy_device *phydev)
2736 +
2737 + cssr1 = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_CSSR1);
2738 + if (cssr1 < 0)
2739 +- return val;
2740 ++ return cssr1;
2741 +
2742 + /* If the link settings are not resolved, mark the link down */
2743 + if (!(cssr1 & MV_PCS_CSSR1_RESOLVED)) {
2744 +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
2745 +index 4ad25a8b0870c..73aba760e10c6 100644
2746 +--- a/drivers/net/virtio_net.c
2747 ++++ b/drivers/net/virtio_net.c
2748 +@@ -965,6 +965,24 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
2749 + * xdp.data_meta were adjusted
2750 + */
2751 + len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
2752 ++
2753 ++ /* recalculate headroom if xdp.data or xdp_data_meta
2754 ++ * were adjusted, note that offset should always point
2755 ++ * to the start of the reserved bytes for virtio_net
2756 ++ * header which are followed by xdp.data, that means
2757 ++ * that offset is equal to the headroom (when buf is
2758 ++ * starting at the beginning of the page, otherwise
2759 ++ * there is a base offset inside the page) but it's used
2760 ++ * with a different starting point (buf start) than
2761 ++ * xdp.data (buf start + vnet hdr size). If xdp.data or
2762 ++ * data_meta were adjusted by the xdp prog then the
2763 ++ * headroom size has changed and so has the offset, we
2764 ++ * can use data_hard_start, which points at buf start +
2765 ++ * vnet hdr size, to calculate the new headroom and use
2766 ++ * it later to compute buf start in page_to_skb()
2767 ++ */
2768 ++ headroom = xdp.data - xdp.data_hard_start - metasize;
2769 ++
2770 + /* We can only create skb based on xdp_page. */
2771 + if (unlikely(xdp_page != page)) {
2772 + rcu_read_unlock();
2773 +@@ -972,7 +990,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
2774 + head_skb = page_to_skb(vi, rq, xdp_page, offset,
2775 + len, PAGE_SIZE, false,
2776 + metasize,
2777 +- VIRTIO_XDP_HEADROOM);
2778 ++ headroom);
2779 + return head_skb;
2780 + }
2781 + break;
2782 +diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
2783 +index a46067c38bf5d..5eaef79c06e16 100644
2784 +--- a/drivers/net/wireguard/device.c
2785 ++++ b/drivers/net/wireguard/device.c
2786 +@@ -19,6 +19,7 @@
2787 + #include <linux/if_arp.h>
2788 + #include <linux/icmp.h>
2789 + #include <linux/suspend.h>
2790 ++#include <net/dst_metadata.h>
2791 + #include <net/icmp.h>
2792 + #include <net/rtnetlink.h>
2793 + #include <net/ip_tunnels.h>
2794 +@@ -152,7 +153,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
2795 + goto err_peer;
2796 + }
2797 +
2798 +- mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
2799 ++ mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
2800 +
2801 + __skb_queue_head_init(&packets);
2802 + if (!skb_is_gso(skb)) {
2803 +diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
2804 +index 5b471ab80fe28..54d65a6f0fccf 100644
2805 +--- a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
2806 ++++ b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
2807 +@@ -414,19 +414,19 @@ static int phy_g12a_usb3_pcie_probe(struct platform_device *pdev)
2808 +
2809 + ret = clk_prepare_enable(priv->clk_ref);
2810 + if (ret)
2811 +- goto err_disable_clk_ref;
2812 ++ return ret;
2813 +
2814 + priv->reset = devm_reset_control_array_get_exclusive(dev);
2815 +- if (IS_ERR(priv->reset))
2816 +- return PTR_ERR(priv->reset);
2817 ++ if (IS_ERR(priv->reset)) {
2818 ++ ret = PTR_ERR(priv->reset);
2819 ++ goto err_disable_clk_ref;
2820 ++ }
2821 +
2822 + priv->phy = devm_phy_create(dev, np, &phy_g12a_usb3_pcie_ops);
2823 + if (IS_ERR(priv->phy)) {
2824 + ret = PTR_ERR(priv->phy);
2825 +- if (ret != -EPROBE_DEFER)
2826 +- dev_err(dev, "failed to create PHY\n");
2827 +-
2828 +- return ret;
2829 ++ dev_err_probe(dev, ret, "failed to create PHY\n");
2830 ++ goto err_disable_clk_ref;
2831 + }
2832 +
2833 + phy_set_drvdata(priv->phy, priv);
2834 +@@ -434,8 +434,12 @@ static int phy_g12a_usb3_pcie_probe(struct platform_device *pdev)
2835 +
2836 + phy_provider = devm_of_phy_provider_register(dev,
2837 + phy_g12a_usb3_pcie_xlate);
2838 ++ if (IS_ERR(phy_provider)) {
2839 ++ ret = PTR_ERR(phy_provider);
2840 ++ goto err_disable_clk_ref;
2841 ++ }
2842 +
2843 +- return PTR_ERR_OR_ZERO(phy_provider);
2844 ++ return 0;
2845 +
2846 + err_disable_clk_ref:
2847 + clk_disable_unprepare(priv->clk_ref);
2848 +diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
2849 +index 5172971f4c360..3cd4d51c247c3 100644
2850 +--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
2851 ++++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
2852 +@@ -629,7 +629,8 @@ idle:
2853 + cleanup:
2854 + if (error < 0)
2855 + phy_mdm6600_device_power_off(ddata);
2856 +-
2857 ++ pm_runtime_disable(ddata->dev);
2858 ++ pm_runtime_dont_use_autosuspend(ddata->dev);
2859 + return error;
2860 + }
2861 +
2862 +diff --git a/drivers/phy/samsung/phy-exynos5250-sata.c b/drivers/phy/samsung/phy-exynos5250-sata.c
2863 +index 9ec234243f7c6..595adba5fb8f1 100644
2864 +--- a/drivers/phy/samsung/phy-exynos5250-sata.c
2865 ++++ b/drivers/phy/samsung/phy-exynos5250-sata.c
2866 +@@ -187,6 +187,7 @@ static int exynos_sata_phy_probe(struct platform_device *pdev)
2867 + return -EINVAL;
2868 +
2869 + sata_phy->client = of_find_i2c_device_by_node(node);
2870 ++ of_node_put(node);
2871 + if (!sata_phy->client)
2872 + return -EPROBE_DEFER;
2873 +
2874 +@@ -195,20 +196,21 @@ static int exynos_sata_phy_probe(struct platform_device *pdev)
2875 + sata_phy->phyclk = devm_clk_get(dev, "sata_phyctrl");
2876 + if (IS_ERR(sata_phy->phyclk)) {
2877 + dev_err(dev, "failed to get clk for PHY\n");
2878 +- return PTR_ERR(sata_phy->phyclk);
2879 ++ ret = PTR_ERR(sata_phy->phyclk);
2880 ++ goto put_dev;
2881 + }
2882 +
2883 + ret = clk_prepare_enable(sata_phy->phyclk);
2884 + if (ret < 0) {
2885 + dev_err(dev, "failed to enable source clk\n");
2886 +- return ret;
2887 ++ goto put_dev;
2888 + }
2889 +
2890 + sata_phy->phy = devm_phy_create(dev, NULL, &exynos_sata_phy_ops);
2891 + if (IS_ERR(sata_phy->phy)) {
2892 +- clk_disable_unprepare(sata_phy->phyclk);
2893 + dev_err(dev, "failed to create PHY\n");
2894 +- return PTR_ERR(sata_phy->phy);
2895 ++ ret = PTR_ERR(sata_phy->phy);
2896 ++ goto clk_disable;
2897 + }
2898 +
2899 + phy_set_drvdata(sata_phy->phy, sata_phy);
2900 +@@ -216,11 +218,18 @@ static int exynos_sata_phy_probe(struct platform_device *pdev)
2901 + phy_provider = devm_of_phy_provider_register(dev,
2902 + of_phy_simple_xlate);
2903 + if (IS_ERR(phy_provider)) {
2904 +- clk_disable_unprepare(sata_phy->phyclk);
2905 +- return PTR_ERR(phy_provider);
2906 ++ ret = PTR_ERR(phy_provider);
2907 ++ goto clk_disable;
2908 + }
2909 +
2910 + return 0;
2911 ++
2912 ++clk_disable:
2913 ++ clk_disable_unprepare(sata_phy->phyclk);
2914 ++put_dev:
2915 ++ put_device(&sata_phy->client->dev);
2916 ++
2917 ++ return ret;
2918 + }
2919 +
2920 + static const struct of_device_id exynos_sata_phy_of_match[] = {
2921 +diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c
2922 +index 2ff56ce77b307..21c0088f5ca9e 100644
2923 +--- a/drivers/phy/ti/phy-am654-serdes.c
2924 ++++ b/drivers/phy/ti/phy-am654-serdes.c
2925 +@@ -838,7 +838,7 @@ static int serdes_am654_probe(struct platform_device *pdev)
2926 +
2927 + clk_err:
2928 + of_clk_del_provider(node);
2929 +-
2930 ++ pm_runtime_disable(dev);
2931 + return ret;
2932 + }
2933 +
2934 +diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
2935 +index ebceb1520ce88..ca8532a3f1931 100644
2936 +--- a/drivers/phy/ti/phy-omap-usb2.c
2937 ++++ b/drivers/phy/ti/phy-omap-usb2.c
2938 +@@ -215,7 +215,7 @@ static int omap_usb2_enable_clocks(struct omap_usb *phy)
2939 + return 0;
2940 +
2941 + err1:
2942 +- clk_disable(phy->wkupclk);
2943 ++ clk_disable_unprepare(phy->wkupclk);
2944 +
2945 + err0:
2946 + return ret;
2947 +diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
2948 +index 7040a7a7bd5d1..246b0e951e1ce 100644
2949 +--- a/drivers/pinctrl/mediatek/Kconfig
2950 ++++ b/drivers/pinctrl/mediatek/Kconfig
2951 +@@ -30,6 +30,7 @@ config PINCTRL_MTK_MOORE
2952 + select GENERIC_PINMUX_FUNCTIONS
2953 + select GPIOLIB
2954 + select OF_GPIO
2955 ++ select EINT_MTK
2956 + select PINCTRL_MTK_V2
2957 +
2958 + config PINCTRL_MTK_PARIS
2959 +diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
2960 +index 8d271c6b0ca41..5de691c630b4f 100644
2961 +--- a/drivers/pinctrl/pinctrl-pistachio.c
2962 ++++ b/drivers/pinctrl/pinctrl-pistachio.c
2963 +@@ -1374,10 +1374,10 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
2964 + }
2965 +
2966 + irq = irq_of_parse_and_map(child, 0);
2967 +- if (irq < 0) {
2968 +- dev_err(pctl->dev, "No IRQ for bank %u: %d\n", i, irq);
2969 ++ if (!irq) {
2970 ++ dev_err(pctl->dev, "No IRQ for bank %u\n", i);
2971 + of_node_put(child);
2972 +- ret = irq;
2973 ++ ret = -EINVAL;
2974 + goto err;
2975 + }
2976 +
2977 +diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
2978 +index 923ff21a44c05..543a4991cf700 100644
2979 +--- a/drivers/pinctrl/pinctrl-rockchip.c
2980 ++++ b/drivers/pinctrl/pinctrl-rockchip.c
2981 +@@ -455,95 +455,110 @@ static struct rockchip_mux_recalced_data rk3128_mux_recalced_data[] = {
2982 +
2983 + static struct rockchip_mux_recalced_data rk3308_mux_recalced_data[] = {
2984 + {
2985 ++ /* gpio1b6_sel */
2986 + .num = 1,
2987 + .pin = 14,
2988 + .reg = 0x28,
2989 + .bit = 12,
2990 + .mask = 0xf
2991 + }, {
2992 ++ /* gpio1b7_sel */
2993 + .num = 1,
2994 + .pin = 15,
2995 + .reg = 0x2c,
2996 + .bit = 0,
2997 + .mask = 0x3
2998 + }, {
2999 ++ /* gpio1c2_sel */
3000 + .num = 1,
3001 + .pin = 18,
3002 + .reg = 0x30,
3003 + .bit = 4,
3004 + .mask = 0xf
3005 + }, {
3006 ++ /* gpio1c3_sel */
3007 + .num = 1,
3008 + .pin = 19,
3009 + .reg = 0x30,
3010 + .bit = 8,
3011 + .mask = 0xf
3012 + }, {
3013 ++ /* gpio1c4_sel */
3014 + .num = 1,
3015 + .pin = 20,
3016 + .reg = 0x30,
3017 + .bit = 12,
3018 + .mask = 0xf
3019 + }, {
3020 ++ /* gpio1c5_sel */
3021 + .num = 1,
3022 + .pin = 21,
3023 + .reg = 0x34,
3024 + .bit = 0,
3025 + .mask = 0xf
3026 + }, {
3027 ++ /* gpio1c6_sel */
3028 + .num = 1,
3029 + .pin = 22,
3030 + .reg = 0x34,
3031 + .bit = 4,
3032 + .mask = 0xf
3033 + }, {
3034 ++ /* gpio1c7_sel */
3035 + .num = 1,
3036 + .pin = 23,
3037 + .reg = 0x34,
3038 + .bit = 8,
3039 + .mask = 0xf
3040 + }, {
3041 ++ /* gpio3b4_sel */
3042 + .num = 3,
3043 + .pin = 12,
3044 + .reg = 0x68,
3045 + .bit = 8,
3046 + .mask = 0xf
3047 + }, {
3048 ++ /* gpio3b5_sel */
3049 + .num = 3,
3050 + .pin = 13,
3051 + .reg = 0x68,
3052 + .bit = 12,
3053 + .mask = 0xf
3054 + }, {
3055 ++ /* gpio2a2_sel */
3056 + .num = 2,
3057 + .pin = 2,
3058 +- .reg = 0x608,
3059 +- .bit = 0,
3060 +- .mask = 0x7
3061 ++ .reg = 0x40,
3062 ++ .bit = 4,
3063 ++ .mask = 0x3
3064 + }, {
3065 ++ /* gpio2a3_sel */
3066 + .num = 2,
3067 + .pin = 3,
3068 +- .reg = 0x608,
3069 +- .bit = 4,
3070 +- .mask = 0x7
3071 ++ .reg = 0x40,
3072 ++ .bit = 6,
3073 ++ .mask = 0x3
3074 + }, {
3075 ++ /* gpio2c0_sel */
3076 + .num = 2,
3077 + .pin = 16,
3078 +- .reg = 0x610,
3079 +- .bit = 8,
3080 +- .mask = 0x7
3081 ++ .reg = 0x50,
3082 ++ .bit = 0,
3083 ++ .mask = 0x3
3084 + }, {
3085 ++ /* gpio3b2_sel */
3086 + .num = 3,
3087 + .pin = 10,
3088 +- .reg = 0x610,
3089 +- .bit = 0,
3090 +- .mask = 0x7
3091 ++ .reg = 0x68,
3092 ++ .bit = 4,
3093 ++ .mask = 0x3
3094 + }, {
3095 ++ /* gpio3b3_sel */
3096 + .num = 3,
3097 + .pin = 11,
3098 +- .reg = 0x610,
3099 +- .bit = 4,
3100 +- .mask = 0x7
3101 ++ .reg = 0x68,
3102 ++ .bit = 6,
3103 ++ .mask = 0x3
3104 + },
3105 + };
3106 +
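The corrected entries above move the gpio2/gpio3 selectors out of the 0x608/0x610 range into 0x40/0x50/0x68 with 2-bit fields. Each {reg, bit, mask} triple simply describes where one pin's mux selector lives; a minimal sketch of how such a triple is typically read and written (illustrative helpers, not the driver's code):

#include <stdint.h>
#include <stdio.h>

struct mux_recalced {                   /* mirrors the {reg, bit, mask} idea above */
        uint32_t reg;
        uint32_t bit;
        uint32_t mask;
};

/* Read the mux function currently selected for this pin. */
static uint32_t mux_read(uint32_t regval, const struct mux_recalced *d)
{
        return (regval >> d->bit) & d->mask;
}

/* Return the register value with the pin switched to function 'func'. */
static uint32_t mux_write(uint32_t regval, const struct mux_recalced *d, uint32_t func)
{
        regval &= ~(d->mask << d->bit);         /* clear the old field   */
        regval |= (func & d->mask) << d->bit;   /* install the new value */
        return regval;
}

int main(void)
{
        const struct mux_recalced gpio2a3 = { .reg = 0x40, .bit = 6, .mask = 0x3 };
        uint32_t grf = 0x000000c0;              /* pretend register content */

        printf("current func: %u\n", mux_read(grf, &gpio2a3));
        grf = mux_write(grf, &gpio2a3, 1);
        printf("after write : 0x%08x (func %u)\n", grf, mux_read(grf, &gpio2a3));
        return 0;
}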
3107 +diff --git a/drivers/pinctrl/samsung/Kconfig b/drivers/pinctrl/samsung/Kconfig
3108 +index dfd805e768624..7b0576f71376e 100644
3109 +--- a/drivers/pinctrl/samsung/Kconfig
3110 ++++ b/drivers/pinctrl/samsung/Kconfig
3111 +@@ -4,14 +4,13 @@
3112 + #
3113 + config PINCTRL_SAMSUNG
3114 + bool
3115 +- depends on OF_GPIO
3116 ++ select GPIOLIB
3117 + select PINMUX
3118 + select PINCONF
3119 +
3120 + config PINCTRL_EXYNOS
3121 + bool "Pinctrl common driver part for Samsung Exynos SoCs"
3122 +- depends on OF_GPIO
3123 +- depends on ARCH_EXYNOS || ARCH_S5PV210 || COMPILE_TEST
3124 ++ depends on ARCH_EXYNOS || ARCH_S5PV210 || (COMPILE_TEST && OF)
3125 + select PINCTRL_SAMSUNG
3126 + select PINCTRL_EXYNOS_ARM if ARM && (ARCH_EXYNOS || ARCH_S5PV210)
3127 + select PINCTRL_EXYNOS_ARM64 if ARM64 && ARCH_EXYNOS
3128 +@@ -26,12 +25,10 @@ config PINCTRL_EXYNOS_ARM64
3129 +
3130 + config PINCTRL_S3C24XX
3131 + bool "Samsung S3C24XX SoC pinctrl driver"
3132 +- depends on OF_GPIO
3133 +- depends on ARCH_S3C24XX || COMPILE_TEST
3134 ++ depends on ARCH_S3C24XX || (COMPILE_TEST && OF)
3135 + select PINCTRL_SAMSUNG
3136 +
3137 + config PINCTRL_S3C64XX
3138 + bool "Samsung S3C64XX SoC pinctrl driver"
3139 +- depends on OF_GPIO
3140 +- depends on ARCH_S3C64XX || COMPILE_TEST
3141 ++ depends on ARCH_S3C64XX || (COMPILE_TEST && OF)
3142 + select PINCTRL_SAMSUNG
3143 +diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
3144 +index 8934b4878fa85..97a4fb5a93280 100644
3145 +--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
3146 ++++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
3147 +@@ -225,6 +225,13 @@ static void stm32_gpio_free(struct gpio_chip *chip, unsigned offset)
3148 + pinctrl_gpio_free(chip->base + offset);
3149 + }
3150 +
3151 ++static int stm32_gpio_get_noclk(struct gpio_chip *chip, unsigned int offset)
3152 ++{
3153 ++ struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
3154 ++
3155 ++ return !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset));
3156 ++}
3157 ++
3158 + static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
3159 + {
3160 + struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
3161 +@@ -232,7 +239,7 @@ static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
3162 +
3163 + clk_enable(bank->clk);
3164 +
3165 +- ret = !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset));
3166 ++ ret = stm32_gpio_get_noclk(chip, offset);
3167 +
3168 + clk_disable(bank->clk);
3169 +
3170 +@@ -311,8 +318,12 @@ static void stm32_gpio_irq_trigger(struct irq_data *d)
3171 + struct stm32_gpio_bank *bank = d->domain->host_data;
3172 + int level;
3173 +
3174 ++ /* Do not access the GPIO if this is not LEVEL triggered IRQ. */
3175 ++ if (!(bank->irq_type[d->hwirq] & IRQ_TYPE_LEVEL_MASK))
3176 ++ return;
3177 ++
3178 + /* If level interrupt type then retrig */
3179 +- level = stm32_gpio_get(&bank->gpio_chip, d->hwirq);
3180 ++ level = stm32_gpio_get_noclk(&bank->gpio_chip, d->hwirq);
3181 + if ((level == 0 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_LOW) ||
3182 + (level == 1 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_HIGH))
3183 + irq_chip_retrigger_hierarchy(d);
3184 +@@ -354,6 +365,7 @@ static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
3185 + {
3186 + struct stm32_gpio_bank *bank = irq_data->domain->host_data;
3187 + struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
3188 ++ unsigned long flags;
3189 + int ret;
3190 +
3191 + ret = stm32_gpio_direction_input(&bank->gpio_chip, irq_data->hwirq);
3192 +@@ -367,6 +379,10 @@ static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
3193 + return ret;
3194 + }
3195 +
3196 ++ flags = irqd_get_trigger_type(irq_data);
3197 ++ if (flags & IRQ_TYPE_LEVEL_MASK)
3198 ++ clk_enable(bank->clk);
3199 ++
3200 + return 0;
3201 + }
3202 +
3203 +@@ -374,6 +390,9 @@ static void stm32_gpio_irq_release_resources(struct irq_data *irq_data)
3204 + {
3205 + struct stm32_gpio_bank *bank = irq_data->domain->host_data;
3206 +
3207 ++ if (bank->irq_type[irq_data->hwirq] & IRQ_TYPE_LEVEL_MASK)
3208 ++ clk_disable(bank->clk);
3209 ++
3210 + gpiochip_unlock_as_irq(&bank->gpio_chip, irq_data->hwirq);
3211 + }
3212 +
3213 +diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
3214 +index c8cdc614a357c..6aa5fe9736138 100644
3215 +--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
3216 ++++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
3217 +@@ -67,7 +67,7 @@ static int evaluate_odvp(struct int3400_thermal_priv *priv);
3218 + struct odvp_attr {
3219 + int odvp;
3220 + struct int3400_thermal_priv *priv;
3221 +- struct kobj_attribute attr;
3222 ++ struct device_attribute attr;
3223 + };
3224 +
3225 + static ssize_t data_vault_read(struct file *file, struct kobject *kobj,
3226 +@@ -272,7 +272,7 @@ static int int3400_thermal_run_osc(acpi_handle handle,
3227 + return result;
3228 + }
3229 +
3230 +-static ssize_t odvp_show(struct kobject *kobj, struct kobj_attribute *attr,
3231 ++static ssize_t odvp_show(struct device *dev, struct device_attribute *attr,
3232 + char *buf)
3233 + {
3234 + struct odvp_attr *odvp_attr;
3235 +diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
3236 +index 8643b143c408e..2294d5b633b5b 100644
3237 +--- a/drivers/tty/n_gsm.c
3238 ++++ b/drivers/tty/n_gsm.c
3239 +@@ -73,6 +73,8 @@ module_param(debug, int, 0600);
3240 + */
3241 + #define MAX_MRU 1500
3242 + #define MAX_MTU 1500
3243 ++/* SOF, ADDR, CTRL, LEN1, LEN2, ..., FCS, EOF */
3244 ++#define PROT_OVERHEAD 7
3245 + #define GSM_NET_TX_TIMEOUT (HZ*10)
3246 +
3247 + /*
3248 +@@ -231,6 +233,7 @@ struct gsm_mux {
3249 + int initiator; /* Did we initiate connection */
3250 + bool dead; /* Has the mux been shut down */
3251 + struct gsm_dlci *dlci[NUM_DLCI];
3252 ++ int old_c_iflag; /* termios c_iflag value before attach */
3253 + bool constipated; /* Asked by remote to shut up */
3254 +
3255 + spinlock_t tx_lock;
3256 +@@ -820,7 +823,7 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
3257 + break;
3258 + case 2: /* Unstructed with modem bits.
3259 + Always one byte as we never send inline break data */
3260 +- *dp++ = gsm_encode_modem(dlci);
3261 ++ *dp++ = (gsm_encode_modem(dlci) << 1) | EA;
3262 + break;
3263 + }
3264 + WARN_ON(kfifo_out_locked(&dlci->fifo, dp , len, &dlci->lock) != len);
3265 +@@ -1081,7 +1084,6 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
3266 + {
3267 + unsigned int addr = 0;
3268 + unsigned int modem = 0;
3269 +- unsigned int brk = 0;
3270 + struct gsm_dlci *dlci;
3271 + int len = clen;
3272 + int slen;
3273 +@@ -1111,17 +1113,8 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
3274 + return;
3275 + }
3276 + len--;
3277 +- if (len > 0) {
3278 +- while (gsm_read_ea(&brk, *dp++) == 0) {
3279 +- len--;
3280 +- if (len == 0)
3281 +- return;
3282 +- }
3283 +- modem <<= 7;
3284 +- modem |= (brk & 0x7f);
3285 +- }
3286 + tty = tty_port_tty_get(&dlci->port);
3287 +- gsm_process_modem(tty, dlci, modem, slen);
3288 ++ gsm_process_modem(tty, dlci, modem, slen - len);
3289 + if (tty) {
3290 + tty_wakeup(tty);
3291 + tty_kref_put(tty);
3292 +@@ -1300,11 +1293,12 @@ static void gsm_control_response(struct gsm_mux *gsm, unsigned int command,
3293 +
3294 + static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl)
3295 + {
3296 +- struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 1, gsm->ftype);
3297 ++ struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 2, gsm->ftype);
3298 + if (msg == NULL)
3299 + return;
3300 +- msg->data[0] = (ctrl->cmd << 1) | 2 | EA; /* command */
3301 +- memcpy(msg->data + 1, ctrl->data, ctrl->len);
3302 ++ msg->data[0] = (ctrl->cmd << 1) | CR | EA; /* command */
3303 ++ msg->data[1] = (ctrl->len << 1) | EA;
3304 ++ memcpy(msg->data + 2, ctrl->data, ctrl->len);
3305 + gsm_data_queue(gsm->dlci[0], msg);
3306 + }
3307 +
3308 +@@ -1327,7 +1321,6 @@ static void gsm_control_retransmit(struct timer_list *t)
3309 + spin_lock_irqsave(&gsm->control_lock, flags);
3310 + ctrl = gsm->pending_cmd;
3311 + if (ctrl) {
3312 +- gsm->cretries--;
3313 + if (gsm->cretries == 0) {
3314 + gsm->pending_cmd = NULL;
3315 + ctrl->error = -ETIMEDOUT;
3316 +@@ -1336,6 +1329,7 @@ static void gsm_control_retransmit(struct timer_list *t)
3317 + wake_up(&gsm->event);
3318 + return;
3319 + }
3320 ++ gsm->cretries--;
3321 + gsm_control_transmit(gsm, ctrl);
3322 + mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
3323 + }
3324 +@@ -1376,7 +1370,7 @@ retry:
3325 +
3326 + /* If DLCI0 is in ADM mode skip retries, it won't respond */
3327 + if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
3328 +- gsm->cretries = 1;
3329 ++ gsm->cretries = 0;
3330 + else
3331 + gsm->cretries = gsm->n2;
3332 +
3333 +@@ -1424,13 +1418,17 @@ static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control)
3334 +
3335 + static void gsm_dlci_close(struct gsm_dlci *dlci)
3336 + {
3337 ++ unsigned long flags;
3338 ++
3339 + del_timer(&dlci->t1);
3340 + if (debug & 8)
3341 + pr_debug("DLCI %d goes closed.\n", dlci->addr);
3342 + dlci->state = DLCI_CLOSED;
3343 + if (dlci->addr != 0) {
3344 + tty_port_tty_hangup(&dlci->port, false);
3345 ++ spin_lock_irqsave(&dlci->lock, flags);
3346 + kfifo_reset(&dlci->fifo);
3347 ++ spin_unlock_irqrestore(&dlci->lock, flags);
3348 + /* Ensure that gsmtty_open() can return. */
3349 + tty_port_set_initialized(&dlci->port, 0);
3350 + wake_up_interruptible(&dlci->port.open_wait);
3351 +@@ -1593,6 +1591,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
3352 + tty = tty_port_tty_get(port);
3353 + if (tty) {
3354 + gsm_process_modem(tty, dlci, modem, slen);
3355 ++ tty_wakeup(tty);
3356 + tty_kref_put(tty);
3357 + }
3358 + fallthrough;
3359 +@@ -1818,7 +1817,6 @@ static void gsm_queue(struct gsm_mux *gsm)
3360 + gsm_response(gsm, address, UA);
3361 + gsm_dlci_close(dlci);
3362 + break;
3363 +- case UA:
3364 + case UA|PF:
3365 + if (cr == 0 || dlci == NULL)
3366 + break;
3367 +@@ -1962,6 +1960,16 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
3368 +
3369 + static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
3370 + {
3371 ++ /* handle XON/XOFF */
3372 ++ if ((c & ISO_IEC_646_MASK) == XON) {
3373 ++ gsm->constipated = true;
3374 ++ return;
3375 ++ } else if ((c & ISO_IEC_646_MASK) == XOFF) {
3376 ++ gsm->constipated = false;
3377 ++ /* Kick the link in case it is idling */
3378 ++ gsm_data_kick(gsm, NULL);
3379 ++ return;
3380 ++ }
3381 + if (c == GSM1_SOF) {
3382 + /* EOF is only valid in frame if we have got to the data state
3383 + and received at least one byte (the FCS) */
3384 +@@ -1976,7 +1984,8 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
3385 + }
3386 + /* Any partial frame was a runt so go back to start */
3387 + if (gsm->state != GSM_START) {
3388 +- gsm->malformed++;
3389 ++ if (gsm->state != GSM_SEARCH)
3390 ++ gsm->malformed++;
3391 + gsm->state = GSM_START;
3392 + }
3393 + /* A SOF in GSM_START means we are still reading idling or
3394 +@@ -2048,74 +2057,43 @@ static void gsm_error(struct gsm_mux *gsm,
3395 + gsm->io_error++;
3396 + }
3397 +
3398 +-static int gsm_disconnect(struct gsm_mux *gsm)
3399 +-{
3400 +- struct gsm_dlci *dlci = gsm->dlci[0];
3401 +- struct gsm_control *gc;
3402 +-
3403 +- if (!dlci)
3404 +- return 0;
3405 +-
3406 +- /* In theory disconnecting DLCI 0 is sufficient but for some
3407 +- modems this is apparently not the case. */
3408 +- gc = gsm_control_send(gsm, CMD_CLD, NULL, 0);
3409 +- if (gc)
3410 +- gsm_control_wait(gsm, gc);
3411 +-
3412 +- del_timer_sync(&gsm->t2_timer);
3413 +- /* Now we are sure T2 has stopped */
3414 +-
3415 +- gsm_dlci_begin_close(dlci);
3416 +- wait_event_interruptible(gsm->event,
3417 +- dlci->state == DLCI_CLOSED);
3418 +-
3419 +- if (signal_pending(current))
3420 +- return -EINTR;
3421 +-
3422 +- return 0;
3423 +-}
3424 +-
3425 + /**
3426 + * gsm_cleanup_mux - generic GSM protocol cleanup
3427 + * @gsm: our mux
3428 ++ * @disc: disconnect link?
3429 + *
3430 + * Clean up the bits of the mux which are the same for all framing
3431 + * protocols. Remove the mux from the mux table, stop all the timers
3432 + * and then shut down each device hanging up the channels as we go.
3433 + */
3434 +
3435 +-static void gsm_cleanup_mux(struct gsm_mux *gsm)
3436 ++static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
3437 + {
3438 + int i;
3439 + struct gsm_dlci *dlci = gsm->dlci[0];
3440 + struct gsm_msg *txq, *ntxq;
3441 +
3442 + gsm->dead = true;
3443 ++ mutex_lock(&gsm->mutex);
3444 +
3445 +- spin_lock(&gsm_mux_lock);
3446 +- for (i = 0; i < MAX_MUX; i++) {
3447 +- if (gsm_mux[i] == gsm) {
3448 +- gsm_mux[i] = NULL;
3449 +- break;
3450 ++ if (dlci) {
3451 ++ if (disc && dlci->state != DLCI_CLOSED) {
3452 ++ gsm_dlci_begin_close(dlci);
3453 ++ wait_event(gsm->event, dlci->state == DLCI_CLOSED);
3454 + }
3455 ++ dlci->dead = true;
3456 + }
3457 +- spin_unlock(&gsm_mux_lock);
3458 +- /* open failed before registering => nothing to do */
3459 +- if (i == MAX_MUX)
3460 +- return;
3461 +
3462 ++ /* Finish outstanding timers, making sure they are done */
3463 + del_timer_sync(&gsm->t2_timer);
3464 +- /* Now we are sure T2 has stopped */
3465 +- if (dlci)
3466 +- dlci->dead = true;
3467 +
3468 +- /* Free up any link layer users */
3469 +- mutex_lock(&gsm->mutex);
3470 +- for (i = 0; i < NUM_DLCI; i++)
3471 ++ /* Free up any link layer users and finally the control channel */
3472 ++ for (i = NUM_DLCI - 1; i >= 0; i--)
3473 + if (gsm->dlci[i])
3474 + gsm_dlci_release(gsm->dlci[i]);
3475 + mutex_unlock(&gsm->mutex);
3476 + /* Now wipe the queues */
3477 ++ tty_ldisc_flush(gsm->tty);
3478 + list_for_each_entry_safe(txq, ntxq, &gsm->tx_list, list)
3479 + kfree(txq);
3480 + INIT_LIST_HEAD(&gsm->tx_list);
3481 +@@ -2133,7 +2111,6 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm)
3482 + static int gsm_activate_mux(struct gsm_mux *gsm)
3483 + {
3484 + struct gsm_dlci *dlci;
3485 +- int i = 0;
3486 +
3487 + timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
3488 + init_waitqueue_head(&gsm->event);
3489 +@@ -2145,18 +2122,6 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
3490 + else
3491 + gsm->receive = gsm1_receive;
3492 +
3493 +- spin_lock(&gsm_mux_lock);
3494 +- for (i = 0; i < MAX_MUX; i++) {
3495 +- if (gsm_mux[i] == NULL) {
3496 +- gsm->num = i;
3497 +- gsm_mux[i] = gsm;
3498 +- break;
3499 +- }
3500 +- }
3501 +- spin_unlock(&gsm_mux_lock);
3502 +- if (i == MAX_MUX)
3503 +- return -EBUSY;
3504 +-
3505 + dlci = gsm_dlci_alloc(gsm, 0);
3506 + if (dlci == NULL)
3507 + return -ENOMEM;
3508 +@@ -2172,6 +2137,15 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
3509 + */
3510 + static void gsm_free_mux(struct gsm_mux *gsm)
3511 + {
3512 ++ int i;
3513 ++
3514 ++ for (i = 0; i < MAX_MUX; i++) {
3515 ++ if (gsm == gsm_mux[i]) {
3516 ++ gsm_mux[i] = NULL;
3517 ++ break;
3518 ++ }
3519 ++ }
3520 ++ mutex_destroy(&gsm->mutex);
3521 + kfree(gsm->txframe);
3522 + kfree(gsm->buf);
3523 + kfree(gsm);
3524 +@@ -2191,12 +2165,20 @@ static void gsm_free_muxr(struct kref *ref)
3525 +
3526 + static inline void mux_get(struct gsm_mux *gsm)
3527 + {
3528 ++ unsigned long flags;
3529 ++
3530 ++ spin_lock_irqsave(&gsm_mux_lock, flags);
3531 + kref_get(&gsm->ref);
3532 ++ spin_unlock_irqrestore(&gsm_mux_lock, flags);
3533 + }
3534 +
3535 + static inline void mux_put(struct gsm_mux *gsm)
3536 + {
3537 ++ unsigned long flags;
3538 ++
3539 ++ spin_lock_irqsave(&gsm_mux_lock, flags);
3540 + kref_put(&gsm->ref, gsm_free_muxr);
3541 ++ spin_unlock_irqrestore(&gsm_mux_lock, flags);
3542 + }
3543 +
3544 + static inline unsigned int mux_num_to_base(struct gsm_mux *gsm)
3545 +@@ -2217,6 +2199,7 @@ static inline unsigned int mux_line_to_num(unsigned int line)
3546 +
3547 + static struct gsm_mux *gsm_alloc_mux(void)
3548 + {
3549 ++ int i;
3550 + struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL);
3551 + if (gsm == NULL)
3552 + return NULL;
3553 +@@ -2225,7 +2208,7 @@ static struct gsm_mux *gsm_alloc_mux(void)
3554 + kfree(gsm);
3555 + return NULL;
3556 + }
3557 +- gsm->txframe = kmalloc(2 * MAX_MRU + 2, GFP_KERNEL);
3558 ++ gsm->txframe = kmalloc(2 * (MAX_MTU + PROT_OVERHEAD - 1), GFP_KERNEL);
3559 + if (gsm->txframe == NULL) {
3560 + kfree(gsm->buf);
3561 + kfree(gsm);
3562 +@@ -2246,6 +2229,26 @@ static struct gsm_mux *gsm_alloc_mux(void)
3563 + gsm->mtu = 64;
3564 + gsm->dead = true; /* Avoid early tty opens */
3565 +
3566 ++ /* Store the instance to the mux array or abort if no space is
3567 ++ * available.
3568 ++ */
3569 ++ spin_lock(&gsm_mux_lock);
3570 ++ for (i = 0; i < MAX_MUX; i++) {
3571 ++ if (!gsm_mux[i]) {
3572 ++ gsm_mux[i] = gsm;
3573 ++ gsm->num = i;
3574 ++ break;
3575 ++ }
3576 ++ }
3577 ++ spin_unlock(&gsm_mux_lock);
3578 ++ if (i == MAX_MUX) {
3579 ++ mutex_destroy(&gsm->mutex);
3580 ++ kfree(gsm->txframe);
3581 ++ kfree(gsm->buf);
3582 ++ kfree(gsm);
3583 ++ return NULL;
3584 ++ }
3585 ++
3586 + return gsm;
3587 + }
3588 +
3589 +@@ -2281,7 +2284,7 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
3590 + /* Check the MRU/MTU range looks sane */
3591 + if (c->mru > MAX_MRU || c->mtu > MAX_MTU || c->mru < 8 || c->mtu < 8)
3592 + return -EINVAL;
3593 +- if (c->n2 < 3)
3594 ++ if (c->n2 > 255)
3595 + return -EINVAL;
3596 + if (c->encapsulation > 1) /* Basic, advanced, no I */
3597 + return -EINVAL;
3598 +@@ -2312,19 +2315,11 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
3599 +
3600 + /*
3601 + * Close down what is needed, restart and initiate the new
3602 +- * configuration
3603 ++ * configuration. On the first time there is no DLCI[0]
3604 ++ * and closing or cleaning up is not necessary.
3605 + */
3606 +-
3607 +- if (need_close || need_restart) {
3608 +- int ret;
3609 +-
3610 +- ret = gsm_disconnect(gsm);
3611 +-
3612 +- if (ret)
3613 +- return ret;
3614 +- }
3615 +- if (need_restart)
3616 +- gsm_cleanup_mux(gsm);
3617 ++ if (need_close || need_restart)
3618 ++ gsm_cleanup_mux(gsm, true);
3619 +
3620 + gsm->initiator = c->initiator;
3621 + gsm->mru = c->mru;
3622 +@@ -2393,6 +2388,9 @@ static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
3623 + int ret, i;
3624 +
3625 + gsm->tty = tty_kref_get(tty);
3626 ++ /* Turn off tty XON/XOFF handling to handle it explicitly. */
3627 ++ gsm->old_c_iflag = tty->termios.c_iflag;
3628 ++ tty->termios.c_iflag &= (IXON | IXOFF);
3629 + ret = gsm_activate_mux(gsm);
3630 + if (ret != 0)
3631 + tty_kref_put(gsm->tty);
3632 +@@ -2433,7 +2431,8 @@ static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
3633 + WARN_ON(tty != gsm->tty);
3634 + for (i = 1; i < NUM_DLCI; i++)
3635 + tty_unregister_device(gsm_tty_driver, base + i);
3636 +- gsm_cleanup_mux(gsm);
3637 ++ /* Restore tty XON/XOFF handling. */
3638 ++ gsm->tty->termios.c_iflag = gsm->old_c_iflag;
3639 + tty_kref_put(gsm->tty);
3640 + gsm->tty = NULL;
3641 + }
3642 +@@ -2498,6 +2497,12 @@ static void gsmld_close(struct tty_struct *tty)
3643 + {
3644 + struct gsm_mux *gsm = tty->disc_data;
3645 +
3646 ++ /* The ldisc locks and closes the port before calling our close. This
3647 ++ * means we have no way to do a proper disconnect. We will not bother
3648 ++ * to do one.
3649 ++ */
3650 ++ gsm_cleanup_mux(gsm, false);
3651 ++
3652 + gsmld_detach_gsm(tty, gsm);
3653 +
3654 + gsmld_flush_buffer(tty);
3655 +@@ -2536,7 +2541,7 @@ static int gsmld_open(struct tty_struct *tty)
3656 +
3657 + ret = gsmld_attach_gsm(tty, gsm);
3658 + if (ret != 0) {
3659 +- gsm_cleanup_mux(gsm);
3660 ++ gsm_cleanup_mux(gsm, false);
3661 + mux_put(gsm);
3662 + }
3663 + return ret;
3664 +@@ -2895,19 +2900,19 @@ static struct tty_ldisc_ops tty_ldisc_packet = {
3665 +
3666 + static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
3667 + {
3668 +- u8 modembits[5];
3669 ++ u8 modembits[3];
3670 + struct gsm_control *ctrl;
3671 + int len = 2;
3672 +
3673 +- if (brk)
3674 ++ modembits[0] = (dlci->addr << 2) | 2 | EA; /* DLCI, Valid, EA */
3675 ++ if (!brk) {
3676 ++ modembits[1] = (gsm_encode_modem(dlci) << 1) | EA;
3677 ++ } else {
3678 ++ modembits[1] = gsm_encode_modem(dlci) << 1;
3679 ++ modembits[2] = (brk << 4) | 2 | EA; /* Length, Break, EA */
3680 + len++;
3681 +-
3682 +- modembits[0] = len << 1 | EA; /* Data bytes */
3683 +- modembits[1] = dlci->addr << 2 | 3; /* DLCI, EA, 1 */
3684 +- modembits[2] = gsm_encode_modem(dlci) << 1 | EA;
3685 +- if (brk)
3686 +- modembits[3] = brk << 4 | 2 | EA; /* Valid, EA */
3687 +- ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len + 1);
3688 ++ }
3689 ++ ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len);
3690 + if (ctrl == NULL)
3691 + return -ENOMEM;
3692 + return gsm_control_wait(dlci->gsm, ctrl);
3693 +@@ -3092,13 +3097,17 @@ static unsigned int gsmtty_chars_in_buffer(struct tty_struct *tty)
3694 + static void gsmtty_flush_buffer(struct tty_struct *tty)
3695 + {
3696 + struct gsm_dlci *dlci = tty->driver_data;
3697 ++ unsigned long flags;
3698 ++
3699 + if (dlci->state == DLCI_CLOSED)
3700 + return;
3701 + /* Caution needed: If we implement reliable transport classes
3702 + then the data being transmitted can't simply be junked once
3703 + it has first hit the stack. Until then we can just blow it
3704 + away */
3705 ++ spin_lock_irqsave(&dlci->lock, flags);
3706 + kfifo_reset(&dlci->fifo);
3707 ++ spin_unlock_irqrestore(&dlci->lock, flags);
3708 + /* Need to unhook this DLCI from the transmit queue logic */
3709 + }
3710 +
3711 +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
3712 +index 114a49da564a9..e7b9805903f4c 100644
3713 +--- a/drivers/tty/serial/8250/8250_pci.c
3714 ++++ b/drivers/tty/serial/8250/8250_pci.c
3715 +@@ -2940,7 +2940,7 @@ enum pci_board_num_t {
3716 + pbn_panacom2,
3717 + pbn_panacom4,
3718 + pbn_plx_romulus,
3719 +- pbn_endrun_2_4000000,
3720 ++ pbn_endrun_2_3906250,
3721 + pbn_oxsemi,
3722 + pbn_oxsemi_1_3906250,
3723 + pbn_oxsemi_2_3906250,
3724 +@@ -3472,10 +3472,10 @@ static struct pciserial_board pci_boards[] = {
3725 + * signal now many ports are available
3726 + * 2 port 952 Uart support
3727 + */
3728 +- [pbn_endrun_2_4000000] = {
3729 ++ [pbn_endrun_2_3906250] = {
3730 + .flags = FL_BASE0,
3731 + .num_ports = 2,
3732 +- .base_baud = 4000000,
3733 ++ .base_baud = 3906250,
3734 + .uart_offset = 0x200,
3735 + .first_offset = 0x1000,
3736 + },
3737 +@@ -4418,7 +4418,7 @@ static const struct pci_device_id serial_pci_tbl[] = {
3738 + */
3739 + { PCI_VENDOR_ID_ENDRUN, PCI_DEVICE_ID_ENDRUN_1588,
3740 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
3741 +- pbn_endrun_2_4000000 },
3742 ++ pbn_endrun_2_3906250 },
3743 + /*
3744 + * Quatech cards. These actually have configurable clocks but for
3745 + * now we just use the default.
3746 +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
3747 +index 723ec08067990..2285ef9477556 100644
3748 +--- a/drivers/tty/serial/8250/8250_port.c
3749 ++++ b/drivers/tty/serial/8250/8250_port.c
3750 +@@ -3340,7 +3340,7 @@ static void serial8250_console_restore(struct uart_8250_port *up)
3751 +
3752 + serial8250_set_divisor(port, baud, quot, frac);
3753 + serial_port_out(port, UART_LCR, up->lcr);
3754 +- serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS);
3755 ++ serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS);
3756 + }
3757 +
3758 + /*
3759 +diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
3760 +index da54f827c5efc..3d40306971b81 100644
3761 +--- a/drivers/tty/serial/amba-pl011.c
3762 ++++ b/drivers/tty/serial/amba-pl011.c
3763 +@@ -1288,13 +1288,18 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
3764 +
3765 + static void pl011_rs485_tx_stop(struct uart_amba_port *uap)
3766 + {
3767 ++ /*
3768 ++ * To be on the safe side only time out after twice as many iterations
3769 ++ * as fifo size.
3770 ++ */
3771 ++ const int MAX_TX_DRAIN_ITERS = uap->port.fifosize * 2;
3772 + struct uart_port *port = &uap->port;
3773 + int i = 0;
3774 + u32 cr;
3775 +
3776 + /* Wait until hardware tx queue is empty */
3777 + while (!pl011_tx_empty(port)) {
3778 +- if (i == port->fifosize) {
3779 ++ if (i > MAX_TX_DRAIN_ITERS) {
3780 + dev_warn(port->dev,
3781 + "timeout while draining hardware tx queue\n");
3782 + break;
3783 +@@ -2099,7 +2104,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
3784 + * with the given baud rate. We use this as the poll interval when we
3785 + * wait for the tx queue to empty.
3786 + */
3787 +- uap->rs485_tx_drain_interval = (bits * 1000 * 1000) / baud;
3788 ++ uap->rs485_tx_drain_interval = DIV_ROUND_UP(bits * 1000 * 1000, baud);
3789 +
3790 + pl011_setup_status_masks(port, termios);
3791 +
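The drain-interval change above swaps truncating division for a round-up, so the computed per-character time in microseconds is never underestimated and in particular can no longer truncate to zero at very high baud rates. A small arithmetic illustration using the conventional round-up idiom (standalone C, with the macro defined locally rather than taken from the kernel headers):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* roughly 10 bits on the wire per character: start + 8 data + stop */
        const unsigned int bits = 10;
        const unsigned int bauds[] = { 115200, 3000000, 12000000 };

        for (unsigned int i = 0; i < sizeof(bauds) / sizeof(bauds[0]); i++) {
                unsigned int baud = bauds[i];
                unsigned int truncated = (bits * 1000 * 1000) / baud;
                unsigned int rounded   = DIV_ROUND_UP(bits * 1000 * 1000, baud);

                printf("baud %8u: truncated %2u us, rounded up %2u us\n",
                       baud, truncated, rounded);
        }
        return 0;
}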
3792 +diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
3793 +index 7820049aba5af..b7ef075a4005b 100644
3794 +--- a/drivers/tty/serial/imx.c
3795 ++++ b/drivers/tty/serial/imx.c
3796 +@@ -1438,7 +1438,7 @@ static int imx_uart_startup(struct uart_port *port)
3797 + imx_uart_writel(sport, ucr1, UCR1);
3798 +
3799 + ucr4 = imx_uart_readl(sport, UCR4) & ~(UCR4_OREN | UCR4_INVR);
3800 +- if (!sport->dma_is_enabled)
3801 ++ if (!dma_is_inited)
3802 + ucr4 |= UCR4_OREN;
3803 + if (sport->inverted_rx)
3804 + ucr4 |= UCR4_INVR;
3805 +diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
3806 +index f9af7ebe003d7..d6d515d598dc0 100644
3807 +--- a/drivers/usb/cdns3/cdns3-gadget.c
3808 ++++ b/drivers/usb/cdns3/cdns3-gadget.c
3809 +@@ -2684,6 +2684,7 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
3810 + struct usb_request *request;
3811 + struct cdns3_request *priv_req;
3812 + struct cdns3_trb *trb = NULL;
3813 ++ struct cdns3_trb trb_tmp;
3814 + int ret;
3815 + int val;
3816 +
3817 +@@ -2693,8 +2694,10 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
3818 + if (request) {
3819 + priv_req = to_cdns3_request(request);
3820 + trb = priv_req->trb;
3821 +- if (trb)
3822 ++ if (trb) {
3823 ++ trb_tmp = *trb;
3824 + trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
3825 ++ }
3826 + }
3827 +
3828 + writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
3829 +@@ -2709,7 +2712,7 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
3830 +
3831 + if (request) {
3832 + if (trb)
3833 +- trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
3834 ++ *trb = trb_tmp;
3835 +
3836 + cdns3_rearm_transfer(priv_ep, 1);
3837 + }
3838 +diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
3839 +index fa66e6e587928..656ba91c32831 100644
3840 +--- a/drivers/usb/core/devio.c
3841 ++++ b/drivers/usb/core/devio.c
3842 +@@ -1197,12 +1197,16 @@ static int do_proc_control(struct usb_dev_state *ps,
3843 +
3844 + usb_unlock_device(dev);
3845 + i = usbfs_start_wait_urb(urb, tmo, &actlen);
3846 ++
3847 ++ /* Linger a bit, prior to the next control message. */
3848 ++ if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
3849 ++ msleep(200);
3850 + usb_lock_device(dev);
3851 + snoop_urb(dev, NULL, pipe, actlen, i, COMPLETE, tbuf, actlen);
3852 + if (!i && actlen) {
3853 + if (copy_to_user(ctrl->data, tbuf, actlen)) {
3854 + ret = -EFAULT;
3855 +- goto recv_fault;
3856 ++ goto done;
3857 + }
3858 + }
3859 + } else {
3860 +@@ -1219,6 +1223,10 @@ static int do_proc_control(struct usb_dev_state *ps,
3861 +
3862 + usb_unlock_device(dev);
3863 + i = usbfs_start_wait_urb(urb, tmo, &actlen);
3864 ++
3865 ++ /* Linger a bit, prior to the next control message. */
3866 ++ if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
3867 ++ msleep(200);
3868 + usb_lock_device(dev);
3869 + snoop_urb(dev, NULL, pipe, actlen, i, COMPLETE, NULL, 0);
3870 + }
3871 +@@ -1230,10 +1238,6 @@ static int do_proc_control(struct usb_dev_state *ps,
3872 + }
3873 + ret = (i < 0 ? i : actlen);
3874 +
3875 +- recv_fault:
3876 +- /* Linger a bit, prior to the next control message. */
3877 +- if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
3878 +- msleep(200);
3879 + done:
3880 + kfree(dr);
3881 + usb_free_urb(urb);
3882 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
3883 +index d3c14b5ed4a1f..97b44a68668a5 100644
3884 +--- a/drivers/usb/core/quirks.c
3885 ++++ b/drivers/usb/core/quirks.c
3886 +@@ -404,6 +404,9 @@ static const struct usb_device_id usb_quirk_list[] = {
3887 + { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
3888 + USB_QUIRK_IGNORE_REMOTE_WAKEUP },
3889 +
3890 ++ /* Realtek Semiconductor Corp. Mass Storage Device (Multicard Reader)*/
3891 ++ { USB_DEVICE(0x0bda, 0x0151), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS },
3892 ++
3893 + /* Realtek hub in Dell WD19 (Type-C) */
3894 + { USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM },
3895 +
3896 +@@ -507,6 +510,9 @@ static const struct usb_device_id usb_quirk_list[] = {
3897 + /* DJI CineSSD */
3898 + { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
3899 +
3900 ++ /* VCOM device */
3901 ++ { USB_DEVICE(0x4296, 0x7570), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS },
3902 ++
3903 + /* INTEL VALUE SSD */
3904 + { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
3905 +
3906 +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
3907 +index 357b7805896e7..5cb1350ec66d1 100644
3908 +--- a/drivers/usb/dwc3/core.c
3909 ++++ b/drivers/usb/dwc3/core.c
3910 +@@ -275,7 +275,8 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
3911 +
3912 + reg = dwc3_readl(dwc->regs, DWC3_DCTL);
3913 + reg |= DWC3_DCTL_CSFTRST;
3914 +- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
3915 ++ reg &= ~DWC3_DCTL_RUN_STOP;
3916 ++ dwc3_gadget_dctl_write_safe(dwc, reg);
3917 +
3918 + /*
3919 + * For DWC_usb31 controller 1.90a and later, the DCTL.CSFRST bit
3920 +@@ -1268,10 +1269,10 @@ static void dwc3_get_properties(struct dwc3 *dwc)
3921 + u8 lpm_nyet_threshold;
3922 + u8 tx_de_emphasis;
3923 + u8 hird_threshold;
3924 +- u8 rx_thr_num_pkt_prd;
3925 +- u8 rx_max_burst_prd;
3926 +- u8 tx_thr_num_pkt_prd;
3927 +- u8 tx_max_burst_prd;
3928 ++ u8 rx_thr_num_pkt_prd = 0;
3929 ++ u8 rx_max_burst_prd = 0;
3930 ++ u8 tx_thr_num_pkt_prd = 0;
3931 ++ u8 tx_max_burst_prd = 0;
3932 + u8 tx_fifo_resize_max_num;
3933 + const char *usb_psy_name;
3934 + int ret;
3935 +diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
3936 +index d7f76835137fa..f148b0370f829 100644
3937 +--- a/drivers/usb/dwc3/drd.c
3938 ++++ b/drivers/usb/dwc3/drd.c
3939 +@@ -571,16 +571,15 @@ int dwc3_drd_init(struct dwc3 *dwc)
3940 + {
3941 + int ret, irq;
3942 +
3943 ++ if (ROLE_SWITCH &&
3944 ++ device_property_read_bool(dwc->dev, "usb-role-switch"))
3945 ++ return dwc3_setup_role_switch(dwc);
3946 ++
3947 + dwc->edev = dwc3_get_extcon(dwc);
3948 + if (IS_ERR(dwc->edev))
3949 + return PTR_ERR(dwc->edev);
3950 +
3951 +- if (ROLE_SWITCH &&
3952 +- device_property_read_bool(dwc->dev, "usb-role-switch")) {
3953 +- ret = dwc3_setup_role_switch(dwc);
3954 +- if (ret < 0)
3955 +- return ret;
3956 +- } else if (dwc->edev) {
3957 ++ if (dwc->edev) {
3958 + dwc->edev_nb.notifier_call = dwc3_drd_notifier;
3959 + ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST,
3960 + &dwc->edev_nb);
3961 +diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
3962 +index 4d9608cc55f73..f08b2178fd32d 100644
3963 +--- a/drivers/usb/dwc3/dwc3-pci.c
3964 ++++ b/drivers/usb/dwc3/dwc3-pci.c
3965 +@@ -44,6 +44,8 @@
3966 + #define PCI_DEVICE_ID_INTEL_ADLM 0x54ee
3967 + #define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
3968 + #define PCI_DEVICE_ID_INTEL_RPLS 0x7a61
3969 ++#define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1
3970 ++#define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
3971 + #define PCI_DEVICE_ID_INTEL_TGL 0x9a15
3972 + #define PCI_DEVICE_ID_AMD_MR 0x163a
3973 +
3974 +@@ -421,6 +423,12 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
3975 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS),
3976 + (kernel_ulong_t) &dwc3_pci_intel_swnode, },
3977 +
3978 ++ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLP),
3979 ++ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
3980 ++
3981 ++ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL),
3982 ++ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
3983 ++
3984 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL),
3985 + (kernel_ulong_t) &dwc3_pci_intel_swnode, },
3986 +
3987 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
3988 +index 00cf8ebcb338c..c32f3116d1a0f 100644
3989 +--- a/drivers/usb/dwc3/gadget.c
3990 ++++ b/drivers/usb/dwc3/gadget.c
3991 +@@ -3199,6 +3199,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
3992 + const struct dwc3_event_depevt *event,
3993 + struct dwc3_request *req, int status)
3994 + {
3995 ++ int request_status;
3996 + int ret;
3997 +
3998 + if (req->request.num_mapped_sgs)
3999 +@@ -3219,7 +3220,35 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
4000 + req->needs_extra_trb = false;
4001 + }
4002 +
4003 +- dwc3_gadget_giveback(dep, req, status);
4004 ++ /*
4005 ++ * The event status only reflects the status of the TRB with IOC set.
4006 ++ * For the requests that don't set interrupt on completion, the driver
4007 ++ * needs to check and return the status of the completed TRBs associated
4008 ++ * with the request. Use the status of the last TRB of the request.
4009 ++ */
4010 ++ if (req->request.no_interrupt) {
4011 ++ struct dwc3_trb *trb;
4012 ++
4013 ++ trb = dwc3_ep_prev_trb(dep, dep->trb_dequeue);
4014 ++ switch (DWC3_TRB_SIZE_TRBSTS(trb->size)) {
4015 ++ case DWC3_TRBSTS_MISSED_ISOC:
4016 ++ /* Isoc endpoint only */
4017 ++ request_status = -EXDEV;
4018 ++ break;
4019 ++ case DWC3_TRB_STS_XFER_IN_PROG:
4020 ++ /* Applicable when End Transfer with ForceRM=0 */
4021 ++ case DWC3_TRBSTS_SETUP_PENDING:
4022 ++ /* Control endpoint only */
4023 ++ case DWC3_TRBSTS_OK:
4024 ++ default:
4025 ++ request_status = 0;
4026 ++ break;
4027 ++ }
4028 ++ } else {
4029 ++ request_status = status;
4030 ++ }
4031 ++
4032 ++ dwc3_gadget_giveback(dep, req, request_status);
4033 +
4034 + out:
4035 + return ret;
4036 +diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
4037 +index 477e72a1d11e7..5ade844db4046 100644
4038 +--- a/drivers/usb/gadget/configfs.c
4039 ++++ b/drivers/usb/gadget/configfs.c
4040 +@@ -1447,6 +1447,8 @@ static void configfs_composite_unbind(struct usb_gadget *gadget)
4041 + usb_ep_autoconfig_reset(cdev->gadget);
4042 + spin_lock_irqsave(&gi->spinlock, flags);
4043 + cdev->gadget = NULL;
4044 ++ cdev->deactivations = 0;
4045 ++ gadget->deactivated = false;
4046 + set_gadget_data(gadget, NULL);
4047 + spin_unlock_irqrestore(&gi->spinlock, flags);
4048 + }
4049 +diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
4050 +index 7d00ad7c154c2..99dc9adf56efa 100644
4051 +--- a/drivers/usb/gadget/function/uvc_queue.c
4052 ++++ b/drivers/usb/gadget/function/uvc_queue.c
4053 +@@ -264,6 +264,8 @@ void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect)
4054 + buf->state = UVC_BUF_STATE_ERROR;
4055 + vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
4056 + }
4057 ++ queue->buf_used = 0;
4058 ++
4059 + /* This must be protected by the irqlock spinlock to avoid race
4060 + * conditions between uvc_queue_buffer and the disconnection event that
4061 + * could result in an interruptible wait in uvc_dequeue_buffer. Do not
4062 +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
4063 +index 1e7dc130c39a6..f65f1ba2b5929 100644
4064 +--- a/drivers/usb/host/xhci-hub.c
4065 ++++ b/drivers/usb/host/xhci-hub.c
4066 +@@ -1434,7 +1434,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
4067 + }
4068 + spin_unlock_irqrestore(&xhci->lock, flags);
4069 + if (!wait_for_completion_timeout(&bus_state->u3exit_done[wIndex],
4070 +- msecs_to_jiffies(100)))
4071 ++ msecs_to_jiffies(500)))
4072 + xhci_dbg(xhci, "missing U0 port change event for port %d-%d\n",
4073 + hcd->self.busnum, wIndex + 1);
4074 + spin_lock_irqsave(&xhci->lock, flags);
4075 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
4076 +index de9a9ea2cabc2..cb8b481a94990 100644
4077 +--- a/drivers/usb/host/xhci-pci.c
4078 ++++ b/drivers/usb/host/xhci-pci.c
4079 +@@ -59,6 +59,7 @@
4080 + #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13
4081 + #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138
4082 + #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e
4083 ++#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed
4084 +
4085 + #define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639
4086 + #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
4087 +@@ -266,7 +267,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
4088 + pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI ||
4089 + pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
4090 + pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
4091 +- pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI))
4092 ++ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI ||
4093 ++ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI))
4094 + xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
4095 +
4096 + if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
4097 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
4098 +index d0b6806275e01..f9707997969d4 100644
4099 +--- a/drivers/usb/host/xhci-ring.c
4100 ++++ b/drivers/usb/host/xhci-ring.c
4101 +@@ -3141,6 +3141,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
4102 + if (event_loop++ < TRBS_PER_SEGMENT / 2)
4103 + continue;
4104 + xhci_update_erst_dequeue(xhci, event_ring_deq);
4105 ++ event_ring_deq = xhci->event_ring->dequeue;
4106 +
4107 + /* ring is half-full, force isoc trbs to interrupt more often */
4108 + if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
4109 +diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
4110 +index c8af2cd2216d6..996958a6565c3 100644
4111 +--- a/drivers/usb/host/xhci-tegra.c
4112 ++++ b/drivers/usb/host/xhci-tegra.c
4113 +@@ -1034,13 +1034,13 @@ static int tegra_xusb_unpowergate_partitions(struct tegra_xusb *tegra)
4114 + int rc;
4115 +
4116 + if (tegra->use_genpd) {
4117 +- rc = pm_runtime_get_sync(tegra->genpd_dev_ss);
4118 ++ rc = pm_runtime_resume_and_get(tegra->genpd_dev_ss);
4119 + if (rc < 0) {
4120 + dev_err(dev, "failed to enable XUSB SS partition\n");
4121 + return rc;
4122 + }
4123 +
4124 +- rc = pm_runtime_get_sync(tegra->genpd_dev_host);
4125 ++ rc = pm_runtime_resume_and_get(tegra->genpd_dev_host);
4126 + if (rc < 0) {
4127 + dev_err(dev, "failed to enable XUSB Host partition\n");
4128 + pm_runtime_put_sync(tegra->genpd_dev_ss);
4129 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
4130 +index 2c1cc94808875..90f5a3ce7c348 100644
4131 +--- a/drivers/usb/host/xhci.c
4132 ++++ b/drivers/usb/host/xhci.c
4133 +@@ -778,6 +778,17 @@ void xhci_shutdown(struct usb_hcd *hcd)
4134 + if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
4135 + usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
4136 +
4137 ++ /* Don't poll the roothubs after shutdown. */
4138 ++ xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
4139 ++ __func__, hcd->self.busnum);
4140 ++ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
4141 ++ del_timer_sync(&hcd->rh_timer);
4142 ++
4143 ++ if (xhci->shared_hcd) {
4144 ++ clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
4145 ++ del_timer_sync(&xhci->shared_hcd->rh_timer);
4146 ++ }
4147 ++
4148 + spin_lock_irq(&xhci->lock);
4149 + xhci_halt(xhci);
4150 + /* Workaround for spurious wakeups at shutdown with HSW */
4151 +diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
4152 +index 748139d262633..0be8efcda15d5 100644
4153 +--- a/drivers/usb/misc/uss720.c
4154 ++++ b/drivers/usb/misc/uss720.c
4155 +@@ -71,6 +71,7 @@ static void destroy_priv(struct kref *kref)
4156 +
4157 + dev_dbg(&priv->usbdev->dev, "destroying priv datastructure\n");
4158 + usb_put_dev(priv->usbdev);
4159 ++ priv->usbdev = NULL;
4160 + kfree(priv);
4161 + }
4162 +
4163 +@@ -736,7 +737,6 @@ static int uss720_probe(struct usb_interface *intf,
4164 + parport_announce_port(pp);
4165 +
4166 + usb_set_intfdata(intf, pp);
4167 +- usb_put_dev(usbdev);
4168 + return 0;
4169 +
4170 + probe_abort:
4171 +@@ -754,7 +754,6 @@ static void uss720_disconnect(struct usb_interface *intf)
4172 + usb_set_intfdata(intf, NULL);
4173 + if (pp) {
4174 + priv = pp->private_data;
4175 +- priv->usbdev = NULL;
4176 + priv->pp = NULL;
4177 + dev_dbg(&intf->dev, "parport_remove_port\n");
4178 + parport_remove_port(pp);
4179 +diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c
4180 +index a6b04831b20bf..9b8aded3d95e9 100644
4181 +--- a/drivers/usb/mtu3/mtu3_dr.c
4182 ++++ b/drivers/usb/mtu3/mtu3_dr.c
4183 +@@ -21,10 +21,8 @@ static inline struct ssusb_mtk *otg_sx_to_ssusb(struct otg_switch_mtk *otg_sx)
4184 +
4185 + static void toggle_opstate(struct ssusb_mtk *ssusb)
4186 + {
4187 +- if (!ssusb->otg_switch.is_u3_drd) {
4188 +- mtu3_setbits(ssusb->mac_base, U3D_DEVICE_CONTROL, DC_SESSION);
4189 +- mtu3_setbits(ssusb->mac_base, U3D_POWER_MANAGEMENT, SOFT_CONN);
4190 +- }
4191 ++ mtu3_setbits(ssusb->mac_base, U3D_DEVICE_CONTROL, DC_SESSION);
4192 ++ mtu3_setbits(ssusb->mac_base, U3D_POWER_MANAGEMENT, SOFT_CONN);
4193 + }
4194 +
4195 + /* only port0 supports dual-role mode */
4196 +diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
4197 +index 661a229c105dd..34b9f81401871 100644
4198 +--- a/drivers/usb/phy/phy-generic.c
4199 ++++ b/drivers/usb/phy/phy-generic.c
4200 +@@ -268,6 +268,13 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
4201 + return -EPROBE_DEFER;
4202 + }
4203 +
4204 ++ nop->vbus_draw = devm_regulator_get_exclusive(dev, "vbus");
4205 ++ if (PTR_ERR(nop->vbus_draw) == -ENODEV)
4206 ++ nop->vbus_draw = NULL;
4207 ++ if (IS_ERR(nop->vbus_draw))
4208 ++ return dev_err_probe(dev, PTR_ERR(nop->vbus_draw),
4209 ++ "could not get vbus regulator\n");
4210 ++
4211 + nop->dev = dev;
4212 + nop->phy.dev = nop->dev;
4213 + nop->phy.label = "nop-xceiv";
4214 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
4215 +index 08554e1548420..bd006e1712ccb 100644
4216 +--- a/drivers/usb/serial/cp210x.c
4217 ++++ b/drivers/usb/serial/cp210x.c
4218 +@@ -194,6 +194,8 @@ static const struct usb_device_id id_table[] = {
4219 + { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
4220 + { USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */
4221 + { USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */
4222 ++ { USB_DEVICE(0x17A8, 0x0101) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Int Ant) */
4223 ++ { USB_DEVICE(0x17A8, 0x0102) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Ext Ant) */
4224 + { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
4225 + { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
4226 + { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
4227 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
4228 +index e7755d9cfc61a..1364ce7f0abf0 100644
4229 +--- a/drivers/usb/serial/option.c
4230 ++++ b/drivers/usb/serial/option.c
4231 +@@ -432,6 +432,8 @@ static void option_instat_callback(struct urb *urb);
4232 + #define CINTERION_PRODUCT_CLS8 0x00b0
4233 + #define CINTERION_PRODUCT_MV31_MBIM 0x00b3
4234 + #define CINTERION_PRODUCT_MV31_RMNET 0x00b7
4235 ++#define CINTERION_PRODUCT_MV32_WA 0x00f1
4236 ++#define CINTERION_PRODUCT_MV32_WB 0x00f2
4237 +
4238 + /* Olivetti products */
4239 + #define OLIVETTI_VENDOR_ID 0x0b3c
4240 +@@ -1217,6 +1219,10 @@ static const struct usb_device_id option_ids[] = {
4241 + .driver_info = NCTRL(0) | RSVD(1) },
4242 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff), /* Telit FD980 */
4243 + .driver_info = NCTRL(2) | RSVD(3) },
4244 ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1057, 0xff), /* Telit FN980 */
4245 ++ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
4246 ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1058, 0xff), /* Telit FN980 (PCIe) */
4247 ++ .driver_info = NCTRL(0) | RSVD(1) },
4248 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1060, 0xff), /* Telit LN920 (rmnet) */
4249 + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
4250 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1061, 0xff), /* Telit LN920 (MBIM) */
4251 +@@ -1233,6 +1239,8 @@ static const struct usb_device_id option_ids[] = {
4252 + .driver_info = NCTRL(2) | RSVD(3) },
4253 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990 (ECM) */
4254 + .driver_info = NCTRL(0) | RSVD(1) },
4255 ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990 (PCIe) */
4256 ++ .driver_info = RSVD(0) },
4257 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
4258 + .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
4259 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
4260 +@@ -1969,6 +1977,10 @@ static const struct usb_device_id option_ids[] = {
4261 + .driver_info = RSVD(3)},
4262 + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
4263 + .driver_info = RSVD(0)},
4264 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff),
4265 ++ .driver_info = RSVD(3)},
4266 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff),
4267 ++ .driver_info = RSVD(3)},
4268 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
4269 + .driver_info = RSVD(4) },
4270 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
4271 +diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
4272 +index da65d14c9ed5e..06aad0d727ddc 100644
4273 +--- a/drivers/usb/serial/whiteheat.c
4274 ++++ b/drivers/usb/serial/whiteheat.c
4275 +@@ -584,9 +584,8 @@ static int firm_send_command(struct usb_serial_port *port, __u8 command,
4276 + switch (command) {
4277 + case WHITEHEAT_GET_DTR_RTS:
4278 + info = usb_get_serial_port_data(port);
4279 +- memcpy(&info->mcr, command_info->result_buffer,
4280 +- sizeof(struct whiteheat_dr_info));
4281 +- break;
4282 ++ info->mcr = command_info->result_buffer[0];
4283 ++ break;
4284 + }
4285 + }
4286 + exit:
4287 +diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
4288 +index 5ef5bd0e87cf2..8a7e2dd52ad5a 100644
4289 +--- a/drivers/usb/typec/ucsi/ucsi.c
4290 ++++ b/drivers/usb/typec/ucsi/ucsi.c
4291 +@@ -955,6 +955,8 @@ static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role)
4292 + role == TYPEC_HOST))
4293 + goto out_unlock;
4294 +
4295 ++ reinit_completion(&con->complete);
4296 ++
4297 + command = UCSI_SET_UOR | UCSI_CONNECTOR_NUMBER(con->num);
4298 + command |= UCSI_SET_UOR_ROLE(role);
4299 + command |= UCSI_SET_UOR_ACCEPT_ROLE_SWAPS;
4300 +@@ -962,14 +964,18 @@ static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role)
4301 + if (ret < 0)
4302 + goto out_unlock;
4303 +
4304 ++ mutex_unlock(&con->lock);
4305 ++
4306 + if (!wait_for_completion_timeout(&con->complete,
4307 +- msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS)))
4308 +- ret = -ETIMEDOUT;
4309 ++ msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS)))
4310 ++ return -ETIMEDOUT;
4311 ++
4312 ++ return 0;
4313 +
4314 + out_unlock:
4315 + mutex_unlock(&con->lock);
4316 +
4317 +- return ret < 0 ? ret : 0;
4318 ++ return ret;
4319 + }
4320 +
4321 + static int ucsi_pr_swap(struct typec_port *port, enum typec_role role)
4322 +@@ -991,6 +997,8 @@ static int ucsi_pr_swap(struct typec_port *port, enum typec_role role)
4323 + if (cur_role == role)
4324 + goto out_unlock;
4325 +
4326 ++ reinit_completion(&con->complete);
4327 ++
4328 + command = UCSI_SET_PDR | UCSI_CONNECTOR_NUMBER(con->num);
4329 + command |= UCSI_SET_PDR_ROLE(role);
4330 + command |= UCSI_SET_PDR_ACCEPT_ROLE_SWAPS;
4331 +@@ -998,11 +1006,13 @@ static int ucsi_pr_swap(struct typec_port *port, enum typec_role role)
4332 + if (ret < 0)
4333 + goto out_unlock;
4334 +
4335 ++ mutex_unlock(&con->lock);
4336 ++
4337 + if (!wait_for_completion_timeout(&con->complete,
4338 +- msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS))) {
4339 +- ret = -ETIMEDOUT;
4340 +- goto out_unlock;
4341 +- }
4342 ++ msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS)))
4343 ++ return -ETIMEDOUT;
4344 ++
4345 ++ mutex_lock(&con->lock);
4346 +
4347 + /* Something has gone wrong while swapping the role */
4348 + if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) !=
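Both role-swap paths in this hunk now drop con->lock before sleeping in wait_for_completion_timeout, because the handler that eventually signals the completion may need that same lock; waiting with it held could stall every swap until the timeout fires. A condensed pthread sketch of the unlock-before-wait idea (illustrative only, nothing here is the UCSI code):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t con_lock = PTHREAD_MUTEX_INITIALIZER; /* plays the role of con->lock */
static sem_t done;                                           /* plays the role of the completion */

/* The "event handler": it must take con_lock before it can signal completion. */
static void *event_handler(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&con_lock);
        /* ... update connector state ... */
        sem_post(&done);
        pthread_mutex_unlock(&con_lock);
        return NULL;
}

int main(void)
{
        pthread_t t;
        struct timespec deadline;

        sem_init(&done, 0, 0);
        pthread_mutex_lock(&con_lock);          /* start the "swap" under the lock */
        pthread_create(&t, NULL, event_handler, NULL);

        /* Drop the lock BEFORE sleeping so the handler can run and signal us.
         * Waiting while still holding con_lock would block the handler, and we
         * would only wake up once the timeout expired. */
        pthread_mutex_unlock(&con_lock);

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 1;                   /* 1 s timeout for the example */
        if (sem_timedwait(&done, &deadline) == 0)
                puts("swap completed");
        else
                puts("timed out");

        pthread_join(t, NULL);
        return 0;
}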
4349 +diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
4350 +index 90f48b71fd8f7..d9eec1b60e665 100644
4351 +--- a/drivers/video/fbdev/udlfb.c
4352 ++++ b/drivers/video/fbdev/udlfb.c
4353 +@@ -1649,8 +1649,9 @@ static int dlfb_usb_probe(struct usb_interface *intf,
4354 + const struct device_attribute *attr;
4355 + struct dlfb_data *dlfb;
4356 + struct fb_info *info;
4357 +- int retval = -ENOMEM;
4358 ++ int retval;
4359 + struct usb_device *usbdev = interface_to_usbdev(intf);
4360 ++ struct usb_endpoint_descriptor *out;
4361 +
4362 + /* usb initialization */
4363 + dlfb = kzalloc(sizeof(*dlfb), GFP_KERNEL);
4364 +@@ -1664,6 +1665,12 @@ static int dlfb_usb_probe(struct usb_interface *intf,
4365 + dlfb->udev = usb_get_dev(usbdev);
4366 + usb_set_intfdata(intf, dlfb);
4367 +
4368 ++ retval = usb_find_common_endpoints(intf->cur_altsetting, NULL, &out, NULL, NULL);
4369 ++ if (retval) {
4370 ++ dev_err(&intf->dev, "Device should have at lease 1 bulk endpoint!\n");
4371 ++ goto error;
4372 ++ }
4373 ++
4374 + dev_dbg(&intf->dev, "console enable=%d\n", console);
4375 + dev_dbg(&intf->dev, "fb_defio enable=%d\n", fb_defio);
4376 + dev_dbg(&intf->dev, "shadow enable=%d\n", shadow);
4377 +@@ -1673,6 +1680,7 @@ static int dlfb_usb_probe(struct usb_interface *intf,
4378 + if (!dlfb_parse_vendor_descriptor(dlfb, intf)) {
4379 + dev_err(&intf->dev,
4380 + "firmware not recognized, incompatible device?\n");
4381 ++ retval = -ENODEV;
4382 + goto error;
4383 + }
4384 +
4385 +@@ -1686,8 +1694,10 @@ static int dlfb_usb_probe(struct usb_interface *intf,
4386 +
4387 + /* allocates framebuffer driver structure, not framebuffer memory */
4388 + info = framebuffer_alloc(0, &dlfb->udev->dev);
4389 +- if (!info)
4390 ++ if (!info) {
4391 ++ retval = -ENOMEM;
4392 + goto error;
4393 ++ }
4394 +
4395 + dlfb->info = info;
4396 + info->par = dlfb;
4397 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
4398 +index e90d80a8a9e34..290cfe11e7901 100644
4399 +--- a/fs/btrfs/tree-log.c
4400 ++++ b/fs/btrfs/tree-log.c
4401 +@@ -3216,6 +3216,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
4402 + ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
4403 + if (ret) {
4404 + mutex_unlock(&fs_info->tree_root->log_mutex);
4405 ++ blk_finish_plug(&plug);
4406 + goto out;
4407 + }
4408 + }
4409 +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
4410 +index 09900a9015ea6..d1faa9d2f1e8a 100644
4411 +--- a/fs/ceph/caps.c
4412 ++++ b/fs/ceph/caps.c
4413 +@@ -2266,6 +2266,8 @@ retry:
4414 + list_for_each_entry(req, &ci->i_unsafe_dirops,
4415 + r_unsafe_dir_item) {
4416 + s = req->r_session;
4417 ++ if (!s)
4418 ++ continue;
4419 + if (unlikely(s->s_mds >= max_sessions)) {
4420 + spin_unlock(&ci->i_unsafe_lock);
4421 + for (i = 0; i < max_sessions; i++) {
4422 +@@ -2286,6 +2288,8 @@ retry:
4423 + list_for_each_entry(req, &ci->i_unsafe_iops,
4424 + r_unsafe_target_item) {
4425 + s = req->r_session;
4426 ++ if (!s)
4427 ++ continue;
4428 + if (unlikely(s->s_mds >= max_sessions)) {
4429 + spin_unlock(&ci->i_unsafe_lock);
4430 + for (i = 0; i < max_sessions; i++) {
4431 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
4432 +index db3ead52ec7c1..0c1af2dd9069e 100644
4433 +--- a/fs/cifs/smb2ops.c
4434 ++++ b/fs/cifs/smb2ops.c
4435 +@@ -1849,9 +1849,17 @@ smb2_copychunk_range(const unsigned int xid,
4436 + int chunks_copied = 0;
4437 + bool chunk_sizes_updated = false;
4438 + ssize_t bytes_written, total_bytes_written = 0;
4439 ++ struct inode *inode;
4440 +
4441 + pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
4442 +
4443 ++ /*
4444 ++ * We need to flush all unwritten data before we can send the
4445 ++ * copychunk ioctl to the server.
4446 ++ */
4447 ++ inode = d_inode(trgtfile->dentry);
4448 ++ filemap_write_and_wait(inode->i_mapping);
4449 ++
4450 + if (pcchunk == NULL)
4451 + return -ENOMEM;
4452 +
4453 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
4454 +index fa21d81803190..d12f11c6fbf25 100644
4455 +--- a/fs/ext4/super.c
4456 ++++ b/fs/ext4/super.c
4457 +@@ -1167,20 +1167,25 @@ static void ext4_put_super(struct super_block *sb)
4458 + int aborted = 0;
4459 + int i, err;
4460 +
4461 +- ext4_unregister_li_request(sb);
4462 +- ext4_quota_off_umount(sb);
4463 +-
4464 +- flush_work(&sbi->s_error_work);
4465 +- destroy_workqueue(sbi->rsv_conversion_wq);
4466 +- ext4_release_orphan_info(sb);
4467 +-
4468 + /*
4469 + * Unregister sysfs before destroying jbd2 journal.
4470 + * Since we could still access attr_journal_task attribute via sysfs
4471 + * path which could have sbi->s_journal->j_task as NULL
4472 ++ * Unregister sysfs before flush sbi->s_error_work.
4473 ++ * Since user may read /proc/fs/ext4/xx/mb_groups during umount, If
4474 ++ * read metadata verify failed then will queue error work.
4475 ++ * flush_stashed_error_work will call start_this_handle may trigger
4476 ++ * BUG_ON.
4477 + */
4478 + ext4_unregister_sysfs(sb);
4479 +
4480 ++ ext4_unregister_li_request(sb);
4481 ++ ext4_quota_off_umount(sb);
4482 ++
4483 ++ flush_work(&sbi->s_error_work);
4484 ++ destroy_workqueue(sbi->rsv_conversion_wq);
4485 ++ ext4_release_orphan_info(sb);
4486 ++
4487 + if (sbi->s_journal) {
4488 + aborted = is_journal_aborted(sbi->s_journal);
4489 + err = jbd2_journal_destroy(sbi->s_journal);
4490 +diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
4491 +index 247b8d95b5ef4..eb5ea0262f3c4 100644
4492 +--- a/fs/gfs2/file.c
4493 ++++ b/fs/gfs2/file.c
4494 +@@ -858,9 +858,9 @@ retry_under_glock:
4495 + leftover = fault_in_iov_iter_writeable(to, window_size);
4496 + gfs2_holder_disallow_demote(gh);
4497 + if (leftover != window_size) {
4498 +- if (!gfs2_holder_queued(gh))
4499 +- goto retry;
4500 +- goto retry_under_glock;
4501 ++ if (gfs2_holder_queued(gh))
4502 ++ goto retry_under_glock;
4503 ++ goto retry;
4504 + }
4505 + }
4506 + if (gfs2_holder_queued(gh))
4507 +@@ -927,9 +927,9 @@ retry_under_glock:
4508 + leftover = fault_in_iov_iter_readable(from, window_size);
4509 + gfs2_holder_disallow_demote(gh);
4510 + if (leftover != window_size) {
4511 +- if (!gfs2_holder_queued(gh))
4512 +- goto retry;
4513 +- goto retry_under_glock;
4514 ++ if (gfs2_holder_queued(gh))
4515 ++ goto retry_under_glock;
4516 ++ goto retry;
4517 + }
4518 + }
4519 + out:
4520 +@@ -996,12 +996,9 @@ retry_under_glock:
4521 + leftover = fault_in_iov_iter_writeable(to, window_size);
4522 + gfs2_holder_disallow_demote(&gh);
4523 + if (leftover != window_size) {
4524 +- if (!gfs2_holder_queued(&gh)) {
4525 +- if (written)
4526 +- goto out_uninit;
4527 +- goto retry;
4528 +- }
4529 +- goto retry_under_glock;
4530 ++ if (gfs2_holder_queued(&gh))
4531 ++ goto retry_under_glock;
4532 ++ goto retry;
4533 + }
4534 + }
4535 + if (gfs2_holder_queued(&gh))
4536 +@@ -1021,6 +1018,7 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
4537 + struct gfs2_sbd *sdp = GFS2_SB(inode);
4538 + struct gfs2_holder *statfs_gh = NULL;
4539 + size_t prev_count = 0, window_size = 0;
4540 ++ size_t orig_count = iov_iter_count(from);
4541 + size_t read = 0;
4542 + ssize_t ret;
4543 +
4544 +@@ -1065,6 +1063,7 @@ retry_under_glock:
4545 + if (inode == sdp->sd_rindex)
4546 + gfs2_glock_dq_uninit(statfs_gh);
4547 +
4548 ++ from->count = orig_count - read;
4549 + if (should_fault_in_pages(ret, from, &prev_count, &window_size)) {
4550 + size_t leftover;
4551 +
4552 +@@ -1072,12 +1071,10 @@ retry_under_glock:
4553 + leftover = fault_in_iov_iter_readable(from, window_size);
4554 + gfs2_holder_disallow_demote(gh);
4555 + if (leftover != window_size) {
4556 +- if (!gfs2_holder_queued(gh)) {
4557 +- if (read)
4558 +- goto out_uninit;
4559 +- goto retry;
4560 +- }
4561 +- goto retry_under_glock;
4562 ++ from->count = min(from->count, window_size - leftover);
4563 ++ if (gfs2_holder_queued(gh))
4564 ++ goto retry_under_glock;
4565 ++ goto retry;
4566 + }
4567 + }
4568 + out_unlock:
4569 +diff --git a/fs/io_uring.c b/fs/io_uring.c
4570 +index 1bf1ea2cd8b09..7aad4bde92e96 100644
4571 +--- a/fs/io_uring.c
4572 ++++ b/fs/io_uring.c
4573 +@@ -4786,6 +4786,8 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4574 +
4575 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4576 + return -EINVAL;
4577 ++ if (unlikely(sqe->addr2 || sqe->file_index))
4578 ++ return -EINVAL;
4579 +
4580 + sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
4581 + sr->len = READ_ONCE(sqe->len);
4582 +@@ -5007,6 +5009,8 @@ static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4583 +
4584 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4585 + return -EINVAL;
4586 ++ if (unlikely(sqe->addr2 || sqe->file_index))
4587 ++ return -EINVAL;
4588 +
4589 + sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
4590 + sr->len = READ_ONCE(sqe->len);
4591 +diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
4592 +index 192d8308afc27..1ed3046dd5b3f 100644
4593 +--- a/fs/ksmbd/smb2pdu.c
4594 ++++ b/fs/ksmbd/smb2pdu.c
4595 +@@ -11,6 +11,7 @@
4596 + #include <linux/statfs.h>
4597 + #include <linux/ethtool.h>
4598 + #include <linux/falloc.h>
4599 ++#include <linux/mount.h>
4600 +
4601 + #include "glob.h"
4602 + #include "smb2pdu.h"
4603 +@@ -4997,15 +4998,17 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
4604 + case FS_SECTOR_SIZE_INFORMATION:
4605 + {
4606 + struct smb3_fs_ss_info *info;
4607 ++ unsigned int sector_size =
4608 ++ min_t(unsigned int, path.mnt->mnt_sb->s_blocksize, 4096);
4609 +
4610 + info = (struct smb3_fs_ss_info *)(rsp->Buffer);
4611 +
4612 +- info->LogicalBytesPerSector = cpu_to_le32(stfs.f_bsize);
4613 ++ info->LogicalBytesPerSector = cpu_to_le32(sector_size);
4614 + info->PhysicalBytesPerSectorForAtomicity =
4615 +- cpu_to_le32(stfs.f_bsize);
4616 +- info->PhysicalBytesPerSectorForPerf = cpu_to_le32(stfs.f_bsize);
4617 ++ cpu_to_le32(sector_size);
4618 ++ info->PhysicalBytesPerSectorForPerf = cpu_to_le32(sector_size);
4619 + info->FSEffPhysicalBytesPerSectorForAtomicity =
4620 +- cpu_to_le32(stfs.f_bsize);
4621 ++ cpu_to_le32(sector_size);
4622 + info->Flags = cpu_to_le32(SSINFO_FLAGS_ALIGNED_DEVICE |
4623 + SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE);
4624 + info->ByteOffsetForSectorAlignment = 0;
4625 +@@ -5768,8 +5771,10 @@ static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
4626 + if (parent_fp) {
4627 + if (parent_fp->daccess & FILE_DELETE_LE) {
4628 + pr_err("parent dir is opened with delete access\n");
4629 ++ ksmbd_fd_put(work, parent_fp);
4630 + return -ESHARE;
4631 + }
4632 ++ ksmbd_fd_put(work, parent_fp);
4633 + }
4634 + next:
4635 + return smb2_rename(work, fp, user_ns, rename_info,
4636 +diff --git a/fs/ksmbd/vfs_cache.c b/fs/ksmbd/vfs_cache.c
4637 +index 29c1db66bd0f7..8b873d92d7854 100644
4638 +--- a/fs/ksmbd/vfs_cache.c
4639 ++++ b/fs/ksmbd/vfs_cache.c
4640 +@@ -497,6 +497,7 @@ struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode)
4641 + list_for_each_entry(lfp, &ci->m_fp_list, node) {
4642 + if (inode == file_inode(lfp->filp)) {
4643 + atomic_dec(&ci->m_count);
4644 ++ lfp = ksmbd_fp_get(lfp);
4645 + read_unlock(&ci->m_lock);
4646 + return lfp;
4647 + }
4648 +diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
4649 +index bced33b76beac..b34ccfd71b0ff 100644
4650 +--- a/fs/zonefs/super.c
4651 ++++ b/fs/zonefs/super.c
4652 +@@ -35,6 +35,17 @@ static inline int zonefs_zone_mgmt(struct inode *inode,
4653 +
4654 + lockdep_assert_held(&zi->i_truncate_mutex);
4655 +
4656 ++ /*
4657 ++ * With ZNS drives, closing an explicitly open zone that has not been
4658 ++ * written will change the zone state to "closed", that is, the zone
4659 ++ * will remain active. Since this can then cause failure of explicit
4660 ++ * open operation on other zones if the drive active zone resources
4661 ++ * are exceeded, make sure that the zone does not remain active by
4662 ++ * resetting it.
4663 ++ */
4664 ++ if (op == REQ_OP_ZONE_CLOSE && !zi->i_wpoffset)
4665 ++ op = REQ_OP_ZONE_RESET;
4666 ++
4667 + trace_zonefs_zone_mgmt(inode, op);
4668 + ret = blkdev_zone_mgmt(inode->i_sb->s_bdev, op, zi->i_zsector,
4669 + zi->i_zone_size >> SECTOR_SHIFT, GFP_NOFS);
4670 +@@ -1144,6 +1155,7 @@ static struct inode *zonefs_alloc_inode(struct super_block *sb)
4671 + inode_init_once(&zi->i_vnode);
4672 + mutex_init(&zi->i_truncate_mutex);
4673 + zi->i_wr_refcnt = 0;
4674 ++ zi->i_flags = 0;
4675 +
4676 + return &zi->i_vnode;
4677 + }
4678 +@@ -1295,12 +1307,13 @@ static void zonefs_init_dir_inode(struct inode *parent, struct inode *inode,
4679 + inc_nlink(parent);
4680 + }
4681 +
4682 +-static void zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
4683 +- enum zonefs_ztype type)
4684 ++static int zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
4685 ++ enum zonefs_ztype type)
4686 + {
4687 + struct super_block *sb = inode->i_sb;
4688 + struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
4689 + struct zonefs_inode_info *zi = ZONEFS_I(inode);
4690 ++ int ret = 0;
4691 +
4692 + inode->i_ino = zone->start >> sbi->s_zone_sectors_shift;
4693 + inode->i_mode = S_IFREG | sbi->s_perm;
4694 +@@ -1325,6 +1338,22 @@ static void zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
4695 + sb->s_maxbytes = max(zi->i_max_size, sb->s_maxbytes);
4696 + sbi->s_blocks += zi->i_max_size >> sb->s_blocksize_bits;
4697 + sbi->s_used_blocks += zi->i_wpoffset >> sb->s_blocksize_bits;
4698 ++
4699 ++ /*
4700 ++ * For sequential zones, make sure that any open zone is closed first
4701 ++ * to ensure that the initial number of open zones is 0, in sync with
4702 ++ * the open zone accounting done when the mount option
4703 ++ * ZONEFS_MNTOPT_EXPLICIT_OPEN is used.
4704 ++ */
4705 ++ if (type == ZONEFS_ZTYPE_SEQ &&
4706 ++ (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
4707 ++ zone->cond == BLK_ZONE_COND_EXP_OPEN)) {
4708 ++ mutex_lock(&zi->i_truncate_mutex);
4709 ++ ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_CLOSE);
4710 ++ mutex_unlock(&zi->i_truncate_mutex);
4711 ++ }
4712 ++
4713 ++ return ret;
4714 + }
4715 +
4716 + static struct dentry *zonefs_create_inode(struct dentry *parent,
4717 +@@ -1334,6 +1363,7 @@ static struct dentry *zonefs_create_inode(struct dentry *parent,
4718 + struct inode *dir = d_inode(parent);
4719 + struct dentry *dentry;
4720 + struct inode *inode;
4721 ++ int ret;
4722 +
4723 + dentry = d_alloc_name(parent, name);
4724 + if (!dentry)
4725 +@@ -1344,10 +1374,16 @@ static struct dentry *zonefs_create_inode(struct dentry *parent,
4726 + goto dput;
4727 +
4728 + inode->i_ctime = inode->i_mtime = inode->i_atime = dir->i_ctime;
4729 +- if (zone)
4730 +- zonefs_init_file_inode(inode, zone, type);
4731 +- else
4732 ++ if (zone) {
4733 ++ ret = zonefs_init_file_inode(inode, zone, type);
4734 ++ if (ret) {
4735 ++ iput(inode);
4736 ++ goto dput;
4737 ++ }
4738 ++ } else {
4739 + zonefs_init_dir_inode(dir, inode, type);
4740 ++ }
4741 ++
4742 + d_add(dentry, inode);
4743 + dir->i_size++;
4744 +
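
The zonefs hunks above turn a zone close into a zone reset when nothing has been written yet, so an unwritten zone does not stay active and eat into the drive's active-zone budget. A rough user-space analogue of that decision, sketched with the BLKCLOSEZONE/BLKRESETZONE ioctls from <linux/blkzoned.h>; the device path and the caller-tracked write-pointer offset are assumptions, not part of the patch:

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <linux/blkzoned.h>

  /* Close a zone, but reset it instead if nothing was ever written to it,
   * so it does not remain active (mirrors the zonefs_zone_mgmt change). */
  static int close_or_reset_zone(int fd, __u64 zone_start, __u64 zone_len,
                                 __u64 wp_offset)
  {
          struct blk_zone_range range = {
                  .sector = zone_start,
                  .nr_sectors = zone_len,
          };
          unsigned long op = wp_offset ? BLKCLOSEZONE : BLKRESETZONE;

          return ioctl(fd, op, &range);
  }

  int main(void)
  {
          int fd = open("/dev/nullb0", O_RDWR);   /* hypothetical zoned device */

          if (fd < 0)
                  return 1;
          if (close_or_reset_zone(fd, 0, 524288, 0) < 0)
                  perror("zone mgmt");
          return 0;
  }
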
4745 +diff --git a/include/linux/kernel.h b/include/linux/kernel.h
4746 +index 2776423a587e4..f56cd8879a594 100644
4747 +--- a/include/linux/kernel.h
4748 ++++ b/include/linux/kernel.h
4749 +@@ -277,7 +277,7 @@ static inline char *hex_byte_pack_upper(char *buf, u8 byte)
4750 + return buf;
4751 + }
4752 +
4753 +-extern int hex_to_bin(char ch);
4754 ++extern int hex_to_bin(unsigned char ch);
4755 + extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
4756 + extern char *bin2hex(char *dst, const void *src, size_t count);
4757 +
4758 +diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
4759 +index 88227044fc86c..8a2c60235ebb8 100644
4760 +--- a/include/linux/mtd/mtd.h
4761 ++++ b/include/linux/mtd/mtd.h
4762 +@@ -394,10 +394,8 @@ struct mtd_info {
4763 + /* List of partitions attached to this MTD device */
4764 + struct list_head partitions;
4765 +
4766 +- union {
4767 +- struct mtd_part part;
4768 +- struct mtd_master master;
4769 +- };
4770 ++ struct mtd_part part;
4771 ++ struct mtd_master master;
4772 + };
4773 +
4774 + static inline struct mtd_info *mtd_get_master(struct mtd_info *mtd)
4775 +diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h
4776 +index 77c694a19149d..15dd0076c2936 100644
4777 +--- a/include/memory/renesas-rpc-if.h
4778 ++++ b/include/memory/renesas-rpc-if.h
4779 +@@ -66,6 +66,7 @@ struct rpcif {
4780 + size_t size;
4781 + enum rpcif_data_dir dir;
4782 + u8 bus_size;
4783 ++ u8 xfer_size;
4784 + void *buffer;
4785 + u32 xferlen;
4786 + u32 smcr;
4787 +diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
4788 +index 028eaea1c8544..42d50856fcf24 100644
4789 +--- a/include/net/ip6_tunnel.h
4790 ++++ b/include/net/ip6_tunnel.h
4791 +@@ -57,7 +57,7 @@ struct ip6_tnl {
4792 +
4793 + /* These fields used only by GRE */
4794 + __u32 i_seqno; /* The last seen seqno */
4795 +- __u32 o_seqno; /* The last output seqno */
4796 ++ atomic_t o_seqno; /* The last output seqno */
4797 + int hlen; /* tun_hlen + encap_hlen */
4798 + int tun_hlen; /* Precalculated header length */
4799 + int encap_hlen; /* Encap header length (FOU,GUE) */
4800 +diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
4801 +index bc3b13ec93c9d..37d5d4968e20a 100644
4802 +--- a/include/net/ip_tunnels.h
4803 ++++ b/include/net/ip_tunnels.h
4804 +@@ -113,7 +113,7 @@ struct ip_tunnel {
4805 +
4806 + /* These four fields used only by GRE */
4807 + u32 i_seqno; /* The last seen seqno */
4808 +- u32 o_seqno; /* The last output seqno */
4809 ++ atomic_t o_seqno; /* The last output seqno */
4810 + int tun_hlen; /* Precalculated header length */
4811 +
4812 + /* These four fields used only by ERSPAN */
4813 +diff --git a/include/net/tcp.h b/include/net/tcp.h
4814 +index 31d384c3778a1..91ac329ca5789 100644
4815 +--- a/include/net/tcp.h
4816 ++++ b/include/net/tcp.h
4817 +@@ -470,6 +470,7 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
4818 + u32 cookie);
4819 + struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
4820 + struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
4821 ++ const struct tcp_request_sock_ops *af_ops,
4822 + struct sock *sk, struct sk_buff *skb);
4823 + #ifdef CONFIG_SYN_COOKIES
4824 +
4825 +@@ -608,6 +609,7 @@ void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
4826 + void tcp_reset(struct sock *sk, struct sk_buff *skb);
4827 + void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
4828 + void tcp_fin(struct sock *sk);
4829 ++void tcp_check_space(struct sock *sk);
4830 +
4831 + /* tcp_timer.c */
4832 + void tcp_init_xmit_timers(struct sock *);
4833 +@@ -1026,6 +1028,7 @@ struct rate_sample {
4834 + int losses; /* number of packets marked lost upon ACK */
4835 + u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
4836 + u32 prior_in_flight; /* in flight before this ACK */
4837 ++ u32 last_end_seq; /* end_seq of most recently ACKed packet */
4838 + bool is_app_limited; /* is sample from packet with bubble in pipe? */
4839 + bool is_retrans; /* is sample from retransmission? */
4840 + bool is_ack_delayed; /* is this (likely) a delayed ACK? */
4841 +@@ -1148,6 +1151,11 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
4842 + bool is_sack_reneg, struct rate_sample *rs);
4843 + void tcp_rate_check_app_limited(struct sock *sk);
4844 +
4845 ++static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
4846 ++{
4847 ++ return t1 > t2 || (t1 == t2 && after(seq1, seq2));
4848 ++}
4849 ++
4850 + /* These functions determine how the current flow behaves in respect of SACK
4851 + * handling. SACK is negotiated with the peer, and therefore it can vary
4852 + * between different flows.
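
tcp_skb_sent_after() added above orders two transmissions by TX timestamp and breaks ties with a wrap-safe sequence-number comparison; tcp_rate_skb_delivered() later in this patch uses it to pick the most recently sent skb in a stretched ACK. A stand-alone sketch of the same comparison, with the kernel's after() macro reimplemented here for illustration:

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Wrap-safe "seq1 is later than seq2" test, as the kernel's after() does. */
  static bool seq_after(uint32_t seq1, uint32_t seq2)
  {
          return (int32_t)(seq1 - seq2) > 0;
  }

  /* Prefer the later TX timestamp, fall back to the sequence number when
   * two skbs carry the same timestamp. */
  static bool sent_after(uint64_t t1, uint64_t t2, uint32_t seq1, uint32_t seq2)
  {
          return t1 > t2 || (t1 == t2 && seq_after(seq1, seq2));
  }

  int main(void)
  {
          /* Same timestamp, seq2 has wrapped: seq 10 is still "after" 0xfffffff0. */
          printf("%d\n", sent_after(100, 100, 10, 0xfffffff0));   /* prints 1 */
          return 0;
  }
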
4853 +diff --git a/lib/hexdump.c b/lib/hexdump.c
4854 +index 9301578f98e8c..06833d404398d 100644
4855 +--- a/lib/hexdump.c
4856 ++++ b/lib/hexdump.c
4857 +@@ -22,15 +22,33 @@ EXPORT_SYMBOL(hex_asc_upper);
4858 + *
4859 + * hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
4860 + * input.
4861 ++ *
4862 ++ * This function is used to load cryptographic keys, so it is coded in such a
4863 ++ * way that there are no conditions or memory accesses that depend on data.
4864 ++ *
4865 ++ * Explanation of the logic:
4866 ++ * (ch - '9' - 1) is negative if ch <= '9'
4867 ++ * ('0' - 1 - ch) is negative if ch >= '0'
4868 ++ * we "and" these two values, so the result is negative if ch is in the range
4869 ++ * '0' ... '9'
4870 ++ * we are only interested in the sign, so we do a shift ">> 8"; note that right
4871 ++ * shift of a negative value is implementation-defined, so we cast the
4872 ++ * value to (unsigned) before the shift --- we have 0xffffff if ch is in
4873 ++ * the range '0' ... '9', 0 otherwise
4874 ++ * we "and" this value with (ch - '0' + 1) --- we have a value 1 ... 10 if ch is
4875 ++ * in the range '0' ... '9', 0 otherwise
4876 ++ * we add this value to -1 --- we have a value 0 ... 9 if ch is in the range '0'
4877 ++ * ... '9', -1 otherwise
4878 ++ * the next line is similar to the previous one, but we need to decode both
4879 ++ * uppercase and lowercase letters, so we use (ch & 0xdf), which converts
4880 ++ * lowercase to uppercase
4881 + */
4882 +-int hex_to_bin(char ch)
4883 ++int hex_to_bin(unsigned char ch)
4884 + {
4885 +- if ((ch >= '0') && (ch <= '9'))
4886 +- return ch - '0';
4887 +- ch = tolower(ch);
4888 +- if ((ch >= 'a') && (ch <= 'f'))
4889 +- return ch - 'a' + 10;
4890 +- return -1;
4891 ++ unsigned char cu = ch & 0xdf;
4892 ++ return -1 +
4893 ++ ((ch - '0' + 1) & (unsigned)((ch - '9' - 1) & ('0' - 1 - ch)) >> 8) +
4894 ++ ((cu - 'A' + 11) & (unsigned)((cu - 'F' - 1) & ('A' - 1 - cu)) >> 8);
4895 + }
4896 + EXPORT_SYMBOL(hex_to_bin);
4897 +
4898 +@@ -45,10 +63,13 @@ EXPORT_SYMBOL(hex_to_bin);
4899 + int hex2bin(u8 *dst, const char *src, size_t count)
4900 + {
4901 + while (count--) {
4902 +- int hi = hex_to_bin(*src++);
4903 +- int lo = hex_to_bin(*src++);
4904 ++ int hi, lo;
4905 +
4906 +- if ((hi < 0) || (lo < 0))
4907 ++ hi = hex_to_bin(*src++);
4908 ++ if (unlikely(hi < 0))
4909 ++ return -EINVAL;
4910 ++ lo = hex_to_bin(*src++);
4911 ++ if (unlikely(lo < 0))
4912 + return -EINVAL;
4913 +
4914 + *dst++ = (hi << 4) | lo;
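
The rewritten hex_to_bin() above is branch-free so that the value of a key byte never influences control flow, and the added comment walks through the arithmetic. The expression can be sanity-checked in user space against the obvious table-style decoder; this is a quick self-test, not kernel code:

  #include <ctype.h>
  #include <stdio.h>

  /* Branch-free hex digit decoder, same expression as the patched hex_to_bin(). */
  static int hex_to_bin_ct(unsigned char ch)
  {
          unsigned char cu = ch & 0xdf;
          return -1 +
                 ((ch - '0' + 1) & (unsigned)((ch - '9' - 1) & ('0' - 1 - ch)) >> 8) +
                 ((cu - 'A' + 11) & (unsigned)((cu - 'F' - 1) & ('A' - 1 - cu)) >> 8);
  }

  /* Straightforward reference decoder. */
  static int hex_to_bin_ref(unsigned char ch)
  {
          if (ch >= '0' && ch <= '9')
                  return ch - '0';
          ch = tolower(ch);
          if (ch >= 'a' && ch <= 'f')
                  return ch - 'a' + 10;
          return -1;
  }

  int main(void)
  {
          for (int c = 0; c < 256; c++)
                  if (hex_to_bin_ct(c) != hex_to_bin_ref(c))
                          printf("mismatch at %d\n", c);
          return 0;
  }
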
4915 +diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
4916 +index 47ed4fc33a29e..1bd6a3f13467b 100644
4917 +--- a/mm/kasan/quarantine.c
4918 ++++ b/mm/kasan/quarantine.c
4919 +@@ -315,6 +315,13 @@ static void per_cpu_remove_cache(void *arg)
4920 + struct qlist_head *q;
4921 +
4922 + q = this_cpu_ptr(&cpu_quarantine);
4923 ++ /*
4924 ++ * Ensure ordering between the write to q->offline and
4925 ++ * per_cpu_remove_cache(), and prevent cpu_quarantine from being
4926 ++ * corrupted by an interrupt.
4927 ++ */
4928 ++ if (READ_ONCE(q->offline))
4929 ++ return;
4930 + qlist_move_cache(q, &to_free, cache);
4931 + qlist_free_all(&to_free, cache);
4932 + }
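
The quarantine hunk bails out when the per-CPU queue is already marked offline, so a drain triggered while hotplug teardown is in progress leaves the structure alone. A small user-space sketch of the same guard, using a C11 acquire load where the kernel uses READ_ONCE(); the queue type here is made up:

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stddef.h>

  struct work_queue {
          atomic_bool offline;    /* set by the teardown path before freeing */
          size_t pending;         /* stand-in for the real queue contents */
  };

  /* Drain the queue, but only if it has not been taken offline. */
  static void drain_queue(struct work_queue *q)
  {
          /* Mirrors the READ_ONCE(q->offline) check added to per_cpu_remove_cache(). */
          if (atomic_load_explicit(&q->offline, memory_order_acquire))
                  return;

          q->pending = 0;         /* process/free everything still queued */
  }

  int main(void)
  {
          struct work_queue q = { .pending = 3 };

          atomic_init(&q.offline, false);
          drain_queue(&q);        /* online: gets drained */

          atomic_store_explicit(&q.offline, true, memory_order_release);
          drain_queue(&q);        /* offline: left untouched */
          return 0;
  }
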
4933 +diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
4934 +index 2f7940bcf7151..3fd207fe1284a 100644
4935 +--- a/net/core/lwt_bpf.c
4936 ++++ b/net/core/lwt_bpf.c
4937 +@@ -158,10 +158,8 @@ static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
4938 + return dst->lwtstate->orig_output(net, sk, skb);
4939 + }
4940 +
4941 +-static int xmit_check_hhlen(struct sk_buff *skb)
4942 ++static int xmit_check_hhlen(struct sk_buff *skb, int hh_len)
4943 + {
4944 +- int hh_len = skb_dst(skb)->dev->hard_header_len;
4945 +-
4946 + if (skb_headroom(skb) < hh_len) {
4947 + int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
4948 +
4949 +@@ -273,6 +271,7 @@ static int bpf_xmit(struct sk_buff *skb)
4950 +
4951 + bpf = bpf_lwt_lwtunnel(dst->lwtstate);
4952 + if (bpf->xmit.prog) {
4953 ++ int hh_len = dst->dev->hard_header_len;
4954 + __be16 proto = skb->protocol;
4955 + int ret;
4956 +
4957 +@@ -290,7 +289,7 @@ static int bpf_xmit(struct sk_buff *skb)
4958 + /* If the header was expanded, headroom might be too
4959 + * small for L2 header to come, expand as needed.
4960 + */
4961 +- ret = xmit_check_hhlen(skb);
4962 ++ ret = xmit_check_hhlen(skb, hh_len);
4963 + if (unlikely(ret))
4964 + return ret;
4965 +
4966 +diff --git a/net/dsa/port.c b/net/dsa/port.c
4967 +index 616330a16d319..63e88de963936 100644
4968 +--- a/net/dsa/port.c
4969 ++++ b/net/dsa/port.c
4970 +@@ -1201,8 +1201,10 @@ int dsa_port_link_register_of(struct dsa_port *dp)
4971 + if (ds->ops->phylink_mac_link_down)
4972 + ds->ops->phylink_mac_link_down(ds, port,
4973 + MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
4974 ++ of_node_put(phy_np);
4975 + return dsa_port_phylink_register(dp);
4976 + }
4977 ++ of_node_put(phy_np);
4978 + return 0;
4979 + }
4980 +
4981 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
4982 +index e7f3e37e4aa83..276a3b7b0e9c1 100644
4983 +--- a/net/ipv4/ip_gre.c
4984 ++++ b/net/ipv4/ip_gre.c
4985 +@@ -459,14 +459,12 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
4986 + __be16 proto)
4987 + {
4988 + struct ip_tunnel *tunnel = netdev_priv(dev);
4989 +-
4990 +- if (tunnel->parms.o_flags & TUNNEL_SEQ)
4991 +- tunnel->o_seqno++;
4992 ++ __be16 flags = tunnel->parms.o_flags;
4993 +
4994 + /* Push GRE header. */
4995 + gre_build_header(skb, tunnel->tun_hlen,
4996 +- tunnel->parms.o_flags, proto, tunnel->parms.o_key,
4997 +- htonl(tunnel->o_seqno));
4998 ++ flags, proto, tunnel->parms.o_key,
4999 ++ (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
5000 +
5001 + ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
5002 + }
5003 +@@ -504,7 +502,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
5004 + (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
5005 + gre_build_header(skb, tunnel_hlen, flags, proto,
5006 + tunnel_id_to_key32(tun_info->key.tun_id),
5007 +- (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);
5008 ++ (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
5009 +
5010 + ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
5011 +
5012 +@@ -581,7 +579,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
5013 + }
5014 +
5015 + gre_build_header(skb, 8, TUNNEL_SEQ,
5016 +- proto, 0, htonl(tunnel->o_seqno++));
5017 ++ proto, 0, htonl(atomic_fetch_inc(&tunnel->o_seqno)));
5018 +
5019 + ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
5020 +
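
The GRE changes above (and the matching ip6_gre.c hunk later in this patch) replace the plain o_seqno++ with atomic_fetch_inc(), since concurrent transmits on one tunnel could otherwise produce duplicate or skipped sequence numbers. The race and the fix can be reproduced in user space with C11 atomics; thread and iteration counts below are arbitrary:

  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdio.h>

  static atomic_uint o_seqno;     /* per-tunnel TX sequence counter */

  /* Each "transmit path" takes the next sequence number atomically,
   * as the patched __gre_xmit()/gre_fb_xmit() now do. */
  static void *xmit_thread(void *arg)
  {
          (void)arg;
          for (int i = 0; i < 100000; i++)
                  (void)atomic_fetch_add(&o_seqno, 1);
          return NULL;
  }

  int main(void)
  {
          pthread_t t[4];

          for (int i = 0; i < 4; i++)
                  pthread_create(&t[i], NULL, xmit_thread, NULL);
          for (int i = 0; i < 4; i++)
                  pthread_join(t[i], NULL);

          /* With the atomic counter this always prints 400000; with a plain
           * unsigned++ it may not. */
          printf("%u\n", atomic_load(&o_seqno));
          return 0;
  }
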
5021 +diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
5022 +index 33792cf55a793..10b469aee4920 100644
5023 +--- a/net/ipv4/syncookies.c
5024 ++++ b/net/ipv4/syncookies.c
5025 +@@ -283,6 +283,7 @@ bool cookie_ecn_ok(const struct tcp_options_received *tcp_opt,
5026 + EXPORT_SYMBOL(cookie_ecn_ok);
5027 +
5028 + struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
5029 ++ const struct tcp_request_sock_ops *af_ops,
5030 + struct sock *sk,
5031 + struct sk_buff *skb)
5032 + {
5033 +@@ -299,6 +300,10 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
5034 + return NULL;
5035 +
5036 + treq = tcp_rsk(req);
5037 ++
5038 ++ /* treq->af_specific might be used to perform TCP_MD5 lookup */
5039 ++ treq->af_specific = af_ops;
5040 ++
5041 + treq->syn_tos = TCP_SKB_CB(skb)->ip_dsfield;
5042 + #if IS_ENABLED(CONFIG_MPTCP)
5043 + treq->is_mptcp = sk_is_mptcp(sk);
5044 +@@ -366,7 +371,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
5045 + goto out;
5046 +
5047 + ret = NULL;
5048 +- req = cookie_tcp_reqsk_alloc(&tcp_request_sock_ops, sk, skb);
5049 ++ req = cookie_tcp_reqsk_alloc(&tcp_request_sock_ops,
5050 ++ &tcp_request_sock_ipv4_ops, sk, skb);
5051 + if (!req)
5052 + goto out;
5053 +
5054 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
5055 +index 509f577869d4e..dfd32cd3b95e7 100644
5056 +--- a/net/ipv4/tcp_input.c
5057 ++++ b/net/ipv4/tcp_input.c
5058 +@@ -3860,7 +3860,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
5059 + tcp_process_tlp_ack(sk, ack, flag);
5060 +
5061 + if (tcp_ack_is_dubious(sk, flag)) {
5062 +- if (!(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP))) {
5063 ++ if (!(flag & (FLAG_SND_UNA_ADVANCED |
5064 ++ FLAG_NOT_DUP | FLAG_DSACKING_ACK))) {
5065 + num_dupack = 1;
5066 + /* Consider if pure acks were aggregated in tcp_add_backlog() */
5067 + if (!(flag & FLAG_DATA))
5068 +@@ -5420,7 +5421,17 @@ static void tcp_new_space(struct sock *sk)
5069 + INDIRECT_CALL_1(sk->sk_write_space, sk_stream_write_space, sk);
5070 + }
5071 +
5072 +-static void tcp_check_space(struct sock *sk)
5073 ++/* Caller made space either from:
5074 ++ * 1) Freeing skbs in rtx queues (after tp->snd_una has advanced)
5075 ++ * 2) Sent skbs from output queue (and thus advancing tp->snd_nxt)
5076 ++ *
5077 ++ * We might be able to generate EPOLLOUT to the application if:
5078 ++ * 1) Space consumed in output/rtx queues is below sk->sk_sndbuf/2
5079 ++ * 2) notsent amount (tp->write_seq - tp->snd_nxt) became
5080 ++ * small enough that tcp_stream_memory_free() decides it
5081 ++ * is time to generate EPOLLOUT.
5082 ++ */
5083 ++void tcp_check_space(struct sock *sk)
5084 + {
5085 + /* pairs with tcp_poll() */
5086 + smp_mb();
5087 +diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
5088 +index 0a4f3f16140ad..13783fc58e030 100644
5089 +--- a/net/ipv4/tcp_minisocks.c
5090 ++++ b/net/ipv4/tcp_minisocks.c
5091 +@@ -538,7 +538,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
5092 + newtp->tsoffset = treq->ts_off;
5093 + #ifdef CONFIG_TCP_MD5SIG
5094 + newtp->md5sig_info = NULL; /*XXX*/
5095 +- if (newtp->af_specific->md5_lookup(sk, newsk))
5096 ++ if (treq->af_specific->req_md5_lookup(sk, req_to_sk(req)))
5097 + newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
5098 + #endif
5099 + if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
5100 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
5101 +index 369752f5f6769..df413282fa2e5 100644
5102 +--- a/net/ipv4/tcp_output.c
5103 ++++ b/net/ipv4/tcp_output.c
5104 +@@ -82,6 +82,7 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
5105 +
5106 + NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
5107 + tcp_skb_pcount(skb));
5108 ++ tcp_check_space(sk);
5109 + }
5110 +
5111 + /* SND.NXT, if window was not shrunk or the amount of shrunk was less than one
5112 +diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
5113 +index 0de6935659635..6ab197928abbc 100644
5114 +--- a/net/ipv4/tcp_rate.c
5115 ++++ b/net/ipv4/tcp_rate.c
5116 +@@ -73,26 +73,31 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
5117 + *
5118 + * If an ACK (s)acks multiple skbs (e.g., stretched-acks), this function is
5119 + * called multiple times. We favor the information from the most recently
5120 +- * sent skb, i.e., the skb with the highest prior_delivered count.
5121 ++ * sent skb, i.e., the skb with the most recently sent time and the highest
5122 ++ * sequence.
5123 + */
5124 + void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
5125 + struct rate_sample *rs)
5126 + {
5127 + struct tcp_sock *tp = tcp_sk(sk);
5128 + struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
5129 ++ u64 tx_tstamp;
5130 +
5131 + if (!scb->tx.delivered_mstamp)
5132 + return;
5133 +
5134 ++ tx_tstamp = tcp_skb_timestamp_us(skb);
5135 + if (!rs->prior_delivered ||
5136 +- after(scb->tx.delivered, rs->prior_delivered)) {
5137 ++ tcp_skb_sent_after(tx_tstamp, tp->first_tx_mstamp,
5138 ++ scb->end_seq, rs->last_end_seq)) {
5139 + rs->prior_delivered = scb->tx.delivered;
5140 + rs->prior_mstamp = scb->tx.delivered_mstamp;
5141 + rs->is_app_limited = scb->tx.is_app_limited;
5142 + rs->is_retrans = scb->sacked & TCPCB_RETRANS;
5143 ++ rs->last_end_seq = scb->end_seq;
5144 +
5145 + /* Record send time of most recently ACKed packet: */
5146 +- tp->first_tx_mstamp = tcp_skb_timestamp_us(skb);
5147 ++ tp->first_tx_mstamp = tx_tstamp;
5148 + /* Find the duration of the "send phase" of this window: */
5149 + rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
5150 + scb->tx.first_tx_mstamp);
5151 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
5152 +index 869c3337e319d..a817ac6d97598 100644
5153 +--- a/net/ipv6/ip6_gre.c
5154 ++++ b/net/ipv6/ip6_gre.c
5155 +@@ -724,6 +724,7 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
5156 + {
5157 + struct ip6_tnl *tunnel = netdev_priv(dev);
5158 + __be16 protocol;
5159 ++ __be16 flags;
5160 +
5161 + if (dev->type == ARPHRD_ETHER)
5162 + IPCB(skb)->flags = 0;
5163 +@@ -739,7 +740,6 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
5164 + if (tunnel->parms.collect_md) {
5165 + struct ip_tunnel_info *tun_info;
5166 + const struct ip_tunnel_key *key;
5167 +- __be16 flags;
5168 + int tun_hlen;
5169 +
5170 + tun_info = skb_tunnel_info_txcheck(skb);
5171 +@@ -766,19 +766,19 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
5172 + gre_build_header(skb, tun_hlen,
5173 + flags, protocol,
5174 + tunnel_id_to_key32(tun_info->key.tun_id),
5175 +- (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
5176 ++ (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno))
5177 + : 0);
5178 +
5179 + } else {
5180 +- if (tunnel->parms.o_flags & TUNNEL_SEQ)
5181 +- tunnel->o_seqno++;
5182 +-
5183 + if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
5184 + return -ENOMEM;
5185 +
5186 +- gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
5187 ++ flags = tunnel->parms.o_flags;
5188 ++
5189 ++ gre_build_header(skb, tunnel->tun_hlen, flags,
5190 + protocol, tunnel->parms.o_key,
5191 +- htonl(tunnel->o_seqno));
5192 ++ (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno))
5193 ++ : 0);
5194 + }
5195 +
5196 + return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
5197 +@@ -1056,7 +1056,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
5198 + /* Push GRE header. */
5199 + proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
5200 + : htons(ETH_P_ERSPAN2);
5201 +- gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
5202 ++ gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(atomic_fetch_inc(&t->o_seqno)));
5203 +
5204 + /* TooBig packet may have updated dst->dev's mtu */
5205 + if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
5206 +diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
5207 +index 6ab710b5a1a82..118e834e91902 100644
5208 +--- a/net/ipv6/netfilter.c
5209 ++++ b/net/ipv6/netfilter.c
5210 +@@ -24,14 +24,13 @@ int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff
5211 + {
5212 + const struct ipv6hdr *iph = ipv6_hdr(skb);
5213 + struct sock *sk = sk_to_full_sk(sk_partial);
5214 ++ struct net_device *dev = skb_dst(skb)->dev;
5215 + struct flow_keys flkeys;
5216 + unsigned int hh_len;
5217 + struct dst_entry *dst;
5218 + int strict = (ipv6_addr_type(&iph->daddr) &
5219 + (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
5220 + struct flowi6 fl6 = {
5221 +- .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
5222 +- strict ? skb_dst(skb)->dev->ifindex : 0,
5223 + .flowi6_mark = skb->mark,
5224 + .flowi6_uid = sock_net_uid(net, sk),
5225 + .daddr = iph->daddr,
5226 +@@ -39,6 +38,13 @@ int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff
5227 + };
5228 + int err;
5229 +
5230 ++ if (sk && sk->sk_bound_dev_if)
5231 ++ fl6.flowi6_oif = sk->sk_bound_dev_if;
5232 ++ else if (strict)
5233 ++ fl6.flowi6_oif = dev->ifindex;
5234 ++ else
5235 ++ fl6.flowi6_oif = l3mdev_master_ifindex(dev);
5236 ++
5237 + fib6_rules_early_flow_dissect(net, skb, &fl6, &flkeys);
5238 + dst = ip6_route_output(net, sk, &fl6);
5239 + err = dst->error;
5240 +diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
5241 +index e8cfb9e997bf0..ca92dd6981dea 100644
5242 +--- a/net/ipv6/syncookies.c
5243 ++++ b/net/ipv6/syncookies.c
5244 +@@ -170,7 +170,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
5245 + goto out;
5246 +
5247 + ret = NULL;
5248 +- req = cookie_tcp_reqsk_alloc(&tcp6_request_sock_ops, sk, skb);
5249 ++ req = cookie_tcp_reqsk_alloc(&tcp6_request_sock_ops,
5250 ++ &tcp_request_sock_ipv6_ops, sk, skb);
5251 + if (!req)
5252 + goto out;
5253 +
5254 +diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
5255 +index 2c467c422dc63..fb67f1ca2495b 100644
5256 +--- a/net/netfilter/ipvs/ip_vs_conn.c
5257 ++++ b/net/netfilter/ipvs/ip_vs_conn.c
5258 +@@ -1495,7 +1495,7 @@ int __init ip_vs_conn_init(void)
5259 + pr_info("Connection hash table configured "
5260 + "(size=%d, memory=%ldKbytes)\n",
5261 + ip_vs_conn_tab_size,
5262 +- (long)(ip_vs_conn_tab_size*sizeof(struct list_head))/1024);
5263 ++ (long)(ip_vs_conn_tab_size*sizeof(*ip_vs_conn_tab))/1024);
5264 + IP_VS_DBG(0, "Each connection entry needs %zd bytes at least\n",
5265 + sizeof(struct ip_vs_conn));
5266 +
5267 +diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
5268 +index 3e1afd10a9b60..55aa55b252b20 100644
5269 +--- a/net/netfilter/nf_conntrack_standalone.c
5270 ++++ b/net/netfilter/nf_conntrack_standalone.c
5271 +@@ -823,7 +823,7 @@ static struct ctl_table nf_ct_sysctl_table[] = {
5272 + .mode = 0644,
5273 + .proc_handler = proc_dointvec_jiffies,
5274 + },
5275 +-#if IS_ENABLED(CONFIG_NFT_FLOW_OFFLOAD)
5276 ++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
5277 + [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD] = {
5278 + .procname = "nf_flowtable_udp_timeout",
5279 + .maxlen = sizeof(unsigned int),
5280 +diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
5281 +index d600a566da324..7325bee7d1442 100644
5282 +--- a/net/netfilter/nft_set_rbtree.c
5283 ++++ b/net/netfilter/nft_set_rbtree.c
5284 +@@ -349,7 +349,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
5285 + *ext = &rbe->ext;
5286 + return -EEXIST;
5287 + } else {
5288 +- p = &parent->rb_left;
5289 ++ overlap = false;
5290 ++ if (nft_rbtree_interval_end(rbe))
5291 ++ p = &parent->rb_left;
5292 ++ else
5293 ++ p = &parent->rb_right;
5294 + }
5295 + }
5296 +
5297 +diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
5298 +index b8f0111457650..9ad9cc0d1d27c 100644
5299 +--- a/net/netfilter/nft_socket.c
5300 ++++ b/net/netfilter/nft_socket.c
5301 +@@ -53,6 +53,32 @@ nft_sock_get_eval_cgroupv2(u32 *dest, struct sock *sk, const struct nft_pktinfo
5302 + }
5303 + #endif
5304 +
5305 ++static struct sock *nft_socket_do_lookup(const struct nft_pktinfo *pkt)
5306 ++{
5307 ++ const struct net_device *indev = nft_in(pkt);
5308 ++ const struct sk_buff *skb = pkt->skb;
5309 ++ struct sock *sk = NULL;
5310 ++
5311 ++ if (!indev)
5312 ++ return NULL;
5313 ++
5314 ++ switch (nft_pf(pkt)) {
5315 ++ case NFPROTO_IPV4:
5316 ++ sk = nf_sk_lookup_slow_v4(nft_net(pkt), skb, indev);
5317 ++ break;
5318 ++#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
5319 ++ case NFPROTO_IPV6:
5320 ++ sk = nf_sk_lookup_slow_v6(nft_net(pkt), skb, indev);
5321 ++ break;
5322 ++#endif
5323 ++ default:
5324 ++ WARN_ON_ONCE(1);
5325 ++ break;
5326 ++ }
5327 ++
5328 ++ return sk;
5329 ++}
5330 ++
5331 + static void nft_socket_eval(const struct nft_expr *expr,
5332 + struct nft_regs *regs,
5333 + const struct nft_pktinfo *pkt)
5334 +@@ -66,20 +92,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
5335 + sk = NULL;
5336 +
5337 + if (!sk)
5338 +- switch(nft_pf(pkt)) {
5339 +- case NFPROTO_IPV4:
5340 +- sk = nf_sk_lookup_slow_v4(nft_net(pkt), skb, nft_in(pkt));
5341 +- break;
5342 +-#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
5343 +- case NFPROTO_IPV6:
5344 +- sk = nf_sk_lookup_slow_v6(nft_net(pkt), skb, nft_in(pkt));
5345 +- break;
5346 +-#endif
5347 +- default:
5348 +- WARN_ON_ONCE(1);
5349 +- regs->verdict.code = NFT_BREAK;
5350 +- return;
5351 +- }
5352 ++ sk = nft_socket_do_lookup(pkt);
5353 +
5354 + if (!sk) {
5355 + regs->verdict.code = NFT_BREAK;
5356 +@@ -197,6 +210,16 @@ static int nft_socket_dump(struct sk_buff *skb,
5357 + return 0;
5358 + }
5359 +
5360 ++static int nft_socket_validate(const struct nft_ctx *ctx,
5361 ++ const struct nft_expr *expr,
5362 ++ const struct nft_data **data)
5363 ++{
5364 ++ return nft_chain_validate_hooks(ctx->chain,
5365 ++ (1 << NF_INET_PRE_ROUTING) |
5366 ++ (1 << NF_INET_LOCAL_IN) |
5367 ++ (1 << NF_INET_LOCAL_OUT));
5368 ++}
5369 ++
5370 + static struct nft_expr_type nft_socket_type;
5371 + static const struct nft_expr_ops nft_socket_ops = {
5372 + .type = &nft_socket_type,
5373 +@@ -204,6 +227,7 @@ static const struct nft_expr_ops nft_socket_ops = {
5374 + .eval = nft_socket_eval,
5375 + .init = nft_socket_init,
5376 + .dump = nft_socket_dump,
5377 ++ .validate = nft_socket_validate,
5378 + };
5379 +
5380 + static struct nft_expr_type nft_socket_type __read_mostly = {
5381 +diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
5382 +index b3815b568e8e5..463c4a58d2c36 100644
5383 +--- a/net/sctp/sm_sideeffect.c
5384 ++++ b/net/sctp/sm_sideeffect.c
5385 +@@ -458,6 +458,10 @@ void sctp_generate_reconf_event(struct timer_list *t)
5386 + goto out_unlock;
5387 + }
5388 +
5389 ++ /* This happens when the response arrives after the timer is triggered. */
5390 ++ if (!asoc->strreset_chunk)
5391 ++ goto out_unlock;
5392 ++
5393 + error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
5394 + SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_RECONF),
5395 + asoc->state, asoc->ep, asoc,
5396 +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
5397 +index 499058248bdb8..fb801c249d923 100644
5398 +--- a/net/smc/af_smc.c
5399 ++++ b/net/smc/af_smc.c
5400 +@@ -1223,6 +1223,8 @@ static void smc_connect_work(struct work_struct *work)
5401 + smc->sk.sk_state = SMC_CLOSED;
5402 + if (rc == -EPIPE || rc == -EAGAIN)
5403 + smc->sk.sk_err = EPIPE;
5404 ++ else if (rc == -ECONNREFUSED)
5405 ++ smc->sk.sk_err = ECONNREFUSED;
5406 + else if (signal_pending(current))
5407 + smc->sk.sk_err = -sock_intr_errno(timeo);
5408 + sock_put(&smc->sk); /* passive closing */
5409 +diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
5410 +index b932469ee69cc..a40553e83f8b2 100644
5411 +--- a/net/tls/tls_device.c
5412 ++++ b/net/tls/tls_device.c
5413 +@@ -483,11 +483,13 @@ handle_error:
5414 + copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
5415 + copy = min_t(size_t, copy, (max_open_record_len - record->len));
5416 +
5417 +- rc = tls_device_copy_data(page_address(pfrag->page) +
5418 +- pfrag->offset, copy, msg_iter);
5419 +- if (rc)
5420 +- goto handle_error;
5421 +- tls_append_frag(record, pfrag, copy);
5422 ++ if (copy) {
5423 ++ rc = tls_device_copy_data(page_address(pfrag->page) +
5424 ++ pfrag->offset, copy, msg_iter);
5425 ++ if (rc)
5426 ++ goto handle_error;
5427 ++ tls_append_frag(record, pfrag, copy);
5428 ++ }
5429 +
5430 + size -= copy;
5431 + if (!size) {
5432 +diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
5433 +index 426e287431d24..444ad0bc09083 100644
5434 +--- a/net/xdp/xsk.c
5435 ++++ b/net/xdp/xsk.c
5436 +@@ -655,7 +655,7 @@ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len
5437 + if (sk_can_busy_loop(sk))
5438 + sk_busy_loop(sk, 1); /* only support non-blocking sockets */
5439 +
5440 +- if (xsk_no_wakeup(sk))
5441 ++ if (xs->zc && xsk_no_wakeup(sk))
5442 + return 0;
5443 +
5444 + pool = xs->pool;
5445 +diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
5446 +index dcee7b2bd3d79..859ebcec83838 100644
5447 +--- a/sound/soc/codecs/wm8731.c
5448 ++++ b/sound/soc/codecs/wm8731.c
5449 +@@ -602,7 +602,7 @@ static int wm8731_hw_init(struct device *dev, struct wm8731_priv *wm8731)
5450 + ret = wm8731_reset(wm8731->regmap);
5451 + if (ret < 0) {
5452 + dev_err(dev, "Failed to issue reset: %d\n", ret);
5453 +- goto err_regulator_enable;
5454 ++ goto err;
5455 + }
5456 +
5457 + /* Clear POWEROFF, keep everything else disabled */
5458 +@@ -619,10 +619,7 @@ static int wm8731_hw_init(struct device *dev, struct wm8731_priv *wm8731)
5459 +
5460 + regcache_mark_dirty(wm8731->regmap);
5461 +
5462 +-err_regulator_enable:
5463 +- /* Regulators will be enabled by bias management */
5464 +- regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
5465 +-
5466 ++err:
5467 + return ret;
5468 + }
5469 +
5470 +@@ -766,21 +763,27 @@ static int wm8731_i2c_probe(struct i2c_client *i2c,
5471 + ret = PTR_ERR(wm8731->regmap);
5472 + dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
5473 + ret);
5474 +- return ret;
5475 ++ goto err_regulator_enable;
5476 + }
5477 +
5478 + ret = wm8731_hw_init(&i2c->dev, wm8731);
5479 + if (ret != 0)
5480 +- return ret;
5481 ++ goto err_regulator_enable;
5482 +
5483 + ret = devm_snd_soc_register_component(&i2c->dev,
5484 + &soc_component_dev_wm8731, &wm8731_dai, 1);
5485 + if (ret != 0) {
5486 + dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret);
5487 +- return ret;
5488 ++ goto err_regulator_enable;
5489 + }
5490 +
5491 + return 0;
5492 ++
5493 ++err_regulator_enable:
5494 ++ /* Regulators will be enabled by bias management */
5495 ++ regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
5496 ++
5497 ++ return ret;
5498 + }
5499 +
5500 + static int wm8731_i2c_remove(struct i2c_client *client)
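
The wm8731 hunks reroute every failure that happens after the regulators were enabled through a single err_regulator_enable label, instead of returning with the enable count leaked. A minimal user-space sketch of that goto-cleanup idiom; the resource names are stand-ins, not the driver's API:

  #include <stdio.h>
  #include <stdlib.h>

  /* Probe-style flow: once the first resource ("supplies") is acquired, every
   * later failure must release it, so all error paths share one cleanup label. */
  static int probe(void)
  {
          char *supplies, *regmap;
          int ret;

          supplies = malloc(16);          /* stands in for regulator_bulk_enable() */
          if (!supplies)
                  return -1;

          regmap = malloc(32);            /* stands in for regmap/codec setup */
          if (!regmap) {
                  ret = -1;
                  goto err_disable;       /* was a bare "return ret" before the fix */
          }

          /* ... further setup; any failure here would also jump to err_disable ... */

          free(regmap);
          free(supplies);
          return 0;

  err_disable:
          free(supplies);                 /* regulator_bulk_disable() stand-in */
          return ret;
  }

  int main(void)
  {
          return probe() ? EXIT_FAILURE : EXIT_SUCCESS;
  }
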
5501 +diff --git a/sound/soc/intel/common/soc-acpi-intel-tgl-match.c b/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
5502 +index 11801b905ecc2..c93d8019b0e55 100644
5503 +--- a/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
5504 ++++ b/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
5505 +@@ -127,13 +127,13 @@ static const struct snd_soc_acpi_adr_device mx8373_1_adr[] = {
5506 + {
5507 + .adr = 0x000123019F837300ull,
5508 + .num_endpoints = 1,
5509 +- .endpoints = &spk_l_endpoint,
5510 ++ .endpoints = &spk_r_endpoint,
5511 + .name_prefix = "Right"
5512 + },
5513 + {
5514 + .adr = 0x000127019F837300ull,
5515 + .num_endpoints = 1,
5516 +- .endpoints = &spk_r_endpoint,
5517 ++ .endpoints = &spk_l_endpoint,
5518 + .name_prefix = "Left"
5519 + }
5520 + };
5521 +diff --git a/tools/objtool/check.c b/tools/objtool/check.c
5522 +index 81982948f981d..58350fe1944b7 100644
5523 +--- a/tools/objtool/check.c
5524 ++++ b/tools/objtool/check.c
5525 +@@ -393,12 +393,12 @@ static int add_dead_ends(struct objtool_file *file)
5526 + else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
5527 + insn = find_last_insn(file, reloc->sym->sec);
5528 + if (!insn) {
5529 +- WARN("can't find unreachable insn at %s+0x%x",
5530 ++ WARN("can't find unreachable insn at %s+0x%lx",
5531 + reloc->sym->sec->name, reloc->addend);
5532 + return -1;
5533 + }
5534 + } else {
5535 +- WARN("can't find unreachable insn at %s+0x%x",
5536 ++ WARN("can't find unreachable insn at %s+0x%lx",
5537 + reloc->sym->sec->name, reloc->addend);
5538 + return -1;
5539 + }
5540 +@@ -428,12 +428,12 @@ reachable:
5541 + else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
5542 + insn = find_last_insn(file, reloc->sym->sec);
5543 + if (!insn) {
5544 +- WARN("can't find reachable insn at %s+0x%x",
5545 ++ WARN("can't find reachable insn at %s+0x%lx",
5546 + reloc->sym->sec->name, reloc->addend);
5547 + return -1;
5548 + }
5549 + } else {
5550 +- WARN("can't find reachable insn at %s+0x%x",
5551 ++ WARN("can't find reachable insn at %s+0x%lx",
5552 + reloc->sym->sec->name, reloc->addend);
5553 + return -1;
5554 + }
5555 +diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
5556 +index fee03b744a6ea..a3395467c3161 100644
5557 +--- a/tools/objtool/elf.c
5558 ++++ b/tools/objtool/elf.c
5559 +@@ -485,7 +485,7 @@ static struct section *elf_create_reloc_section(struct elf *elf,
5560 + int reltype);
5561 +
5562 + int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
5563 +- unsigned int type, struct symbol *sym, int addend)
5564 ++ unsigned int type, struct symbol *sym, long addend)
5565 + {
5566 + struct reloc *reloc;
5567 +
5568 +@@ -514,37 +514,180 @@ int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
5569 + return 0;
5570 + }
5571 +
5572 +-int elf_add_reloc_to_insn(struct elf *elf, struct section *sec,
5573 +- unsigned long offset, unsigned int type,
5574 +- struct section *insn_sec, unsigned long insn_off)
5575 ++/*
5576 ++ * Ensure that any reloc section containing references to @sym is marked
5577 ++ * changed such that it will get re-generated in elf_rebuild_reloc_sections()
5578 ++ * with the new symbol index.
5579 ++ */
5580 ++static void elf_dirty_reloc_sym(struct elf *elf, struct symbol *sym)
5581 ++{
5582 ++ struct section *sec;
5583 ++
5584 ++ list_for_each_entry(sec, &elf->sections, list) {
5585 ++ struct reloc *reloc;
5586 ++
5587 ++ if (sec->changed)
5588 ++ continue;
5589 ++
5590 ++ list_for_each_entry(reloc, &sec->reloc_list, list) {
5591 ++ if (reloc->sym == sym) {
5592 ++ sec->changed = true;
5593 ++ break;
5594 ++ }
5595 ++ }
5596 ++ }
5597 ++}
5598 ++
5599 ++/*
5600 ++ * Move the first global symbol, as per sh_info, into a new, higher symbol
5601 ++ * index. This frees up the shndx for a new local symbol.
5602 ++ */
5603 ++static int elf_move_global_symbol(struct elf *elf, struct section *symtab,
5604 ++ struct section *symtab_shndx)
5605 + {
5606 ++ Elf_Data *data, *shndx_data = NULL;
5607 ++ Elf32_Word first_non_local;
5608 + struct symbol *sym;
5609 +- int addend;
5610 ++ Elf_Scn *s;
5611 +
5612 +- if (insn_sec->sym) {
5613 +- sym = insn_sec->sym;
5614 +- addend = insn_off;
5615 ++ first_non_local = symtab->sh.sh_info;
5616 +
5617 +- } else {
5618 +- /*
5619 +- * The Clang assembler strips section symbols, so we have to
5620 +- * reference the function symbol instead:
5621 +- */
5622 +- sym = find_symbol_containing(insn_sec, insn_off);
5623 +- if (!sym) {
5624 +- /*
5625 +- * Hack alert. This happens when we need to reference
5626 +- * the NOP pad insn immediately after the function.
5627 +- */
5628 +- sym = find_symbol_containing(insn_sec, insn_off - 1);
5629 ++ sym = find_symbol_by_index(elf, first_non_local);
5630 ++ if (!sym) {
5631 ++ WARN("no non-local symbols !?");
5632 ++ return first_non_local;
5633 ++ }
5634 ++
5635 ++ s = elf_getscn(elf->elf, symtab->idx);
5636 ++ if (!s) {
5637 ++ WARN_ELF("elf_getscn");
5638 ++ return -1;
5639 ++ }
5640 ++
5641 ++ data = elf_newdata(s);
5642 ++ if (!data) {
5643 ++ WARN_ELF("elf_newdata");
5644 ++ return -1;
5645 ++ }
5646 ++
5647 ++ data->d_buf = &sym->sym;
5648 ++ data->d_size = sizeof(sym->sym);
5649 ++ data->d_align = 1;
5650 ++ data->d_type = ELF_T_SYM;
5651 ++
5652 ++ sym->idx = symtab->sh.sh_size / sizeof(sym->sym);
5653 ++ elf_dirty_reloc_sym(elf, sym);
5654 ++
5655 ++ symtab->sh.sh_info += 1;
5656 ++ symtab->sh.sh_size += data->d_size;
5657 ++ symtab->changed = true;
5658 ++
5659 ++ if (symtab_shndx) {
5660 ++ s = elf_getscn(elf->elf, symtab_shndx->idx);
5661 ++ if (!s) {
5662 ++ WARN_ELF("elf_getscn");
5663 ++ return -1;
5664 + }
5665 +
5666 +- if (!sym) {
5667 +- WARN("can't find symbol containing %s+0x%lx", insn_sec->name, insn_off);
5668 ++ shndx_data = elf_newdata(s);
5669 ++ if (!shndx_data) {
5670 ++ WARN_ELF("elf_newshndx_data");
5671 + return -1;
5672 + }
5673 +
5674 +- addend = insn_off - sym->offset;
5675 ++ shndx_data->d_buf = &sym->sec->idx;
5676 ++ shndx_data->d_size = sizeof(Elf32_Word);
5677 ++ shndx_data->d_align = 4;
5678 ++ shndx_data->d_type = ELF_T_WORD;
5679 ++
5680 ++ symtab_shndx->sh.sh_size += 4;
5681 ++ symtab_shndx->changed = true;
5682 ++ }
5683 ++
5684 ++ return first_non_local;
5685 ++}
5686 ++
5687 ++static struct symbol *
5688 ++elf_create_section_symbol(struct elf *elf, struct section *sec)
5689 ++{
5690 ++ struct section *symtab, *symtab_shndx;
5691 ++ Elf_Data *shndx_data = NULL;
5692 ++ struct symbol *sym;
5693 ++ Elf32_Word shndx;
5694 ++
5695 ++ symtab = find_section_by_name(elf, ".symtab");
5696 ++ if (symtab) {
5697 ++ symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
5698 ++ if (symtab_shndx)
5699 ++ shndx_data = symtab_shndx->data;
5700 ++ } else {
5701 ++ WARN("no .symtab");
5702 ++ return NULL;
5703 ++ }
5704 ++
5705 ++ sym = malloc(sizeof(*sym));
5706 ++ if (!sym) {
5707 ++ perror("malloc");
5708 ++ return NULL;
5709 ++ }
5710 ++ memset(sym, 0, sizeof(*sym));
5711 ++
5712 ++ sym->idx = elf_move_global_symbol(elf, symtab, symtab_shndx);
5713 ++ if (sym->idx < 0) {
5714 ++ WARN("elf_move_global_symbol");
5715 ++ return NULL;
5716 ++ }
5717 ++
5718 ++ sym->name = sec->name;
5719 ++ sym->sec = sec;
5720 ++
5721 ++ // st_name 0
5722 ++ sym->sym.st_info = GELF_ST_INFO(STB_LOCAL, STT_SECTION);
5723 ++ // st_other 0
5724 ++ // st_value 0
5725 ++ // st_size 0
5726 ++ shndx = sec->idx;
5727 ++ if (shndx >= SHN_UNDEF && shndx < SHN_LORESERVE) {
5728 ++ sym->sym.st_shndx = shndx;
5729 ++ if (!shndx_data)
5730 ++ shndx = 0;
5731 ++ } else {
5732 ++ sym->sym.st_shndx = SHN_XINDEX;
5733 ++ if (!shndx_data) {
5734 ++ WARN("no .symtab_shndx");
5735 ++ return NULL;
5736 ++ }
5737 ++ }
5738 ++
5739 ++ if (!gelf_update_symshndx(symtab->data, shndx_data, sym->idx, &sym->sym, shndx)) {
5740 ++ WARN_ELF("gelf_update_symshndx");
5741 ++ return NULL;
5742 ++ }
5743 ++
5744 ++ elf_add_symbol(elf, sym);
5745 ++
5746 ++ return sym;
5747 ++}
5748 ++
5749 ++int elf_add_reloc_to_insn(struct elf *elf, struct section *sec,
5750 ++ unsigned long offset, unsigned int type,
5751 ++ struct section *insn_sec, unsigned long insn_off)
5752 ++{
5753 ++ struct symbol *sym = insn_sec->sym;
5754 ++ int addend = insn_off;
5755 ++
5756 ++ if (!sym) {
5757 ++ /*
5758 ++ * Due to how weak functions work, we must use section based
5759 ++ * relocations. Symbol based relocations would result in the
5760 ++ * weak and non-weak function annotations being overlaid on the
5761 ++ * non-weak function after linking.
5762 ++ */
5763 ++ sym = elf_create_section_symbol(elf, insn_sec);
5764 ++ if (!sym)
5765 ++ return -1;
5766 ++
5767 ++ insn_sec->sym = sym;
5768 + }
5769 +
5770 + return elf_add_reloc(elf, sec, offset, type, sym, addend);
5771 +diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h
5772 +index 075d8291b8546..b4d01f8fd09b8 100644
5773 +--- a/tools/objtool/include/objtool/elf.h
5774 ++++ b/tools/objtool/include/objtool/elf.h
5775 +@@ -69,7 +69,7 @@ struct reloc {
5776 + struct symbol *sym;
5777 + unsigned long offset;
5778 + unsigned int type;
5779 +- int addend;
5780 ++ long addend;
5781 + int idx;
5782 + bool jump_table_start;
5783 + };
5784 +@@ -131,7 +131,7 @@ struct elf *elf_open_read(const char *name, int flags);
5785 + struct section *elf_create_section(struct elf *elf, const char *name, unsigned int sh_flags, size_t entsize, int nr);
5786 +
5787 + int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
5788 +- unsigned int type, struct symbol *sym, int addend);
5789 ++ unsigned int type, struct symbol *sym, long addend);
5790 + int elf_add_reloc_to_insn(struct elf *elf, struct section *sec,
5791 + unsigned long offset, unsigned int type,
5792 + struct section *insn_sec, unsigned long insn_off);
5793 +diff --git a/tools/perf/arch/arm64/util/Build b/tools/perf/arch/arm64/util/Build
5794 +index 9fcb4e68add93..78dfc282e5e2b 100644
5795 +--- a/tools/perf/arch/arm64/util/Build
5796 ++++ b/tools/perf/arch/arm64/util/Build
5797 +@@ -1,5 +1,4 @@
5798 + perf-y += header.o
5799 +-perf-y += machine.o
5800 + perf-y += perf_regs.o
5801 + perf-y += tsc.o
5802 + perf-y += pmu.o
5803 +diff --git a/tools/perf/arch/arm64/util/machine.c b/tools/perf/arch/arm64/util/machine.c
5804 +deleted file mode 100644
5805 +index 7e7714290a873..0000000000000
5806 +--- a/tools/perf/arch/arm64/util/machine.c
5807 ++++ /dev/null
5808 +@@ -1,28 +0,0 @@
5809 +-// SPDX-License-Identifier: GPL-2.0
5810 +-
5811 +-#include <inttypes.h>
5812 +-#include <stdio.h>
5813 +-#include <string.h>
5814 +-#include "debug.h"
5815 +-#include "symbol.h"
5816 +-
5817 +-/* On arm64, kernel text segment starts at high memory address,
5818 +- * for example 0xffff 0000 8xxx xxxx. Modules start at a low memory
5819 +- * address, like 0xffff 0000 00ax xxxx. When only small amount of
5820 +- * memory is used by modules, gap between end of module's text segment
5821 +- * and start of kernel text segment may reach 2G.
5822 +- * Therefore do not fill this gap and do not assign it to the kernel dso map.
5823 +- */
5824 +-
5825 +-#define SYMBOL_LIMIT (1 << 12) /* 4K */
5826 +-
5827 +-void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
5828 +-{
5829 +- if ((strchr(p->name, '[') && strchr(c->name, '[') == NULL) ||
5830 +- (strchr(p->name, '[') == NULL && strchr(c->name, '[')))
5831 +- /* Limit range of last symbol in module and kernel */
5832 +- p->end += SYMBOL_LIMIT;
5833 +- else
5834 +- p->end = c->start;
5835 +- pr_debug4("%s sym:%s end:%#" PRIx64 "\n", __func__, p->name, p->end);
5836 +-}
5837 +diff --git a/tools/perf/arch/powerpc/util/Build b/tools/perf/arch/powerpc/util/Build
5838 +index 8a79c4126e5b4..0115f31665684 100644
5839 +--- a/tools/perf/arch/powerpc/util/Build
5840 ++++ b/tools/perf/arch/powerpc/util/Build
5841 +@@ -1,5 +1,4 @@
5842 + perf-y += header.o
5843 +-perf-y += machine.o
5844 + perf-y += kvm-stat.o
5845 + perf-y += perf_regs.o
5846 + perf-y += mem-events.o
5847 +diff --git a/tools/perf/arch/powerpc/util/machine.c b/tools/perf/arch/powerpc/util/machine.c
5848 +deleted file mode 100644
5849 +index e652a1aa81322..0000000000000
5850 +--- a/tools/perf/arch/powerpc/util/machine.c
5851 ++++ /dev/null
5852 +@@ -1,25 +0,0 @@
5853 +-// SPDX-License-Identifier: GPL-2.0
5854 +-
5855 +-#include <inttypes.h>
5856 +-#include <stdio.h>
5857 +-#include <string.h>
5858 +-#include <internal/lib.h> // page_size
5859 +-#include "debug.h"
5860 +-#include "symbol.h"
5861 +-
5862 +-/* On powerpc kernel text segment start at memory addresses, 0xc000000000000000
5863 +- * whereas the modules are located at very high memory addresses,
5864 +- * for example 0xc00800000xxxxxxx. The gap between end of kernel text segment
5865 +- * and beginning of first module's text segment is very high.
5866 +- * Therefore do not fill this gap and do not assign it to the kernel dso map.
5867 +- */
5868 +-
5869 +-void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
5870 +-{
5871 +- if (strchr(p->name, '[') == NULL && strchr(c->name, '['))
5872 +- /* Limit the range of last kernel symbol */
5873 +- p->end += page_size;
5874 +- else
5875 +- p->end = c->start;
5876 +- pr_debug4("%s sym:%s end:%#" PRIx64 "\n", __func__, p->name, p->end);
5877 +-}
5878 +diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c
5879 +index 7644a4f6d4a40..98bc3f39d5f35 100644
5880 +--- a/tools/perf/arch/s390/util/machine.c
5881 ++++ b/tools/perf/arch/s390/util/machine.c
5882 +@@ -35,19 +35,3 @@ int arch__fix_module_text_start(u64 *start, u64 *size, const char *name)
5883 +
5884 + return 0;
5885 + }
5886 +-
5887 +-/* On s390 kernel text segment start is located at very low memory addresses,
5888 +- * for example 0x10000. Modules are located at very high memory addresses,
5889 +- * for example 0x3ff xxxx xxxx. The gap between end of kernel text segment
5890 +- * and beginning of first module's text segment is very big.
5891 +- * Therefore do not fill this gap and do not assign it to the kernel dso map.
5892 +- */
5893 +-void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
5894 +-{
5895 +- if (strchr(p->name, '[') == NULL && strchr(c->name, '['))
5896 +- /* Last kernel symbol mapped to end of page */
5897 +- p->end = roundup(p->end, page_size);
5898 +- else
5899 +- p->end = c->start;
5900 +- pr_debug4("%s sym:%s end:%#" PRIx64 "\n", __func__, p->name, p->end);
5901 +-}
5902 +diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
5903 +index 7054f23150e1b..235549bb28b94 100644
5904 +--- a/tools/perf/util/arm-spe.c
5905 ++++ b/tools/perf/util/arm-spe.c
5906 +@@ -927,7 +927,8 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
5907 + attr.type = PERF_TYPE_HARDWARE;
5908 + attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
5909 + attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
5910 +- PERF_SAMPLE_PERIOD | PERF_SAMPLE_DATA_SRC;
5911 ++ PERF_SAMPLE_PERIOD | PERF_SAMPLE_DATA_SRC |
5912 ++ PERF_SAMPLE_ADDR;
5913 + if (spe->timeless_decoding)
5914 + attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
5915 + else
5916 +diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
5917 +index 31cd59a2b66e6..ecd377938eea8 100644
5918 +--- a/tools/perf/util/symbol-elf.c
5919 ++++ b/tools/perf/util/symbol-elf.c
5920 +@@ -1290,7 +1290,7 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
5921 + * For misannotated, zeroed, ASM function sizes.
5922 + */
5923 + if (nr > 0) {
5924 +- symbols__fixup_end(&dso->symbols);
5925 ++ symbols__fixup_end(&dso->symbols, false);
5926 + symbols__fixup_duplicate(&dso->symbols);
5927 + if (kmap) {
5928 + /*
5929 +diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
5930 +index 61379ed2b75c5..b1e5fd99e38af 100644
5931 +--- a/tools/perf/util/symbol.c
5932 ++++ b/tools/perf/util/symbol.c
5933 +@@ -101,11 +101,6 @@ static int prefix_underscores_count(const char *str)
5934 + return tail - str;
5935 + }
5936 +
5937 +-void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
5938 +-{
5939 +- p->end = c->start;
5940 +-}
5941 +-
5942 + const char * __weak arch__normalize_symbol_name(const char *name)
5943 + {
5944 + return name;
5945 +@@ -217,7 +212,8 @@ again:
5946 + }
5947 + }
5948 +
5949 +-void symbols__fixup_end(struct rb_root_cached *symbols)
5950 ++/* Update zero-sized symbols using the address of the next symbol */
5951 ++void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
5952 + {
5953 + struct rb_node *nd, *prevnd = rb_first_cached(symbols);
5954 + struct symbol *curr, *prev;
5955 +@@ -231,8 +227,29 @@ void symbols__fixup_end(struct rb_root_cached *symbols)
5956 + prev = curr;
5957 + curr = rb_entry(nd, struct symbol, rb_node);
5958 +
5959 +- if (prev->end == prev->start || prev->end != curr->start)
5960 +- arch__symbols__fixup_end(prev, curr);
5961 ++ /*
5962 ++ * On some architectures the kernel text segment starts at a
5963 ++ * low memory address, while modules are located at high
5964 ++ * memory addresses (or vice versa). The gap between end of
5965 ++ * kernel text segment and beginning of first module's text
5966 ++ * segment is very big. Therefore do not fill this gap and do
5967 ++ * not assign it to the kernel dso map (kallsyms).
5968 ++ *
5969 ++ * In kallsyms, module symbols are identified by the '[' character,
5970 ++ * as in:
5971 ++ * ffffffffc1937000 T hdmi_driver_init [snd_hda_codec_hdmi]
5972 ++ */
5973 ++ if (prev->end == prev->start) {
5974 ++ /* Last kernel/module symbol mapped to end of page */
5975 ++ if (is_kallsyms && (!strchr(prev->name, '[') !=
5976 ++ !strchr(curr->name, '[')))
5977 ++ prev->end = roundup(prev->end + 4096, 4096);
5978 ++ else
5979 ++ prev->end = curr->start;
5980 ++
5981 ++ pr_debug4("%s sym:%s end:%#" PRIx64 "\n",
5982 ++ __func__, prev->name, prev->end);
5983 ++ }
5984 + }
5985 +
5986 + /* Last entry */
5987 +@@ -1456,7 +1473,7 @@ int __dso__load_kallsyms(struct dso *dso, const char *filename,
5988 + if (kallsyms__delta(kmap, filename, &delta))
5989 + return -1;
5990 +
5991 +- symbols__fixup_end(&dso->symbols);
5992 ++ symbols__fixup_end(&dso->symbols, true);
5993 + symbols__fixup_duplicate(&dso->symbols);
5994 +
5995 + if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
5996 +@@ -1648,7 +1665,7 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
5997 + #undef bfd_asymbol_section
5998 + #endif
5999 +
6000 +- symbols__fixup_end(&dso->symbols);
6001 ++ symbols__fixup_end(&dso->symbols, false);
6002 + symbols__fixup_duplicate(&dso->symbols);
6003 + dso->adjust_symbols = 1;
6004 +
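
The consolidated symbols__fixup_end() above gives a zero-sized symbol an end address: normally the next symbol's start, but across a kernel/module boundary in kallsyms (exactly one of the two names contains '[') it only extends to the next 4 KiB page instead of spanning the whole gap. A stand-alone sketch of that decision, assuming plain C strings for the symbol names and a fixed 4 KiB page size:

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  static uint64_t roundup_page(uint64_t v)
  {
          return (v + 4095) & ~4095ULL;
  }

  /* Compute the end of a zero-sized symbol from the start of the next one,
   * mirroring the patched symbols__fixup_end(). */
  static uint64_t fixup_end(uint64_t prev_start, const char *prev_name,
                            uint64_t curr_start, const char *curr_name,
                            bool is_kallsyms)
  {
          /* '[' marks module symbols in kallsyms, e.g.
           * "hdmi_driver_init [snd_hda_codec_hdmi]" */
          bool boundary = (strchr(prev_name, '[') != NULL) !=
                          (strchr(curr_name, '[') != NULL);

          if (is_kallsyms && boundary)
                  return roundup_page(prev_start + 4096); /* end of page, not the gap */
          return curr_start;
  }

  int main(void)
  {
          /* Last kernel symbol followed by the first module symbol. */
          printf("%#llx\n", (unsigned long long)
                 fixup_end(0xffffffff81ffff00ULL, "kernel_sym",
                           0xffffffffc1937000ULL,
                           "hdmi_driver_init [snd_hda_codec_hdmi]", true));
          return 0;
  }
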
6005 +diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
6006 +index 954d6a049ee23..28721d761d91e 100644
6007 +--- a/tools/perf/util/symbol.h
6008 ++++ b/tools/perf/util/symbol.h
6009 +@@ -192,7 +192,7 @@ void __symbols__insert(struct rb_root_cached *symbols, struct symbol *sym,
6010 + bool kernel);
6011 + void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym);
6012 + void symbols__fixup_duplicate(struct rb_root_cached *symbols);
6013 +-void symbols__fixup_end(struct rb_root_cached *symbols);
6014 ++void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms);
6015 + void maps__fixup_end(struct maps *maps);
6016 +
6017 + typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
6018 +@@ -230,7 +230,6 @@ const char *arch__normalize_symbol_name(const char *name);
6019 + #define SYMBOL_A 0
6020 + #define SYMBOL_B 1
6021 +
6022 +-void arch__symbols__fixup_end(struct symbol *p, struct symbol *c);
6023 + int arch__compare_symbol_names(const char *namea, const char *nameb);
6024 + int arch__compare_symbol_names_n(const char *namea, const char *nameb,
6025 + unsigned int n);
6026 +diff --git a/tools/testing/selftests/vm/mremap_test.c b/tools/testing/selftests/vm/mremap_test.c
6027 +index 0624d1bd71b53..e3ce33a9954ea 100644
6028 +--- a/tools/testing/selftests/vm/mremap_test.c
6029 ++++ b/tools/testing/selftests/vm/mremap_test.c
6030 +@@ -6,9 +6,11 @@
6031 +
6032 + #include <errno.h>
6033 + #include <stdlib.h>
6034 ++#include <stdio.h>
6035 + #include <string.h>
6036 + #include <sys/mman.h>
6037 + #include <time.h>
6038 ++#include <stdbool.h>
6039 +
6040 + #include "../kselftest.h"
6041 +
6042 +@@ -64,6 +66,59 @@ enum {
6043 + .expect_failure = should_fail \
6044 + }
6045 +
6046 ++/*
6047 ++ * Returns false if the requested remap region overlaps with an
6048 ++ * existing mapping (e.g. text, stack); otherwise returns true.
6049 ++ */
6050 ++static bool is_remap_region_valid(void *addr, unsigned long long size)
6051 ++{
6052 ++ void *remap_addr = NULL;
6053 ++ bool ret = true;
6054 ++
6055 ++ /* Use MAP_FIXED_NOREPLACE flag to ensure region is not mapped */
6056 ++ remap_addr = mmap(addr, size, PROT_READ | PROT_WRITE,
6057 ++ MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
6058 ++ -1, 0);
6059 ++
6060 ++ if (remap_addr == MAP_FAILED) {
6061 ++ if (errno == EEXIST)
6062 ++ ret = false;
6063 ++ } else {
6064 ++ munmap(remap_addr, size);
6065 ++ }
6066 ++
6067 ++ return ret;
6068 ++}
6069 ++
6070 ++/* Returns mmap_min_addr sysctl tunable from procfs */
6071 ++static unsigned long long get_mmap_min_addr(void)
6072 ++{
6073 ++ FILE *fp;
6074 ++ int n_matched;
6075 ++ static unsigned long long addr;
6076 ++
6077 ++ if (addr)
6078 ++ return addr;
6079 ++
6080 ++ fp = fopen("/proc/sys/vm/mmap_min_addr", "r");
6081 ++ if (fp == NULL) {
6082 ++ ksft_print_msg("Failed to open /proc/sys/vm/mmap_min_addr: %s\n",
6083 ++ strerror(errno));
6084 ++ exit(KSFT_SKIP);
6085 ++ }
6086 ++
6087 ++ n_matched = fscanf(fp, "%llu", &addr);
6088 ++ if (n_matched != 1) {
6089 ++ ksft_print_msg("Failed to read /proc/sys/vm/mmap_min_addr: %s\n",
6090 ++ strerror(errno));
6091 ++ fclose(fp);
6092 ++ exit(KSFT_SKIP);
6093 ++ }
6094 ++
6095 ++ fclose(fp);
6096 ++ return addr;
6097 ++}
6098 ++
6099 + /*
6100 + * Returns the start address of the mapping on success, else returns
6101 + * NULL on failure.
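The helpers added above are a probe-then-release pattern: mmap() with MAP_FIXED_NOREPLACE (Linux 4.17 and newer; older C libraries may need <linux/mman.h> for the definition) refuses to replace an existing mapping and fails with EEXIST instead, so a successful probe proves the range was free and can be unmapped again, while vm.mmap_min_addr is the lowest hint an unprivileged process is allowed to map. A small self-contained sketch of the same probe, with illustrative names and a fixed hint:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/mman.h>

    /* Illustrative only: true if [addr, addr + size) is currently unmapped. */
    static bool range_is_free(void *addr, size_t size)
    {
            void *p = mmap(addr, size, PROT_READ | PROT_WRITE,
                           MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

            if (p == MAP_FAILED)
                    return errno != EEXIST;  /* EEXIST means something already lives there */

            munmap(p, size);                 /* probe succeeded: release it and report free */
            return true;
    }

    int main(void)
    {
            void *hint = (void *)(1UL << 30); /* arbitrary 1 GiB hint, purely illustrative */

            printf("%p is %s\n", hint, range_is_free(hint, 4096) ? "free" : "taken");
            return 0;
    }
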
6102 +@@ -72,11 +127,18 @@ static void *get_source_mapping(struct config c)
6103 + {
6104 + unsigned long long addr = 0ULL;
6105 + void *src_addr = NULL;
6106 ++ unsigned long long mmap_min_addr;
6107 ++
6108 ++ mmap_min_addr = get_mmap_min_addr();
6109 ++
6110 + retry:
6111 + addr += c.src_alignment;
6112 ++ if (addr < mmap_min_addr)
6113 ++ goto retry;
6114 ++
6115 + src_addr = mmap((void *) addr, c.region_size, PROT_READ | PROT_WRITE,
6116 +- MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
6117 +- -1, 0);
6118 ++ MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
6119 ++ -1, 0);
6120 + if (src_addr == MAP_FAILED) {
6121 + if (errno == EPERM || errno == EEXIST)
6122 + goto retry;
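The get_source_mapping() hunk above steps the hint address in src_alignment increments and now skips every hint below vm.mmap_min_addr up front, since the kernel rejects those with EPERM for unprivileged callers and each rejection previously cost a wasted mmap() retry. A worked example of that stepping under assumed values (4 KiB alignment, a 64 KiB mmap_min_addr as found on many distributions):

    #include <stdio.h>

    int main(void)
    {
            /* Assumed values, purely illustrative */
            unsigned long long addr = 0, align = 0x1000, min_addr = 0x10000;

            do {
                    addr += align;     /* next candidate hint */
            } while (addr < min_addr); /* skip hints mmap() would refuse with EPERM */

            printf("first hint actually passed to mmap(): %#llx\n", addr);
            return 0;
    }

Here the loop settles on 0x10000, the first alignment-sized multiple the kernel will even consider.
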
6123 +@@ -91,8 +153,10 @@ retry:
6124 + * alignment in the tests.
6125 + */
6126 + if (((unsigned long long) src_addr & (c.src_alignment - 1)) ||
6127 +- !((unsigned long long) src_addr & c.src_alignment))
6128 ++ !((unsigned long long) src_addr & c.src_alignment)) {
6129 ++ munmap(src_addr, c.region_size);
6130 + goto retry;
6131 ++ }
6132 +
6133 + if (!src_addr)
6134 + goto error;
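The hunk above tightens the retry path in two ways: the two-part test keeps only source addresses whose alignment is exactly c.src_alignment (low bits clear, but the alignment bit itself set, so the mapping is not accidentally aligned to a larger power of two), and the added munmap() releases each rejected mapping instead of leaking it across retries. A worked example of the test with illustrative addresses and an 8 KiB alignment request:

    #include <stdio.h>

    int main(void)
    {
            /* Illustrative addresses against an 8 KiB (0x2000) alignment request */
            unsigned long long align = 0x2000;
            unsigned long long addrs[] = { 0x1a000, 0x1b000, 0x20000 };

            for (int i = 0; i < 3; i++) {
                    unsigned long long addr = addrs[i];
                    int reject = (addr & (align - 1)) ||  /* not 8 KiB aligned */
                                 !(addr & align);         /* aligned to 16 KiB or more */

                    printf("%#llx -> %s\n", addr, reject ? "unmap and retry" : "use");
            }
            return 0;
    }

0x1a000 is kept, 0x1b000 fails the low-bits check, and 0x20000 is rejected because it is also 16 KiB aligned.
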
6135 +@@ -141,9 +205,20 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
6136 + if (!((unsigned long long) addr & c.dest_alignment))
6137 + addr = (void *) ((unsigned long long) addr | c.dest_alignment);
6138 +
6139 ++ /* Don't destroy existing mappings unless expected to overlap */
6140 ++ while (!is_remap_region_valid(addr, c.region_size) && !c.overlapping) {
6141 ++ /* Check for unsigned overflow */
6142 ++ if (addr + c.dest_alignment < addr) {
6143 ++ ksft_print_msg("Couldn't find a valid region to remap to\n");
6144 ++ ret = -1;
6145 ++ goto out;
6146 ++ }
6147 ++ addr += c.dest_alignment;
6148 ++ }
6149 ++
6150 + clock_gettime(CLOCK_MONOTONIC, &t_start);
6151 + dest_addr = mremap(src_addr, c.region_size, c.region_size,
6152 +- MREMAP_MAYMOVE|MREMAP_FIXED, (char *) addr);
6153 ++ MREMAP_MAYMOVE|MREMAP_FIXED, (char *) addr);
6154 + clock_gettime(CLOCK_MONOTONIC, &t_end);
6155 +
6156 + if (dest_addr == MAP_FAILED) {