From: Mike Pagano <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.6 commit in: /
Date: Wed, 03 Jun 2020 11:44:58
Message-Id: 1591184681.17359e12cf00380c018b7bb205be730390d8db4c.mpagano@gentoo
commit: 17359e12cf00380c018b7bb205be730390d8db4c
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jun 3 11:44:41 2020 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jun 3 11:44:41 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=17359e12

Linux patch 5.6.16

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1015_linux-5.6.16.patch | 7460 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 7464 insertions(+)

diff --git a/0000_README b/0000_README
index 1c0ea04..eb1d2c7 100644
--- a/0000_README
+++ b/0000_README
@@ -103,6 +103,10 @@ Patch: 1014_linux-5.6.15.patch
From: http://www.kernel.org
Desc: Linux 5.6.15

+Patch: 1015_linux-5.6.16.patch
+From: http://www.kernel.org
+Desc: Linux 5.6.16
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1015_linux-5.6.16.patch b/1015_linux-5.6.16.patch
new file mode 100644
index 0000000..d0d4b81
--- /dev/null
+++ b/1015_linux-5.6.16.patch
@@ -0,0 +1,7460 @@
+diff --git a/Makefile b/Makefile
+index 3eca0c523098..1befb37dcc58 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 6
+-SUBLEVEL = 15
++SUBLEVEL = 16
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm/boot/compressed/vmlinux.lds.S b/arch/arm/boot/compressed/vmlinux.lds.S
+index fc7ed03d8b93..51b078604978 100644
+--- a/arch/arm/boot/compressed/vmlinux.lds.S
++++ b/arch/arm/boot/compressed/vmlinux.lds.S
+@@ -43,7 +43,7 @@ SECTIONS
+ }
+ .table : ALIGN(4) {
+ _table_start = .;
+- LONG(ZIMAGE_MAGIC(2))
++ LONG(ZIMAGE_MAGIC(4))
+ LONG(ZIMAGE_MAGIC(0x5a534c4b))
+ LONG(ZIMAGE_MAGIC(__piggy_size_addr - _start))
+ LONG(ZIMAGE_MAGIC(_kernel_bss_size))
+diff --git a/arch/arm/boot/dts/bcm-hr2.dtsi b/arch/arm/boot/dts/bcm-hr2.dtsi
+index 6142c672811e..5e5f5ca3c86f 100644
+--- a/arch/arm/boot/dts/bcm-hr2.dtsi
++++ b/arch/arm/boot/dts/bcm-hr2.dtsi
+@@ -75,7 +75,7 @@
+ timer@20200 {
+ compatible = "arm,cortex-a9-global-timer";
+ reg = <0x20200 0x100>;
+- interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&periph_clk>;
+ };
+
+@@ -83,7 +83,7 @@
+ compatible = "arm,cortex-a9-twd-timer";
+ reg = <0x20600 0x20>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) |
+- IRQ_TYPE_LEVEL_HIGH)>;
++ IRQ_TYPE_EDGE_RISING)>;
+ clocks = <&periph_clk>;
+ };
+
+@@ -91,7 +91,7 @@
+ compatible = "arm,cortex-a9-twd-wdt";
+ reg = <0x20620 0x20>;
+ interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) |
+- IRQ_TYPE_LEVEL_HIGH)>;
++ IRQ_TYPE_EDGE_RISING)>;
+ clocks = <&periph_clk>;
+ };
+
+diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
+index 4c3f606e5b8d..f65448c01e31 100644
+--- a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
++++ b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
+@@ -24,7 +24,7 @@
+
+ leds {
+ act {
+- gpios = <&gpio 47 GPIO_ACTIVE_HIGH>;
++ gpios = <&gpio 47 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/imx6q-b450v3.dts b/arch/arm/boot/dts/imx6q-b450v3.dts
+index 95b8f2d71821..fb0980190aa0 100644
+--- a/arch/arm/boot/dts/imx6q-b450v3.dts
++++ b/arch/arm/boot/dts/imx6q-b450v3.dts
+@@ -65,13 +65,6 @@
+ };
+ };
+
+-&clks {
+- assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+- <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
+- assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
+- <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
+-};
+-
+ &ldb {
+ status = "okay";
+
+diff --git a/arch/arm/boot/dts/imx6q-b650v3.dts b/arch/arm/boot/dts/imx6q-b650v3.dts
+index 611cb7ae7e55..8f762d9c5ae9 100644
+--- a/arch/arm/boot/dts/imx6q-b650v3.dts
++++ b/arch/arm/boot/dts/imx6q-b650v3.dts
+@@ -65,13 +65,6 @@
+ };
+ };
+
+-&clks {
+- assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+- <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
+- assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
+- <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
+-};
+-
+ &ldb {
+ status = "okay";
+
+diff --git a/arch/arm/boot/dts/imx6q-b850v3.dts b/arch/arm/boot/dts/imx6q-b850v3.dts
+index e4cb118f88c6..1ea64ecf4291 100644
+--- a/arch/arm/boot/dts/imx6q-b850v3.dts
++++ b/arch/arm/boot/dts/imx6q-b850v3.dts
+@@ -53,17 +53,6 @@
+ };
+ };
+
+-&clks {
+- assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+- <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
+- <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>,
+- <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>;
+- assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
+- <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
+- <&clks IMX6QDL_CLK_PLL2_PFD2_396M>,
+- <&clks IMX6QDL_CLK_PLL2_PFD2_396M>;
+-};
+-
+ &ldb {
+ fsl,dual-channel;
+ status = "okay";
+diff --git a/arch/arm/boot/dts/imx6q-bx50v3.dtsi b/arch/arm/boot/dts/imx6q-bx50v3.dtsi
+index fa27dcdf06f1..1938b04199c4 100644
+--- a/arch/arm/boot/dts/imx6q-bx50v3.dtsi
++++ b/arch/arm/boot/dts/imx6q-bx50v3.dtsi
+@@ -377,3 +377,18 @@
+ #interrupt-cells = <1>;
+ };
+ };
++
++&clks {
++ assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
++ <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
++ <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>,
++ <&clks IMX6QDL_CLK_IPU1_DI1_PRE_SEL>,
++ <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>,
++ <&clks IMX6QDL_CLK_IPU2_DI1_PRE_SEL>;
++ assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
++ <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
++ <&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
++ <&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
++ <&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
++ <&clks IMX6QDL_CLK_PLL2_PFD0_352M>;
++};
+diff --git a/arch/arm/boot/dts/mmp3-dell-ariel.dts b/arch/arm/boot/dts/mmp3-dell-ariel.dts
+index 15449c72c042..b0ec14c42164 100644
+--- a/arch/arm/boot/dts/mmp3-dell-ariel.dts
++++ b/arch/arm/boot/dts/mmp3-dell-ariel.dts
+@@ -98,19 +98,19 @@
+ status = "okay";
+ };
+
+-&ssp3 {
++&ssp1 {
+ status = "okay";
+- cs-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
++ cs-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
+
+ firmware-flash@0 {
+- compatible = "st,m25p80", "jedec,spi-nor";
++ compatible = "winbond,w25q32", "jedec,spi-nor";
+ reg = <0>;
+- spi-max-frequency = <40000000>;
++ spi-max-frequency = <104000000>;
+ m25p,fast-read;
+ };
+ };
+
+-&ssp4 {
+- cs-gpios = <&gpio 56 GPIO_ACTIVE_HIGH>;
++&ssp2 {
++ cs-gpios = <&gpio 56 GPIO_ACTIVE_LOW>;
+ status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/mmp3.dtsi b/arch/arm/boot/dts/mmp3.dtsi
+index 59a108e49b41..1e25bf998ab5 100644
+--- a/arch/arm/boot/dts/mmp3.dtsi
++++ b/arch/arm/boot/dts/mmp3.dtsi
+@@ -202,8 +202,7 @@
+ };
+
+ hsic_phy0: hsic-phy@f0001800 {
+- compatible = "marvell,mmp3-hsic-phy",
+- "usb-nop-xceiv";
++ compatible = "marvell,mmp3-hsic-phy";
+ reg = <0xf0001800 0x40>;
+ #phy-cells = <0>;
+ status = "disabled";
+@@ -224,8 +223,7 @@
+ };
+
+ hsic_phy1: hsic-phy@f0002800 {
+- compatible = "marvell,mmp3-hsic-phy",
+- "usb-nop-xceiv";
++ compatible = "marvell,mmp3-hsic-phy";
+ reg = <0xf0002800 0x40>;
+ #phy-cells = <0>;
+ status = "disabled";
+@@ -531,7 +529,7 @@
+ };
+
+ soc_clocks: clocks@d4050000 {
+- compatible = "marvell,mmp2-clock";
++ compatible = "marvell,mmp3-clock";
+ reg = <0xd4050000 0x1000>,
+ <0xd4282800 0x400>,
+ <0xd4015000 0x1000>;
+diff --git a/arch/arm/boot/dts/motorola-mapphone-common.dtsi b/arch/arm/boot/dts/motorola-mapphone-common.dtsi
+index 9067e0ef4240..06fbffa81636 100644
+--- a/arch/arm/boot/dts/motorola-mapphone-common.dtsi
++++ b/arch/arm/boot/dts/motorola-mapphone-common.dtsi
+@@ -367,6 +367,8 @@
+ };
+
+ &mmc3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&mmc3_pins>;
+ vmmc-supply = <&wl12xx_vmmc>;
+ /* uart2_tx.sdmmc3_dat1 pad as wakeirq */
+ interrupts-extended = <&wakeupgen GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH
+@@ -472,6 +474,37 @@
+ >;
+ };
+
++ /*
++ * Android uses PIN_OFF_INPUT_PULLDOWN | PIN_INPUT_PULLUP | MUX_MODE3
++ * for gpio_100, but the internal pull makes wlan flakey on some
++ * devices. Off mode value should be tested if we have off mode working
++ * later on.
++ */
++ mmc3_pins: pinmux_mmc3_pins {
++ pinctrl-single,pins = <
++ /* 0x4a10008e gpmc_wait2.gpio_100 d23 */
++ OMAP4_IOPAD(0x08e, PIN_INPUT | MUX_MODE3)
++
++ /* 0x4a100102 abe_mcbsp1_dx.sdmmc3_dat2 ab25 */
++ OMAP4_IOPAD(0x102, PIN_INPUT_PULLUP | MUX_MODE1)
++
++ /* 0x4a100104 abe_mcbsp1_fsx.sdmmc3_dat3 ac27 */
++ OMAP4_IOPAD(0x104, PIN_INPUT_PULLUP | MUX_MODE1)
++
++ /* 0x4a100118 uart2_cts.sdmmc3_clk ab26 */
++ OMAP4_IOPAD(0x118, PIN_INPUT | MUX_MODE1)
++
++ /* 0x4a10011a uart2_rts.sdmmc3_cmd ab27 */
++ OMAP4_IOPAD(0x11a, PIN_INPUT_PULLUP | MUX_MODE1)
++
++ /* 0x4a10011c uart2_rx.sdmmc3_dat0 aa25 */
++ OMAP4_IOPAD(0x11c, PIN_INPUT_PULLUP | MUX_MODE1)
++
++ /* 0x4a10011e uart2_tx.sdmmc3_dat1 aa26 */
++ OMAP4_IOPAD(0x11e, PIN_INPUT_PULLUP | MUX_MODE1)
++ >;
++ };
++
+ /* gpmc_ncs0.gpio_50 */
+ poweroff_gpio: pinmux_poweroff_pins {
+ pinctrl-single,pins = <
+@@ -690,14 +723,18 @@
+ };
+
+ /*
+- * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for
+- * uart1 wakeirq.
++ * The uart1 port is wired to mdm6600 with rts and cts. The modem uses gpio_149
++ * for wake-up events for both the USB PHY and the UART. We can use gpio_149
++ * pad as the shared wakeirq for the UART rather than the RX or CTS pad as we
++ * have gpio_149 trigger before the UART transfer starts.
+ */
+ &uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins>;
+ interrupts-extended = <&wakeupgen GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH
+- &omap4_pmx_core 0xfc>;
++ &omap4_pmx_core 0x110>;
++ uart-has-rtscts;
++ current-speed = <115200>;
+ };
+
+ &uart3 {
+diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi
+index cf36e25195b4..8c4b8f56c9e0 100644
+--- a/arch/arm/boot/dts/rk3036.dtsi
++++ b/arch/arm/boot/dts/rk3036.dtsi
+@@ -128,7 +128,7 @@
+ assigned-clocks = <&cru SCLK_GPU>;
+ assigned-clock-rates = <100000000>;
+ clocks = <&cru SCLK_GPU>, <&cru SCLK_GPU>;
+- clock-names = "core", "bus";
++ clock-names = "bus", "core";
+ resets = <&cru SRST_GPU>;
+ status = "disabled";
+ };
+diff --git a/arch/arm/boot/dts/rk3228-evb.dts b/arch/arm/boot/dts/rk3228-evb.dts
+index 5670b33fd1bd..aed879db6c15 100644
+--- a/arch/arm/boot/dts/rk3228-evb.dts
++++ b/arch/arm/boot/dts/rk3228-evb.dts
+@@ -46,7 +46,7 @@
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+- phy: phy@0 {
++ phy: ethernet-phy@0 {
+ compatible = "ethernet-phy-id1234.d400", "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ clocks = <&cru SCLK_MAC_PHY>;
+diff --git a/arch/arm/boot/dts/rk3229-xms6.dts b/arch/arm/boot/dts/rk3229-xms6.dts
+index 679fc2b00e5a..933ef69da32a 100644
+--- a/arch/arm/boot/dts/rk3229-xms6.dts
++++ b/arch/arm/boot/dts/rk3229-xms6.dts
+@@ -150,7 +150,7 @@
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+- phy: phy@0 {
++ phy: ethernet-phy@0 {
+ compatible = "ethernet-phy-id1234.d400",
+ "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi
+index 4e90efdc9630..a83f65486ad4 100644
+--- a/arch/arm/boot/dts/rk322x.dtsi
++++ b/arch/arm/boot/dts/rk322x.dtsi
+@@ -561,7 +561,7 @@
+ "pp1",
+ "ppmmu1";
+ clocks = <&cru ACLK_GPU>, <&cru ACLK_GPU>;
+- clock-names = "core", "bus";
++ clock-names = "bus", "core";
+ resets = <&cru SRST_GPU_A>;
+ status = "disabled";
+ };
+@@ -1033,7 +1033,7 @@
+ };
+ };
+
+- spi-0 {
++ spi0 {
+ spi0_clk: spi0-clk {
+ rockchip,pins = <0 RK_PB1 2 &pcfg_pull_up>;
+ };
+@@ -1051,7 +1051,7 @@
+ };
+ };
+
+- spi-1 {
++ spi1 {
+ spi1_clk: spi1-clk {
+ rockchip,pins = <0 RK_PC7 2 &pcfg_pull_up>;
+ };
+diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi
+index 241f43e29c77..bb5ff10b9110 100644
+--- a/arch/arm/boot/dts/rk3xxx.dtsi
++++ b/arch/arm/boot/dts/rk3xxx.dtsi
+@@ -84,7 +84,7 @@
+ compatible = "arm,mali-400";
+ reg = <0x10090000 0x10000>;
+ clocks = <&cru ACLK_GPU>, <&cru ACLK_GPU>;
+- clock-names = "core", "bus";
++ clock-names = "bus", "core";
+ assigned-clocks = <&cru ACLK_GPU>;
+ assigned-clock-rates = <100000000>;
+ resets = <&cru SRST_GPU>;
+diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
+index 99929122dad7..3546d294d55f 100644
+--- a/arch/arm/include/asm/assembler.h
++++ b/arch/arm/include/asm/assembler.h
+@@ -18,11 +18,11 @@
+ #endif
+
+ #include <asm/ptrace.h>
+-#include <asm/domain.h>
+ #include <asm/opcodes-virt.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/page.h>
+ #include <asm/thread_info.h>
++#include <asm/uaccess-asm.h>
+
+ #define IOMEM(x) (x)
+
+@@ -446,79 +446,6 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
+ .size \name , . - \name
+ .endm
+
+- .macro csdb
+-#ifdef CONFIG_THUMB2_KERNEL
+- .inst.w 0xf3af8014
+-#else
+- .inst 0xe320f014
+-#endif
+- .endm
+-
+- .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+-#ifndef CONFIG_CPU_USE_DOMAINS
+- adds \tmp, \addr, #\size - 1
+- sbcscc \tmp, \tmp, \limit
+- bcs \bad
+-#ifdef CONFIG_CPU_SPECTRE
+- movcs \addr, #0
+- csdb
+-#endif
+-#endif
+- .endm
+-
+- .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
+-#ifdef CONFIG_CPU_SPECTRE
+- sub \tmp, \limit, #1
+- subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
+- addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
+- subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) }
+- movlo \addr, #0 @ if (tmp < 0) addr = NULL
+- csdb
+-#endif
+- .endm
+-
+- .macro uaccess_disable, tmp, isb=1
+-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+- /*
+- * Whenever we re-enter userspace, the domains should always be
+- * set appropriately.
+- */
+- mov \tmp, #DACR_UACCESS_DISABLE
+- mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
+- .if \isb
+- instr_sync
+- .endif
+-#endif
+- .endm
+-
+- .macro uaccess_enable, tmp, isb=1
+-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+- /*
+- * Whenever we re-enter userspace, the domains should always be
+- * set appropriately.
+- */
+- mov \tmp, #DACR_UACCESS_ENABLE
+- mcr p15, 0, \tmp, c3, c0, 0
+- .if \isb
+- instr_sync
+- .endif
+-#endif
+- .endm
+-
+- .macro uaccess_save, tmp
+-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+- mrc p15, 0, \tmp, c3, c0, 0
+- str \tmp, [sp, #SVC_DACR]
+-#endif
+- .endm
+-
+- .macro uaccess_restore
+-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+- ldr r0, [sp, #SVC_DACR]
+- mcr p15, 0, r0, c3, c0, 0
+-#endif
+- .endm
+-
+ .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
+ .macro ret\c, reg
+#if __LINUX_ARM_ARCH__ < 6
+diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h
+new file mode 100644
+index 000000000000..907571fd05c6
+--- /dev/null
++++ b/arch/arm/include/asm/uaccess-asm.h
+@@ -0,0 +1,117 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++#ifndef __ASM_UACCESS_ASM_H__
++#define __ASM_UACCESS_ASM_H__
++
++#include <asm/asm-offsets.h>
++#include <asm/domain.h>
++#include <asm/memory.h>
++#include <asm/thread_info.h>
++
++ .macro csdb
++#ifdef CONFIG_THUMB2_KERNEL
++ .inst.w 0xf3af8014
++#else
++ .inst 0xe320f014
++#endif
++ .endm
++
++ .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
++#ifndef CONFIG_CPU_USE_DOMAINS
++ adds \tmp, \addr, #\size - 1
++ sbcscc \tmp, \tmp, \limit
++ bcs \bad
++#ifdef CONFIG_CPU_SPECTRE
++ movcs \addr, #0
++ csdb
++#endif
++#endif
++ .endm
++
++ .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
++#ifdef CONFIG_CPU_SPECTRE
++ sub \tmp, \limit, #1
++ subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
++ addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
++ subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) }
++ movlo \addr, #0 @ if (tmp < 0) addr = NULL
++ csdb
++#endif
++ .endm
++
++ .macro uaccess_disable, tmp, isb=1
++#ifdef CONFIG_CPU_SW_DOMAIN_PAN
++ /*
++ * Whenever we re-enter userspace, the domains should always be
++ * set appropriately.
++ */
++ mov \tmp, #DACR_UACCESS_DISABLE
++ mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
++ .if \isb
++ instr_sync
++ .endif
++#endif
++ .endm
++
++ .macro uaccess_enable, tmp, isb=1
++#ifdef CONFIG_CPU_SW_DOMAIN_PAN
++ /*
++ * Whenever we re-enter userspace, the domains should always be
++ * set appropriately.
++ */
++ mov \tmp, #DACR_UACCESS_ENABLE
++ mcr p15, 0, \tmp, c3, c0, 0
++ .if \isb
++ instr_sync
++ .endif
++#endif
++ .endm
++
++#if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS)
++#define DACR(x...) x
++#else
++#define DACR(x...)
++#endif
++
++ /*
++ * Save the address limit on entry to a privileged exception.
++ *
++ * If we are using the DACR for kernel access by the user accessors
++ * (CONFIG_CPU_USE_DOMAINS=y), always reset the DACR kernel domain
++ * back to client mode, whether or not \disable is set.
++ *
++ * If we are using SW PAN, set the DACR user domain to no access
++ * if \disable is set.
++ */
++ .macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
++ ldr \tmp1, [\tsk, #TI_ADDR_LIMIT]
++ mov \tmp2, #TASK_SIZE
++ str \tmp2, [\tsk, #TI_ADDR_LIMIT]
++ DACR( mrc p15, 0, \tmp0, c3, c0, 0)
++ DACR( str \tmp0, [sp, #SVC_DACR])
++ str \tmp1, [sp, #SVC_ADDR_LIMIT]
++ .if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN)
++ /* kernel=client, user=no access */
++ mov \tmp2, #DACR_UACCESS_DISABLE
++ mcr p15, 0, \tmp2, c3, c0, 0
++ instr_sync
++ .elseif IS_ENABLED(CONFIG_CPU_USE_DOMAINS)
++ /* kernel=client */
++ bic \tmp2, \tmp0, #domain_mask(DOMAIN_KERNEL)
++ orr \tmp2, \tmp2, #domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT)
++ mcr p15, 0, \tmp2, c3, c0, 0
++ instr_sync
++ .endif
++ .endm
++
++ /* Restore the user access state previously saved by uaccess_entry */
++ .macro uaccess_exit, tsk, tmp0, tmp1
++ ldr \tmp1, [sp, #SVC_ADDR_LIMIT]
++ DACR( ldr \tmp0, [sp, #SVC_DACR])
++ str \tmp1, [\tsk, #TI_ADDR_LIMIT]
++ DACR( mcr p15, 0, \tmp0, c3, c0, 0)
++ .endm
++
++#undef DACR
++
++#endif /* __ASM_UACCESS_ASM_H__ */
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index 77f54830554c..55a47df04773 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -27,6 +27,7 @@
+ #include <asm/unistd.h>
+ #include <asm/tls.h>
+ #include <asm/system_info.h>
++#include <asm/uaccess-asm.h>
+
+ #include "entry-header.S"
+ #include <asm/entry-macro-multi.S>
+@@ -179,15 +180,7 @@ ENDPROC(__und_invalid)
+ stmia r7, {r2 - r6}
+
+ get_thread_info tsk
+- ldr r0, [tsk, #TI_ADDR_LIMIT]
+- mov r1, #TASK_SIZE
+- str r1, [tsk, #TI_ADDR_LIMIT]
+- str r0, [sp, #SVC_ADDR_LIMIT]
+-
+- uaccess_save r0
+- .if \uaccess
+- uaccess_disable r0
+- .endif
++ uaccess_entry tsk, r0, r1, r2, \uaccess
+
+ .if \trace
+#ifdef CONFIG_TRACE_IRQFLAGS
+diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
+index 32051ec5b33f..40db0f9188b6 100644
+--- a/arch/arm/kernel/entry-header.S
++++ b/arch/arm/kernel/entry-header.S
+@@ -6,6 +6,7 @@
+ #include <asm/asm-offsets.h>
+ #include <asm/errno.h>
+ #include <asm/thread_info.h>
++#include <asm/uaccess-asm.h>
+ #include <asm/v7m.h>
+
+ @ Bad Abort numbers
+@@ -217,9 +218,7 @@
+ blne trace_hardirqs_off
+#endif
+ .endif
+- ldr r1, [sp, #SVC_ADDR_LIMIT]
+- uaccess_restore
+- str r1, [tsk, #TI_ADDR_LIMIT]
++ uaccess_exit tsk, r0, r1
+
+#ifndef CONFIG_THUMB2_KERNEL
+ @ ARM mode SVC restore
+@@ -263,9 +262,7 @@
+ @ on the stack remains correct).
+ @
+ .macro svc_exit_via_fiq
+- ldr r1, [sp, #SVC_ADDR_LIMIT]
+- uaccess_restore
+- str r1, [tsk, #TI_ADDR_LIMIT]
++ uaccess_exit tsk, r0, r1
+#ifndef CONFIG_THUMB2_KERNEL
+ @ ARM mode restore
+ mov r0, sp
+diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+index 8b4e806d5119..125c78321ab4 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+@@ -1401,8 +1401,8 @@
+ "venc_lt_sel";
+ assigned-clocks = <&topckgen CLK_TOP_VENC_SEL>,
+ <&topckgen CLK_TOP_VENC_LT_SEL>;
+- assigned-clock-parents = <&topckgen CLK_TOP_VENCPLL_D2>,
+- <&topckgen CLK_TOP_UNIVPLL1_D2>;
++ assigned-clock-parents = <&topckgen CLK_TOP_VCODECPLL>,
++ <&topckgen CLK_TOP_VCODECPLL_370P5>;
+ };
+
+ jpegdec: jpegdec@18004000 {
+diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
+index a85b85d85a5f..3c7c9b52623c 100644
+--- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
++++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
+@@ -908,10 +908,27 @@
+ status = "okay";
+ };
+
++&q6asmdai {
++ dai@0 {
++ reg = <0>;
++ };
++
++ dai@1 {
++ reg = <1>;
++ };
++
++ dai@2 {
++ reg = <2>;
++ };
++};
++
+ &sound {
+ compatible = "qcom,apq8096-sndcard";
+ model = "DB820c";
+- audio-routing = "RX_BIAS", "MCLK";
++ audio-routing = "RX_BIAS", "MCLK",
++ "MM_DL1", "MultiMedia1 Playback",
++ "MM_DL2", "MultiMedia2 Playback",
++ "MultiMedia3 Capture", "MM_UL3";
+
+ mm1-dai-link {
+ link-name = "MultiMedia1";
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index 7ae082ea14ea..f925a6c7d293 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -2053,6 +2053,8 @@
+ reg = <APR_SVC_ASM>;
+ q6asmdai: dais {
+ compatible = "qcom,q6asm-dais";
++ #address-cells = <1>;
++ #size-cells = <0>;
+ #sound-dai-cells = <1>;
+ iommus = <&lpass_q6_smmu 1>;
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
+index 6abc6f4a86cf..05265b38cc02 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
+@@ -86,7 +86,7 @@
+ assigned-clock-rate = <50000000>;
+ assigned-clocks = <&cru SCLK_MAC2PHY>;
+ assigned-clock-parents = <&cru SCLK_MAC2PHY_SRC>;
+-
++ status = "okay";
+ };
+
+ &i2c1 {
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+index 5c4238a80144..c341172ec208 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+@@ -1890,10 +1890,10 @@
+ gpu: gpu@ff9a0000 {
+ compatible = "rockchip,rk3399-mali", "arm,mali-t860";
+ reg = <0x0 0xff9a0000 0x0 0x10000>;
+- interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH 0>,
+- <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH 0>,
+- <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH 0>;
+- interrupt-names = "gpu", "job", "mmu";
++ interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH 0>,
++ <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH 0>,
++ <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH 0>;
++ interrupt-names = "job", "mmu", "gpu";
+ clocks = <&cru ACLK_GPU>;
+ #cooling-cells = <2>;
+ power-domains = <&power RK3399_PD_GPU>;
+diff --git a/arch/csky/abiv1/inc/abi/entry.h b/arch/csky/abiv1/inc/abi/entry.h
+index 5056ebb902d1..61d94ec7dd16 100644
+--- a/arch/csky/abiv1/inc/abi/entry.h
++++ b/arch/csky/abiv1/inc/abi/entry.h
+@@ -167,8 +167,8 @@
+ * BA Reserved C D V
+ */
+ cprcr r6, cpcr30
+- lsri r6, 28
+- lsli r6, 28
++ lsri r6, 29
++ lsli r6, 29
+ addi r6, 0xe
+ cpwcr r6, cpcr30
+
+diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h
+index 111973c6c713..9023828ede97 100644
+--- a/arch/csky/abiv2/inc/abi/entry.h
++++ b/arch/csky/abiv2/inc/abi/entry.h
+@@ -225,8 +225,8 @@
+ */
+ mfcr r6, cr<30, 15> /* Get MSA0 */
+2:
+- lsri r6, 28
+- lsli r6, 28
++ lsri r6, 29
++ lsli r6, 29
+ addi r6, 0x1ce
+ mtcr r6, cr<30, 15> /* Set MSA0 */
+
+diff --git a/arch/csky/include/asm/uaccess.h b/arch/csky/include/asm/uaccess.h
+index eaa1c3403a42..60f8a4112588 100644
+--- a/arch/csky/include/asm/uaccess.h
++++ b/arch/csky/include/asm/uaccess.h
+@@ -254,7 +254,7 @@ do { \
+
+ extern int __get_user_bad(void);
+
+-#define __copy_user(to, from, n) \
++#define ___copy_to_user(to, from, n) \
+ do { \
+ int w0, w1, w2, w3; \
+ asm volatile( \
+@@ -289,31 +289,34 @@ do { \
+ " subi %0, 4 \n" \
+ " br 3b \n" \
+ "5: cmpnei %0, 0 \n" /* 1B */ \
+- " bf 8f \n" \
++ " bf 13f \n" \
+ " ldb %3, (%2, 0) \n" \
+ "6: stb %3, (%1, 0) \n" \
+ " addi %2, 1 \n" \
+ " addi %1, 1 \n" \
+ " subi %0, 1 \n" \
+ " br 5b \n" \
+- "7: br 8f \n" \
++ "7: subi %0, 4 \n" \
++ "8: subi %0, 4 \n" \
++ "12: subi %0, 4 \n" \
++ " br 13f \n" \
+ ".section __ex_table, \"a\" \n" \
+ ".align 2 \n" \
+- ".long 2b, 7b \n" \
+- ".long 9b, 7b \n" \
+- ".long 10b, 7b \n" \
++ ".long 2b, 13f \n" \
++ ".long 4b, 13f \n" \
++ ".long 6b, 13f \n" \
++ ".long 9b, 12b \n" \
++ ".long 10b, 8b \n" \
+ ".long 11b, 7b \n" \
+- ".long 4b, 7b \n" \
+- ".long 6b, 7b \n" \
+ ".previous \n" \
+- "8: \n" \
++ "13: \n" \
+ : "=r"(n), "=r"(to), "=r"(from), "=r"(w0), \
+ "=r"(w1), "=r"(w2), "=r"(w3) \
+ : "0"(n), "1"(to), "2"(from) \
+ : "memory"); \
+ } while (0)
+
+-#define __copy_user_zeroing(to, from, n) \
++#define ___copy_from_user(to, from, n) \
+ do { \
+ int tmp; \
+ int nsave; \
+@@ -356,22 +359,22 @@ do { \
+ " addi %1, 1 \n" \
+ " subi %0, 1 \n" \
+ " br 5b \n" \
+- "8: mov %3, %0 \n" \
+- " movi %4, 0 \n" \
+- "9: stb %4, (%1, 0) \n" \
+- " addi %1, 1 \n" \
+- " subi %3, 1 \n" \
+- " cmpnei %3, 0 \n" \
+- " bt 9b \n" \
+- " br 7f \n" \
++ "8: stw %3, (%1, 0) \n" \
++ " subi %0, 4 \n" \
++ " bf 7f \n" \
++ "9: subi %0, 8 \n" \
++ " bf 7f \n" \
++ "13: stw %3, (%1, 8) \n" \
++ " subi %0, 12 \n" \
++ " bf 7f \n" \
+ ".section __ex_table, \"a\" \n" \
+ ".align 2 \n" \
+- ".long 2b, 8b \n" \
++ ".long 2b, 7f \n" \
++ ".long 4b, 7f \n" \
++ ".long 6b, 7f \n" \
+ ".long 10b, 8b \n" \
+- ".long 11b, 8b \n" \
+- ".long 12b, 8b \n" \
+- ".long 4b, 8b \n" \
+- ".long 6b, 8b \n" \
++ ".long 11b, 9b \n" \
++ ".long 12b,13b \n" \
+ ".previous \n" \
+ "7: \n" \
+ : "=r"(n), "=r"(to), "=r"(from), "=r"(nsave), \
+diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S
+index 007706328000..9718388448a4 100644
+--- a/arch/csky/kernel/entry.S
++++ b/arch/csky/kernel/entry.S
+@@ -318,8 +318,6 @@ ENTRY(__switch_to)
+
+ mfcr a2, psr /* Save PSR value */
+ stw a2, (a3, THREAD_SR) /* Save PSR in task struct */
+- bclri a2, 6 /* Disable interrupts */
+- mtcr a2, psr
+
+ SAVE_SWITCH_STACK
+
+diff --git a/arch/csky/kernel/perf_callchain.c b/arch/csky/kernel/perf_callchain.c
+index e68ff375c8f8..ab55e98ee8f6 100644
+--- a/arch/csky/kernel/perf_callchain.c
++++ b/arch/csky/kernel/perf_callchain.c
+@@ -12,12 +12,17 @@ struct stackframe {
+
+ static int unwind_frame_kernel(struct stackframe *frame)
+ {
+- if (kstack_end((void *)frame->fp))
++ unsigned long low = (unsigned long)task_stack_page(current);
++ unsigned long high = low + THREAD_SIZE;
++
++ if (unlikely(frame->fp < low || frame->fp > high))
+ return -EPERM;
+- if (frame->fp & 0x3 || frame->fp < TASK_SIZE)
++
++ if (kstack_end((void *)frame->fp) || frame->fp & 0x3)
+ return -EPERM;
+
+ *frame = *(struct stackframe *)frame->fp;
++
+ if (__kernel_text_address(frame->lr)) {
+ int graph = 0;
+
+diff --git a/arch/csky/lib/usercopy.c b/arch/csky/lib/usercopy.c
+index 647a23986fb5..3c9bd645e643 100644
+--- a/arch/csky/lib/usercopy.c
++++ b/arch/csky/lib/usercopy.c
+@@ -7,10 +7,7 @@
+ unsigned long raw_copy_from_user(void *to, const void *from,
+ unsigned long n)
+ {
+- if (access_ok(from, n))
+- __copy_user_zeroing(to, from, n);
+- else
+- memset(to, 0, n);
++ ___copy_from_user(to, from, n);
+ return n;
+ }
+ EXPORT_SYMBOL(raw_copy_from_user);
+@@ -18,8 +15,7 @@ EXPORT_SYMBOL(raw_copy_from_user);
+ unsigned long raw_copy_to_user(void *to, const void *from,
+ unsigned long n)
+ {
+- if (access_ok(to, n))
+- __copy_user(to, from, n);
++ ___copy_to_user(to, from, n);
+ return n;
+ }
+ EXPORT_SYMBOL(raw_copy_to_user);
+diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
+index 5224fb38d766..01d7071b23f7 100644
+--- a/arch/parisc/mm/init.c
++++ b/arch/parisc/mm/init.c
+@@ -562,7 +562,7 @@ void __init mem_init(void)
+ > BITS_PER_LONG);
+
+ high_memory = __va((max_pfn << PAGE_SHIFT));
+- set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
++ set_max_mapnr(max_low_pfn);
+ memblock_free_all();
+
+#ifdef CONFIG_PA11
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index b0fb42b0bf4b..35608b9feb14 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -125,6 +125,7 @@ config PPC
+ select ARCH_HAS_MMIOWB if PPC64
+ select ARCH_HAS_PHYS_TO_DMA
+ select ARCH_HAS_PMEM_API
++ select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64
+ select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_MEMBARRIER_CALLBACKS
+diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
+index a131174a0a77..f310ad8ffcf7 100644
+--- a/arch/riscv/Kconfig.socs
++++ b/arch/riscv/Kconfig.socs
+@@ -11,13 +11,14 @@ config SOC_SIFIVE
+ This enables support for SiFive SoC platform hardware.
+
+ config SOC_VIRT
+- bool "QEMU Virt Machine"
+- select POWER_RESET_SYSCON
+- select POWER_RESET_SYSCON_POWEROFF
+- select GOLDFISH
+- select RTC_DRV_GOLDFISH
+- select SIFIVE_PLIC
+- help
+- This enables support for QEMU Virt Machine.
++ bool "QEMU Virt Machine"
++ select POWER_RESET
++ select POWER_RESET_SYSCON
++ select POWER_RESET_SYSCON_POWEROFF
++ select GOLDFISH
++ select RTC_DRV_GOLDFISH if RTC_CLASS
++ select SIFIVE_PLIC
++ help
++ This enables support for QEMU Virt Machine.
+
+ endmenu
+diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h
+index a2c809df2733..56053c9838b2 100644
+--- a/arch/riscv/include/asm/mmio.h
++++ b/arch/riscv/include/asm/mmio.h
+@@ -16,6 +16,8 @@
+
+#ifndef CONFIG_MMU
+ #define pgprot_noncached(x) (x)
++#define pgprot_writecombine(x) (x)
++#define pgprot_device(x) (x)
+ #endif /* CONFIG_MMU */
+
+ /* Generic IO read/write. These perform native-endian accesses. */
+diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
+index 393f2014dfee..31d912944d8d 100644
+--- a/arch/riscv/include/asm/pgtable.h
++++ b/arch/riscv/include/asm/pgtable.h
+@@ -460,12 +460,15 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
+
+#else /* CONFIG_MMU */
+
++#define PAGE_SHARED __pgprot(0)
+ #define PAGE_KERNEL __pgprot(0)
+ #define swapper_pg_dir NULL
+ #define VMALLOC_START 0
+
+ #define TASK_SIZE 0xffffffffUL
+
++static inline void __kernel_map_pages(struct page *page, int numpages, int enable) {}
+
+ #endif /* !CONFIG_MMU */

+ #define kern_addr_valid(addr) (1) /* FIXME */
+diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
+index 0940681d2f68..19e46f4160cc 100644
+--- a/arch/riscv/kernel/stacktrace.c
++++ b/arch/riscv/kernel/stacktrace.c
+@@ -63,7 +63,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+
+#else /* !CONFIG_FRAME_POINTER */
+
+-static void notrace walk_stackframe(struct task_struct *task,
++void notrace walk_stackframe(struct task_struct *task,
+ struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
+ {
+ unsigned long sp, pc;
+diff --git a/arch/x86/include/asm/dma.h b/arch/x86/include/asm/dma.h
+index 00f7cf45e699..8e95aa4b0d17 100644
+--- a/arch/x86/include/asm/dma.h
++++ b/arch/x86/include/asm/dma.h
+@@ -74,7 +74,7 @@
+ #define MAX_DMA_PFN ((16UL * 1024 * 1024) >> PAGE_SHIFT)
+
+ /* 4GB broken PCI/AGP hardware bus master zone */
+-#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
++#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT))
+
+#ifdef CONFIG_X86_32
+ /* The maximum address that we can perform a DMA transfer to on this platform */
+diff --git a/arch/x86/include/asm/io_bitmap.h b/arch/x86/include/asm/io_bitmap.h
+index 07344d82e88e..ac1a99ffbd8d 100644
+--- a/arch/x86/include/asm/io_bitmap.h
++++ b/arch/x86/include/asm/io_bitmap.h
+@@ -17,7 +17,7 @@ struct task_struct;
+
+#ifdef CONFIG_X86_IOPL_IOPERM
+ void io_bitmap_share(struct task_struct *tsk);
+-void io_bitmap_exit(void);
++void io_bitmap_exit(struct task_struct *tsk);
+
+ void native_tss_update_io_bitmap(void);
+
+@@ -29,7 +29,7 @@ void native_tss_update_io_bitmap(void);
+
+#else
+ static inline void io_bitmap_share(struct task_struct *tsk) { }
+-static inline void io_bitmap_exit(void) { }
++static inline void io_bitmap_exit(struct task_struct *tsk) { }
+ static inline void tss_update_io_bitmap(void) { }
+#endif
+
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index a1806598aaa4..cf2f2a85f087 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -954,18 +954,31 @@ static inline bool xfeatures_mxcsr_quirk(u64 xfeatures)
+ return true;
+ }
+
+-/*
+- * This is similar to user_regset_copyout(), but will not add offset to
+- * the source data pointer or increment pos, count, kbuf, and ubuf.
+- */
+-static inline void
+-__copy_xstate_to_kernel(void *kbuf, const void *data,
+- unsigned int offset, unsigned int size, unsigned int size_total)
++static void fill_gap(unsigned to, void **kbuf, unsigned *pos, unsigned *count)
+ {
+- if (offset < size_total) {
+- unsigned int copy = min(size, size_total - offset);
++ if (*pos < to) {
++ unsigned size = to - *pos;
++
++ if (size > *count)
++ size = *count;
++ memcpy(*kbuf, (void *)&init_fpstate.xsave + *pos, size);
++ *kbuf += size;
++ *pos += size;
++ *count -= size;
++ }
++}
+
+- memcpy(kbuf + offset, data, copy);
++static void copy_part(unsigned offset, unsigned size, void *from,
++ void **kbuf, unsigned *pos, unsigned *count)
++{
++ fill_gap(offset, kbuf, pos, count);
++ if (size > *count)
++ size = *count;
++ if (size) {
++ memcpy(*kbuf, from, size);
++ *kbuf += size;
++ *pos += size;
++ *count -= size;
+ }
+ }
+
+@@ -978,8 +991,9 @@ __copy_xstate_to_kernel(void *kbuf, const void *data,
+ */
+ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
+ {
+- unsigned int offset, size;
+ struct xstate_header header;
++ const unsigned off_mxcsr = offsetof(struct fxregs_state, mxcsr);
++ unsigned count = size_total;
+ int i;
+
+ /*
+@@ -995,46 +1009,42 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int of
+ header.xfeatures = xsave->header.xfeatures;
+ header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR;
+
++ if (header.xfeatures & XFEATURE_MASK_FP)
++ copy_part(0, off_mxcsr,
++ &xsave->i387, &kbuf, &offset_start, &count);
++ if (header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM))
++ copy_part(off_mxcsr, MXCSR_AND_FLAGS_SIZE,
++ &xsave->i387.mxcsr, &kbuf, &offset_start, &count);
++ if (header.xfeatures & XFEATURE_MASK_FP)
++ copy_part(offsetof(struct fxregs_state, st_space), 128,
++ &xsave->i387.st_space, &kbuf, &offset_start, &count);
++ if (header.xfeatures & XFEATURE_MASK_SSE)
++ copy_part(xstate_offsets[XFEATURE_MASK_SSE], 256,
++ &xsave->i387.xmm_space, &kbuf, &offset_start, &count);
++ /*
++ * Fill xsave->i387.sw_reserved value for ptrace frame:
++ */
++ copy_part(offsetof(struct fxregs_state, sw_reserved), 48,
++ xstate_fx_sw_bytes, &kbuf, &offset_start, &count);
+ /*
+ * Copy xregs_state->header:
+ */
+- offset = offsetof(struct xregs_state, header);
+- size = sizeof(header);
+-
+- __copy_xstate_to_kernel(kbuf, &header, offset, size, size_total);
++ copy_part(offsetof(struct xregs_state, header), sizeof(header),
++ &header, &kbuf, &offset_start, &count);
+
+- for (i = 0; i < XFEATURE_MAX; i++) {
++ for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
+ /*
+ * Copy only in-use xstates:
+ */
+ if ((header.xfeatures >> i) & 1) {
+ void *src = __raw_xsave_addr(xsave, i);
+
+- offset = xstate_offsets[i];
+- size = xstate_sizes[i];
+-
+- /* The next component has to fit fully into the output buffer: */
+- if (offset + size > size_total)
+- break;
+-
+- __copy_xstate_to_kernel(kbuf, src, offset, size, size_total);
++ copy_part(xstate_offsets[i], xstate_sizes[i],
++ src, &kbuf, &offset_start, &count);
+ }
+
+ }
+-
+- if (xfeatures_mxcsr_quirk(header.xfeatures)) {
+- offset = offsetof(struct fxregs_state, mxcsr);
+- size = MXCSR_AND_FLAGS_SIZE;
+- __copy_xstate_to_kernel(kbuf, &xsave->i387.mxcsr, offset, size, size_total);
+- }
+-
+- /*
+- * Fill xsave->i387.sw_reserved value for ptrace frame:
+- */
+- offset = offsetof(struct fxregs_state, sw_reserved);
+- size = sizeof(xstate_fx_sw_bytes);
+-
+- __copy_xstate_to_kernel(kbuf, xstate_fx_sw_bytes, offset, size, size_total);
++ fill_gap(size_total, &kbuf, &offset_start, &count);
+
+ return 0;
+ }
+diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
+index 8abeee0dd7bf..fce678f4471e 100644
+--- a/arch/x86/kernel/ioport.c
++++ b/arch/x86/kernel/ioport.c
+@@ -32,15 +32,15 @@ void io_bitmap_share(struct task_struct *tsk)
+ set_tsk_thread_flag(tsk, TIF_IO_BITMAP);
+ }
+
+-static void task_update_io_bitmap(void)
++static void task_update_io_bitmap(struct task_struct *tsk)
+ {
+- struct thread_struct *t = &current->thread;
++ struct thread_struct *t = &tsk->thread;
+
+ if (t->iopl_emul == 3 || t->io_bitmap) {
+ /* TSS update is handled on exit to user space */
+- set_thread_flag(TIF_IO_BITMAP);
++ set_tsk_thread_flag(tsk, TIF_IO_BITMAP);
+ } else {
+- clear_thread_flag(TIF_IO_BITMAP);
++ clear_tsk_thread_flag(tsk, TIF_IO_BITMAP);
+ /* Invalidate TSS */
+ preempt_disable();
+ tss_update_io_bitmap();
+@@ -48,12 +48,12 @@ static void task_update_io_bitmap(void)
+ }
+ }
+
+-void io_bitmap_exit(void)
++void io_bitmap_exit(struct task_struct *tsk)
+ {
+- struct io_bitmap *iobm = current->thread.io_bitmap;
++ struct io_bitmap *iobm = tsk->thread.io_bitmap;
+
+- current->thread.io_bitmap = NULL;
+- task_update_io_bitmap();
++ tsk->thread.io_bitmap = NULL;
++ task_update_io_bitmap(tsk);
+ if (iobm && refcount_dec_and_test(&iobm->refcnt))
+ kfree(iobm);
+ }
+@@ -101,7 +101,7 @@ long ksys_ioperm(unsigned long from, unsigned long num, int turn_on)
+ if (!iobm)
+ return -ENOMEM;
+ refcount_set(&iobm->refcnt, 1);
+- io_bitmap_exit();
++ io_bitmap_exit(current);
+ }
+
+ /*
+@@ -133,7 +133,7 @@ long ksys_ioperm(unsigned long from, unsigned long num, int turn_on)
+ }
+ /* All permissions dropped? */
+ if (max_long == UINT_MAX) {
+- io_bitmap_exit();
++ io_bitmap_exit(current);
+ return 0;
+ }
+
+@@ -191,7 +191,7 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
+ }
+
+ t->iopl_emul = level;
+- task_update_io_bitmap();
++ task_update_io_bitmap(current);
+
+ return 0;
+ }
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 3053c85e0e42..9898f672b81d 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -97,7 +97,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+ }
+
+ /*
+- * Free current thread data structures etc..
++ * Free thread data structures etc..
+ */
+ void exit_thread(struct task_struct *tsk)
+ {
+@@ -105,7 +105,7 @@ void exit_thread(struct task_struct *tsk)
+ struct fpu *fpu = &t->fpu;
+
+ if (test_thread_flag(TIF_IO_BITMAP))
+- io_bitmap_exit();
++ io_bitmap_exit(tsk);
+
+ free_vm86(t);
+
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 60dc9552ef8d..92232907605c 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -885,14 +885,11 @@ generic_make_request_checks(struct bio *bio)
+ }
+
+ /*
+- * Non-mq queues do not honor REQ_NOWAIT, so complete a bio
+- * with BLK_STS_AGAIN status in order to catch -EAGAIN and
+- * to give a chance to the caller to repeat request gracefully.
++ * For a REQ_NOWAIT based request, return -EOPNOTSUPP
++ * if queue is not a request based queue.
+ */
+- if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) {
+- status = BLK_STS_AGAIN;
+- goto end_io;
+- }
++ if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
++ goto not_supported;
+
+ if (should_fail_bio(bio))
+ goto end_io;
+diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
+index 20877214acff..e3959ff5cb55 100644
+--- a/drivers/clk/qcom/gcc-sm8150.c
++++ b/drivers/clk/qcom/gcc-sm8150.c
+@@ -75,8 +75,7 @@ static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_data = &(const struct clk_parent_data){
+- .fw_name = "bi_tcxo",
+- .name = "bi_tcxo",
++ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_trion_pll_postdiv_ops,
+diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
+index e001b9bcb6bf..7dc30dd6c8d5 100644
+--- a/drivers/clk/ti/clk-33xx.c
++++ b/drivers/clk/ti/clk-33xx.c
+@@ -212,7 +212,7 @@ static const struct omap_clkctrl_reg_data am3_mpu_clkctrl_regs[] __initconst = {
+ };
+
+ static const struct omap_clkctrl_reg_data am3_l4_rtc_clkctrl_regs[] __initconst = {
+- { AM3_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" },
++ { AM3_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk-24mhz-clkctrl:0000:0" },
+ { 0 },
+ };
+
+diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
+index 5cf9b021220b..fdaed234ae92 100644
+--- a/drivers/crypto/chelsio/chtls/chtls_io.c
++++ b/drivers/crypto/chelsio/chtls/chtls_io.c
+@@ -682,7 +682,7 @@ int chtls_push_frames(struct chtls_sock *csk, int comp)
+ make_tx_data_wr(sk, skb, immdlen, len,
+ credits_needed, completion);
+ tp->snd_nxt += len;
+- tp->lsndtime = tcp_time_stamp(tp);
++ tp->lsndtime = tcp_jiffies32;
+ if (completion)
+ ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR;
+ } else {
+diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
+index baee8c3f06ad..cf3687a7925f 100644
+--- a/drivers/gpio/gpio-bcm-kona.c
++++ b/drivers/gpio/gpio-bcm-kona.c
+@@ -625,7 +625,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
+
+ kona_gpio->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(kona_gpio->reg_base)) {
+- ret = -ENXIO;
++ ret = PTR_ERR(kona_gpio->reg_base);
+ goto err_irq_domain;
+ }
+
+diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
+index da1ef0b1c291..b1accfba017d 100644
+--- a/drivers/gpio/gpio-exar.c
++++ b/drivers/gpio/gpio-exar.c
+@@ -148,8 +148,10 @@ static int gpio_exar_probe(struct platform_device *pdev)
+ mutex_init(&exar_gpio->lock);
+
+ index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
+- if (index < 0)
+- goto err_destroy;
++ if (index < 0) {
++ ret = index;
++ goto err_mutex_destroy;
++ }
+
+ sprintf(exar_gpio->name, "exar_gpio%d", index);
+ exar_gpio->gpio_chip.label = exar_gpio->name;
+@@ -176,6 +178,7 @@ static int gpio_exar_probe(struct platform_device *pdev)
+
+ err_destroy:
+ ida_simple_remove(&ida_index, index);
++err_mutex_destroy:
+ mutex_destroy(&exar_gpio->lock);
+ return ret;
+ }
+diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
+index d2b999c7987f..f0c5433a327f 100644
+--- a/drivers/gpio/gpio-mvebu.c
++++ b/drivers/gpio/gpio-mvebu.c
+@@ -782,6 +782,15 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
+ "marvell,armada-370-gpio"))
+ return 0;
+
++ /*
++ * There are only two sets of PWM configuration registers for
++ * all the GPIO lines on those SoCs which this driver reserves
++ * for the first two GPIO chips. So if the resource is missing
++ * we can't treat it as an error.
++ */
++ if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm"))
++ return 0;
++
+ if (IS_ERR(mvchip->clk))
+ return PTR_ERR(mvchip->clk);
+
+@@ -804,12 +813,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
+ mvchip->mvpwm = mvpwm;
+ mvpwm->mvchip = mvchip;
+
+- /*
+- * There are only two sets of PWM configuration registers for
+- * all the GPIO lines on those SoCs which this driver reserves
+- * for the first two GPIO chips. So if the resource is missing
+- * we can't treat it as an error.
+- */
+ mvpwm->membase = devm_platform_ioremap_resource_byname(pdev, "pwm");
+ if (IS_ERR(mvpwm->membase))
+ return PTR_ERR(mvpwm->membase);
+diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
+index 9888b62f37af..432c487f77b4 100644
+--- a/drivers/gpio/gpio-pxa.c
++++ b/drivers/gpio/gpio-pxa.c
+@@ -663,8 +663,8 @@ static int pxa_gpio_probe(struct platform_device *pdev)
+ pchip->irq1 = irq1;
+
+ gpio_reg_base = devm_platform_ioremap_resource(pdev, 0);
+- if (!gpio_reg_base)
+- return -EINVAL;
++ if (IS_ERR(gpio_reg_base))
++ return PTR_ERR(gpio_reg_base);
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
+index acb99eff9939..86568154cdb3 100644
+--- a/drivers/gpio/gpio-tegra.c
++++ b/drivers/gpio/gpio-tegra.c
+@@ -368,6 +368,7 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d)
+ struct tegra_gpio_info *tgi = bank->tgi;
+ unsigned int gpio = d->hwirq;
+
++ tegra_gpio_irq_mask(d);
+ gpiochip_unlock_as_irq(&tgi->gc, gpio);
+ }
+
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 00fb91feba70..2f350e3df965 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -4025,7 +4025,9 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
+ }
+ }
+
+- if (test_bit(FLAG_IS_OUT, &desc->flags)) {
++ /* To be valid for IRQ the line needs to be input or open drain */
++ if (test_bit(FLAG_IS_OUT, &desc->flags) &&
++ !test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
+ chip_err(chip,
+ "%s: tried to flag a GPIO set as output for IRQ\n",
+ __func__);
+@@ -4088,7 +4090,12 @@ void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset)
+
+ if (!IS_ERR(desc) &&
+ !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) {
+- WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags));
++ /*
++ * We must not be output when using IRQ UNLESS we are
++ * open drain.
++ */
++ WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags) &&
++ !test_bit(FLAG_OPEN_DRAIN, &desc->flags));
+ set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
+ }
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index fa8ac9d19a7a..6326c1792270 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1304,7 +1304,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
+ }
+
+ /* Free the BO*/
+- amdgpu_bo_unref(&mem->bo);
++ drm_gem_object_put_unlocked(&mem->bo->tbo.base);
+ mutex_destroy(&mem->lock);
+ kfree(mem);
+
+@@ -1647,7 +1647,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+ ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) |
+ ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE;
+
+- (*mem)->bo = amdgpu_bo_ref(bo);
++ drm_gem_object_get(&bo->tbo.base);
++ (*mem)->bo = bo;
+ (*mem)->va = va;
+ (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+ AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index 02702597ddeb..012df3d574bf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -4241,11 +4241,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
+ switch (adev->asic_type) {
+ case CHIP_NAVI10:
+ case CHIP_NAVI14:
+- if (!enable) {
+- amdgpu_gfx_off_ctrl(adev, false);
+- cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
+- } else
+- amdgpu_gfx_off_ctrl(adev, true);
++ amdgpu_gfx_off_ctrl(adev, enable);
+ break;
+ default:
+ break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 906648fca9ef..914dbd901b98 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -4734,10 +4734,9 @@ static int gfx_v9_0_set_powergating_state(void *handle,
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ case CHIP_RENOIR:
+- if (!enable) {
++ if (!enable)
+ amdgpu_gfx_off_ctrl(adev, false);
+- cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
+- }
++
+ if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
+ gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
+ gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
+@@ -4761,12 +4760,7 @@ static int gfx_v9_0_set_powergating_state(void *handle,
+ amdgpu_gfx_off_ctrl(adev, true);
+ break;
+ case CHIP_VEGA12:
+- if (!enable) {
+- amdgpu_gfx_off_ctrl(adev, false);
+- cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
+- } else {
+- amdgpu_gfx_off_ctrl(adev, true);
+- }
++ amdgpu_gfx_off_ctrl(adev, enable);
+ break;
+ default:
+ break;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 0cd11d3d4cf4..8e7cffe10cc5 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -7746,13 +7746,6 @@ static int dm_update_plane_state(struct dc *dc,
+ return -EINVAL;
+ }
+
+- if (new_plane_state->crtc_x <= -new_acrtc->max_cursor_width ||
+- new_plane_state->crtc_y <= -new_acrtc->max_cursor_height) {
+- DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n",
+- new_plane_state->crtc_x, new_plane_state->crtc_y);
+- return -EINVAL;
+- }
+-
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+index 3abeff7722e3..e80371542622 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+@@ -316,15 +316,15 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
+ struct mod_hdcp_display *display = &hdcp_work[link_index].display;
+ struct mod_hdcp_link *link = &hdcp_work[link_index].link;
+
+- memset(display, 0, sizeof(*display));
+- memset(link, 0, sizeof(*link));
+-
+- display->index = aconnector->base.index;
+-
+ if (config->dpms_off) {
+ hdcp_remove_display(hdcp_work, link_index, aconnector);
+ return;
+ }
++
++ memset(display, 0, sizeof(*display));
++ memset(link, 0, sizeof(*link));
++
++ display->index = aconnector->base.index;
+ display->state = MOD_HDCP_DISPLAY_ACTIVE;
+
+ if (aconnector->dc_sink != NULL)
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index b3987124183a..32a07665863f 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -763,6 +763,29 @@ static bool disable_all_writeback_pipes_for_stream(
+ return true;
+ }
+
++void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, bool lock)
++{
++ int i = 0;
++
++ /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
++ if (dc->hwss.interdependent_update_lock)
++ dc->hwss.interdependent_update_lock(dc, context, lock);
++ else {
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
++ struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
++
++ // Copied conditions that were previously in dce110_apply_ctx_for_surface
++ if (stream == pipe_ctx->stream) {
++ if (!pipe_ctx->top_pipe &&
++ (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
++ dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
++ break;
++ }
++ }
++ }
++}
++
+ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
+ {
+ int i, j;
+@@ -788,11 +811,20 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
+ if (should_disable && old_stream) {
+ dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
+ disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
+- if (dc->hwss.apply_ctx_for_surface)
++
++ if (dc->hwss.apply_ctx_for_surface) {
++ apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
+ dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
++ apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
++ dc->hwss.post_unlock_program_front_end(dc, dangling_context);
++ }
++ if (dc->hwss.program_front_end_for_ctx) {
++ dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
++ dc->hwss.program_front_end_for_ctx(dc, dangling_context);
++ dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
++ dc->hwss.post_unlock_program_front_end(dc, dangling_context);
++ }
+ }
+- if (dc->hwss.program_front_end_for_ctx)
+- dc->hwss.program_front_end_for_ctx(dc, dangling_context);
+ }
+
+ current_ctx = dc->current_state;
+@@ -1211,16 +1243,19 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
+ /* re-program planes for existing stream, in case we need to
+ * free up plane resource for later use
+ */
+- if (dc->hwss.apply_ctx_for_surface)
++ if (dc->hwss.apply_ctx_for_surface) {
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->mode_changed)
+ continue;
+-
++ apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
+ dc->hwss.apply_ctx_for_surface(
+ dc, context->streams[i],
+ context->stream_status[i].plane_count,
+ context); /* use new pipe config in new context */
++ apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
++ dc->hwss.post_unlock_program_front_end(dc, context);
+ }
++ }
+
+ /* Program hardware */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+@@ -1239,19 +1274,27 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
+ }
+
+ /* Program all planes within new context*/
+- if (dc->hwss.program_front_end_for_ctx)
++ if (dc->hwss.program_front_end_for_ctx) {
++ dc->hwss.interdependent_update_lock(dc, context, true);
++ dc->hwss.program_front_end_for_ctx(dc, context);
++ dc->hwss.interdependent_update_lock(dc, context, false);
++ dc->hwss.post_unlock_program_front_end(dc, context);
++ }
+ for (i = 0; i < context->stream_count; i++) {
1716 + const struct dc_link *link = context->streams[i]->link;
1717 +
1718 + if (!context->streams[i]->mode_changed)
1719 + continue;
1720 +
1721 +- if (dc->hwss.apply_ctx_for_surface)
1722 ++ if (dc->hwss.apply_ctx_for_surface) {
1723 ++ apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1724 + dc->hwss.apply_ctx_for_surface(
1725 + dc, context->streams[i],
1726 + context->stream_status[i].plane_count,
1727 + context);
1728 ++ apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1729 ++ dc->hwss.post_unlock_program_front_end(dc, context);
1730 ++ }
1731 +
1732 + /*
1733 + * enable stereo
1734 +@@ -1735,14 +1778,15 @@ static enum surface_update_type check_update_surfaces_for_stream(
1735 +
1736 + if (stream_update->wb_update)
1737 + su_flags->bits.wb_update = 1;
1738 ++
1739 ++ if (stream_update->dsc_config)
1740 ++ su_flags->bits.dsc_changed = 1;
1741 ++
1742 + if (su_flags->raw != 0)
1743 + overall_type = UPDATE_TYPE_FULL;
1744 +
1745 + if (stream_update->output_csc_transform || stream_update->output_color_space)
1746 + su_flags->bits.out_csc = 1;
1747 +-
1748 +- if (stream_update->dsc_config)
1749 +- overall_type = UPDATE_TYPE_FULL;
1750 + }
1751 +
1752 + for (i = 0 ; i < surface_count; i++) {
1753 +@@ -1777,8 +1821,11 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
1754 +
1755 + type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
1756 + if (type == UPDATE_TYPE_FULL) {
1757 +- if (stream_update)
1758 ++ if (stream_update) {
1759 ++ uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
1760 + stream_update->stream->update_flags.raw = 0xFFFFFFFF;
1761 ++ stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
1762 ++ }
1763 + for (i = 0; i < surface_count; i++)
1764 + updates[i].surface->update_flags.raw = 0xFFFFFFFF;
1765 + }
1766 +@@ -2094,18 +2141,14 @@ static void commit_planes_do_stream_update(struct dc *dc,
1767 + }
1768 + }
1769 +
1770 +- if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) {
1771 +- dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true);
1772 +- dp_update_dsc_config(pipe_ctx);
1773 +- dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false);
1774 +- }
1775 + /* Full fe update*/
1776 + if (update_type == UPDATE_TYPE_FAST)
1777 + continue;
1778 +
1779 +- if (stream_update->dpms_off) {
1780 +- dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
1781 ++ if (stream_update->dsc_config)
1782 ++ dp_update_dsc_config(pipe_ctx);
1783 +
1784 ++ if (stream_update->dpms_off) {
1785 + if (*stream_update->dpms_off) {
1786 + core_link_disable_stream(pipe_ctx);
1787 + /* for dpms, keep acquired resources*/
1788 +@@ -2119,8 +2162,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
1789 +
1790 + core_link_enable_stream(dc->current_state, pipe_ctx);
1791 + }
1792 +-
1793 +- dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
1794 + }
1795 +
1796 + if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
1797 +@@ -2176,6 +2217,27 @@ static void commit_planes_for_stream(struct dc *dc,
1798 + context_clock_trace(dc, context);
1799 + }
1800 +
1801 ++ for (j = 0; j < dc->res_pool->pipe_count; j++) {
1802 ++ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1803 ++
1804 ++ if (!pipe_ctx->top_pipe &&
1805 ++ !pipe_ctx->prev_odm_pipe &&
1806 ++ pipe_ctx->stream &&
1807 ++ pipe_ctx->stream == stream) {
1808 ++ top_pipe_to_program = pipe_ctx;
1809 ++ }
1810 ++ }
1811 ++
1812 ++ if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
1813 ++ dc->hwss.interdependent_update_lock(dc, context, true);
1814 ++ else
1815 ++ /* Lock the top pipe while updating plane addrs, since freesync requires
1816 ++ * plane addr update event triggers to be synchronized.
1817 ++ * top_pipe_to_program is expected to never be NULL
1818 ++ */
1819 ++ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
1820 ++
1821 ++
1822 + // Stream updates
1823 + if (stream_update)
1824 + commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
1825 +@@ -2190,6 +2252,12 @@ static void commit_planes_for_stream(struct dc *dc,
1826 + if (dc->hwss.program_front_end_for_ctx)
1827 + dc->hwss.program_front_end_for_ctx(dc, context);
1828 +
1829 ++ if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
1830 ++ dc->hwss.interdependent_update_lock(dc, context, false);
1831 ++ else
1832 ++ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
1833 ++
1834 ++ dc->hwss.post_unlock_program_front_end(dc, context);
1835 + return;
1836 + }
1837 +
1838 +@@ -2225,8 +2293,6 @@ static void commit_planes_for_stream(struct dc *dc,
1839 + pipe_ctx->stream == stream) {
1840 + struct dc_stream_status *stream_status = NULL;
1841 +
1842 +- top_pipe_to_program = pipe_ctx;
1843 +-
1844 + if (!pipe_ctx->plane_state)
1845 + continue;
1846 +
1847 +@@ -2271,12 +2337,6 @@ static void commit_planes_for_stream(struct dc *dc,
1848 +
1849 + // Update Type FAST, Surface updates
1850 + if (update_type == UPDATE_TYPE_FAST) {
1851 +- /* Lock the top pipe while updating plane addrs, since freesync requires
1852 +- * plane addr update event triggers to be synchronized.
1853 +- * top_pipe_to_program is expected to never be NULL
1854 +- */
1855 +- dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
1856 +-
1857 + if (dc->hwss.set_flip_control_gsl)
1858 + for (i = 0; i < surface_count; i++) {
1859 + struct dc_plane_state *plane_state = srf_updates[i].surface;
1860 +@@ -2318,9 +2378,15 @@ static void commit_planes_for_stream(struct dc *dc,
1861 + dc->hwss.update_plane_addr(dc, pipe_ctx);
1862 + }
1863 + }
1864 ++ }
1865 +
1866 ++ if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
1867 ++ dc->hwss.interdependent_update_lock(dc, context, false);
1868 ++ else
1869 + dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
1870 +- }
1871 ++
1872 ++ if (update_type != UPDATE_TYPE_FAST)
1873 ++ dc->hwss.post_unlock_program_front_end(dc, context);
1874 +
1875 + // Fire manual trigger only when bottom plane is flipped
1876 + for (j = 0; j < dc->res_pool->pipe_count; j++) {
1877 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
1878 +index 8c20e9e907b2..4f0e7203dba4 100644
1879 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
1880 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
1881 +@@ -231,34 +231,6 @@ struct dc_stream_status *dc_stream_get_status(
1882 + return dc_stream_get_status_from_state(dc->current_state, stream);
1883 + }
1884 +
1885 +-static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
1886 +-{
1887 +-#if defined(CONFIG_DRM_AMD_DC_DCN)
1888 +- unsigned int vupdate_line;
1889 +- unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos;
1890 +- struct dc_stream_state *stream = pipe_ctx->stream;
1891 +- unsigned int us_per_line;
1892 +-
1893 +- if (!dc->hwss.get_vupdate_offset_from_vsync)
1894 +- return;
1895 +-
1896 +- vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
1897 +- if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
1898 +- return;
1899 +-
1900 +- if (vpos >= vupdate_line)
1901 +- return;
1902 +-
1903 +- us_per_line =
1904 +- stream->timing.h_total * 10000 / stream->timing.pix_clk_100hz;
1905 +- lines_to_vupdate = vupdate_line - vpos;
1906 +- us_to_vupdate = lines_to_vupdate * us_per_line;
1907 +-
1908 +- /* 70 us is a conservative estimate of cursor update time*/
1909 +- if (us_to_vupdate < 70)
1910 +- udelay(us_to_vupdate);
1911 +-#endif
1912 +-}
1913 +
1914 + /**
1915 + * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
1916 +@@ -298,9 +270,7 @@ bool dc_stream_set_cursor_attributes(
1917 +
1918 + if (!pipe_to_program) {
1919 + pipe_to_program = pipe_ctx;
1920 +-
1921 +- delay_cursor_until_vupdate(pipe_ctx, dc);
1922 +- dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
1923 ++ dc->hwss.cursor_lock(dc, pipe_to_program, true);
1924 + }
1925 +
1926 + dc->hwss.set_cursor_attribute(pipe_ctx);
1927 +@@ -309,7 +279,7 @@ bool dc_stream_set_cursor_attributes(
1928 + }
1929 +
1930 + if (pipe_to_program)
1931 +- dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
1932 ++ dc->hwss.cursor_lock(dc, pipe_to_program, false);
1933 +
1934 + return true;
1935 + }
1936 +@@ -349,16 +319,14 @@ bool dc_stream_set_cursor_position(
1937 +
1938 + if (!pipe_to_program) {
1939 + pipe_to_program = pipe_ctx;
1940 +-
1941 +- delay_cursor_until_vupdate(pipe_ctx, dc);
1942 +- dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
1943 ++ dc->hwss.cursor_lock(dc, pipe_to_program, true);
1944 + }
1945 +
1946 + dc->hwss.set_cursor_position(pipe_ctx);
1947 + }
1948 +
1949 + if (pipe_to_program)
1950 +- dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
1951 ++ dc->hwss.cursor_lock(dc, pipe_to_program, false);
1952 +
1953 + return true;
1954 + }
1955 +diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
1956 +index 92096de79dec..a5c7ef47b8d3 100644
1957 +--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
1958 ++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
1959 +@@ -118,6 +118,7 @@ union stream_update_flags {
1960 + uint32_t dpms_off:1;
1961 + uint32_t gamut_remap:1;
1962 + uint32_t wb_update:1;
1963 ++ uint32_t dsc_changed : 1;
1964 + } bits;
1965 +
1966 + uint32_t raw;
1967 +diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
1968 +index 5b689273ff44..454a123b92fc 100644
1969 +--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
1970 ++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
1971 +@@ -2574,17 +2574,6 @@ static void dce110_apply_ctx_for_surface(
1972 + if (dc->fbc_compressor)
1973 + dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
1974 +
1975 +- for (i = 0; i < dc->res_pool->pipe_count; i++) {
1976 +- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1977 +- struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1978 +-
1979 +- if (stream == pipe_ctx->stream) {
1980 +- if (!pipe_ctx->top_pipe &&
1981 +- (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
1982 +- dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
1983 +- }
1984 +- }
1985 +-
1986 + for (i = 0; i < dc->res_pool->pipe_count; i++) {
1987 + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1988 +
1989 +@@ -2607,20 +2596,16 @@ static void dce110_apply_ctx_for_surface(
1990 +
1991 + }
1992 +
1993 +- for (i = 0; i < dc->res_pool->pipe_count; i++) {
1994 +- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1995 +- struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1996 +-
1997 +- if ((stream == pipe_ctx->stream) &&
1998 +- (!pipe_ctx->top_pipe) &&
1999 +- (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
2000 +- dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
2001 +- }
2002 +-
2003 + if (dc->fbc_compressor)
2004 + enable_fbc(dc, context);
2005 + }
2006 +
2007 ++static void dce110_post_unlock_program_front_end(
2008 ++ struct dc *dc,
2009 ++ struct dc_state *context)
2010 ++{
2011 ++}
2012 ++
2013 + static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
2014 + {
2015 + struct dce_hwseq *hws = dc->hwseq;
2016 +@@ -2722,6 +2707,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
2017 + .init_hw = init_hw,
2018 + .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
2019 + .apply_ctx_for_surface = dce110_apply_ctx_for_surface,
2020 ++ .post_unlock_program_front_end = dce110_post_unlock_program_front_end,
2021 + .update_plane_addr = update_plane_addr,
2022 + .update_pending_status = dce110_update_pending_status,
2023 + .enable_accelerated_mode = dce110_enable_accelerated_mode,
2024 +@@ -2736,6 +2722,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
2025 + .disable_audio_stream = dce110_disable_audio_stream,
2026 + .disable_plane = dce110_power_down_fe,
2027 + .pipe_control_lock = dce_pipe_control_lock,
2028 ++ .interdependent_update_lock = NULL,
2029 ++ .cursor_lock = dce_pipe_control_lock,
2030 + .prepare_bandwidth = dce110_prepare_bandwidth,
2031 + .optimize_bandwidth = dce110_optimize_bandwidth,
2032 + .set_drr = set_drr,
2033 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
2034 +index 1008ac8a0f2a..0c987b5d68e2 100644
2035 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
2036 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
2037 +@@ -82,7 +82,7 @@ void print_microsec(struct dc_context *dc_ctx,
2038 + us_x10 % frac);
2039 + }
2040 +
2041 +-static void dcn10_lock_all_pipes(struct dc *dc,
2042 ++void dcn10_lock_all_pipes(struct dc *dc,
2043 + struct dc_state *context,
2044 + bool lock)
2045 + {
2046 +@@ -93,6 +93,7 @@ static void dcn10_lock_all_pipes(struct dc *dc,
2047 + for (i = 0; i < dc->res_pool->pipe_count; i++) {
2048 + pipe_ctx = &context->res_ctx.pipe_ctx[i];
2049 + tg = pipe_ctx->stream_res.tg;
2050 ++
2051 + /*
2052 + * Only lock the top pipe's tg to prevent redundant
2053 + * (un)locking. Also skip if pipe is disabled.
2054 +@@ -103,9 +104,9 @@ static void dcn10_lock_all_pipes(struct dc *dc,
2055 + continue;
2056 +
2057 + if (lock)
2058 +- tg->funcs->lock(tg);
2059 ++ dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
2060 + else
2061 +- tg->funcs->unlock(tg);
2062 ++ dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
2063 + }
2064 + }
2065 +
2066 +@@ -1576,7 +1577,7 @@ void dcn10_pipe_control_lock(
2067 + /* use TG master update lock to lock everything on the TG
2068 + * therefore only top pipe need to lock
2069 + */
2070 +- if (pipe->top_pipe)
2071 ++ if (!pipe || pipe->top_pipe)
2072 + return;
2073 +
2074 + if (dc->debug.sanity_checks)
2075 +@@ -1591,6 +1592,85 @@ void dcn10_pipe_control_lock(
2076 + hws->funcs.verify_allow_pstate_change_high(dc);
2077 + }
2078 +
2079 ++/**
2080 ++ * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
2081 ++ *
2082 ++ * Software keepout workaround to prevent cursor update locking from stalling
2083 ++ * out cursor updates indefinitely or from old values from being retained in
2084 ++ * the case where the viewport changes in the same frame as the cursor.
2085 ++ *
2086 ++ * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
2087 ++ * too close to VUPDATE, then stall out until VUPDATE finishes.
2088 ++ *
2089 ++ * TODO: Optimize cursor programming to be once per frame before VUPDATE
2090 ++ * to avoid the need for this workaround.
2091 ++ */
2092 ++static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
2093 ++{
2094 ++ struct dc_stream_state *stream = pipe_ctx->stream;
2095 ++ struct crtc_position position;
2096 ++ uint32_t vupdate_start, vupdate_end;
2097 ++ unsigned int lines_to_vupdate, us_to_vupdate, vpos;
2098 ++ unsigned int us_per_line, us_vupdate;
2099 ++
2100 ++ if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
2101 ++ return;
2102 ++
2103 ++ if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
2104 ++ return;
2105 ++
2106 ++ dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
2107 ++ &vupdate_end);
2108 ++
2109 ++ dc->hwss.get_position(&pipe_ctx, 1, &position);
2110 ++ vpos = position.vertical_count;
2111 ++
2112 ++ /* Avoid wraparound calculation issues */
2113 ++ vupdate_start += stream->timing.v_total;
2114 ++ vupdate_end += stream->timing.v_total;
2115 ++ vpos += stream->timing.v_total;
2116 ++
2117 ++ if (vpos <= vupdate_start) {
2118 ++ /* VPOS is in VACTIVE or back porch. */
2119 ++ lines_to_vupdate = vupdate_start - vpos;
2120 ++ } else if (vpos > vupdate_end) {
2121 ++ /* VPOS is in the front porch. */
2122 ++ return;
2123 ++ } else {
2124 ++ /* VPOS is in VUPDATE. */
2125 ++ lines_to_vupdate = 0;
2126 ++ }
2127 ++
2128 ++ /* Calculate time until VUPDATE in microseconds. */
2129 ++ us_per_line =
2130 ++ stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
2131 ++ us_to_vupdate = lines_to_vupdate * us_per_line;
2132 ++
2133 ++ /* 70 us is a conservative estimate of cursor update time*/
2134 ++ if (us_to_vupdate > 70)
2135 ++ return;
2136 ++
2137 ++ /* Stall out until the cursor update completes. */
2138 ++ if (vupdate_end < vupdate_start)
2139 ++ vupdate_end += stream->timing.v_total;
2140 ++ us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
2141 ++ udelay(us_to_vupdate + us_vupdate);
2142 ++}
2143 ++
2144 ++void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
2145 ++{
2146 ++ /* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
2147 ++ if (!pipe || pipe->top_pipe)
2148 ++ return;
2149 ++
2150 ++ /* Prevent cursor lock from stalling out cursor updates. */
2151 ++ if (lock)
2152 ++ delay_cursor_until_vupdate(dc, pipe);
2153 ++
2154 ++ dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
2155 ++ pipe->stream_res.opp->inst, lock);
2156 ++}
2157 ++
2158 + static bool wait_for_reset_trigger_to_occur(
2159 + struct dc_context *dc_ctx,
2160 + struct timing_generator *tg)
2161 +@@ -2512,7 +2592,6 @@ void dcn10_apply_ctx_for_surface(
2162 + int i;
2163 + struct timing_generator *tg;
2164 + uint32_t underflow_check_delay_us;
2165 +- bool removed_pipe[4] = { false };
2166 + bool interdependent_update = false;
2167 + struct pipe_ctx *top_pipe_to_program =
2168 + dcn10_find_top_pipe_for_stream(dc, context, stream);
2169 +@@ -2531,11 +2610,6 @@ void dcn10_apply_ctx_for_surface(
2170 + if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
2171 + ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
2172 +
2173 +- if (interdependent_update)
2174 +- dcn10_lock_all_pipes(dc, context, true);
2175 +- else
2176 +- dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
2177 +-
2178 + if (underflow_check_delay_us != 0xFFFFFFFF)
2179 + udelay(underflow_check_delay_us);
2180 +
2181 +@@ -2552,18 +2626,8 @@ void dcn10_apply_ctx_for_surface(
2182 + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2183 + struct pipe_ctx *old_pipe_ctx =
2184 + &dc->current_state->res_ctx.pipe_ctx[i];
2185 +- /*
2186 +- * Powergate reused pipes that are not powergated
2187 +- * fairly hacky right now, using opp_id as indicator
2188 +- * TODO: After move dc_post to dc_update, this will
2189 +- * be removed.
2190 +- */
2191 +- if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
2192 +- if (old_pipe_ctx->stream_res.tg == tg &&
2193 +- old_pipe_ctx->plane_res.hubp &&
2194 +- old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID)
2195 +- dc->hwss.disable_plane(dc, old_pipe_ctx);
2196 +- }
2197 ++
2198 ++ pipe_ctx->update_flags.raw = 0;
2199 +
2200 + if ((!pipe_ctx->plane_state ||
2201 + pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
2202 +@@ -2571,7 +2635,7 @@ void dcn10_apply_ctx_for_surface(
2203 + old_pipe_ctx->stream_res.tg == tg) {
2204 +
2205 + hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx);
2206 +- removed_pipe[i] = true;
2207 ++ pipe_ctx->update_flags.bits.disable = 1;
2208 +
2209 + DC_LOG_DC("Reset mpcc for pipe %d\n",
2210 + old_pipe_ctx->pipe_idx);
2211 +@@ -2597,21 +2661,41 @@ void dcn10_apply_ctx_for_surface(
2212 + &pipe_ctx->dlg_regs,
2213 + &pipe_ctx->ttu_regs);
2214 + }
2215 ++}
2216 +
2217 +- if (interdependent_update)
2218 +- dcn10_lock_all_pipes(dc, context, false);
2219 +- else
2220 +- dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
2221 ++void dcn10_post_unlock_program_front_end(
2222 ++ struct dc *dc,
2223 ++ struct dc_state *context)
2224 ++{
2225 ++ int i, j;
2226 +
2227 +- if (num_planes == 0)
2228 +- false_optc_underflow_wa(dc, stream, tg);
2229 ++ DC_LOGGER_INIT(dc->ctx->logger);
2230 ++
2231 ++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
2232 ++ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2233 ++
2234 ++ if (!pipe_ctx->top_pipe &&
2235 ++ !pipe_ctx->prev_odm_pipe &&
2236 ++ pipe_ctx->stream) {
2237 ++ struct dc_stream_status *stream_status = NULL;
2238 ++ struct timing_generator *tg = pipe_ctx->stream_res.tg;
2239 ++
2240 ++ for (j = 0; j < context->stream_count; j++) {
2241 ++ if (pipe_ctx->stream == context->streams[j])
2242 ++ stream_status = &context->stream_status[j];
2243 ++ }
2244 ++
2245 ++ if (context->stream_status[i].plane_count == 0)
2246 ++ false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
2247 ++ }
2248 ++ }
2249 +
2250 + for (i = 0; i < dc->res_pool->pipe_count; i++)
2251 +- if (removed_pipe[i])
2252 ++ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
2253 + dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
2254 +
2255 + for (i = 0; i < dc->res_pool->pipe_count; i++)
2256 +- if (removed_pipe[i]) {
2257 ++ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
2258 + dc->hwss.optimize_bandwidth(dc, context);
2259 + break;
2260 + }
2261 +@@ -3127,7 +3211,7 @@ int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
2262 + return vertical_line_start;
2263 + }
2264 +
2265 +-static void dcn10_calc_vupdate_position(
2266 ++void dcn10_calc_vupdate_position(
2267 + struct dc *dc,
2268 + struct pipe_ctx *pipe_ctx,
2269 + uint32_t *start_line,
2270 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
2271 +index 4d20f6586bb5..42b6e016d71e 100644
2272 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
2273 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
2274 +@@ -34,6 +34,11 @@ struct dc;
2275 + void dcn10_hw_sequencer_construct(struct dc *dc);
2276 +
2277 + int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
2278 ++void dcn10_calc_vupdate_position(
2279 ++ struct dc *dc,
2280 ++ struct pipe_ctx *pipe_ctx,
2281 ++ uint32_t *start_line,
2282 ++ uint32_t *end_line);
2283 + void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
2284 + enum dc_status dcn10_enable_stream_timing(
2285 + struct pipe_ctx *pipe_ctx,
2286 +@@ -49,6 +54,7 @@ void dcn10_pipe_control_lock(
2287 + struct dc *dc,
2288 + struct pipe_ctx *pipe,
2289 + bool lock);
2290 ++void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock);
2291 + void dcn10_blank_pixel_data(
2292 + struct dc *dc,
2293 + struct pipe_ctx *pipe_ctx,
2294 +@@ -70,11 +76,18 @@ void dcn10_reset_hw_ctx_wrap(
2295 + struct dc *dc,
2296 + struct dc_state *context);
2297 + void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
2298 ++void dcn10_lock_all_pipes(
2299 ++ struct dc *dc,
2300 ++ struct dc_state *context,
2301 ++ bool lock);
2302 + void dcn10_apply_ctx_for_surface(
2303 + struct dc *dc,
2304 + const struct dc_stream_state *stream,
2305 + int num_planes,
2306 + struct dc_state *context);
2307 ++void dcn10_post_unlock_program_front_end(
2308 ++ struct dc *dc,
2309 ++ struct dc_state *context);
2310 + void dcn10_hubp_pg_control(
2311 + struct dce_hwseq *hws,
2312 + unsigned int hubp_inst,
2313 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
2314 +index e7e5352ec424..0900c861204f 100644
2315 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
2316 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
2317 +@@ -32,6 +32,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
2318 + .init_hw = dcn10_init_hw,
2319 + .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
2320 + .apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
2321 ++ .post_unlock_program_front_end = dcn10_post_unlock_program_front_end,
2322 + .update_plane_addr = dcn10_update_plane_addr,
2323 + .update_dchub = dcn10_update_dchub,
2324 + .update_pending_status = dcn10_update_pending_status,
2325 +@@ -49,6 +50,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
2326 + .disable_audio_stream = dce110_disable_audio_stream,
2327 + .disable_plane = dcn10_disable_plane,
2328 + .pipe_control_lock = dcn10_pipe_control_lock,
2329 ++ .cursor_lock = dcn10_cursor_lock,
2330 ++ .interdependent_update_lock = dcn10_lock_all_pipes,
2331 + .prepare_bandwidth = dcn10_prepare_bandwidth,
2332 + .optimize_bandwidth = dcn10_optimize_bandwidth,
2333 + .set_drr = dcn10_set_drr,
2334 +@@ -69,6 +72,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
2335 + .set_clock = dcn10_set_clock,
2336 + .get_clock = dcn10_get_clock,
2337 + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
2338 ++ .calc_vupdate_position = dcn10_calc_vupdate_position,
2339 + };
2340 +
2341 + static const struct hwseq_private_funcs dcn10_private_funcs = {
2342 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
2343 +index 04f863499cfb..3fcd408e9103 100644
2344 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
2345 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
2346 +@@ -223,6 +223,9 @@ struct mpcc *mpc1_insert_plane(
2347 + REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, dpp_id);
2348 + REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, tree->opp_id);
2349 +
2350 ++ /* Configure VUPDATE lock set for this MPCC to map to the OPP */
2351 ++ REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, tree->opp_id);
2352 ++
2353 + /* update mpc tree mux setting */
2354 + if (tree->opp_list == insert_above_mpcc) {
2355 + /* insert the toppest mpcc */
2356 +@@ -318,6 +321,7 @@ void mpc1_remove_mpcc(
2357 + REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
2358 + REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
2359 + REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
2360 ++ REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
2361 +
2362 + /* mark this mpcc as not in use */
2363 + mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id);
2364 +@@ -328,6 +332,7 @@ void mpc1_remove_mpcc(
2365 + REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
2366 + REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
2367 + REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
2368 ++ REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
2369 + }
2370 + }
2371 +
2372 +@@ -361,6 +366,7 @@ void mpc1_mpc_init(struct mpc *mpc)
2373 + REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
2374 + REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
2375 + REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
2376 ++ REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
2377 +
2378 + mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
2379 + }
2380 +@@ -381,6 +387,7 @@ void mpc1_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id)
2381 + REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
2382 + REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
2383 + REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
2384 ++ REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
2385 +
2386 + mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
2387 +
2388 +@@ -453,6 +460,13 @@ void mpc1_read_mpcc_state(
2389 + MPCC_BUSY, &s->busy);
2390 + }
2391 +
2392 ++void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock)
2393 ++{
2394 ++ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
2395 ++
2396 ++ REG_SET(CUR[opp_id], 0, CUR_VUPDATE_LOCK_SET, lock ? 1 : 0);
2397 ++}
2398 ++
2399 + static const struct mpc_funcs dcn10_mpc_funcs = {
2400 + .read_mpcc_state = mpc1_read_mpcc_state,
2401 + .insert_plane = mpc1_insert_plane,
2402 +@@ -464,6 +478,7 @@ static const struct mpc_funcs dcn10_mpc_funcs = {
2403 + .assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect,
2404 + .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
2405 + .update_blending = mpc1_update_blending,
2406 ++ .cursor_lock = mpc1_cursor_lock,
2407 + .set_denorm = NULL,
2408 + .set_denorm_clamp = NULL,
2409 + .set_output_csc = NULL,
2410 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
2411 +index 962a68e322ee..66a4719c22a0 100644
2412 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
2413 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
2414 +@@ -39,11 +39,12 @@
2415 + SRII(MPCC_BG_G_Y, MPCC, inst),\
2416 + SRII(MPCC_BG_R_CR, MPCC, inst),\
2417 + SRII(MPCC_BG_B_CB, MPCC, inst),\
2418 +- SRII(MPCC_BG_B_CB, MPCC, inst),\
2419 +- SRII(MPCC_SM_CONTROL, MPCC, inst)
2420 ++ SRII(MPCC_SM_CONTROL, MPCC, inst),\
2421 ++ SRII(MPCC_UPDATE_LOCK_SEL, MPCC, inst)
2422 +
2423 + #define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst) \
2424 +- SRII(MUX, MPC_OUT, inst)
2425 ++ SRII(MUX, MPC_OUT, inst),\
2426 ++ VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst)
2427 +
2428 + #define MPC_COMMON_REG_VARIABLE_LIST \
2429 + uint32_t MPCC_TOP_SEL[MAX_MPCC]; \
2430 +@@ -55,7 +56,9 @@
2431 + uint32_t MPCC_BG_R_CR[MAX_MPCC]; \
2432 + uint32_t MPCC_BG_B_CB[MAX_MPCC]; \
2433 + uint32_t MPCC_SM_CONTROL[MAX_MPCC]; \
2434 +- uint32_t MUX[MAX_OPP];
2435 ++ uint32_t MUX[MAX_OPP]; \
2436 ++ uint32_t MPCC_UPDATE_LOCK_SEL[MAX_MPCC]; \
2437 ++ uint32_t CUR[MAX_OPP];
2438 +
2439 + #define MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
2440 + SF(MPCC0_MPCC_TOP_SEL, MPCC_TOP_SEL, mask_sh),\
2441 +@@ -78,7 +81,8 @@
2442 + SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FIELD_ALT, mask_sh),\
2443 + SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_FRAME_POL, mask_sh),\
2444 + SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_TOP_POL, mask_sh),\
2445 +- SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh)
2446 ++ SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh),\
2447 ++ SF(MPCC0_MPCC_UPDATE_LOCK_SEL, MPCC_UPDATE_LOCK_SEL, mask_sh)
2448 +
2449 + #define MPC_REG_FIELD_LIST(type) \
2450 + type MPCC_TOP_SEL;\
2451 +@@ -101,7 +105,9 @@
2452 + type MPCC_SM_FIELD_ALT;\
2453 + type MPCC_SM_FORCE_NEXT_FRAME_POL;\
2454 + type MPCC_SM_FORCE_NEXT_TOP_POL;\
2455 +- type MPC_OUT_MUX;
2456 ++ type MPC_OUT_MUX;\
2457 ++ type MPCC_UPDATE_LOCK_SEL;\
2458 ++ type CUR_VUPDATE_LOCK_SET;
2459 +
2460 + struct dcn_mpc_registers {
2461 + MPC_COMMON_REG_VARIABLE_LIST
2462 +@@ -192,4 +198,6 @@ void mpc1_read_mpcc_state(
2463 + int mpcc_inst,
2464 + struct mpcc_state *s);
2465 +
2466 ++void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock);
2467 ++
2468 + #endif
2469 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
2470 +index 3b71898e859e..e3c4c06ac191 100644
2471 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
2472 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
2473 +@@ -181,6 +181,14 @@ enum dcn10_clk_src_array_id {
2474 + .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
2475 + mm ## block ## id ## _ ## reg_name
2476 +
2477 ++#define VUPDATE_SRII(reg_name, block, id)\
2478 ++ .reg_name[id] = BASE(mm ## reg_name ## 0 ## _ ## block ## id ## _BASE_IDX) + \
2479 ++ mm ## reg_name ## 0 ## _ ## block ## id
2480 ++
2481 ++/* set field/register/bitfield name */
2482 ++#define SFRB(field_name, reg_name, bitfield, post_fix)\
2483 ++ .field_name = reg_name ## __ ## bitfield ## post_fix
2484 ++
2485 + /* NBIO */
2486 + #define NBIO_BASE_INNER(seg) \
2487 + NBIF_BASE__INST0_SEG ## seg
2488 +@@ -419,11 +427,13 @@ static const struct dcn_mpc_registers mpc_regs = {
2489 + };
2490 +
2491 + static const struct dcn_mpc_shift mpc_shift = {
2492 +- MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
2493 ++ MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT),\
2494 ++ SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, __SHIFT)
2495 + };
2496 +
2497 + static const struct dcn_mpc_mask mpc_mask = {
2498 +- MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),
2499 ++ MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),\
2500 ++ SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, _MASK)
2501 + };
2502 +
2503 + #define tg_regs(id)\
2504 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
2505 +index ad422e00f9fe..611dac544bfe 100644
2506 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
2507 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
2508 +@@ -1088,40 +1088,18 @@ void dcn20_enable_plane(
2509 + // }
2510 + }
2511 +
2512 +-
2513 +-void dcn20_pipe_control_lock_global(
2514 +- struct dc *dc,
2515 +- struct pipe_ctx *pipe,
2516 +- bool lock)
2517 +-{
2518 +- if (lock) {
2519 +- pipe->stream_res.tg->funcs->lock_doublebuffer_enable(
2520 +- pipe->stream_res.tg);
2521 +- pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
2522 +- } else {
2523 +- pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
2524 +- pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
2525 +- CRTC_STATE_VACTIVE);
2526 +- pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
2527 +- CRTC_STATE_VBLANK);
2528 +- pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
2529 +- CRTC_STATE_VACTIVE);
2530 +- pipe->stream_res.tg->funcs->lock_doublebuffer_disable(
2531 +- pipe->stream_res.tg);
2532 +- }
2533 +-}
2534 +-
2535 + void dcn20_pipe_control_lock(
2536 + struct dc *dc,
2537 + struct pipe_ctx *pipe,
2538 + bool lock)
2539 + {
2540 + bool flip_immediate = false;
2541 ++ bool dig_update_required = false;
2542 +
2543 + /* use TG master update lock to lock everything on the TG
2544 + * therefore only top pipe need to lock
2545 + */
2546 +- if (pipe->top_pipe)
2547 ++ if (!pipe || pipe->top_pipe)
2548 + return;
2549 +
2550 + if (pipe->plane_state != NULL)
2551 +@@ -1154,6 +1132,19 @@ void dcn20_pipe_control_lock(
2552 + (!flip_immediate && pipe->stream_res.gsl_group > 0))
2553 + dcn20_setup_gsl_group_as_lock(dc, pipe, flip_immediate);
2554 +
2555 ++ if (pipe->stream && pipe->stream->update_flags.bits.dsc_changed)
2556 ++ dig_update_required = true;
2557 ++
2558 ++ /* Need double buffer lock mode in order to synchronize front end pipe
2559 ++ * updates with dig updates.
2560 ++ */
2561 ++ if (dig_update_required) {
2562 ++ if (lock) {
2563 ++ pipe->stream_res.tg->funcs->lock_doublebuffer_enable(
2564 ++ pipe->stream_res.tg);
2565 ++ }
2566 ++ }
2567 ++
2568 + if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) {
2569 + if (lock)
2570 + pipe->stream_res.tg->funcs->triplebuffer_lock(pipe->stream_res.tg);
2571 +@@ -1165,6 +1156,19 @@ void dcn20_pipe_control_lock(
2572 + else
2573 + pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
2574 + }
2575 ++
2576 ++ if (dig_update_required) {
2577 ++ if (!lock) {
2578 ++ pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
2579 ++ CRTC_STATE_VACTIVE);
2580 ++ pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
2581 ++ CRTC_STATE_VBLANK);
2582 ++ pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
2583 ++ CRTC_STATE_VACTIVE);
2584 ++ pipe->stream_res.tg->funcs->lock_doublebuffer_disable(
2585 ++ pipe->stream_res.tg);
2586 ++ }
2587 ++ }
2588 + }
2589 +
2590 + static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe)
2591 +@@ -1536,27 +1540,28 @@ static void dcn20_program_pipe(
2592 + }
2593 + }
2594 +
2595 +-static bool does_pipe_need_lock(struct pipe_ctx *pipe)
2596 +-{
2597 +- if ((pipe->plane_state && pipe->plane_state->update_flags.raw)
2598 +- || pipe->update_flags.raw)
2599 +- return true;
2600 +- if (pipe->bottom_pipe)
2601 +- return does_pipe_need_lock(pipe->bottom_pipe);
2602 +-
2603 +- return false;
2604 +-}
2605 +-
2606 + void dcn20_program_front_end_for_ctx(
2607 + struct dc *dc,
2608 + struct dc_state *context)
2609 + {
2610 +- const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
2611 + int i;
2612 + struct dce_hwseq *hws = dc->hwseq;
2613 +- bool pipe_locked[MAX_PIPES] = {false};
2614 + DC_LOGGER_INIT(dc->ctx->logger);
2615 +
2616 ++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
2617 ++ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2618 ++
2619 ++ if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
2620 ++ ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
2621 ++ if (dc->hwss.program_triplebuffer != NULL &&
2622 ++ !dc->debug.disable_tri_buf) {
2623 ++ /*turn off triple buffer for full update*/
2624 ++ dc->hwss.program_triplebuffer(
2625 ++ dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
2626 ++ }
2627 ++ }
2628 ++ }
2629 ++
2630 + /* Carry over GSL groups in case the context is changing. */
2631 + for (i = 0; i < dc->res_pool->pipe_count; i++)
2632 + if (context->res_ctx.pipe_ctx[i].stream == dc->current_state->res_ctx.pipe_ctx[i].stream)
2633 +@@ -1567,17 +1572,6 @@ void dcn20_program_front_end_for_ctx(
2634 + for (i = 0; i < dc->res_pool->pipe_count; i++)
2635 + dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
2636 + &context->res_ctx.pipe_ctx[i]);
2637 +- for (i = 0; i < dc->res_pool->pipe_count; i++)
2638 +- if (!context->res_ctx.pipe_ctx[i].top_pipe &&
2639 +- does_pipe_need_lock(&context->res_ctx.pipe_ctx[i])) {
2640 +- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2641 +-
2642 +- if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
2643 +- dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
2644 +- if (!pipe_ctx->update_flags.bits.enable)
2645 +- dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], true);
2646 +- pipe_locked[i] = true;
2647 +- }
2648 +
2649 + /* OTG blank before disabling all front ends */
2650 + for (i = 0; i < dc->res_pool->pipe_count; i++)
2651 +@@ -1615,17 +1609,16 @@ void dcn20_program_front_end_for_ctx(
2652 + hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
2653 + }
2654 + }
2655 ++}
2656 +
2657 +- /* Unlock all locked pipes */
2658 +- for (i = 0; i < dc->res_pool->pipe_count; i++)
2659 +- if (pipe_locked[i]) {
2660 +- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2661 ++void dcn20_post_unlock_program_front_end(
2662 ++ struct dc *dc,
2663 ++ struct dc_state *context)
2664 ++{
2665 ++ int i;
2666 ++ const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
2667 +
2668 +- if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
2669 +- dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
2670 +- if (!pipe_ctx->update_flags.bits.enable)
2671 +- dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], false);
2672 +- }
2673 ++ DC_LOGGER_INIT(dc->ctx->logger);
2674 +
2675 + for (i = 0; i < dc->res_pool->pipe_count; i++)
2676 + if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
2677 +@@ -1655,7 +1648,6 @@ void dcn20_program_front_end_for_ctx(
2678 + dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
2679 + }
2680 +
2681 +-
2682 + void dcn20_prepare_bandwidth(
2683 + struct dc *dc,
2684 + struct dc_state *context)
2685 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
2686 +index 02c9be5ebd47..63ce763f148e 100644
2687 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
2688 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
2689 +@@ -35,6 +35,9 @@ bool dcn20_set_shaper_3dlut(
2690 + void dcn20_program_front_end_for_ctx(
2691 + struct dc *dc,
2692 + struct dc_state *context);
2693 ++void dcn20_post_unlock_program_front_end(
2694 ++ struct dc *dc,
2695 ++ struct dc_state *context);
2696 + void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx);
2697 + void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
2698 + bool dcn20_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
2699 +@@ -58,10 +61,6 @@ void dcn20_pipe_control_lock(
2700 + struct dc *dc,
2701 + struct pipe_ctx *pipe,
2702 + bool lock);
2703 +-void dcn20_pipe_control_lock_global(
2704 +- struct dc *dc,
2705 +- struct pipe_ctx *pipe,
2706 +- bool lock);
2707 + void dcn20_prepare_bandwidth(
2708 + struct dc *dc,
2709 + struct dc_state *context);
2710 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
2711 +index 5e640f17d3d4..71bfde2cf646 100644
2712 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
2713 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
2714 +@@ -33,6 +33,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
2715 + .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
2716 + .apply_ctx_for_surface = NULL,
2717 + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
2718 ++ .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
2719 + .update_plane_addr = dcn20_update_plane_addr,
2720 + .update_dchub = dcn10_update_dchub,
2721 + .update_pending_status = dcn10_update_pending_status,
2722 +@@ -50,7 +51,8 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
2723 + .disable_audio_stream = dce110_disable_audio_stream,
2724 + .disable_plane = dcn20_disable_plane,
2725 + .pipe_control_lock = dcn20_pipe_control_lock,
2726 +- .pipe_control_lock_global = dcn20_pipe_control_lock_global,
2727 ++ .interdependent_update_lock = dcn10_lock_all_pipes,
2728 ++ .cursor_lock = dcn10_cursor_lock,
2729 + .prepare_bandwidth = dcn20_prepare_bandwidth,
2730 + .optimize_bandwidth = dcn20_optimize_bandwidth,
2731 + .update_bandwidth = dcn20_update_bandwidth,
2732 +@@ -81,6 +83,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
2733 + .init_vm_ctx = dcn20_init_vm_ctx,
2734 + .set_flip_control_gsl = dcn20_set_flip_control_gsl,
2735 + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
2736 ++ .calc_vupdate_position = dcn10_calc_vupdate_position,
2737 + };
2738 +
2739 + static const struct hwseq_private_funcs dcn20_private_funcs = {
2740 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
2741 +index de9c857ab3e9..570dfd9a243f 100644
2742 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
2743 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
2744 +@@ -545,6 +545,7 @@ const struct mpc_funcs dcn20_mpc_funcs = {
2745 + .mpc_init = mpc1_mpc_init,
2746 + .mpc_init_single_inst = mpc1_mpc_init_single_inst,
2747 + .update_blending = mpc2_update_blending,
2748 ++ .cursor_lock = mpc1_cursor_lock,
2749 + .get_mpcc_for_dpp = mpc2_get_mpcc_for_dpp,
2750 + .wait_for_idle = mpc2_assert_idle_mpcc,
2751 + .assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect,
2752 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
2753 +index c78fd5123497..496658f420db 100644
2754 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
2755 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
2756 +@@ -179,7 +179,8 @@
2757 + SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\
2758 + SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\
2759 + SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\
2760 +- SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh)
2761 ++ SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\
2762 ++ SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh)
2763 +
2764 + /*
2765 + * DCN2 MPC_OCSC debug status register:
2766 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
2767 +index 1b0bca9587d0..1ba47f3a6857 100644
2768 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
2769 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
2770 +@@ -506,6 +506,10 @@ enum dcn20_clk_src_array_id {
2771 + .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
2772 + mm ## block ## id ## _ ## reg_name
2773 +
2774 ++#define VUPDATE_SRII(reg_name, block, id)\
2775 ++ .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
2776 ++ mm ## reg_name ## _ ## block ## id
2777 ++
2778 + /* NBIO */
2779 + #define NBIO_BASE_INNER(seg) \
2780 + NBIO_BASE__INST0_SEG ## seg
2781 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
2782 +index fddbd59bf4f9..7f53bf724fce 100644
2783 +--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
2784 ++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
2785 +@@ -34,6 +34,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
2786 + .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
2787 + .apply_ctx_for_surface = NULL,
2788 + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
2789 ++ .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
2790 + .update_plane_addr = dcn20_update_plane_addr,
2791 + .update_dchub = dcn10_update_dchub,
2792 + .update_pending_status = dcn10_update_pending_status,
2793 +@@ -51,7 +52,8 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
2794 + .disable_audio_stream = dce110_disable_audio_stream,
2795 + .disable_plane = dcn20_disable_plane,
2796 + .pipe_control_lock = dcn20_pipe_control_lock,
2797 +- .pipe_control_lock_global = dcn20_pipe_control_lock_global,
2798 ++ .interdependent_update_lock = dcn10_lock_all_pipes,
2799 ++ .cursor_lock = dcn10_cursor_lock,
2800 + .prepare_bandwidth = dcn20_prepare_bandwidth,
2801 + .optimize_bandwidth = dcn20_optimize_bandwidth,
2802 + .update_bandwidth = dcn20_update_bandwidth,
2803 +@@ -84,6 +86,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
2804 + .optimize_pwr_state = dcn21_optimize_pwr_state,
2805 + .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
2806 + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
2807 ++ .calc_vupdate_position = dcn10_calc_vupdate_position,
2808 + .set_cursor_position = dcn10_set_cursor_position,
2809 + .set_cursor_attribute = dcn10_set_cursor_attribute,
2810 + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
2811 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
2812 +index 122d3e734c59..5286cc7d1261 100644
2813 +--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
2814 ++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
2815 +@@ -306,6 +306,10 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
2816 + .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
2817 + mm ## block ## id ## _ ## reg_name
2818 +
2819 ++#define VUPDATE_SRII(reg_name, block, id)\
2820 ++ .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
2821 ++ mm ## reg_name ## _ ## block ## id
2822 ++
2823 + /* NBIO */
2824 + #define NBIO_BASE_INNER(seg) \
2825 + NBIF0_BASE__INST0_SEG ## seg
2826 +diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
2827 +index 094afc4c8173..50ee8aa7ec3b 100644
2828 +--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
2829 ++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
2830 +@@ -210,6 +210,22 @@ struct mpc_funcs {
2831 + struct mpcc_blnd_cfg *blnd_cfg,
2832 + int mpcc_id);
2833 +
2834 ++ /*
2835 ++ * Lock cursor updates for the specified OPP.
2836 ++ * OPP defines the set of MPCC that are locked together for cursor.
2837 ++ *
2838 ++ * Parameters:
2839 ++ * [in] mpc - MPC context.
2840 ++ * [in] opp_id - The OPP to lock cursor updates on
2841 ++ * [in] lock - lock/unlock the OPP
2842 ++ *
2843 ++ * Return: void
2844 ++ */
2845 ++ void (*cursor_lock)(
2846 ++ struct mpc *mpc,
2847 ++ int opp_id,
2848 ++ bool lock);
2849 ++
2850 + struct mpcc* (*get_mpcc_for_dpp)(
2851 + struct mpc_tree *tree,
2852 + int dpp_id);
2853 +diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
2854 +index 209118f9f193..08307f3796e3 100644
2855 +--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
2856 ++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
2857 +@@ -66,6 +66,8 @@ struct hw_sequencer_funcs {
2858 + int num_planes, struct dc_state *context);
2859 + void (*program_front_end_for_ctx)(struct dc *dc,
2860 + struct dc_state *context);
2861 ++ void (*post_unlock_program_front_end)(struct dc *dc,
2862 ++ struct dc_state *context);
2863 + void (*update_plane_addr)(const struct dc *dc,
2864 + struct pipe_ctx *pipe_ctx);
2865 + void (*update_dchub)(struct dce_hwseq *hws,
2866 +@@ -78,17 +80,23 @@ struct hw_sequencer_funcs {
2867 + void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
2868 +
2869 + /* Pipe Lock Related */
2870 +- void (*pipe_control_lock_global)(struct dc *dc,
2871 +- struct pipe_ctx *pipe, bool lock);
2872 + void (*pipe_control_lock)(struct dc *dc,
2873 + struct pipe_ctx *pipe, bool lock);
2874 ++ void (*interdependent_update_lock)(struct dc *dc,
2875 ++ struct dc_state *context, bool lock);
2876 + void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx,
2877 + bool flip_immediate);
2878 ++ void (*cursor_lock)(struct dc *dc, struct pipe_ctx *pipe, bool lock);
2879 +
2880 + /* Timing Related */
2881 + void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes,
2882 + struct crtc_position *position);
2883 + int (*get_vupdate_offset_from_vsync)(struct pipe_ctx *pipe_ctx);
2884 ++ void (*calc_vupdate_position)(
2885 ++ struct dc *dc,
2886 ++ struct pipe_ctx *pipe_ctx,
2887 ++ uint32_t *start_line,
2888 ++ uint32_t *end_line);
2889 + void (*enable_per_frame_crtc_position_reset)(struct dc *dc,
2890 + int group_size, struct pipe_ctx *grouped_pipes[]);
2891 + void (*enable_timing_synchronization)(struct dc *dc,
2892 +diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
2893 +index e4e5a53b2b4e..8e2acb4df860 100644
2894 +--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
2895 ++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
2896 +@@ -319,12 +319,12 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
2897 + if (*level & profile_mode_mask) {
2898 + hwmgr->saved_dpm_level = hwmgr->dpm_level;
2899 + hwmgr->en_umd_pstate = true;
2900 +- amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
2901 +- AMD_IP_BLOCK_TYPE_GFX,
2902 +- AMD_CG_STATE_UNGATE);
2903 + amdgpu_device_ip_set_powergating_state(hwmgr->adev,
2904 + AMD_IP_BLOCK_TYPE_GFX,
2905 + AMD_PG_STATE_UNGATE);
2906 ++ amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
2907 ++ AMD_IP_BLOCK_TYPE_GFX,
2908 ++ AMD_CG_STATE_UNGATE);
2909 + }
2910 + } else {
2911 + /* exit umd pstate, restore level, enable gfx cg*/
2912 +diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
2913 +index 96e81c7bc266..e2565967db07 100644
2914 +--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
2915 ++++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
2916 +@@ -1675,12 +1675,12 @@ static int smu_enable_umd_pstate(void *handle,
2917 + if (*level & profile_mode_mask) {
2918 + smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
2919 + smu_dpm_ctx->enable_umd_pstate = true;
2920 +- amdgpu_device_ip_set_clockgating_state(smu->adev,
2921 +- AMD_IP_BLOCK_TYPE_GFX,
2922 +- AMD_CG_STATE_UNGATE);
2923 + amdgpu_device_ip_set_powergating_state(smu->adev,
2924 + AMD_IP_BLOCK_TYPE_GFX,
2925 + AMD_PG_STATE_UNGATE);
2926 ++ amdgpu_device_ip_set_clockgating_state(smu->adev,
2927 ++ AMD_IP_BLOCK_TYPE_GFX,
2928 ++ AMD_CG_STATE_UNGATE);
2929 + }
2930 + } else {
2931 + /* exit umd pstate, restore level, enable gfx cg*/
2932 +diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
2933 +index bcba2f024842..e9900e078d51 100644
2934 +--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
2935 ++++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
2936 +@@ -328,8 +328,8 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc,
2937 + if (!drm_atomic_crtc_needs_modeset(state))
2938 + return 0;
2939 +
2940 +- if (state->mode.hdisplay > priv->soc_info->max_height ||
2941 +- state->mode.vdisplay > priv->soc_info->max_width)
2942 ++ if (state->mode.hdisplay > priv->soc_info->max_width ||
2943 ++ state->mode.vdisplay > priv->soc_info->max_height)
2944 + return -EINVAL;
2945 +
2946 + rate = clk_round_rate(priv->pix_clk,
2947 +@@ -474,7 +474,7 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder,
2948 +
2949 + static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg)
2950 + {
2951 +- struct ingenic_drm *priv = arg;
2952 ++ struct ingenic_drm *priv = drm_device_get_priv(arg);
2953 + unsigned int state;
2954 +
2955 + regmap_read(priv->map, JZ_REG_LCD_STATE, &state);
2956 +diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
2957 +index b5f5eb7b4bb9..8c2e1b47e81a 100644
2958 +--- a/drivers/gpu/drm/meson/meson_drv.c
2959 ++++ b/drivers/gpu/drm/meson/meson_drv.c
2960 +@@ -412,9 +412,7 @@ static int __maybe_unused meson_drv_pm_resume(struct device *dev)
2961 + if (priv->afbcd.ops)
2962 + priv->afbcd.ops->init(priv);
2963 +
2964 +- drm_mode_config_helper_resume(priv->drm);
2965 +-
2966 +- return 0;
2967 ++ return drm_mode_config_helper_resume(priv->drm);
2968 + }
2969 +
2970 + static int compare_of(struct device *dev, void *data)
2971 +diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
2972 +index 281c81edabc6..dfb122b5e1b7 100644
2973 +--- a/drivers/hwmon/nct7904.c
2974 ++++ b/drivers/hwmon/nct7904.c
2975 +@@ -356,6 +356,7 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel,
2976 + struct nct7904_data *data = dev_get_drvdata(dev);
2977 + int ret, temp;
2978 + unsigned int reg1, reg2, reg3;
2979 ++ s8 temps;
2980 +
2981 + switch (attr) {
2982 + case hwmon_temp_input:
2983 +@@ -461,7 +462,8 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel,
2984 +
2985 + if (ret < 0)
2986 + return ret;
2987 +- *val = ret * 1000;
2988 ++ temps = ret;
2989 ++ *val = temps * 1000;
2990 + return 0;
2991 + }
2992 +
2993 +diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
2994 +index bf8e149d3191..e0a5e897e4b1 100644
2995 +--- a/drivers/infiniband/core/rdma_core.c
2996 ++++ b/drivers/infiniband/core/rdma_core.c
2997 +@@ -153,9 +153,9 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
2998 + uobj->context = NULL;
2999 +
3000 + /*
3001 +- * For DESTROY the usecnt is held write locked, the caller is expected
3002 +- * to put it unlock and put the object when done with it. Only DESTROY
3003 +- * can remove the IDR handle.
3004 ++ * For DESTROY the usecnt is not changed, the caller is expected to
3005 ++ * manage it via uobj_put_destroy(). Only DESTROY can remove the IDR
3006 ++ * handle.
3007 + */
3008 + if (reason != RDMA_REMOVE_DESTROY)
3009 + atomic_set(&uobj->usecnt, 0);
3010 +@@ -187,7 +187,7 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
3011 + /*
3012 + * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
3013 + * sequence. It should only be used from command callbacks. On success the
3014 +- * caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This
3015 ++ * caller must pair this with uobj_put_destroy(). This
3016 + * version requires the caller to have already obtained an
3017 + * LOOKUP_DESTROY uobject kref.
3018 + */
3019 +@@ -198,6 +198,13 @@ int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs)
3020 +
3021 + down_read(&ufile->hw_destroy_rwsem);
3022 +
3023 ++ /*
3024 ++ * Once the uobject is destroyed by RDMA_REMOVE_DESTROY then it is left
3025 ++ * write locked as the callers put it back with UVERBS_LOOKUP_DESTROY.
3026 ++ * This is because any other concurrent thread can still see the object
3027 ++ * in the xarray due to RCU. Leaving it locked ensures nothing else will
3028 ++ * touch it.
3029 ++ */
3030 + ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
3031 + if (ret)
3032 + goto out_unlock;
3033 +@@ -216,7 +223,7 @@ out_unlock:
3034 + /*
3035 + * uobj_get_destroy destroys the HW object and returns a handle to the uobj
3036 + * with a NULL object pointer. The caller must pair this with
3037 +- * uverbs_put_destroy.
3038 ++ * uobj_put_destroy().
3039 + */
3040 + struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
3041 + u32 id, struct uverbs_attr_bundle *attrs)
3042 +@@ -250,8 +257,7 @@ int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
3043 + uobj = __uobj_get_destroy(obj, id, attrs);
3044 + if (IS_ERR(uobj))
3045 + return PTR_ERR(uobj);
3046 +-
3047 +- rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
3048 ++ uobj_put_destroy(uobj);
3049 + return 0;
3050 + }
3051 +
3052 +diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
3053 +index bb78d3280acc..fa7a5ff498c7 100644
3054 +--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
3055 ++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
3056 +@@ -1987,7 +1987,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
3057 + struct rtable *rt;
3058 + struct neighbour *neigh;
3059 + int rc = arpindex;
3060 +- struct net_device *netdev = iwdev->netdev;
3061 + __be32 dst_ipaddr = htonl(dst_ip);
3062 + __be32 src_ipaddr = htonl(src_ip);
3063 +
3064 +@@ -1997,9 +1996,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
3065 + return rc;
3066 + }
3067 +
3068 +- if (netif_is_bond_slave(netdev))
3069 +- netdev = netdev_master_upper_dev_get(netdev);
3070 +-
3071 + neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
3072 +
3073 + rcu_read_lock();
3074 +@@ -2065,7 +2061,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
3075 + {
3076 + struct neighbour *neigh;
3077 + int rc = arpindex;
3078 +- struct net_device *netdev = iwdev->netdev;
3079 + struct dst_entry *dst;
3080 + struct sockaddr_in6 dst_addr;
3081 + struct sockaddr_in6 src_addr;
3082 +@@ -2086,9 +2081,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
3083 + return rc;
3084 + }
3085 +
3086 +- if (netif_is_bond_slave(netdev))
3087 +- netdev = netdev_master_upper_dev_get(netdev);
3088 +-
3089 + neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);
3090 +
3091 + rcu_read_lock();
3092 +diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
3093 +index 6fa0a83c19de..9a1747a97fb6 100644
3094 +--- a/drivers/infiniband/hw/mlx5/mr.c
3095 ++++ b/drivers/infiniband/hw/mlx5/mr.c
3096 +@@ -1319,6 +1319,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
3097 +
3098 + if (is_odp_mr(mr)) {
3099 + to_ib_umem_odp(mr->umem)->private = mr;
3100 ++ init_waitqueue_head(&mr->q_deferred_work);
3101 + atomic_set(&mr->num_deferred_work, 0);
3102 + err = xa_err(xa_store(&dev->odp_mkeys,
3103 + mlx5_base_mkey(mr->mmkey.key), &mr->mmkey,
3104 +diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
3105 +index 568b21eb6ea1..021df0654ba7 100644
3106 +--- a/drivers/infiniband/hw/qib/qib_sysfs.c
3107 ++++ b/drivers/infiniband/hw/qib/qib_sysfs.c
3108 +@@ -760,7 +760,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
3109 + qib_dev_err(dd,
3110 + "Skipping linkcontrol sysfs info, (err %d) port %u\n",
3111 + ret, port_num);
3112 +- goto bail;
3113 ++ goto bail_link;
3114 + }
3115 + kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);
3116 +
3117 +@@ -770,7 +770,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
3118 + qib_dev_err(dd,
3119 + "Skipping sl2vl sysfs info, (err %d) port %u\n",
3120 + ret, port_num);
3121 +- goto bail_link;
3122 ++ goto bail_sl;
3123 + }
3124 + kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);
3125 +
3126 +@@ -780,7 +780,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
3127 + qib_dev_err(dd,
3128 + "Skipping diag_counters sysfs info, (err %d) port %u\n",
3129 + ret, port_num);
3130 +- goto bail_sl;
3131 ++ goto bail_diagc;
3132 + }
3133 + kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);
3134 +
3135 +@@ -793,7 +793,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
3136 + qib_dev_err(dd,
3137 + "Skipping Congestion Control sysfs info, (err %d) port %u\n",
3138 + ret, port_num);
3139 +- goto bail_diagc;
3140 ++ goto bail_cc;
3141 + }
3142 +
3143 + kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
3144 +@@ -854,6 +854,7 @@ void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
3145 + &cc_table_bin_attr);
3146 + kobject_put(&ppd->pport_cc_kobj);
3147 + }
3148 ++ kobject_put(&ppd->diagc_kobj);
3149 + kobject_put(&ppd->sl2vl_kobj);
3150 + kobject_put(&ppd->pport_kobj);
3151 + }
3152 +diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
3153 +index e580ae9cc55a..780fd2dfc07e 100644
3154 +--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
3155 ++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
3156 +@@ -829,7 +829,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
3157 + !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
3158 + dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
3159 + ret = -ENOMEM;
3160 +- goto err_free_device;
3161 ++ goto err_disable_pdev;
3162 + }
3163 +
3164 + ret = pci_request_regions(pdev, DRV_NAME);
3165 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
3166 +index 2aa3457a30ce..0e5f27caf2b2 100644
3167 +--- a/drivers/infiniband/ulp/ipoib/ipoib.h
3168 ++++ b/drivers/infiniband/ulp/ipoib/ipoib.h
3169 +@@ -377,8 +377,12 @@ struct ipoib_dev_priv {
3170 + struct ipoib_rx_buf *rx_ring;
3171 +
3172 + struct ipoib_tx_buf *tx_ring;
3173 ++ /* cyclic ring variables for managing tx_ring, for UD only */
3174 + unsigned int tx_head;
3175 + unsigned int tx_tail;
3176 ++ /* cyclic ring variables for counting overall outstanding send WRs */
3177 ++ unsigned int global_tx_head;
3178 ++ unsigned int global_tx_tail;
3179 + struct ib_sge tx_sge[MAX_SKB_FRAGS + 1];
3180 + struct ib_ud_wr tx_wr;
3181 + struct ib_wc send_wc[MAX_SEND_CQE];
3182 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
3183 +index c59e00a0881f..9bf0fa30df28 100644
3184 +--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
3185 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
3186 +@@ -756,7 +756,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
3187 + return;
3188 + }
3189 +
3190 +- if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size - 1) {
3191 ++ if ((priv->global_tx_head - priv->global_tx_tail) ==
3192 ++ ipoib_sendq_size - 1) {
3193 + ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
3194 + tx->qp->qp_num);
3195 + netif_stop_queue(dev);
3196 +@@ -786,7 +787,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
3197 + } else {
3198 + netif_trans_update(dev);
3199 + ++tx->tx_head;
3200 +- ++priv->tx_head;
3201 ++ ++priv->global_tx_head;
3202 + }
3203 + }
3204 +
3205 +@@ -820,10 +821,11 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
3206 + netif_tx_lock(dev);
3207 +
3208 + ++tx->tx_tail;
3209 +- ++priv->tx_tail;
3210 ++ ++priv->global_tx_tail;
3211 +
3212 + if (unlikely(netif_queue_stopped(dev) &&
3213 +- (priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1 &&
3214 ++ ((priv->global_tx_head - priv->global_tx_tail) <=
3215 ++ ipoib_sendq_size >> 1) &&
3216 + test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
3217 + netif_wake_queue(dev);
3218 +
3219 +@@ -1232,8 +1234,9 @@ timeout:
3220 + dev_kfree_skb_any(tx_req->skb);
3221 + netif_tx_lock_bh(p->dev);
3222 + ++p->tx_tail;
3223 +- ++priv->tx_tail;
3224 +- if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) &&
3225 ++ ++priv->global_tx_tail;
3226 ++ if (unlikely((priv->global_tx_head - priv->global_tx_tail) <=
3227 ++ ipoib_sendq_size >> 1) &&
3228 + netif_queue_stopped(p->dev) &&
3229 + test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
3230 + netif_wake_queue(p->dev);
3231 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
3232 +index c332b4761816..da3c5315bbb5 100644
3233 +--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
3234 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
3235 +@@ -407,9 +407,11 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
3236 + dev_kfree_skb_any(tx_req->skb);
3237 +
3238 + ++priv->tx_tail;
3239 ++ ++priv->global_tx_tail;
3240 +
3241 + if (unlikely(netif_queue_stopped(dev) &&
3242 +- ((priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1) &&
3243 ++ ((priv->global_tx_head - priv->global_tx_tail) <=
3244 ++ ipoib_sendq_size >> 1) &&
3245 + test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
3246 + netif_wake_queue(dev);
3247 +
3248 +@@ -634,7 +636,8 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
3249 + else
3250 + priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
3251 + /* increase the tx_head after send success, but use it for queue state */
3252 +- if (priv->tx_head - priv->tx_tail == ipoib_sendq_size - 1) {
3253 ++ if ((priv->global_tx_head - priv->global_tx_tail) ==
3254 ++ ipoib_sendq_size - 1) {
3255 + ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
3256 + netif_stop_queue(dev);
3257 + }
3258 +@@ -662,6 +665,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
3259 +
3260 + rc = priv->tx_head;
3261 + ++priv->tx_head;
3262 ++ ++priv->global_tx_head;
3263 + }
3264 + return rc;
3265 + }
3266 +@@ -807,6 +811,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
3267 + ipoib_dma_unmap_tx(priv, tx_req);
3268 + dev_kfree_skb_any(tx_req->skb);
3269 + ++priv->tx_tail;
3270 ++ ++priv->global_tx_tail;
3271 + }
3272 +
3273 + for (i = 0; i < ipoib_recvq_size; ++i) {
3274 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
3275 +index 4a0d3a9e72e1..70d6d476ba90 100644
3276 +--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
3277 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
3278 +@@ -1188,9 +1188,11 @@ static void ipoib_timeout(struct net_device *dev, unsigned int txqueue)
3279 +
3280 + ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
3281 + jiffies_to_msecs(jiffies - dev_trans_start(dev)));
3282 +- ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
3283 +- netif_queue_stopped(dev),
3284 +- priv->tx_head, priv->tx_tail);
3285 ++ ipoib_warn(priv,
3286 ++ "queue stopped %d, tx_head %u, tx_tail %u, global_tx_head %u, global_tx_tail %u\n",
3287 ++ netif_queue_stopped(dev), priv->tx_head, priv->tx_tail,
3288 ++ priv->global_tx_head, priv->global_tx_tail);
3289 ++
3290 + /* XXX reset QP, etc. */
3291 + }
3292 +
3293 +@@ -1705,7 +1707,7 @@ static int ipoib_dev_init_default(struct net_device *dev)
3294 + goto out_rx_ring_cleanup;
3295 + }
3296 +
3297 +- /* priv->tx_head, tx_tail & tx_outstanding are already 0 */
3298 ++ /* priv->tx_head, tx_tail and global_tx_tail/head are already 0 */
3299 +
3300 + if (ipoib_transport_dev_init(dev, priv->ca)) {
3301 + pr_warn("%s: ipoib_transport_dev_init failed\n",
3302 +diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
3303 +index cb6e3a5f509c..0d57e51b8ba1 100644
3304 +--- a/drivers/input/evdev.c
3305 ++++ b/drivers/input/evdev.c
3306 +@@ -326,20 +326,6 @@ static int evdev_fasync(int fd, struct file *file, int on)
3307 + return fasync_helper(fd, file, on, &client->fasync);
3308 + }
3309 +
3310 +-static int evdev_flush(struct file *file, fl_owner_t id)
3311 +-{
3312 +- struct evdev_client *client = file->private_data;
3313 +- struct evdev *evdev = client->evdev;
3314 +-
3315 +- mutex_lock(&evdev->mutex);
3316 +-
3317 +- if (evdev->exist && !client->revoked)
3318 +- input_flush_device(&evdev->handle, file);
3319 +-
3320 +- mutex_unlock(&evdev->mutex);
3321 +- return 0;
3322 +-}
3323 +-
3324 + static void evdev_free(struct device *dev)
3325 + {
3326 + struct evdev *evdev = container_of(dev, struct evdev, dev);
3327 +@@ -453,6 +439,10 @@ static int evdev_release(struct inode *inode, struct file *file)
3328 + unsigned int i;
3329 +
3330 + mutex_lock(&evdev->mutex);
3331 ++
3332 ++ if (evdev->exist && !client->revoked)
3333 ++ input_flush_device(&evdev->handle, file);
3334 ++
3335 + evdev_ungrab(evdev, client);
3336 + mutex_unlock(&evdev->mutex);
3337 +
3338 +@@ -1310,7 +1300,6 @@ static const struct file_operations evdev_fops = {
3339 + .compat_ioctl = evdev_ioctl_compat,
3340 + #endif
3341 + .fasync = evdev_fasync,
3342 +- .flush = evdev_flush,
3343 + .llseek = no_llseek,
3344 + };
3345 +
3346 +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
3347 +index 6b40a1c68f9f..c77cdb3b62b5 100644
3348 +--- a/drivers/input/joystick/xpad.c
3349 ++++ b/drivers/input/joystick/xpad.c
3350 +@@ -458,6 +458,16 @@ static const u8 xboxone_fw2015_init[] = {
3351 + 0x05, 0x20, 0x00, 0x01, 0x00
3352 + };
3353 +
3354 ++/*
3355 ++ * This packet is required for Xbox One S (0x045e:0x02ea)
3356 ++ * and Xbox One Elite Series 2 (0x045e:0x0b00) pads to
3357 ++ * initialize the controller that was previously used in
3358 ++ * Bluetooth mode.
3359 ++ */
3360 ++static const u8 xboxone_s_init[] = {
3361 ++ 0x05, 0x20, 0x00, 0x0f, 0x06
3362 ++};
3363 ++
3364 + /*
3365 + * This packet is required for the Titanfall 2 Xbox One pads
3366 + * (0x0e6f:0x0165) to finish initialization and for Hori pads
3367 +@@ -516,6 +526,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
3368 + XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
3369 + XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
3370 + XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
3371 ++ XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init),
3372 ++ XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init),
3373 + XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1),
3374 + XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2),
3375 + XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
3376 +diff --git a/drivers/input/keyboard/dlink-dir685-touchkeys.c b/drivers/input/keyboard/dlink-dir685-touchkeys.c
3377 +index b0ead7199c40..a69dcc3bd30c 100644
3378 +--- a/drivers/input/keyboard/dlink-dir685-touchkeys.c
3379 ++++ b/drivers/input/keyboard/dlink-dir685-touchkeys.c
3380 +@@ -143,7 +143,7 @@ MODULE_DEVICE_TABLE(of, dir685_tk_of_match);
3381 +
3382 + static struct i2c_driver dir685_tk_i2c_driver = {
3383 + .driver = {
3384 +- .name = "dlin-dir685-touchkeys",
3385 ++ .name = "dlink-dir685-touchkeys",
3386 + .of_match_table = of_match_ptr(dir685_tk_of_match),
3387 + },
3388 + .probe = dir685_tk_probe,
3389 +diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
3390 +index 190b9974526b..258d5fe3d395 100644
3391 +--- a/drivers/input/rmi4/rmi_driver.c
3392 ++++ b/drivers/input/rmi4/rmi_driver.c
3393 +@@ -205,7 +205,7 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
3394 +
3395 + if (count) {
3396 + kfree(attn_data.data);
3397 +- attn_data.data = NULL;
3398 ++ drvdata->attn_data.data = NULL;
3399 + }
3400 +
3401 + if (!kfifo_is_empty(&drvdata->attn_fifo))
3402 +@@ -1210,7 +1210,8 @@ static int rmi_driver_probe(struct device *dev)
3403 + if (data->input) {
3404 + rmi_driver_set_input_name(rmi_dev, data->input);
3405 + if (!rmi_dev->xport->input) {
3406 +- if (input_register_device(data->input)) {
3407 ++ retval = input_register_device(data->input);
3408 ++ if (retval) {
3409 + dev_err(dev, "%s: Failed to register input device.\n",
3410 + __func__);
3411 + goto err_destroy_functions;
3412 +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
3413 +index 08e919dbeb5d..7e048b557462 100644
3414 +--- a/drivers/input/serio/i8042-x86ia64io.h
3415 ++++ b/drivers/input/serio/i8042-x86ia64io.h
3416 +@@ -662,6 +662,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
3417 + DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
3418 + },
3419 + },
3420 ++ {
3421 ++ /* Lenovo ThinkPad Twist S230u */
3422 ++ .matches = {
3423 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
3424 ++ DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
3425 ++ },
3426 ++ },
3427 + { }
3428 + };
3429 +
3430 +diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
3431 +index 16d70201de4a..397cb1d3f481 100644
3432 +--- a/drivers/input/touchscreen/usbtouchscreen.c
3433 ++++ b/drivers/input/touchscreen/usbtouchscreen.c
3434 +@@ -182,6 +182,7 @@ static const struct usb_device_id usbtouch_devices[] = {
3435 + #endif
3436 +
3437 + #ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH
3438 ++ {USB_DEVICE(0x255e, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
3439 + {USB_DEVICE(0x595a, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
3440 + {USB_DEVICE(0x6615, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
3441 + {USB_DEVICE(0x6615, 0x0012), .driver_info = DEVTYPE_IRTOUCH_HIRES},
3442 +diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
3443 +index 22b28076d48e..b09de25df02e 100644
3444 +--- a/drivers/iommu/iommu.c
3445 ++++ b/drivers/iommu/iommu.c
3446 +@@ -509,7 +509,7 @@ struct iommu_group *iommu_group_alloc(void)
3447 + NULL, "%d", group->id);
3448 + if (ret) {
3449 + ida_simple_remove(&iommu_group_ida, group->id);
3450 +- kfree(group);
3451 ++ kobject_put(&group->kobj);
3452 + return ERR_PTR(ret);
3453 + }
3454 +
3455 +diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
3456 +index 32db16f6debc..2d19291ebc84 100644
3457 +--- a/drivers/mmc/core/block.c
3458 ++++ b/drivers/mmc/core/block.c
3459 +@@ -2475,8 +2475,8 @@ static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
3460 + struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
3461 + struct mmc_rpmb_data, chrdev);
3462 +
3463 +- put_device(&rpmb->dev);
3464 + mmc_blk_put(rpmb->md);
3465 ++ put_device(&rpmb->dev);
3466 +
3467 + return 0;
3468 + }
3469 +diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
3470 +index 007481557191..9b8346638f69 100644
3471 +--- a/drivers/net/bonding/bond_sysfs_slave.c
3472 ++++ b/drivers/net/bonding/bond_sysfs_slave.c
3473 +@@ -149,8 +149,10 @@ int bond_sysfs_slave_add(struct slave *slave)
3474 +
3475 + err = kobject_init_and_add(&slave->kobj, &slave_ktype,
3476 + &(slave->dev->dev.kobj), "bonding_slave");
3477 +- if (err)
3478 ++ if (err) {
3479 ++ kobject_put(&slave->kobj);
3480 + return err;
3481 ++ }
3482 +
3483 + for (a = slave_attrs; *a; ++a) {
3484 + err = sysfs_create_file(&slave->kobj, &((*a)->attr));
3485 +diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
3486 +index 0123498242b9..b95425a63a13 100644
3487 +--- a/drivers/net/dsa/mt7530.c
3488 ++++ b/drivers/net/dsa/mt7530.c
3489 +@@ -639,11 +639,8 @@ mt7530_cpu_port_enable(struct mt7530_priv *priv,
3490 + mt7530_write(priv, MT7530_PVC_P(port),
3491 + PORT_SPEC_TAG);
3492 +
3493 +- /* Disable auto learning on the cpu port */
3494 +- mt7530_set(priv, MT7530_PSC_P(port), SA_DIS);
3495 +-
3496 +- /* Unknown unicast frame fordwarding to the cpu port */
3497 +- mt7530_set(priv, MT7530_MFC, UNU_FFP(BIT(port)));
3498 ++ /* Unknown multicast frame forwarding to the cpu port */
3499 ++ mt7530_rmw(priv, MT7530_MFC, UNM_FFP_MASK, UNM_FFP(BIT(port)));
3500 +
3501 + /* Set CPU port number */
3502 + if (priv->id == ID_MT7621)
3503 +@@ -1247,8 +1244,6 @@ mt7530_setup(struct dsa_switch *ds)
3504 + /* Enable and reset MIB counters */
3505 + mt7530_mib_reset(ds);
3506 +
3507 +- mt7530_clear(priv, MT7530_MFC, UNU_FFP_MASK);
3508 +-
3509 + for (i = 0; i < MT7530_NUM_PORTS; i++) {
3510 + /* Disable forwarding by default on all ports */
3511 + mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
3512 +diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
3513 +index 756140b7dfd5..0e7e36d8f994 100644
3514 +--- a/drivers/net/dsa/mt7530.h
3515 ++++ b/drivers/net/dsa/mt7530.h
3516 +@@ -31,6 +31,7 @@ enum {
3517 + #define MT7530_MFC 0x10
3518 + #define BC_FFP(x) (((x) & 0xff) << 24)
3519 + #define UNM_FFP(x) (((x) & 0xff) << 16)
3520 ++#define UNM_FFP_MASK UNM_FFP(~0)
3521 + #define UNU_FFP(x) (((x) & 0xff) << 8)
3522 + #define UNU_FFP_MASK UNU_FFP(~0)
3523 + #define CPU_EN BIT(7)
3524 +diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
3525 +index a7780c06fa65..b74580e87be8 100644
3526 +--- a/drivers/net/dsa/ocelot/felix.c
3527 ++++ b/drivers/net/dsa/ocelot/felix.c
3528 +@@ -385,6 +385,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
3529 + struct ocelot *ocelot = &felix->ocelot;
3530 + phy_interface_t *port_phy_modes;
3531 + resource_size_t switch_base;
3532 ++ struct resource res;
3533 + int port, i, err;
3534 +
3535 + ocelot->num_phys_ports = num_phys_ports;
3536 +@@ -416,17 +417,16 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
3537 +
3538 + for (i = 0; i < TARGET_MAX; i++) {
3539 + struct regmap *target;
3540 +- struct resource *res;
3541 +
3542 + if (!felix->info->target_io_res[i].name)
3543 + continue;
3544 +
3545 +- res = &felix->info->target_io_res[i];
3546 +- res->flags = IORESOURCE_MEM;
3547 +- res->start += switch_base;
3548 +- res->end += switch_base;
3549 ++ memcpy(&res, &felix->info->target_io_res[i], sizeof(res));
3550 ++ res.flags = IORESOURCE_MEM;
3551 ++ res.start += switch_base;
3552 ++ res.end += switch_base;
3553 +
3554 +- target = ocelot_regmap_init(ocelot, res);
3555 ++ target = ocelot_regmap_init(ocelot, &res);
3556 + if (IS_ERR(target)) {
3557 + dev_err(ocelot->dev,
3558 + "Failed to map device memory space\n");
3559 +@@ -447,7 +447,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
3560 + for (port = 0; port < num_phys_ports; port++) {
3561 + struct ocelot_port *ocelot_port;
3562 + void __iomem *port_regs;
3563 +- struct resource *res;
3564 +
3565 + ocelot_port = devm_kzalloc(ocelot->dev,
3566 + sizeof(struct ocelot_port),
3567 +@@ -459,12 +458,12 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
3568 + return -ENOMEM;
3569 + }
3570 +
3571 +- res = &felix->info->port_io_res[port];
3572 +- res->flags = IORESOURCE_MEM;
3573 +- res->start += switch_base;
3574 +- res->end += switch_base;
3575 ++ memcpy(&res, &felix->info->port_io_res[port], sizeof(res));
3576 ++ res.flags = IORESOURCE_MEM;
3577 ++ res.start += switch_base;
3578 ++ res.end += switch_base;
3579 +
3580 +- port_regs = devm_ioremap_resource(ocelot->dev, res);
3581 ++ port_regs = devm_ioremap_resource(ocelot->dev, &res);
3582 + if (IS_ERR(port_regs)) {
3583 + dev_err(ocelot->dev,
3584 + "failed to map registers for port %d\n", port);
3585 +diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
3586 +index 8771d40324f1..2c024cc901d4 100644
3587 +--- a/drivers/net/dsa/ocelot/felix.h
3588 ++++ b/drivers/net/dsa/ocelot/felix.h
3589 +@@ -8,9 +8,9 @@
3590 +
3591 + /* Platform-specific information */
3592 + struct felix_info {
3593 +- struct resource *target_io_res;
3594 +- struct resource *port_io_res;
3595 +- struct resource *imdio_res;
3596 ++ const struct resource *target_io_res;
3597 ++ const struct resource *port_io_res;
3598 ++ const struct resource *imdio_res;
3599 + const struct reg_field *regfields;
3600 + const u32 *const *map;
3601 + const struct ocelot_ops *ops;
3602 +diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
3603 +index edc1a67c002b..50074da3a1a0 100644
3604 +--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
3605 ++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
3606 +@@ -328,10 +328,8 @@ static const u32 *vsc9959_regmap[] = {
3607 + [GCB] = vsc9959_gcb_regmap,
3608 + };
3609 +
3610 +-/* Addresses are relative to the PCI device's base address and
3611 +- * will be fixed up at ioremap time.
3612 +- */
3613 +-static struct resource vsc9959_target_io_res[] = {
3614 ++/* Addresses are relative to the PCI device's base address */
3615 ++static const struct resource vsc9959_target_io_res[] = {
3616 + [ANA] = {
3617 + .start = 0x0280000,
3618 + .end = 0x028ffff,
3619 +@@ -374,7 +372,7 @@ static struct resource vsc9959_target_io_res[] = {
3620 + },
3621 + };
3622 +
3623 +-static struct resource vsc9959_port_io_res[] = {
3624 ++static const struct resource vsc9959_port_io_res[] = {
3625 + {
3626 + .start = 0x0100000,
3627 + .end = 0x010ffff,
3628 +@@ -410,7 +408,7 @@ static struct resource vsc9959_port_io_res[] = {
3629 + /* Port MAC 0 Internal MDIO bus through which the SerDes acting as an
3630 + * SGMII/QSGMII MAC PCS can be found.
3631 + */
3632 +-static struct resource vsc9959_imdio_res = {
3633 ++static const struct resource vsc9959_imdio_res = {
3634 + .start = 0x8030,
3635 + .end = 0x8040,
3636 + .name = "imdio",
3637 +@@ -984,7 +982,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
3638 + struct device *dev = ocelot->dev;
3639 + resource_size_t imdio_base;
3640 + void __iomem *imdio_regs;
3641 +- struct resource *res;
3642 ++ struct resource res;
3643 + struct enetc_hw *hw;
3644 + struct mii_bus *bus;
3645 + int port;
3646 +@@ -1001,12 +999,12 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
3647 + imdio_base = pci_resource_start(felix->pdev,
3648 + felix->info->imdio_pci_bar);
3649 +
3650 +- res = felix->info->imdio_res;
3651 +- res->flags = IORESOURCE_MEM;
3652 +- res->start += imdio_base;
3653 +- res->end += imdio_base;
3654 ++ memcpy(&res, felix->info->imdio_res, sizeof(res));
3655 ++ res.flags = IORESOURCE_MEM;
3656 ++ res.start += imdio_base;
3657 ++ res.end += imdio_base;
3658 +
3659 +- imdio_regs = devm_ioremap_resource(dev, res);
3660 ++ imdio_regs = devm_ioremap_resource(dev, &res);
3661 + if (IS_ERR(imdio_regs)) {
3662 + dev_err(dev, "failed to map internal MDIO registers\n");
3663 + return PTR_ERR(imdio_regs);
3664 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
3665 +index d0ddd08c4112..fce4e26c36cf 100644
3666 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
3667 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
3668 +@@ -4184,14 +4184,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3669 + int i, intr_process, rc, tmo_count;
3670 + struct input *req = msg;
3671 + u32 *data = msg;
3672 +- __le32 *resp_len;
3673 + u8 *valid;
3674 + u16 cp_ring_id, len = 0;
3675 + struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
3676 + u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
3677 + struct hwrm_short_input short_input = {0};
3678 + u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
3679 +- u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
3680 + u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
3681 + u16 dst = BNXT_HWRM_CHNL_CHIMP;
3682 +
3683 +@@ -4209,7 +4207,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3684 + bar_offset = BNXT_GRCPF_REG_KONG_COMM;
3685 + doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
3686 + resp = bp->hwrm_cmd_kong_resp_addr;
3687 +- resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
3688 + }
3689 +
3690 + memset(resp, 0, PAGE_SIZE);
3691 +@@ -4278,7 +4275,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3692 + tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
3693 + timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
3694 + tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
3695 +- resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
3696 +
3697 + if (intr_process) {
3698 + u16 seq_id = bp->hwrm_intr_seq_id;
3699 +@@ -4306,9 +4302,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3700 + le16_to_cpu(req->req_type));
3701 + return -EBUSY;
3702 + }
3703 +- len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3704 +- HWRM_RESP_LEN_SFT;
3705 +- valid = resp_addr + len - 1;
3706 ++ len = le16_to_cpu(resp->resp_len);
3707 ++ valid = ((u8 *)resp) + len - 1;
3708 + } else {
3709 + int j;
3710 +
3711 +@@ -4319,8 +4314,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3712 + */
3713 + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
3714 + return -EBUSY;
3715 +- len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3716 +- HWRM_RESP_LEN_SFT;
3717 ++ len = le16_to_cpu(resp->resp_len);
3718 + if (len)
3719 + break;
3720 + /* on first few passes, just barely sleep */
3721 +@@ -4342,7 +4336,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3722 + }
3723 +
3724 + /* Last byte of resp contains valid bit */
3725 +- valid = resp_addr + len - 1;
3726 ++ valid = ((u8 *)resp) + len - 1;
3727 + for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
3728 + /* make sure we read from updated DMA memory */
3729 + dma_rmb();
3730 +@@ -9324,7 +9318,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
3731 + bnxt_free_skbs(bp);
3732 +
3733 + /* Save ring stats before shutdown */
3734 +- if (bp->bnapi)
3735 ++ if (bp->bnapi && irq_re_init)
3736 + bnxt_get_ring_stats(bp, &bp->net_stats_prev);
3737 + if (irq_re_init) {
3738 + bnxt_free_irq(bp);
3739 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
3740 +index ef0268649822..f76c42652e1a 100644
3741 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
3742 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
3743 +@@ -654,11 +654,6 @@ struct nqe_cn {
3744 + #define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
3745 + #define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
3746 + #define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12)
3747 +-#define HWRM_RESP_ERR_CODE_MASK 0xffff
3748 +-#define HWRM_RESP_LEN_OFFSET 4
3749 +-#define HWRM_RESP_LEN_MASK 0xffff0000
3750 +-#define HWRM_RESP_LEN_SFT 16
3751 +-#define HWRM_RESP_VALID_MASK 0xff000000
3752 + #define BNXT_HWRM_REQ_MAX_SIZE 128
3753 + #define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
3754 + BNXT_HWRM_REQ_MAX_SIZE)
3755 +diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
3756 +index 2bd7ace0a953..bfc6bfe94d0a 100644
3757 +--- a/drivers/net/ethernet/freescale/Kconfig
3758 ++++ b/drivers/net/ethernet/freescale/Kconfig
3759 +@@ -77,6 +77,7 @@ config UCC_GETH
3760 + depends on QUICC_ENGINE && PPC32
3761 + select FSL_PQ_MDIO
3762 + select PHYLIB
3763 ++ select FIXED_PHY
3764 + ---help---
3765 + This driver supports the Gigabit Ethernet mode of the QUICC Engine,
3766 + which is available on some Freescale SOCs.
3767 +@@ -90,6 +91,7 @@ config GIANFAR
3768 + depends on HAS_DMA
3769 + select FSL_PQ_MDIO
3770 + select PHYLIB
3771 ++ select FIXED_PHY
3772 + select CRC32
3773 + ---help---
3774 + This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
3775 +diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig
3776 +index 3b325733a4f8..0a54c7e0e4ae 100644
3777 +--- a/drivers/net/ethernet/freescale/dpaa/Kconfig
3778 ++++ b/drivers/net/ethernet/freescale/dpaa/Kconfig
3779 +@@ -3,6 +3,7 @@ menuconfig FSL_DPAA_ETH
3780 + tristate "DPAA Ethernet"
3781 + depends on FSL_DPAA && FSL_FMAN
3782 + select PHYLIB
3783 ++ select FIXED_PHY
3784 + select FSL_FMAN_MAC
3785 + ---help---
3786 + Data Path Acceleration Architecture Ethernet driver,
3787 +diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
3788 +index ca74a684a904..ab337632793b 100644
3789 +--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
3790 ++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
3791 +@@ -2902,7 +2902,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
3792 + }
3793 +
3794 + /* Do this here, so we can be verbose early */
3795 +- SET_NETDEV_DEV(net_dev, dev);
3796 ++ SET_NETDEV_DEV(net_dev, dev->parent);
3797 + dev_set_drvdata(dev, net_dev);
3798 +
3799 + priv = netdev_priv(net_dev);
3800 +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
3801 +index 4344a59c823f..6122057d60c0 100644
3802 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
3803 ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
3804 +@@ -1070,7 +1070,7 @@ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
3805 + (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
3806 +
3807 + val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
3808 +- val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
3809 ++ val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
3810 + mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
3811 + }
3812 +
3813 +diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
3814 +index 6e501af0e532..f6ff9620a137 100644
3815 +--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
3816 ++++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
3817 +@@ -2734,7 +2734,7 @@ void mlx4_opreq_action(struct work_struct *work)
3818 + if (err) {
3819 + mlx4_err(dev, "Failed to retrieve required operation: %d\n",
3820 + err);
3821 +- return;
3822 ++ goto out;
3823 + }
3824 + MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
3825 + MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
3826 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
3827 +index cede5bdfd598..7a77fe40af3a 100644
3828 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
3829 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
3830 +@@ -848,6 +848,14 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
3831 + static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
3832 + struct mlx5_cmd_msg *msg);
3833 +
3834 ++static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
3835 ++{
3836 ++ if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL)
3837 ++ return true;
3838 ++
3839 ++ return cmd->allowed_opcode == opcode;
3840 ++}
3841 ++
3842 + static void cmd_work_handler(struct work_struct *work)
3843 + {
3844 + struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
3845 +@@ -861,6 +869,7 @@ static void cmd_work_handler(struct work_struct *work)
3846 + int alloc_ret;
3847 + int cmd_mode;
3848 +
3849 ++ complete(&ent->handling);
3850 + sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
3851 + down(sem);
3852 + if (!ent->page_queue) {
3853 +@@ -913,7 +922,9 @@ static void cmd_work_handler(struct work_struct *work)
3854 +
3855 + /* Skip sending command to fw if internal error */
3856 + if (pci_channel_offline(dev->pdev) ||
3857 +- dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
3858 ++ dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
3859 ++ cmd->state != MLX5_CMDIF_STATE_UP ||
3860 ++ !opcode_allowed(&dev->cmd, ent->op)) {
3861 + u8 status = 0;
3862 + u32 drv_synd;
3863 +
3864 +@@ -978,6 +989,11 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
3865 + struct mlx5_cmd *cmd = &dev->cmd;
3866 + int err;
3867 +
3868 ++ if (!wait_for_completion_timeout(&ent->handling, timeout) &&
3869 ++ cancel_work_sync(&ent->work)) {
3870 ++ ent->ret = -ECANCELED;
3871 ++ goto out_err;
3872 ++ }
3873 + if (cmd->mode == CMD_MODE_POLLING || ent->polling) {
3874 + wait_for_completion(&ent->done);
3875 + } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
3876 +@@ -985,12 +1001,17 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
3877 + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
3878 + }
3879 +
3880 ++out_err:
3881 + err = ent->ret;
3882 +
3883 + if (err == -ETIMEDOUT) {
3884 + mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
3885 + mlx5_command_str(msg_to_opcode(ent->in)),
3886 + msg_to_opcode(ent->in));
3887 ++ } else if (err == -ECANCELED) {
3888 ++ mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
3889 ++ mlx5_command_str(msg_to_opcode(ent->in)),
3890 ++ msg_to_opcode(ent->in));
3891 + }
3892 + mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
3893 + err, deliv_status_to_str(ent->status), ent->status);
3894 +@@ -1026,6 +1047,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
3895 + ent->token = token;
3896 + ent->polling = force_polling;
3897 +
3898 ++ init_completion(&ent->handling);
3899 + if (!callback)
3900 + init_completion(&ent->done);
3901 +
3902 +@@ -1045,6 +1067,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
3903 + err = wait_func(dev, ent);
3904 + if (err == -ETIMEDOUT)
3905 + goto out;
3906 ++ if (err == -ECANCELED)
3907 ++ goto out_free;
3908 +
3909 + ds = ent->ts2 - ent->ts1;
3910 + op = MLX5_GET(mbox_in, in->first.data, opcode);
3911 +@@ -1391,6 +1415,22 @@ static void create_debugfs_files(struct mlx5_core_dev *dev)
3912 + mlx5_cmdif_debugfs_init(dev);
3913 + }
3914 +
3915 ++void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
3916 ++{
3917 ++ struct mlx5_cmd *cmd = &dev->cmd;
3918 ++ int i;
3919 ++
3920 ++ for (i = 0; i < cmd->max_reg_cmds; i++)
3921 ++ down(&cmd->sem);
3922 ++ down(&cmd->pages_sem);
3923 ++
3924 ++ cmd->allowed_opcode = opcode;
3925 ++
3926 ++ up(&cmd->pages_sem);
3927 ++ for (i = 0; i < cmd->max_reg_cmds; i++)
3928 ++ up(&cmd->sem);
3929 ++}
3930 ++
3931 + static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
3932 + {
3933 + struct mlx5_cmd *cmd = &dev->cmd;
3934 +@@ -1667,12 +1707,14 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
3935 + int err;
3936 + u8 status = 0;
3937 + u32 drv_synd;
3938 ++ u16 opcode;
3939 + u8 token;
3940 +
3941 ++ opcode = MLX5_GET(mbox_in, in, opcode);
3942 + if (pci_channel_offline(dev->pdev) ||
3943 +- dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
3944 +- u16 opcode = MLX5_GET(mbox_in, in, opcode);
3945 +-
3946 ++ dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
3947 ++ dev->cmd.state != MLX5_CMDIF_STATE_UP ||
3948 ++ !opcode_allowed(&dev->cmd, opcode)) {
3949 + err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
3950 + MLX5_SET(mbox_out, out, status, status);
3951 + MLX5_SET(mbox_out, out, syndrome, drv_synd);
3952 +@@ -1937,6 +1979,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
3953 + goto err_free_page;
3954 + }
3955 +
3956 ++ cmd->state = MLX5_CMDIF_STATE_DOWN;
3957 + cmd->checksum_disabled = 1;
3958 + cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
3959 + cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
3960 +@@ -1974,6 +2017,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
3961 + mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));
3962 +
3963 + cmd->mode = CMD_MODE_POLLING;
3964 ++ cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;
3965 +
3966 + create_msg_cache(dev);
3967 +
3968 +@@ -2013,3 +2057,10 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
3969 + dma_pool_destroy(cmd->pool);
3970 + }
3971 + EXPORT_SYMBOL(mlx5_cmd_cleanup);
3972 ++
3973 ++void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
3974 ++ enum mlx5_cmdif_state cmdif_state)
3975 ++{
3976 ++ dev->cmd.state = cmdif_state;
3977 ++}
3978 ++EXPORT_SYMBOL(mlx5_cmd_set_state);
3979 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
3980 +index 5a5e6a21c6e1..80c579948152 100644
3981 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
3982 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
3983 +@@ -1104,7 +1104,7 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
3984 + int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
3985 +
3986 + int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
3987 +-void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
3988 ++void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);
3989 +
3990 + int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
3991 + void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
3992 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
3993 +index 46725cd743a3..7d1985fa0d4f 100644
3994 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
3995 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
3996 +@@ -69,8 +69,8 @@ static void mlx5e_ktls_del(struct net_device *netdev,
3997 + struct mlx5e_ktls_offload_context_tx *tx_priv =
3998 + mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
3999 +
4000 +- mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id);
4001 + mlx5e_destroy_tis(priv->mdev, tx_priv->tisn);
4002 ++ mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id);
4003 + kvfree(tx_priv);
4004 + }
4005 +
4006 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
4007 +index d02db5aebac4..4fef7587165c 100644
4008 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
4009 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
4010 +@@ -2747,7 +2747,8 @@ void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
4011 + mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
4012 + }
4013 +
4014 +- if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
4015 ++ /* Verify inner tirs resources allocated */
4016 ++ if (!priv->inner_indir_tir[0].tirn)
4017 + return;
4018 +
4019 + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
4020 +@@ -3394,14 +3395,15 @@ out:
4021 + return err;
4022 + }
4023 +
4024 +-void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
4025 ++void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
4026 + {
4027 + int i;
4028 +
4029 + for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
4030 + mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
4031 +
4032 +- if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
4033 ++ /* Verify inner tirs resources allocated */
4034 ++ if (!priv->inner_indir_tir[0].tirn)
4035 + return;
4036 +
4037 + for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
4038 +@@ -5107,7 +5109,7 @@ err_destroy_xsk_rqts:
4039 + err_destroy_direct_tirs:
4040 + mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
4041 + err_destroy_indirect_tirs:
4042 +- mlx5e_destroy_indirect_tirs(priv, true);
4043 ++ mlx5e_destroy_indirect_tirs(priv);
4044 + err_destroy_direct_rqts:
4045 + mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
4046 + err_destroy_indirect_rqts:
4047 +@@ -5126,7 +5128,7 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
4048 + mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
4049 + mlx5e_destroy_direct_rqts(priv, priv->xsk_tir);
4050 + mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
4051 +- mlx5e_destroy_indirect_tirs(priv, true);
4052 ++ mlx5e_destroy_indirect_tirs(priv);
4053 + mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
4054 + mlx5e_destroy_rqt(priv, &priv->indir_rqt);
4055 + mlx5e_close_drop_rq(&priv->drop_rq);
4056 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
4057 +index 2ad0d09cc9bd..c3c3d89d9153 100644
4058 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
4059 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
4060 +@@ -1667,7 +1667,7 @@ err_destroy_ttc_table:
4061 + err_destroy_direct_tirs:
4062 + mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
4063 + err_destroy_indirect_tirs:
4064 +- mlx5e_destroy_indirect_tirs(priv, false);
4065 ++ mlx5e_destroy_indirect_tirs(priv);
4066 + err_destroy_direct_rqts:
4067 + mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
4068 + err_destroy_indirect_rqts:
4069 +@@ -1684,7 +1684,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
4070 + mlx5_del_flow_rules(rpriv->vport_rx_rule);
4071 + mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
4072 + mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
4073 +- mlx5e_destroy_indirect_tirs(priv, false);
4074 ++ mlx5e_destroy_indirect_tirs(priv);
4075 + mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
4076 + mlx5e_destroy_rqt(priv, &priv->indir_rqt);
4077 + mlx5e_close_drop_rq(&priv->drop_rq);
4078 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
4079 +index ee60383adc5b..c2b801b435cf 100644
4080 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
4081 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
4082 +@@ -538,10 +538,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
4083 + void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
4084 + {
4085 + struct mlx5e_tx_wqe_info *wi;
4086 ++ u32 dma_fifo_cc, nbytes = 0;
4087 ++ u16 ci, sqcc, npkts = 0;
4088 + struct sk_buff *skb;
4089 +- u32 dma_fifo_cc;
4090 +- u16 sqcc;
4091 +- u16 ci;
4092 + int i;
4093 +
4094 + sqcc = sq->cc;
4095 +@@ -566,11 +565,15 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
4096 + }
4097 +
4098 + dev_kfree_skb_any(skb);
4099 ++ npkts++;
4100 ++ nbytes += wi->num_bytes;
4101 + sqcc += wi->num_wqebbs;
4102 + }
4103 +
4104 + sq->dma_fifo_cc = dma_fifo_cc;
4105 + sq->cc = sqcc;
4106 ++
4107 ++ netdev_tx_completed_queue(sq->txq, npkts, nbytes);
4108 + }
4109 +
4110 + #ifdef CONFIG_MLX5_CORE_IPOIB
4111 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
4112 +index cccea3a8eddd..ce6c621af043 100644
4113 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
4114 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
4115 +@@ -611,11 +611,13 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
4116 + .nent = MLX5_NUM_CMD_EQE,
4117 + .mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
4118 + };
4119 ++ mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ);
4120 + err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd");
4121 + if (err)
4122 + goto err1;
4123 +
4124 + mlx5_cmd_use_events(dev);
4125 ++ mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
4126 +
4127 + param = (struct mlx5_eq_param) {
4128 + .irq_index = 0,
4129 +@@ -645,6 +647,7 @@ err2:
4130 + mlx5_cmd_use_polling(dev);
4131 + cleanup_async_eq(dev, &table->cmd_eq, "cmd");
4132 + err1:
4133 ++ mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
4134 + mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
4135 + return err;
4136 + }
4137 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
4138 +index 8bcf3426b9c6..3ce17c3d7a00 100644
4139 +--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
4140 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
4141 +@@ -346,8 +346,10 @@ int mlx5_events_init(struct mlx5_core_dev *dev)
4142 + events->dev = dev;
4143 + dev->priv.events = events;
4144 + events->wq = create_singlethread_workqueue("mlx5_events");
4145 +- if (!events->wq)
4146 ++ if (!events->wq) {
4147 ++ kfree(events);
4148 + return -ENOMEM;
4149 ++ }
4150 + INIT_WORK(&events->pcie_core_work, mlx5_pcie_event);
4151 +
4152 + return 0;
4153 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
4154 +index 9dc24241dc91..cf09cfc33234 100644
4155 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
4156 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
4157 +@@ -323,14 +323,13 @@ static void tree_put_node(struct fs_node *node, bool locked)
4158 + if (node->del_hw_func)
4159 + node->del_hw_func(node);
4160 + if (parent_node) {
4161 +- /* Only root namespace doesn't have parent and we just
4162 +- * need to free its node.
4163 +- */
4164 + down_write_ref_node(parent_node, locked);
4165 + list_del_init(&node->list);
4166 + if (node->del_sw_func)
4167 + node->del_sw_func(node);
4168 + up_write_ref_node(parent_node, locked);
4169 ++ } else if (node->del_sw_func) {
4170 ++ node->del_sw_func(node);
4171 + } else {
4172 + kfree(node);
4173 + }
4174 +@@ -417,6 +416,12 @@ static void del_sw_ns(struct fs_node *node)
4175 +
4176 + static void del_sw_prio(struct fs_node *node)
4177 + {
4178 ++ struct mlx5_flow_root_namespace *root_ns;
4179 ++ struct mlx5_flow_namespace *ns;
4180 ++
4181 ++ fs_get_obj(ns, node);
4182 ++ root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
4183 ++ mutex_destroy(&root_ns->chain_lock);
4184 + kfree(node);
4185 + }
4186 +
4187 +@@ -447,8 +452,10 @@ static void del_sw_flow_table(struct fs_node *node)
4188 + fs_get_obj(ft, node);
4189 +
4190 + rhltable_destroy(&ft->fgs_hash);
4191 +- fs_get_obj(prio, ft->node.parent);
4192 +- prio->num_ft--;
4193 ++ if (ft->node.parent) {
4194 ++ fs_get_obj(prio, ft->node.parent);
4195 ++ prio->num_ft--;
4196 ++ }
4197 + kfree(ft);
4198 + }
4199 +
4200 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
4201 +index 56078b23f1a0..0a334ceba7b1 100644
4202 +--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
4203 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
4204 +@@ -396,7 +396,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
4205 + err_destroy_direct_tirs:
4206 + mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
4207 + err_destroy_indirect_tirs:
4208 +- mlx5e_destroy_indirect_tirs(priv, true);
4209 ++ mlx5e_destroy_indirect_tirs(priv);
4210 + err_destroy_direct_rqts:
4211 + mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
4212 + err_destroy_indirect_rqts:
4213 +@@ -412,7 +412,7 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
4214 + {
4215 + mlx5i_destroy_flow_steering(priv);
4216 + mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
4217 +- mlx5e_destroy_indirect_tirs(priv, true);
4218 ++ mlx5e_destroy_indirect_tirs(priv);
4219 + mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
4220 + mlx5e_destroy_rqt(priv, &priv->indir_rqt);
4221 + mlx5e_close_drop_rq(&priv->drop_rq);
4222 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
4223 +index f554cfddcf4e..4a08e4eef283 100644
4224 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
4225 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
4226 +@@ -962,6 +962,8 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
4227 + goto err_cmd_cleanup;
4228 + }
4229 +
4230 ++ mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
4231 ++
4232 + err = mlx5_core_enable_hca(dev, 0);
4233 + if (err) {
4234 + mlx5_core_err(dev, "enable hca failed\n");
4235 +@@ -1023,6 +1025,7 @@ reclaim_boot_pages:
4236 + err_disable_hca:
4237 + mlx5_core_disable_hca(dev, 0);
4238 + err_cmd_cleanup:
4239 ++ mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
4240 + mlx5_cmd_cleanup(dev);
4241 +
4242 + return err;
4243 +@@ -1040,6 +1043,7 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
4244 + }
4245 + mlx5_reclaim_startup_pages(dev);
4246 + mlx5_core_disable_hca(dev, 0);
4247 ++ mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
4248 + mlx5_cmd_cleanup(dev);
4249 +
4250 + return 0;
4251 +@@ -1179,7 +1183,7 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
4252 +
4253 + err = mlx5_function_setup(dev, boot);
4254 + if (err)
4255 +- goto out;
4256 ++ goto err_function;
4257 +
4258 + if (boot) {
4259 + err = mlx5_init_once(dev);
4260 +@@ -1225,6 +1229,7 @@ err_load:
4261 + mlx5_cleanup_once(dev);
4262 + function_teardown:
4263 + mlx5_function_teardown(dev, boot);
4264 ++err_function:
4265 + dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
4266 + mutex_unlock(&dev->intf_state_mutex);
4267 +
4268 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
4269 +index 7358b5bc7eb6..58ebabe99876 100644
4270 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
4271 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
4272 +@@ -4043,6 +4043,7 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
4273 + mlxsw_sp_port_remove(mlxsw_sp, i);
4274 + mlxsw_sp_cpu_port_remove(mlxsw_sp);
4275 + kfree(mlxsw_sp->ports);
4276 ++ mlxsw_sp->ports = NULL;
4277 + }
4278 +
4279 + static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
4280 +@@ -4079,6 +4080,7 @@ err_port_create:
4281 + mlxsw_sp_cpu_port_remove(mlxsw_sp);
4282 + err_cpu_port_create:
4283 + kfree(mlxsw_sp->ports);
4284 ++ mlxsw_sp->ports = NULL;
4285 + return err;
4286 + }
4287 +
4288 +@@ -4200,6 +4202,14 @@ static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
4289 + return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
4290 + }
4291 +
4292 ++static struct mlxsw_sp_port *
4293 ++mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
4294 ++{
4295 ++ if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
4296 ++ return mlxsw_sp->ports[local_port];
4297 ++ return NULL;
4298 ++}
4299 ++
4300 + static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
4301 + unsigned int count,
4302 + struct netlink_ext_ack *extack)
4303 +@@ -4213,7 +4223,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
4304 + int i;
4305 + int err;
4306 +
4307 +- mlxsw_sp_port = mlxsw_sp->ports[local_port];
4308 ++ mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
4309 + if (!mlxsw_sp_port) {
4310 + dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
4311 + local_port);
4312 +@@ -4308,7 +4318,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
4313 + int offset;
4314 + int i;
4315 +
4316 +- mlxsw_sp_port = mlxsw_sp->ports[local_port];
4317 ++ mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
4318 + if (!mlxsw_sp_port) {
4319 + dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
4320 + local_port);
4321 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
4322 +index f0e98ec8f1ee..c69232445ab7 100644
4323 +--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
4324 ++++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
4325 +@@ -1259,6 +1259,7 @@ static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
4326 + if (mlxsw_sx_port_created(mlxsw_sx, i))
4327 + mlxsw_sx_port_remove(mlxsw_sx, i);
4328 + kfree(mlxsw_sx->ports);
4329 ++ mlxsw_sx->ports = NULL;
4330 + }
4331 +
4332 + static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
4333 +@@ -1293,6 +1294,7 @@ err_port_module_info_get:
4334 + if (mlxsw_sx_port_created(mlxsw_sx, i))
4335 + mlxsw_sx_port_remove(mlxsw_sx, i);
4336 + kfree(mlxsw_sx->ports);
4337 ++ mlxsw_sx->ports = NULL;
4338 + return err;
4339 + }
4340 +
4341 +@@ -1376,6 +1378,12 @@ static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port,
4342 + u8 module, width;
4343 + int err;
4344 +
4345 ++ if (!mlxsw_sx->ports || !mlxsw_sx->ports[local_port]) {
4346 ++ dev_err(mlxsw_sx->bus_info->dev, "Port number \"%d\" does not exist\n",
4347 ++ local_port);
4348 ++ return -EINVAL;
4349 ++ }
4350 ++
4351 + if (new_type == DEVLINK_PORT_TYPE_AUTO)
4352 + return -EOPNOTSUPP;
4353 +
4354 +diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
4355 +index 39925e4bf2ec..b25a13da900a 100644
4356 +--- a/drivers/net/ethernet/microchip/encx24j600.c
4357 ++++ b/drivers/net/ethernet/microchip/encx24j600.c
4358 +@@ -1070,7 +1070,7 @@ static int encx24j600_spi_probe(struct spi_device *spi)
4359 + if (unlikely(ret)) {
4360 + netif_err(priv, probe, ndev, "Error %d initializing card encx24j600 card\n",
4361 + ret);
4362 +- goto out_free;
4363 ++ goto out_stop;
4364 + }
4365 +
4366 + eidled = encx24j600_read_reg(priv, EIDLED);
4367 +@@ -1088,6 +1088,8 @@ static int encx24j600_spi_probe(struct spi_device *spi)
4368 +
4369 + out_unregister:
4370 + unregister_netdev(priv->ndev);
4371 ++out_stop:
4372 ++ kthread_stop(priv->kworker_task);
4373 + out_free:
4374 + free_netdev(ndev);
4375 +
4376 +@@ -1100,6 +1102,7 @@ static int encx24j600_spi_remove(struct spi_device *spi)
4377 + struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev);
4378 +
4379 + unregister_netdev(priv->ndev);
4380 ++ kthread_stop(priv->kworker_task);
4381 +
4382 + free_netdev(priv->ndev);
4383 +
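
The encx24j600 fix is the classic goto-unwind probe idiom: the kworker task is started early, so a later failure has to jump to a label that stops it, and remove() mirrors the teardown. A hedged standalone sketch of the idiom, with invented names (build: cc -o probe probe.c):

/* Each acquired resource gets a label; a failure jumps to the label
 * that releases everything acquired so far. */
#include <stdio.h>
#include <stdlib.h>

static void *start_worker(void) { return malloc(1); }
static void stop_worker(void *w) { free(w); }
static int init_hw(void) { return -1; }    /* simulate the failing step */

static int probe(void)
{
    void *worker;
    int err;

    worker = start_worker();
    if (!worker)
        return -1;

    err = init_hw();
    if (err)
        goto out_stop;    /* not "return err" -- that leaks the worker */

    return 0;

out_stop:
    stop_worker(worker);
    return err;
}

int main(void)
{
    printf("probe: %d\n", probe());
    return 0;
}
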
4384 +diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
4385 +index 419e2ce2eac0..d5aa4e725853 100644
4386 +--- a/drivers/net/ethernet/mscc/ocelot.c
4387 ++++ b/drivers/net/ethernet/mscc/ocelot.c
4388 +@@ -1460,7 +1460,7 @@ static void ocelot_port_attr_ageing_set(struct ocelot *ocelot, int port,
4389 + unsigned long ageing_clock_t)
4390 + {
4391 + unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
4392 +- u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
4393 ++ u32 ageing_time = jiffies_to_msecs(ageing_jiffies);
4394 +
4395 + ocelot_set_ageing_time(ocelot, ageing_time);
4396 + }
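
The ocelot one-liner is a unit bug: jiffies_to_msecs() already returns milliseconds, so the extra division by 1000 handed seconds to ocelot_set_ageing_time(), which expects ms. A tiny userspace demonstration with an assumed HZ of 250 (build: cc -o age age.c):

#include <stdio.h>

#define HZ 250u
static unsigned int jiffies_to_msecs(unsigned long j) { return j * 1000u / HZ; }

int main(void)
{
    unsigned long ageing_jiffies = 75000;    /* e.g. 300 s at HZ=250 */

    printf("wrong (s):  %u\n", jiffies_to_msecs(ageing_jiffies) / 1000);
    printf("right (ms): %u\n", jiffies_to_msecs(ageing_jiffies));
    return 0;
}
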
4397 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
4398 +index 2a533280b124..29b9c728a65e 100644
4399 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
4400 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
4401 +@@ -3651,7 +3651,7 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
4402 + ahw->diag_cnt = 0;
4403 + ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
4404 + if (ret)
4405 +- goto fail_diag_irq;
4406 ++ goto fail_mbx_args;
4407 +
4408 + if (adapter->flags & QLCNIC_MSIX_ENABLED)
4409 + intrpt_id = ahw->intr_tbl[0].id;
4410 +@@ -3681,6 +3681,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
4411 +
4412 + done:
4413 + qlcnic_free_mbx_args(&cmd);
4414 ++
4415 ++fail_mbx_args:
4416 + qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
4417 +
4418 + fail_diag_irq:
4419 +diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
4420 +index 07a6b609f741..6e4fe2566f6b 100644
4421 +--- a/drivers/net/ethernet/realtek/r8169_main.c
4422 ++++ b/drivers/net/ethernet/realtek/r8169_main.c
4423 +@@ -1044,6 +1044,13 @@ static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
4424 + RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0;
4425 + }
4426 +
4427 ++static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
4428 ++{
4429 ++ /* based on RTL8168FP_OOBMAC_BASE in vendor driver */
4430 ++ if (tp->mac_version == RTL_GIGA_MAC_VER_52 && type == ERIAR_OOB)
4431 ++ *cmd |= 0x7f0 << 18;
4432 ++}
4433 ++
4434 + DECLARE_RTL_COND(rtl_eriar_cond)
4435 + {
4436 + return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
4437 +@@ -1052,9 +1059,12 @@ DECLARE_RTL_COND(rtl_eriar_cond)
4438 + static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
4439 + u32 val, int type)
4440 + {
4441 ++ u32 cmd = ERIAR_WRITE_CMD | type | mask | addr;
4442 ++
4443 + BUG_ON((addr & 3) || (mask == 0));
4444 + RTL_W32(tp, ERIDR, val);
4445 +- RTL_W32(tp, ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
4446 ++ r8168fp_adjust_ocp_cmd(tp, &cmd, type);
4447 ++ RTL_W32(tp, ERIAR, cmd);
4448 +
4449 + rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
4450 + }
4451 +@@ -1067,7 +1077,10 @@ static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
4452 +
4453 + static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
4454 + {
4455 +- RTL_W32(tp, ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
4456 ++ u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr;
4457 ++
4458 ++ r8168fp_adjust_ocp_cmd(tp, &cmd, type);
4459 ++ RTL_W32(tp, ERIAR, cmd);
4460 +
4461 + return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
4462 + RTL_R32(tp, ERIDR) : ~0;
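
The r8169 change refactors the ERIAR accessors so the command word is composed in a local u32, giving the new r8168fp quirk hook a place to OR in the OOB base bits before the single register write. A standalone sketch of that compose-then-adjust shape; the bit layout here is illustrative, only the 0x7f0 << 18 constant comes from the patch (build: cc -o cmd cmd.c):

#include <stdio.h>
#include <stdint.h>

#define WRITE_CMD  (1u << 31)
#define TYPE_OOB   (1u << 30)

static void quirk_adjust_cmd(int is_fp_mac, uint32_t *cmd, uint32_t type)
{
    if (is_fp_mac && type == TYPE_OOB)
        *cmd |= 0x7f0u << 18;    /* extra OOB base, as in the fix */
}

static void reg_write(uint32_t cmd) { printf("ERIAR <= %08x\n", (unsigned)cmd); }

int main(void)
{
    uint32_t cmd = WRITE_CMD | TYPE_OOB | 0x34;

    quirk_adjust_cmd(1, &cmd, TYPE_OOB);
    reg_write(cmd);
    return 0;
}
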
4463 +diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
4464 +index db6b2988e632..f4895777f5e3 100644
4465 +--- a/drivers/net/ethernet/sgi/ioc3-eth.c
4466 ++++ b/drivers/net/ethernet/sgi/ioc3-eth.c
4467 +@@ -865,14 +865,14 @@ static int ioc3eth_probe(struct platform_device *pdev)
4468 + ip = netdev_priv(dev);
4469 + ip->dma_dev = pdev->dev.parent;
4470 + ip->regs = devm_platform_ioremap_resource(pdev, 0);
4471 +- if (!ip->regs) {
4472 +- err = -ENOMEM;
4473 ++ if (IS_ERR(ip->regs)) {
4474 ++ err = PTR_ERR(ip->regs);
4475 + goto out_free;
4476 + }
4477 +
4478 + ip->ssram = devm_platform_ioremap_resource(pdev, 1);
4479 +- if (!ip->ssram) {
4480 +- err = -ENOMEM;
4481 ++ if (IS_ERR(ip->ssram)) {
4482 ++ err = PTR_ERR(ip->ssram);
4483 + goto out_free;
4484 + }
4485 +
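
The ioc3-eth fix matters because devm_platform_ioremap_resource() returns an ERR_PTR-encoded pointer and never NULL, so the old "!ip->regs" test could not trigger, and -ENOMEM clobbered the real errno. A simplified userspace re-implementation of the ERR_PTR convention, for illustration (build: cc -o errptr errptr.c):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
    return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *ioremap_resource(void) { return ERR_PTR(-EBUSY); }

int main(void)
{
    void *regs = ioremap_resource();

    if (IS_ERR(regs))    /* "if (!regs)" would sail right past this */
        printf("mapping failed: %ld\n", PTR_ERR(regs));
    return 0;
}
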
4486 +diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
4487 +index 6ec9163e232c..b716f188188e 100644
4488 +--- a/drivers/net/ethernet/sun/cassini.c
4489 ++++ b/drivers/net/ethernet/sun/cassini.c
4490 +@@ -4971,7 +4971,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4491 + cas_cacheline_size)) {
4492 + dev_err(&pdev->dev, "Could not set PCI cache "
4493 + "line size\n");
4494 +- goto err_write_cacheline;
4495 ++ goto err_out_free_res;
4496 + }
4497 + }
4498 + #endif
4499 +@@ -5144,7 +5144,6 @@ err_out_iounmap:
4500 + err_out_free_res:
4501 + pci_release_regions(pdev);
4502 +
4503 +-err_write_cacheline:
4504 + /* Try to restore it in case the error occurred after we
4505 + * set it.
4506 + */
4507 +diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
4508 +index 6ae4a72e6f43..5577ff0b7663 100644
4509 +--- a/drivers/net/ethernet/ti/cpsw.c
4510 ++++ b/drivers/net/ethernet/ti/cpsw.c
4511 +@@ -1752,11 +1752,15 @@ static int cpsw_suspend(struct device *dev)
4512 + struct cpsw_common *cpsw = dev_get_drvdata(dev);
4513 + int i;
4514 +
4515 ++ rtnl_lock();
4516 ++
4517 + for (i = 0; i < cpsw->data.slaves; i++)
4518 + if (cpsw->slaves[i].ndev)
4519 + if (netif_running(cpsw->slaves[i].ndev))
4520 + cpsw_ndo_stop(cpsw->slaves[i].ndev);
4521 +
4522 ++ rtnl_unlock();
4523 ++
4524 + /* Select sleep pin state */
4525 + pinctrl_pm_select_sleep_state(dev);
4526 +
4527 +diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
4528 +index fbea6f232819..e2ad3c2e8df5 100644
4529 +--- a/drivers/net/hamradio/bpqether.c
4530 ++++ b/drivers/net/hamradio/bpqether.c
4531 +@@ -127,7 +127,8 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev)
4532 + {
4533 + struct bpqdev *bpq;
4534 +
4535 +- list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list) {
4536 ++ list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list,
4537 ++ lockdep_rtnl_is_held()) {
4538 + if (bpq->ethdev == dev)
4539 + return bpq->axdev;
4540 + }
4541 +diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
4542 +index 0cdb2ce47645..a657943c9f01 100644
4543 +--- a/drivers/net/usb/cdc_ether.c
4544 ++++ b/drivers/net/usb/cdc_ether.c
4545 +@@ -815,14 +815,21 @@ static const struct usb_device_id products[] = {
4546 + .driver_info = 0,
4547 + },
4548 +
4549 +-/* Microsoft Surface 3 dock (based on Realtek RTL8153) */
4550 ++/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153) */
4551 + {
4552 + USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM,
4553 + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
4554 + .driver_info = 0,
4555 + },
4556 +
4557 +- /* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
4558 ++/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153B) */
4559 ++{
4560 ++ USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x0927, USB_CLASS_COMM,
4561 ++ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
4562 ++ .driver_info = 0,
4563 ++},
4564 ++
4565 ++/* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
4566 + {
4567 + USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM,
4568 + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
4569 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
4570 +index 95b19ce96513..7c8c45984a5c 100644
4571 +--- a/drivers/net/usb/r8152.c
4572 ++++ b/drivers/net/usb/r8152.c
4573 +@@ -6901,6 +6901,7 @@ static const struct usb_device_id rtl8152_table[] = {
4574 + {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
4575 + {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)},
4576 + {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)},
4577 ++ {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)},
4578 + {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
4579 + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
4580 + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
4581 +diff --git a/drivers/net/wireguard/messages.h b/drivers/net/wireguard/messages.h
4582 +index b8a7b9ce32ba..208da72673fc 100644
4583 +--- a/drivers/net/wireguard/messages.h
4584 ++++ b/drivers/net/wireguard/messages.h
4585 +@@ -32,7 +32,7 @@ enum cookie_values {
4586 + };
4587 +
4588 + enum counter_values {
4589 +- COUNTER_BITS_TOTAL = 2048,
4590 ++ COUNTER_BITS_TOTAL = 8192,
4591 + COUNTER_REDUNDANT_BITS = BITS_PER_LONG,
4592 + COUNTER_WINDOW_SIZE = COUNTER_BITS_TOTAL - COUNTER_REDUNDANT_BITS
4593 + };
4594 +diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
4595 +index 708dc61c974f..626433690abb 100644
4596 +--- a/drivers/net/wireguard/noise.c
4597 ++++ b/drivers/net/wireguard/noise.c
4598 +@@ -104,6 +104,7 @@ static struct noise_keypair *keypair_create(struct wg_peer *peer)
4599 +
4600 + if (unlikely(!keypair))
4601 + return NULL;
4602 ++ spin_lock_init(&keypair->receiving_counter.lock);
4603 + keypair->internal_id = atomic64_inc_return(&keypair_counter);
4604 + keypair->entry.type = INDEX_HASHTABLE_KEYPAIR;
4605 + keypair->entry.peer = peer;
4606 +@@ -358,25 +359,16 @@ out:
4607 + memzero_explicit(output, BLAKE2S_HASH_SIZE + 1);
4608 + }
4609 +
4610 +-static void symmetric_key_init(struct noise_symmetric_key *key)
4611 +-{
4612 +- spin_lock_init(&key->counter.receive.lock);
4613 +- atomic64_set(&key->counter.counter, 0);
4614 +- memset(key->counter.receive.backtrack, 0,
4615 +- sizeof(key->counter.receive.backtrack));
4616 +- key->birthdate = ktime_get_coarse_boottime_ns();
4617 +- key->is_valid = true;
4618 +-}
4619 +-
4620 + static void derive_keys(struct noise_symmetric_key *first_dst,
4621 + struct noise_symmetric_key *second_dst,
4622 + const u8 chaining_key[NOISE_HASH_LEN])
4623 + {
4624 ++ u64 birthdate = ktime_get_coarse_boottime_ns();
4625 + kdf(first_dst->key, second_dst->key, NULL, NULL,
4626 + NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, 0,
4627 + chaining_key);
4628 +- symmetric_key_init(first_dst);
4629 +- symmetric_key_init(second_dst);
4630 ++ first_dst->birthdate = second_dst->birthdate = birthdate;
4631 ++ first_dst->is_valid = second_dst->is_valid = true;
4632 + }
4633 +
4634 + static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN],
4635 +@@ -715,6 +707,7 @@ wg_noise_handshake_consume_response(struct message_handshake_response *src,
4636 + u8 e[NOISE_PUBLIC_KEY_LEN];
4637 + u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN];
4638 + u8 static_private[NOISE_PUBLIC_KEY_LEN];
4639 ++ u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN];
4640 +
4641 + down_read(&wg->static_identity.lock);
4642 +
4643 +@@ -733,6 +726,8 @@ wg_noise_handshake_consume_response(struct message_handshake_response *src,
4644 + memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN);
4645 + memcpy(ephemeral_private, handshake->ephemeral_private,
4646 + NOISE_PUBLIC_KEY_LEN);
4647 ++ memcpy(preshared_key, handshake->preshared_key,
4648 ++ NOISE_SYMMETRIC_KEY_LEN);
4649 + up_read(&handshake->lock);
4650 +
4651 + if (state != HANDSHAKE_CREATED_INITIATION)
4652 +@@ -750,7 +745,7 @@ wg_noise_handshake_consume_response(struct message_handshake_response *src,
4653 + goto fail;
4654 +
4655 + /* psk */
4656 +- mix_psk(chaining_key, hash, key, handshake->preshared_key);
4657 ++ mix_psk(chaining_key, hash, key, preshared_key);
4658 +
4659 + /* {} */
4660 + if (!message_decrypt(NULL, src->encrypted_nothing,
4661 +@@ -783,6 +778,7 @@ out:
4662 + memzero_explicit(chaining_key, NOISE_HASH_LEN);
4663 + memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN);
4664 + memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN);
4665 ++ memzero_explicit(preshared_key, NOISE_SYMMETRIC_KEY_LEN);
4666 + up_read(&wg->static_identity.lock);
4667 + return ret_peer;
4668 + }
4669 +diff --git a/drivers/net/wireguard/noise.h b/drivers/net/wireguard/noise.h
4670 +index f532d59d3f19..c527253dba80 100644
4671 +--- a/drivers/net/wireguard/noise.h
4672 ++++ b/drivers/net/wireguard/noise.h
4673 +@@ -15,18 +15,14 @@
4674 + #include <linux/mutex.h>
4675 + #include <linux/kref.h>
4676 +
4677 +-union noise_counter {
4678 +- struct {
4679 +- u64 counter;
4680 +- unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG];
4681 +- spinlock_t lock;
4682 +- } receive;
4683 +- atomic64_t counter;
4684 ++struct noise_replay_counter {
4685 ++ u64 counter;
4686 ++ spinlock_t lock;
4687 ++ unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG];
4688 + };
4689 +
4690 + struct noise_symmetric_key {
4691 + u8 key[NOISE_SYMMETRIC_KEY_LEN];
4692 +- union noise_counter counter;
4693 + u64 birthdate;
4694 + bool is_valid;
4695 + };
4696 +@@ -34,7 +30,9 @@ struct noise_symmetric_key {
4697 + struct noise_keypair {
4698 + struct index_hashtable_entry entry;
4699 + struct noise_symmetric_key sending;
4700 ++ atomic64_t sending_counter;
4701 + struct noise_symmetric_key receiving;
4702 ++ struct noise_replay_counter receiving_counter;
4703 + __le32 remote_index;
4704 + bool i_am_the_initiator;
4705 + struct kref refcount;
4706 +diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
4707 +index 3432232afe06..c58df439dbbe 100644
4708 +--- a/drivers/net/wireguard/queueing.h
4709 ++++ b/drivers/net/wireguard/queueing.h
4710 +@@ -87,12 +87,20 @@ static inline bool wg_check_packet_protocol(struct sk_buff *skb)
4711 + return real_protocol && skb->protocol == real_protocol;
4712 + }
4713 +
4714 +-static inline void wg_reset_packet(struct sk_buff *skb)
4715 ++static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
4716 + {
4717 ++ u8 l4_hash = skb->l4_hash;
4718 ++ u8 sw_hash = skb->sw_hash;
4719 ++ u32 hash = skb->hash;
4720 + skb_scrub_packet(skb, true);
4721 + memset(&skb->headers_start, 0,
4722 + offsetof(struct sk_buff, headers_end) -
4723 + offsetof(struct sk_buff, headers_start));
4724 ++ if (encapsulating) {
4725 ++ skb->l4_hash = l4_hash;
4726 ++ skb->sw_hash = sw_hash;
4727 ++ skb->hash = hash;
4728 ++ }
4729 + skb->queue_mapping = 0;
4730 + skb->nohdr = 0;
4731 + skb->peeked = 0;
4732 +diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
4733 +index 2566e13a292d..474bb69f0e1b 100644
4734 +--- a/drivers/net/wireguard/receive.c
4735 ++++ b/drivers/net/wireguard/receive.c
4736 +@@ -246,20 +246,20 @@ static void keep_key_fresh(struct wg_peer *peer)
4737 + }
4738 + }
4739 +
4740 +-static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key)
4741 ++static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
4742 + {
4743 + struct scatterlist sg[MAX_SKB_FRAGS + 8];
4744 + struct sk_buff *trailer;
4745 + unsigned int offset;
4746 + int num_frags;
4747 +
4748 +- if (unlikely(!key))
4749 ++ if (unlikely(!keypair))
4750 + return false;
4751 +
4752 +- if (unlikely(!READ_ONCE(key->is_valid) ||
4753 +- wg_birthdate_has_expired(key->birthdate, REJECT_AFTER_TIME) ||
4754 +- key->counter.receive.counter >= REJECT_AFTER_MESSAGES)) {
4755 +- WRITE_ONCE(key->is_valid, false);
4756 ++ if (unlikely(!READ_ONCE(keypair->receiving.is_valid) ||
4757 ++ wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) ||
4758 ++ keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) {
4759 ++ WRITE_ONCE(keypair->receiving.is_valid, false);
4760 + return false;
4761 + }
4762 +
4763 +@@ -284,7 +284,7 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key)
4764 +
4765 + if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0,
4766 + PACKET_CB(skb)->nonce,
4767 +- key->key))
4768 ++ keypair->receiving.key))
4769 + return false;
4770 +
4771 + /* Another ugly situation of pushing and pulling the header so as to
4772 +@@ -299,41 +299,41 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key)
4773 + }
4774 +
4775 + /* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */
4776 +-static bool counter_validate(union noise_counter *counter, u64 their_counter)
4777 ++static bool counter_validate(struct noise_replay_counter *counter, u64 their_counter)
4778 + {
4779 + unsigned long index, index_current, top, i;
4780 + bool ret = false;
4781 +
4782 +- spin_lock_bh(&counter->receive.lock);
4783 ++ spin_lock_bh(&counter->lock);
4784 +
4785 +- if (unlikely(counter->receive.counter >= REJECT_AFTER_MESSAGES + 1 ||
4786 ++ if (unlikely(counter->counter >= REJECT_AFTER_MESSAGES + 1 ||
4787 + their_counter >= REJECT_AFTER_MESSAGES))
4788 + goto out;
4789 +
4790 + ++their_counter;
4791 +
4792 + if (unlikely((COUNTER_WINDOW_SIZE + their_counter) <
4793 +- counter->receive.counter))
4794 ++ counter->counter))
4795 + goto out;
4796 +
4797 + index = their_counter >> ilog2(BITS_PER_LONG);
4798 +
4799 +- if (likely(their_counter > counter->receive.counter)) {
4800 +- index_current = counter->receive.counter >> ilog2(BITS_PER_LONG);
4801 ++ if (likely(their_counter > counter->counter)) {
4802 ++ index_current = counter->counter >> ilog2(BITS_PER_LONG);
4803 + top = min_t(unsigned long, index - index_current,
4804 + COUNTER_BITS_TOTAL / BITS_PER_LONG);
4805 + for (i = 1; i <= top; ++i)
4806 +- counter->receive.backtrack[(i + index_current) &
4807 ++ counter->backtrack[(i + index_current) &
4808 + ((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0;
4809 +- counter->receive.counter = their_counter;
4810 ++ counter->counter = their_counter;
4811 + }
4812 +
4813 + index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1;
4814 + ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1),
4815 +- &counter->receive.backtrack[index]);
4816 ++ &counter->backtrack[index]);
4817 +
4818 + out:
4819 +- spin_unlock_bh(&counter->receive.lock);
4820 ++ spin_unlock_bh(&counter->lock);
4821 + return ret;
4822 + }
4823 +
4824 +@@ -473,19 +473,19 @@ int wg_packet_rx_poll(struct napi_struct *napi, int budget)
4825 + if (unlikely(state != PACKET_STATE_CRYPTED))
4826 + goto next;
4827 +
4828 +- if (unlikely(!counter_validate(&keypair->receiving.counter,
4829 ++ if (unlikely(!counter_validate(&keypair->receiving_counter,
4830 + PACKET_CB(skb)->nonce))) {
4831 + net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n",
4832 + peer->device->dev->name,
4833 + PACKET_CB(skb)->nonce,
4834 +- keypair->receiving.counter.receive.counter);
4835 ++ keypair->receiving_counter.counter);
4836 + goto next;
4837 + }
4838 +
4839 + if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb)))
4840 + goto next;
4841 +
4842 +- wg_reset_packet(skb);
4843 ++ wg_reset_packet(skb, false);
4844 + wg_packet_consume_data_done(peer, skb, &endpoint);
4845 + free = false;
4846 +
4847 +@@ -512,8 +512,8 @@ void wg_packet_decrypt_worker(struct work_struct *work)
4848 + struct sk_buff *skb;
4849 +
4850 + while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
4851 +- enum packet_state state = likely(decrypt_packet(skb,
4852 +- &PACKET_CB(skb)->keypair->receiving)) ?
4853 ++ enum packet_state state =
4854 ++ likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ?
4855 + PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
4856 + wg_queue_enqueue_per_peer_napi(skb, state);
4857 + if (need_resched())
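
counter_validate() above is RFC 6479: a ring of unsigned longs holds one bit per sequence number, old packets fall off the back of the window, and duplicates hit an already-set bit, all without shifting the whole bitmap. A compilable standalone sketch of the same window, with a shrunken size and the REJECT_AFTER_MESSAGES cap omitted (build: cc -o replay replay.c):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define BPL (8 * sizeof(unsigned long))    /* bits per long */
#define BITS_TOTAL 128UL                   /* window + redundant bits */
#define WINDOW (BITS_TOTAL - BPL)
#define WORDS (BITS_TOTAL / BPL)

struct replay {
    uint64_t counter;                  /* highest validated + 1 */
    unsigned long backtrack[WORDS];
};

static int validate(struct replay *r, uint64_t their)
{
    uint64_t index, cur;

    their++;
    if (WINDOW + their < r->counter)
        return 0;                      /* too old: behind the window */
    index = their / BPL;
    if (their > r->counter) {          /* advance: clear skipped words */
        uint64_t i, top = index - r->counter / BPL;

        if (top > WORDS)
            top = WORDS;
        for (i = 1, cur = r->counter / BPL; i <= top; i++)
            r->backtrack[(cur + i) % WORDS] = 0;
        r->counter = their;
    }
    index %= WORDS;
    if (r->backtrack[index] & (1UL << (their % BPL)))
        return 0;                      /* replay: bit already set */
    r->backtrack[index] |= 1UL << (their % BPL);
    return 1;
}

int main(void)
{
    struct replay r;

    memset(&r, 0, sizeof(r));
    printf("%d %d %d\n", validate(&r, 0), validate(&r, 0), validate(&r, 5));
    return 0;
}
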
4858 +diff --git a/drivers/net/wireguard/selftest/counter.c b/drivers/net/wireguard/selftest/counter.c
4859 +index f4fbb9072ed7..ec3c156bf91b 100644
4860 +--- a/drivers/net/wireguard/selftest/counter.c
4861 ++++ b/drivers/net/wireguard/selftest/counter.c
4862 +@@ -6,18 +6,24 @@
4863 + #ifdef DEBUG
4864 + bool __init wg_packet_counter_selftest(void)
4865 + {
4866 ++ struct noise_replay_counter *counter;
4867 + unsigned int test_num = 0, i;
4868 +- union noise_counter counter;
4869 + bool success = true;
4870 +
4871 +-#define T_INIT do { \
4872 +- memset(&counter, 0, sizeof(union noise_counter)); \
4873 +- spin_lock_init(&counter.receive.lock); \
4874 ++ counter = kmalloc(sizeof(*counter), GFP_KERNEL);
4875 ++ if (unlikely(!counter)) {
4876 ++ pr_err("nonce counter self-test malloc: FAIL\n");
4877 ++ return false;
4878 ++ }
4879 ++
4880 ++#define T_INIT do { \
4881 ++ memset(counter, 0, sizeof(*counter)); \
4882 ++ spin_lock_init(&counter->lock); \
4883 + } while (0)
4884 + #define T_LIM (COUNTER_WINDOW_SIZE + 1)
4885 + #define T(n, v) do { \
4886 + ++test_num; \
4887 +- if (counter_validate(&counter, n) != (v)) { \
4888 ++ if (counter_validate(counter, n) != (v)) { \
4889 + pr_err("nonce counter self-test %u: FAIL\n", \
4890 + test_num); \
4891 + success = false; \
4892 +@@ -99,6 +105,7 @@ bool __init wg_packet_counter_selftest(void)
4893 +
4894 + if (success)
4895 + pr_info("nonce counter self-tests: pass\n");
4896 ++ kfree(counter);
4897 + return success;
4898 + }
4899 + #endif
4900 +diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
4901 +index e8a7d0a0cb88..485d5d7a217b 100644
4902 +--- a/drivers/net/wireguard/send.c
4903 ++++ b/drivers/net/wireguard/send.c
4904 +@@ -129,7 +129,7 @@ static void keep_key_fresh(struct wg_peer *peer)
4905 + rcu_read_lock_bh();
4906 + keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
4907 + if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) &&
4908 +- (unlikely(atomic64_read(&keypair->sending.counter.counter) >
4909 ++ (unlikely(atomic64_read(&keypair->sending_counter) >
4910 + REKEY_AFTER_MESSAGES) ||
4911 + (keypair->i_am_the_initiator &&
4912 + unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
4913 +@@ -170,6 +170,11 @@ static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
4914 + struct sk_buff *trailer;
4915 + int num_frags;
4916 +
4917 ++ /* Force hash calculation before encryption so that flow analysis is
4918 ++ * consistent over the inner packet.
4919 ++ */
4920 ++ skb_get_hash(skb);
4921 ++
4922 + /* Calculate lengths. */
4923 + padding_len = calculate_skb_padding(skb);
4924 + trailer_len = padding_len + noise_encrypted_len(0);
4925 +@@ -298,7 +303,7 @@ void wg_packet_encrypt_worker(struct work_struct *work)
4926 + skb_list_walk_safe(first, skb, next) {
4927 + if (likely(encrypt_packet(skb,
4928 + PACKET_CB(first)->keypair))) {
4929 +- wg_reset_packet(skb);
4930 ++ wg_reset_packet(skb, true);
4931 + } else {
4932 + state = PACKET_STATE_DEAD;
4933 + break;
4934 +@@ -348,7 +353,6 @@ void wg_packet_purge_staged_packets(struct wg_peer *peer)
4935 +
4936 + void wg_packet_send_staged_packets(struct wg_peer *peer)
4937 + {
4938 +- struct noise_symmetric_key *key;
4939 + struct noise_keypair *keypair;
4940 + struct sk_buff_head packets;
4941 + struct sk_buff *skb;
4942 +@@ -368,10 +372,9 @@ void wg_packet_send_staged_packets(struct wg_peer *peer)
4943 + rcu_read_unlock_bh();
4944 + if (unlikely(!keypair))
4945 + goto out_nokey;
4946 +- key = &keypair->sending;
4947 +- if (unlikely(!READ_ONCE(key->is_valid)))
4948 ++ if (unlikely(!READ_ONCE(keypair->sending.is_valid)))
4949 + goto out_nokey;
4950 +- if (unlikely(wg_birthdate_has_expired(key->birthdate,
4951 ++ if (unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
4952 + REJECT_AFTER_TIME)))
4953 + goto out_invalid;
4954 +
4955 +@@ -386,7 +389,7 @@ void wg_packet_send_staged_packets(struct wg_peer *peer)
4956 + */
4957 + PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb);
4958 + PACKET_CB(skb)->nonce =
4959 +- atomic64_inc_return(&key->counter.counter) - 1;
4960 ++ atomic64_inc_return(&keypair->sending_counter) - 1;
4961 + if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES))
4962 + goto out_invalid;
4963 + }
4964 +@@ -398,7 +401,7 @@ void wg_packet_send_staged_packets(struct wg_peer *peer)
4965 + return;
4966 +
4967 + out_invalid:
4968 +- WRITE_ONCE(key->is_valid, false);
4969 ++ WRITE_ONCE(keypair->sending.is_valid, false);
4970 + out_nokey:
4971 + wg_noise_keypair_put(keypair, false);
4972 +
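
The nonce assignment above, atomic64_inc_return(&keypair->sending_counter) - 1, is a fetch-and-add: each packet gets a unique, monotonically increasing counter starting at 0 even with concurrent senders. The C11 equivalent, for illustration (build: cc -std=c11 -o nonce nonce.c):

#include <stdio.h>
#include <stdatomic.h>

static _Atomic unsigned long long sending_counter;

static unsigned long long next_nonce(void)
{
    /* returns the pre-increment value, i.e. inc_return() - 1 */
    return atomic_fetch_add(&sending_counter, 1);
}

int main(void)
{
    printf("%llu\n", next_nonce());
    printf("%llu\n", next_nonce());
    printf("%llu\n", next_nonce());
    return 0;
}
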
4973 +diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
4974 +index db37144ae98c..87ee9f767b7a 100644
4975 +--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
4976 ++++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
4977 +@@ -351,7 +351,9 @@ int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
4978 + spin_unlock_irqrestore(&client->lock, flags);
4979 + }
4980 +
4981 +- mbox_send_message(client->chan, pkt);
4982 ++ err = mbox_send_message(client->chan, pkt);
4983 ++ if (err < 0)
4984 ++ return err;
4985 + /* We can send next packet immediately, so just call txdone. */
4986 + mbox_client_txdone(client->chan, 0);
4987 +
4988 +diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
4989 +index 7051611229c9..b67372737dc9 100644
4990 +--- a/drivers/usb/dwc3/dwc3-pci.c
4991 ++++ b/drivers/usb/dwc3/dwc3-pci.c
4992 +@@ -114,6 +114,7 @@ static const struct property_entry dwc3_pci_intel_properties[] = {
4993 +
4994 + static const struct property_entry dwc3_pci_mrfld_properties[] = {
4995 + PROPERTY_ENTRY_STRING("dr_mode", "otg"),
4996 ++ PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
4997 + PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
4998 + {}
4999 + };
5000 +diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
5001 +index b47938dff1a2..238f555fe494 100644
5002 +--- a/drivers/usb/gadget/legacy/inode.c
5003 ++++ b/drivers/usb/gadget/legacy/inode.c
5004 +@@ -1361,7 +1361,6 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
5005 +
5006 + req->buf = dev->rbuf;
5007 + req->context = NULL;
5008 +- value = -EOPNOTSUPP;
5009 + switch (ctrl->bRequest) {
5010 +
5011 + case USB_REQ_GET_DESCRIPTOR:
5012 +@@ -1784,7 +1783,7 @@ static ssize_t
5013 + dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
5014 + {
5015 + struct dev_data *dev = fd->private_data;
5016 +- ssize_t value = len, length = len;
5017 ++ ssize_t value, length = len;
5018 + unsigned total;
5019 + u32 tag;
5020 + char *kbuf;
5021 +diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
5022 +index bfebf1f2e991..9a7e655d5280 100644
5023 +--- a/drivers/usb/phy/phy-twl6030-usb.c
5024 ++++ b/drivers/usb/phy/phy-twl6030-usb.c
5025 +@@ -377,7 +377,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
5026 + if (status < 0) {
5027 + dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
5028 + twl->irq1, status);
5029 +- return status;
5030 ++ goto err_put_regulator;
5031 + }
5032 +
5033 + status = request_threaded_irq(twl->irq2, NULL, twl6030_usb_irq,
5034 +@@ -386,8 +386,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
5035 + if (status < 0) {
5036 + dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
5037 + twl->irq2, status);
5038 +- free_irq(twl->irq1, twl);
5039 +- return status;
5040 ++ goto err_free_irq1;
5041 + }
5042 +
5043 + twl->asleep = 0;
5044 +@@ -396,6 +395,13 @@ static int twl6030_usb_probe(struct platform_device *pdev)
5045 + dev_info(&pdev->dev, "Initialized TWL6030 USB module\n");
5046 +
5047 + return 0;
5048 ++
5049 ++err_free_irq1:
5050 ++ free_irq(twl->irq1, twl);
5051 ++err_put_regulator:
5052 ++ regulator_put(twl->usb3v3);
5053 ++
5054 ++ return status;
5055 + }
5056 +
5057 + static int twl6030_usb_remove(struct platform_device *pdev)
5058 +diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
5059 +index 341458fd95ca..44375a22307b 100644
5060 +--- a/drivers/virtio/virtio_balloon.c
5061 ++++ b/drivers/virtio/virtio_balloon.c
5062 +@@ -14,6 +14,7 @@
5063 + #include <linux/slab.h>
5064 + #include <linux/module.h>
5065 + #include <linux/balloon_compaction.h>
5066 ++#include <linux/oom.h>
5067 + #include <linux/wait.h>
5068 + #include <linux/mm.h>
5069 + #include <linux/mount.h>
5070 +@@ -27,7 +28,9 @@
5071 + */
5072 + #define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
5073 + #define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
5074 +-#define VIRTBALLOON_OOM_NOTIFY_PRIORITY 80
5075 ++/* Maximum number of (4k) pages to deflate on OOM notifications. */
5076 ++#define VIRTIO_BALLOON_OOM_NR_PAGES 256
5077 ++#define VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY 80
5078 +
5079 + #define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
5080 + __GFP_NOMEMALLOC)
5081 +@@ -112,8 +115,11 @@ struct virtio_balloon {
5082 + /* Memory statistics */
5083 + struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
5084 +
5085 +- /* To register a shrinker to shrink memory upon memory pressure */
5086 ++ /* Shrinker to return free pages - VIRTIO_BALLOON_F_FREE_PAGE_HINT */
5087 + struct shrinker shrinker;
5088 ++
5089 ++ /* OOM notifier to deflate on OOM - VIRTIO_BALLOON_F_DEFLATE_ON_OOM */
5090 ++ struct notifier_block oom_nb;
5091 + };
5092 +
5093 + static struct virtio_device_id id_table[] = {
5094 +@@ -788,50 +794,13 @@ static unsigned long shrink_free_pages(struct virtio_balloon *vb,
5095 + return blocks_freed * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
5096 + }
5097 +
5098 +-static unsigned long leak_balloon_pages(struct virtio_balloon *vb,
5099 +- unsigned long pages_to_free)
5100 +-{
5101 +- return leak_balloon(vb, pages_to_free * VIRTIO_BALLOON_PAGES_PER_PAGE) /
5102 +- VIRTIO_BALLOON_PAGES_PER_PAGE;
5103 +-}
5104 +-
5105 +-static unsigned long shrink_balloon_pages(struct virtio_balloon *vb,
5106 +- unsigned long pages_to_free)
5107 +-{
5108 +- unsigned long pages_freed = 0;
5109 +-
5110 +- /*
5111 +- * One invocation of leak_balloon can deflate at most
5112 +- * VIRTIO_BALLOON_ARRAY_PFNS_MAX balloon pages, so we call it
5113 +- * multiple times to deflate pages till reaching pages_to_free.
5114 +- */
5115 +- while (vb->num_pages && pages_freed < pages_to_free)
5116 +- pages_freed += leak_balloon_pages(vb,
5117 +- pages_to_free - pages_freed);
5118 +-
5119 +- update_balloon_size(vb);
5120 +-
5121 +- return pages_freed;
5122 +-}
5123 +-
5124 + static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
5125 + struct shrink_control *sc)
5126 + {
5127 +- unsigned long pages_to_free, pages_freed = 0;
5128 + struct virtio_balloon *vb = container_of(shrinker,
5129 + struct virtio_balloon, shrinker);
5130 +
5131 +- pages_to_free = sc->nr_to_scan;
5132 +-
5133 +- if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
5134 +- pages_freed = shrink_free_pages(vb, pages_to_free);
5135 +-
5136 +- if (pages_freed >= pages_to_free)
5137 +- return pages_freed;
5138 +-
5139 +- pages_freed += shrink_balloon_pages(vb, pages_to_free - pages_freed);
5140 +-
5141 +- return pages_freed;
5142 ++ return shrink_free_pages(vb, sc->nr_to_scan);
5143 + }
5144 +
5145 + static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
5146 +@@ -839,26 +808,22 @@ static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
5147 + {
5148 + struct virtio_balloon *vb = container_of(shrinker,
5149 + struct virtio_balloon, shrinker);
5150 +- unsigned long count;
5151 +-
5152 +- count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE;
5153 +- count += vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
5154 +
5155 +- return count;
5156 ++ return vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
5157 + }
5158 +
5159 +-static void virtio_balloon_unregister_shrinker(struct virtio_balloon *vb)
5160 ++static int virtio_balloon_oom_notify(struct notifier_block *nb,
5161 ++ unsigned long dummy, void *parm)
5162 + {
5163 +- unregister_shrinker(&vb->shrinker);
5164 +-}
5165 ++ struct virtio_balloon *vb = container_of(nb,
5166 ++ struct virtio_balloon, oom_nb);
5167 ++ unsigned long *freed = parm;
5168 +
5169 +-static int virtio_balloon_register_shrinker(struct virtio_balloon *vb)
5170 +-{
5171 +- vb->shrinker.scan_objects = virtio_balloon_shrinker_scan;
5172 +- vb->shrinker.count_objects = virtio_balloon_shrinker_count;
5173 +- vb->shrinker.seeks = DEFAULT_SEEKS;
5174 ++ *freed += leak_balloon(vb, VIRTIO_BALLOON_OOM_NR_PAGES) /
5175 ++ VIRTIO_BALLOON_PAGES_PER_PAGE;
5176 ++ update_balloon_size(vb);
5177 +
5178 +- return register_shrinker(&vb->shrinker);
5179 ++ return NOTIFY_OK;
5180 + }
5181 +
5182 + static int virtballoon_probe(struct virtio_device *vdev)
5183 +@@ -935,22 +900,35 @@ static int virtballoon_probe(struct virtio_device *vdev)
5184 + virtio_cwrite(vb->vdev, struct virtio_balloon_config,
5185 + poison_val, &poison_val);
5186 + }
5187 +- }
5188 +- /*
5189 +- * We continue to use VIRTIO_BALLOON_F_DEFLATE_ON_OOM to decide if a
5190 +- * shrinker needs to be registered to relieve memory pressure.
5191 +- */
5192 +- if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) {
5193 +- err = virtio_balloon_register_shrinker(vb);
5194 ++
5195 ++ /*
5196 ++ * We're allowed to reuse any free pages, even if they are
5197 ++ * still to be processed by the host.
5198 ++ */
5199 ++ vb->shrinker.scan_objects = virtio_balloon_shrinker_scan;
5200 ++ vb->shrinker.count_objects = virtio_balloon_shrinker_count;
5201 ++ vb->shrinker.seeks = DEFAULT_SEEKS;
5202 ++ err = register_shrinker(&vb->shrinker);
5203 + if (err)
5204 + goto out_del_balloon_wq;
5205 + }
5206 ++ if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) {
5207 ++ vb->oom_nb.notifier_call = virtio_balloon_oom_notify;
5208 ++ vb->oom_nb.priority = VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY;
5209 ++ err = register_oom_notifier(&vb->oom_nb);
5210 ++ if (err < 0)
5211 ++ goto out_unregister_shrinker;
5212 ++ }
5213 ++
5214 + virtio_device_ready(vdev);
5215 +
5216 + if (towards_target(vb))
5217 + virtballoon_changed(vdev);
5218 + return 0;
5219 +
5220 ++out_unregister_shrinker:
5221 ++ if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
5222 ++ unregister_shrinker(&vb->shrinker);
5223 + out_del_balloon_wq:
5224 + if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
5225 + destroy_workqueue(vb->balloon_wq);
5226 +@@ -989,8 +967,11 @@ static void virtballoon_remove(struct virtio_device *vdev)
5227 + {
5228 + struct virtio_balloon *vb = vdev->priv;
5229 +
5230 +- if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
5231 +- virtio_balloon_unregister_shrinker(vb);
5232 ++ if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
5233 ++ unregister_oom_notifier(&vb->oom_nb);
5234 ++ if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
5235 ++ unregister_shrinker(&vb->shrinker);
5236 ++
5237 + spin_lock_irq(&vb->stop_update_lock);
5238 + vb->stop_update = true;
5239 + spin_unlock_irq(&vb->stop_update_lock);
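
The balloon rework splits the two reclaim paths: the shrinker now serves only free-page hinting, while deflate-on-OOM registers on the OOM notifier chain and frees a fixed 256-page batch per notification. A loose userspace sketch of the notifier-chain shape, single chain, no locking, priority-ordered insertion elided (build: cc -o oom oom.c):

#include <stdio.h>

struct notifier_block {
    int (*notifier_call)(struct notifier_block *nb, unsigned long action,
                         void *data);
    struct notifier_block *next;
    int priority;
};

static struct notifier_block *oom_chain;

static void register_oom_notifier(struct notifier_block *nb)
{
    nb->next = oom_chain;    /* real code inserts by priority */
    oom_chain = nb;
}

static int balloon_oom_notify(struct notifier_block *nb, unsigned long action,
                              void *data)
{
    unsigned long *freed = data;

    *freed += 256;           /* deflate a fixed batch, like the fix */
    return 0;
}

int main(void)
{
    struct notifier_block nb = { .notifier_call = balloon_oom_notify,
                                 .priority = 80 };
    unsigned long freed = 0;
    struct notifier_block *p;

    register_oom_notifier(&nb);
    for (p = oom_chain; p; p = p->next)    /* the OOM killer's call-out */
        p->notifier_call(p, 0, &freed);
    printf("freed %lu pages\n", freed);
    return 0;
}
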
5240 +diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
5241 +index f4713ea76e82..54f888ddb8cc 100644
5242 +--- a/fs/binfmt_elf.c
5243 ++++ b/fs/binfmt_elf.c
5244 +@@ -1733,7 +1733,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
5245 + (!regset->active || regset->active(t->task, regset) > 0)) {
5246 + int ret;
5247 + size_t size = regset_size(t->task, regset);
5248 +- void *data = kmalloc(size, GFP_KERNEL);
5249 ++ void *data = kzalloc(size, GFP_KERNEL);
5250 + if (unlikely(!data))
5251 + return 0;
5252 + ret = regset->get(t->task, regset,
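
The binfmt_elf switch from kmalloc() to kzalloc() closes an infoleak: regset->get() may fill fewer than regset_size() bytes, and the uninitialized tail was then written into the core dump. A userspace analogue using calloc() (build: cc -o leak leak.c):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static size_t fill_partial(char *buf, size_t size)
{
    size_t n = size / 2;    /* the writer may fill less than size */

    memset(buf, 'A', n);
    return n;
}

int main(void)
{
    size_t size = 16;
    char *data = calloc(1, size);    /* like kzalloc(); malloc() would
                                      * leave stale heap bytes in the tail */

    if (!data)
        return 1;
    fill_partial(data, size);
    fwrite(data, 1, size, stdout);   /* the whole buffer is written out */
    putchar('\n');
    free(data);
    return 0;
}
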
5253 +diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
5254 +index d3d78176b23c..e7726f5f1241 100644
5255 +--- a/fs/cachefiles/rdwr.c
5256 ++++ b/fs/cachefiles/rdwr.c
5257 +@@ -60,9 +60,9 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
5258 + object = container_of(op->op.object, struct cachefiles_object, fscache);
5259 + spin_lock(&object->work_lock);
5260 + list_add_tail(&monitor->op_link, &op->to_do);
5261 ++ fscache_enqueue_retrieval(op);
5262 + spin_unlock(&object->work_lock);
5263 +
5264 +- fscache_enqueue_retrieval(op);
5265 + fscache_put_retrieval(op);
5266 + return 0;
5267 + }
5268 +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
5269 +index f50204380a65..3ae88ca03ccd 100644
5270 +--- a/fs/ceph/caps.c
5271 ++++ b/fs/ceph/caps.c
5272 +@@ -3952,7 +3952,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
5273 + __ceph_queue_cap_release(session, cap);
5274 + spin_unlock(&session->s_cap_lock);
5275 + }
5276 +- goto done;
5277 ++ goto flush_cap_releases;
5278 + }
5279 +
5280 + /* these will work even if we don't have a cap yet */
5281 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
5282 +index 5920820bfbd0..b30b03747dd6 100644
5283 +--- a/fs/cifs/file.c
5284 ++++ b/fs/cifs/file.c
5285 +@@ -4060,7 +4060,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
5286 + * than it negotiated since it will refuse the read
5287 + * then.
5288 + */
5289 +- if ((tcon->ses) && !(tcon->ses->capabilities &
5290 ++ if (!(tcon->ses->capabilities &
5291 + tcon->ses->server->vals->cap_large_files)) {
5292 + current_read_size = min_t(uint,
5293 + current_read_size, CIFSMaxBufSize);
5294 +diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
5295 +index 60d911e293e6..2674feda1d7a 100644
5296 +--- a/fs/gfs2/log.c
5297 ++++ b/fs/gfs2/log.c
5298 +@@ -603,13 +603,13 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
5299 + struct buffer_head *bh = bd->bd_bh;
5300 + struct gfs2_glock *gl = bd->bd_gl;
5301 +
5302 ++ sdp->sd_log_num_revoke++;
5303 ++ if (atomic_inc_return(&gl->gl_revokes) == 1)
5304 ++ gfs2_glock_hold(gl);
5305 + bh->b_private = NULL;
5306 + bd->bd_blkno = bh->b_blocknr;
5307 + gfs2_remove_from_ail(bd); /* drops ref on bh */
5308 + bd->bd_bh = NULL;
5309 +- sdp->sd_log_num_revoke++;
5310 +- if (atomic_inc_return(&gl->gl_revokes) == 1)
5311 +- gfs2_glock_hold(gl);
5312 + set_bit(GLF_LFLUSH, &gl->gl_flags);
5313 + list_add(&bd->bd_list, &sdp->sd_log_revokes);
5314 + }
5315 +diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
5316 +index e9f93045eb01..832d44782f74 100644
5317 +--- a/fs/gfs2/quota.c
5318 ++++ b/fs/gfs2/quota.c
5319 +@@ -1040,8 +1040,7 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
5320 + u32 x;
5321 + int error = 0;
5322 +
5323 +- if (capable(CAP_SYS_RESOURCE) ||
5324 +- sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
5325 ++ if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
5326 + return 0;
5327 +
5328 + error = gfs2_quota_hold(ip, uid, gid);
5329 +diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
5330 +index 765627d9a91e..fe68a91dc16f 100644
5331 +--- a/fs/gfs2/quota.h
5332 ++++ b/fs/gfs2/quota.h
5333 +@@ -44,7 +44,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
5334 + int ret;
5335 +
5336 + ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
5337 +- if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
5338 ++ if (capable(CAP_SYS_RESOURCE) ||
5339 ++ sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
5340 + return 0;
5341 + ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
5342 + if (ret)
5343 +diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
5344 +index 238873739550..5aa8705df87e 100644
5345 +--- a/include/asm-generic/topology.h
5346 ++++ b/include/asm-generic/topology.h
5347 +@@ -48,7 +48,7 @@
5348 + #ifdef CONFIG_NEED_MULTIPLE_NODES
5349 + #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
5350 + #else
5351 +- #define cpumask_of_node(node) ((void)node, cpu_online_mask)
5352 ++ #define cpumask_of_node(node) ((void)(node), cpu_online_mask)
5353 + #endif
5354 + #endif
5355 + #ifndef pcibus_to_node
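
The topology.h tweak is macro hygiene: with "(void)node", an argument that is itself an expression binds to the cast instead of the comma operator, so "(void)(node)" is required before expressions such as assignments work as arguments. A small demonstration (build: cc -o paren paren.c):

#include <stdio.h>

#define CPUMASK_BAD(node)  ((void)node, "online-mask")
#define CPUMASK_GOOD(node) ((void)(node), "online-mask")

int main(void)
{
    int a = 1, b = 2;

    /* CPUMASK_BAD(a = b) expands to ((void)a = b, ...): assignment to
     * a void cast, which does not compile. The extra parentheses make
     * the same argument legal: */
    printf("%s\n", CPUMASK_GOOD(a = b));
    printf("a=%d\n", a);
    return 0;
}
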
5356 +diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
5357 +index 7f3486e32e5d..624d2643bfba 100644
5358 +--- a/include/linux/ieee80211.h
5359 ++++ b/include/linux/ieee80211.h
5360 +@@ -2047,7 +2047,7 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
5361 + }
5362 +
5363 + /* HE Operation defines */
5364 +-#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000003
5365 ++#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000007
5366 + #define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008
5367 + #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0
5368 + #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4
5369 +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
5370 +index 277a51d3ec40..a1842ce8bd4e 100644
5371 +--- a/include/linux/mlx5/driver.h
5372 ++++ b/include/linux/mlx5/driver.h
5373 +@@ -230,6 +230,12 @@ struct mlx5_bfreg_info {
5374 + u32 num_dyn_bfregs;
5375 + };
5376 +
5377 ++enum mlx5_cmdif_state {
5378 ++ MLX5_CMDIF_STATE_UNINITIALIZED,
5379 ++ MLX5_CMDIF_STATE_UP,
5380 ++ MLX5_CMDIF_STATE_DOWN,
5381 ++};
5382 ++
5383 + struct mlx5_cmd_first {
5384 + __be32 data[4];
5385 + };
5386 +@@ -275,6 +281,7 @@ struct mlx5_cmd_stats {
5387 + struct mlx5_cmd {
5388 + struct mlx5_nb nb;
5389 +
5390 ++ enum mlx5_cmdif_state state;
5391 + void *cmd_alloc_buf;
5392 + dma_addr_t alloc_dma;
5393 + int alloc_size;
5394 +@@ -301,6 +308,7 @@ struct mlx5_cmd {
5395 + struct semaphore sem;
5396 + struct semaphore pages_sem;
5397 + int mode;
5398 ++ u16 allowed_opcode;
5399 + struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
5400 + struct dma_pool *pool;
5401 + struct mlx5_cmd_debug dbg;
5402 +@@ -761,6 +769,7 @@ struct mlx5_cmd_work_ent {
5403 + struct delayed_work cb_timeout_work;
5404 + void *context;
5405 + int idx;
5406 ++ struct completion handling;
5407 + struct completion done;
5408 + struct mlx5_cmd *cmd;
5409 + struct work_struct work;
5410 +@@ -892,10 +901,17 @@ mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
5411 + return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
5412 + }
5413 +
5414 ++enum {
5415 ++ CMD_ALLOWED_OPCODE_ALL,
5416 ++};
5417 ++
5418 + int mlx5_cmd_init(struct mlx5_core_dev *dev);
5419 + void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
5420 ++void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
5421 ++ enum mlx5_cmdif_state cmdif_state);
5422 + void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
5423 + void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
5424 ++void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);
5425 +
5426 + struct mlx5_async_ctx {
5427 + struct mlx5_core_dev *dev;
5428 +diff --git a/include/linux/mm.h b/include/linux/mm.h
5429 +index c54fb96cb1e6..96deeecd9179 100644
5430 +--- a/include/linux/mm.h
5431 ++++ b/include/linux/mm.h
5432 +@@ -670,6 +670,11 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
5433 +
5434 + extern void kvfree(const void *addr);
5435 +
5436 ++/*
5437 ++ * Mapcount of compound page as a whole, does not include mapped sub-pages.
5438 ++ *
5439 ++ * Must be called only for compound pages or any their tail sub-pages.
5440 ++ */
5441 + static inline int compound_mapcount(struct page *page)
5442 + {
5443 + VM_BUG_ON_PAGE(!PageCompound(page), page);
5444 +@@ -689,10 +694,16 @@ static inline void page_mapcount_reset(struct page *page)
5445 +
5446 + int __page_mapcount(struct page *page);
5447 +
5448 ++/*
5449 ++ * Mapcount of 0-order page; when compound sub-page, includes
5450 ++ * compound_mapcount().
5451 ++ *
5452 ++ * Result is undefined for pages which cannot be mapped into userspace.
5453 ++ * For example SLAB or special types of pages. See function page_has_type().
5454 ++ * They use this place in struct page differently.
5455 ++ */
5456 + static inline int page_mapcount(struct page *page)
5457 + {
5458 +- VM_BUG_ON_PAGE(PageSlab(page), page);
5459 +-
5460 + if (unlikely(PageCompound(page)))
5461 + return __page_mapcount(page);
5462 + return atomic_read(&page->_mapcount) + 1;
5463 +diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h
5464 +index fcc409de31a4..a28aa289afdc 100644
5465 +--- a/include/linux/netfilter/nf_conntrack_pptp.h
5466 ++++ b/include/linux/netfilter/nf_conntrack_pptp.h
5467 +@@ -10,7 +10,7 @@
5468 + #include <net/netfilter/nf_conntrack_expect.h>
5469 + #include <uapi/linux/netfilter/nf_conntrack_tuple_common.h>
5470 +
5471 +-extern const char *const pptp_msg_name[];
5472 ++const char *pptp_msg_name(u_int16_t msg);
5473 +
5474 + /* state of the control session */
5475 + enum pptp_ctrlsess_state {
5476 +diff --git a/include/net/act_api.h b/include/net/act_api.h
5477 +index 71347a90a9d1..050c0246dee8 100644
5478 +--- a/include/net/act_api.h
5479 ++++ b/include/net/act_api.h
5480 +@@ -69,7 +69,8 @@ static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm)
5481 + {
5482 + dtm->install = jiffies_to_clock_t(jiffies - stm->install);
5483 + dtm->lastuse = jiffies_to_clock_t(jiffies - stm->lastuse);
5484 +- dtm->firstuse = jiffies_to_clock_t(jiffies - stm->firstuse);
5485 ++ dtm->firstuse = stm->firstuse ?
5486 ++ jiffies_to_clock_t(jiffies - stm->firstuse) : 0;
5487 + dtm->expires = jiffies_to_clock_t(stm->expires);
5488 + }
5489 +
5490 +diff --git a/include/net/espintcp.h b/include/net/espintcp.h
5491 +index dd7026a00066..0335bbd76552 100644
5492 +--- a/include/net/espintcp.h
5493 ++++ b/include/net/espintcp.h
5494 +@@ -25,6 +25,7 @@ struct espintcp_ctx {
5495 + struct espintcp_msg partial;
5496 + void (*saved_data_ready)(struct sock *sk);
5497 + void (*saved_write_space)(struct sock *sk);
5498 ++ void (*saved_destruct)(struct sock *sk);
5499 + struct work_struct work;
5500 + bool tx_running;
5501 + };
5502 +diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
5503 +index 6a1ae49809de..464772420206 100644
5504 +--- a/include/net/ip_fib.h
5505 ++++ b/include/net/ip_fib.h
5506 +@@ -257,7 +257,6 @@ struct fib_dump_filter {
5507 + u32 table_id;
5508 + /* filter_set is an optimization that an entry is set */
5509 + bool filter_set;
5510 +- bool dump_all_families;
5511 + bool dump_routes;
5512 + bool dump_exceptions;
5513 + unsigned char protocol;
5514 +@@ -448,6 +447,16 @@ static inline int fib_num_tclassid_users(struct net *net)
5515 + #endif
5516 + int fib_unmerge(struct net *net);
5517 +
5518 ++static inline bool nhc_l3mdev_matches_dev(const struct fib_nh_common *nhc,
5519 ++const struct net_device *dev)
5520 ++{
5521 ++ if (nhc->nhc_dev == dev ||
5522 ++ l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex)
5523 ++ return true;
5524 ++
5525 ++ return false;
5526 ++}
5527 ++
5528 + /* Exported by fib_semantics.c */
5529 + int ip_fib_check_default(__be32 gw, struct net_device *dev);
5530 + int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
5531 +diff --git a/include/net/nexthop.h b/include/net/nexthop.h
5532 +index 331ebbc94fe7..3bb618e5ecf7 100644
5533 +--- a/include/net/nexthop.h
5534 ++++ b/include/net/nexthop.h
5535 +@@ -70,6 +70,7 @@ struct nh_grp_entry {
5536 + };
5537 +
5538 + struct nh_group {
5539 ++ struct nh_group *spare; /* spare group for removals */
5540 + u16 num_nh;
5541 + bool mpath;
5542 + bool has_v4;
5543 +@@ -136,21 +137,20 @@ static inline unsigned int nexthop_num_path(const struct nexthop *nh)
5544 + {
5545 + unsigned int rc = 1;
5546 +
5547 +- if (nexthop_is_multipath(nh)) {
5548 ++ if (nh->is_group) {
5549 + struct nh_group *nh_grp;
5550 +
5551 + nh_grp = rcu_dereference_rtnl(nh->nh_grp);
5552 +- rc = nh_grp->num_nh;
5553 ++ if (nh_grp->mpath)
5554 ++ rc = nh_grp->num_nh;
5555 + }
5556 +
5557 + return rc;
5558 + }
5559 +
5560 + static inline
5561 +-struct nexthop *nexthop_mpath_select(const struct nexthop *nh, int nhsel)
5562 ++struct nexthop *nexthop_mpath_select(const struct nh_group *nhg, int nhsel)
5563 + {
5564 +- const struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
5565 +-
5566 + /* for_nexthops macros in fib_semantics.c grabs a pointer to
5567 + * the nexthop before checking nhsel
5568 + */
5569 +@@ -185,12 +185,14 @@ static inline bool nexthop_is_blackhole(const struct nexthop *nh)
5570 + {
5571 + const struct nh_info *nhi;
5572 +
5573 +- if (nexthop_is_multipath(nh)) {
5574 +- if (nexthop_num_path(nh) > 1)
5575 +- return false;
5576 +- nh = nexthop_mpath_select(nh, 0);
5577 +- if (!nh)
5578 ++ if (nh->is_group) {
5579 ++ struct nh_group *nh_grp;
5580 ++
5581 ++ nh_grp = rcu_dereference_rtnl(nh->nh_grp);
5582 ++ if (nh_grp->num_nh > 1)
5583 + return false;
5584 ++
5585 ++ nh = nh_grp->nh_entries[0].nh;
5586 + }
5587 +
5588 + nhi = rcu_dereference_rtnl(nh->nh_info);
5589 +@@ -216,16 +218,46 @@ struct fib_nh_common *nexthop_fib_nhc(struct nexthop *nh, int nhsel)
5590 + BUILD_BUG_ON(offsetof(struct fib_nh, nh_common) != 0);
5591 + BUILD_BUG_ON(offsetof(struct fib6_nh, nh_common) != 0);
5592 +
5593 +- if (nexthop_is_multipath(nh)) {
5594 +- nh = nexthop_mpath_select(nh, nhsel);
5595 +- if (!nh)
5596 +- return NULL;
5597 ++ if (nh->is_group) {
5598 ++ struct nh_group *nh_grp;
5599 ++
5600 ++ nh_grp = rcu_dereference_rtnl(nh->nh_grp);
5601 ++ if (nh_grp->mpath) {
5602 ++ nh = nexthop_mpath_select(nh_grp, nhsel);
5603 ++ if (!nh)
5604 ++ return NULL;
5605 ++ }
5606 + }
5607 +
5608 + nhi = rcu_dereference_rtnl(nh->nh_info);
5609 + return &nhi->fib_nhc;
5610 + }
5611 +
5612 ++static inline bool nexthop_uses_dev(const struct nexthop *nh,
5613 ++ const struct net_device *dev)
5614 ++{
5615 ++ struct nh_info *nhi;
5616 ++
5617 ++ if (nh->is_group) {
5618 ++ struct nh_group *nhg = rcu_dereference(nh->nh_grp);
5619 ++ int i;
5620 ++
5621 ++ for (i = 0; i < nhg->num_nh; i++) {
5622 ++ struct nexthop *nhe = nhg->nh_entries[i].nh;
5623 ++
5624 ++ nhi = rcu_dereference(nhe->nh_info);
5625 ++ if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev))
5626 ++ return true;
5627 ++ }
5628 ++ } else {
5629 ++ nhi = rcu_dereference(nh->nh_info);
5630 ++ if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev))
5631 ++ return true;
5632 ++ }
5633 ++
5634 ++ return false;
5635 ++}
5636 ++
5637 + static inline unsigned int fib_info_num_path(const struct fib_info *fi)
5638 + {
5639 + if (unlikely(fi->nh))
5640 +@@ -263,8 +295,11 @@ static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
5641 + {
5642 + struct nh_info *nhi;
5643 +
5644 +- if (nexthop_is_multipath(nh)) {
5645 +- nh = nexthop_mpath_select(nh, 0);
5646 ++ if (nh->is_group) {
5647 ++ struct nh_group *nh_grp;
5648 ++
5649 ++ nh_grp = rcu_dereference_rtnl(nh->nh_grp);
5650 ++ nh = nexthop_mpath_select(nh_grp, 0);
5651 + if (!nh)
5652 + return NULL;
5653 + }
5654 +diff --git a/include/net/tls.h b/include/net/tls.h
5655 +index bf9eb4823933..18cd4f418464 100644
5656 +--- a/include/net/tls.h
5657 ++++ b/include/net/tls.h
5658 +@@ -135,6 +135,8 @@ struct tls_sw_context_tx {
5659 + struct tls_rec *open_rec;
5660 + struct list_head tx_list;
5661 + atomic_t encrypt_pending;
5662 ++ /* protect crypto_wait with encrypt_pending */
5663 ++ spinlock_t encrypt_compl_lock;
5664 + int async_notify;
5665 + u8 async_capable:1;
5666 +
5667 +@@ -155,6 +157,8 @@ struct tls_sw_context_rx {
5668 + u8 async_capable:1;
5669 + u8 decrypted:1;
5670 + atomic_t decrypt_pending;
5671 ++ /* protect crypto_wait with decrypt_pending*/
5672 ++ spinlock_t decrypt_compl_lock;
5673 + bool async_notify;
5674 + };
5675 +
5676 +diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
5677 +index 1b28ce1aba07..325fdaa3bb66 100644
5678 +--- a/include/rdma/uverbs_std_types.h
5679 ++++ b/include/rdma/uverbs_std_types.h
5680 +@@ -88,7 +88,7 @@ struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
5681 +
5682 + static inline void uobj_put_destroy(struct ib_uobject *uobj)
5683 + {
5684 +- rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
5685 ++ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
5686 + }
5687 +
5688 + static inline void uobj_put_read(struct ib_uobject *uobj)
5689 +diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
5690 +index 5f3b9fec7b5f..ff7cfdc6cb44 100644
5691 +--- a/include/uapi/linux/xfrm.h
5692 ++++ b/include/uapi/linux/xfrm.h
5693 +@@ -304,7 +304,7 @@ enum xfrm_attr_type_t {
5694 + XFRMA_PROTO, /* __u8 */
5695 + XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */
5696 + XFRMA_PAD,
5697 +- XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */
5698 ++ XFRMA_OFFLOAD_DEV, /* struct xfrm_user_offload */
5699 + XFRMA_SET_MARK, /* __u32 */
5700 + XFRMA_SET_MARK_MASK, /* __u32 */
5701 + XFRMA_IF_ID, /* __u32 */
5702 +diff --git a/mm/khugepaged.c b/mm/khugepaged.c
5703 +index b679908743cb..ba059e68cf50 100644
5704 +--- a/mm/khugepaged.c
5705 ++++ b/mm/khugepaged.c
5706 +@@ -1673,6 +1673,7 @@ static void collapse_file(struct mm_struct *mm,
5707 + if (page_has_private(page) &&
5708 + !try_to_release_page(page, GFP_KERNEL)) {
5709 + result = SCAN_PAGE_HAS_PRIVATE;
5710 ++ putback_lru_page(page);
5711 + goto out_unlock;
5712 + }
5713 +
5714 +diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
5715 +index ff57ea89c27e..fd91cd34f25e 100644
5716 +--- a/net/ax25/af_ax25.c
5717 ++++ b/net/ax25/af_ax25.c
5718 +@@ -635,8 +635,10 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
5719 + break;
5720 +
5721 + case SO_BINDTODEVICE:
5722 +- if (optlen > IFNAMSIZ)
5723 +- optlen = IFNAMSIZ;
5724 ++ if (optlen > IFNAMSIZ - 1)
5725 ++ optlen = IFNAMSIZ - 1;
5726 ++
5727 ++ memset(devname, 0, sizeof(devname));
5728 +
5729 + if (copy_from_user(devname, optval, optlen)) {
5730 + res = -EFAULT;
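
The ax25 fix bounds the copy at IFNAMSIZ - 1 and pre-zeroes devname, so a full-length, unterminated name from userspace can no longer leave the buffer without a NUL. A standalone sketch with memcpy() standing in for copy_from_user() (build: cc -o bind bind.c):

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

static void set_devname(char devname[IFNAMSIZ], const char *user, size_t optlen)
{
    if (optlen > IFNAMSIZ - 1)
        optlen = IFNAMSIZ - 1;    /* was IFNAMSIZ: 16 bytes, no NUL */

    memset(devname, 0, IFNAMSIZ);
    memcpy(devname, user, optlen);
}

int main(void)
{
    char devname[IFNAMSIZ];

    set_devname(devname, "0123456789abcdefXYZ", 19);
    printf("bound to \"%s\" (len %zu)\n", devname, strlen(devname));
    return 0;
}
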
5731 +diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
5732 +index b325b569e761..f48cf4cfb80f 100644
5733 +--- a/net/bridge/netfilter/nft_reject_bridge.c
5734 ++++ b/net/bridge/netfilter/nft_reject_bridge.c
5735 +@@ -31,6 +31,12 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
5736 + ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
5737 + eth->h_proto = eth_hdr(oldskb)->h_proto;
5738 + skb_pull(nskb, ETH_HLEN);
5739 ++
5740 ++ if (skb_vlan_tag_present(oldskb)) {
5741 ++ u16 vid = skb_vlan_tag_get(oldskb);
5742 ++
5743 ++ __vlan_hwaccel_put_tag(nskb, oldskb->vlan_proto, vid);
5744 ++ }
5745 + }
5746 +
5747 + static int nft_bridge_iphdr_validate(struct sk_buff *skb)
5748 +diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
5749 +index af868d3923b9..834019dbc6b1 100644
5750 +--- a/net/ceph/osd_client.c
5751 ++++ b/net/ceph/osd_client.c
5752 +@@ -3652,7 +3652,9 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
5753 + * supported.
5754 + */
5755 + req->r_t.target_oloc.pool = m.redirect.oloc.pool;
5756 +- req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
5757 ++ req->r_flags |= CEPH_OSD_FLAG_REDIRECTED |
5758 ++ CEPH_OSD_FLAG_IGNORE_OVERLAY |
5759 ++ CEPH_OSD_FLAG_IGNORE_CACHE;
5760 + req->r_tid = 0;
5761 + __submit_request(req, false);
5762 + goto out_unlock_osdc;
5763 +diff --git a/net/core/dev.c b/net/core/dev.c
5764 +index c7047b40f569..87fd5424e205 100644
5765 +--- a/net/core/dev.c
5766 ++++ b/net/core/dev.c
5767 +@@ -4988,11 +4988,12 @@ static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
5768 + return 0;
5769 + }
5770 +
5771 +-static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
5772 ++static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
5773 + struct packet_type **ppt_prev)
5774 + {
5775 + struct packet_type *ptype, *pt_prev;
5776 + rx_handler_func_t *rx_handler;
5777 ++ struct sk_buff *skb = *pskb;
5778 + struct net_device *orig_dev;
5779 + bool deliver_exact = false;
5780 + int ret = NET_RX_DROP;
5781 +@@ -5023,8 +5024,10 @@ another_round:
5782 + ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
5783 + preempt_enable();
5784 +
5785 +- if (ret2 != XDP_PASS)
5786 +- return NET_RX_DROP;
5787 ++ if (ret2 != XDP_PASS) {
5788 ++ ret = NET_RX_DROP;
5789 ++ goto out;
5790 ++ }
5791 + skb_reset_mac_len(skb);
5792 + }
5793 +
5794 +@@ -5174,6 +5177,13 @@ drop:
5795 + }
5796 +
5797 + out:
5798 ++ /* The invariant here is that if *ppt_prev is not NULL
5799 ++ * then skb should also be non-NULL.
5800 ++ *
5801 ++ * Apparently *ppt_prev assignment above holds this invariant due to
5802 ++ * skb dereferencing near it.
5803 ++ */
5804 ++ *pskb = skb;
5805 + return ret;
5806 + }
5807 +
5808 +@@ -5183,7 +5193,7 @@ static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
5809 + struct packet_type *pt_prev = NULL;
5810 + int ret;
5811 +
5812 +- ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
5813 ++ ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5814 + if (pt_prev)
5815 + ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5816 + skb->dev, pt_prev, orig_dev);
5817 +@@ -5261,7 +5271,7 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemallo
5818 + struct packet_type *pt_prev = NULL;
5819 +
5820 + skb_list_del_init(skb);
5821 +- __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
5822 ++ __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5823 + if (!pt_prev)
5824 + continue;
5825 + if (pt_curr != pt_prev || od_curr != orig_dev) {
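
The net/core/dev.c change passes the skb by reference because __netif_receive_skb_core() can replace it (for example when VLAN untagging reallocates the buffer); writing the current pointer back through *pskb keeps the callers from delivering a stale, possibly freed skb to pt_prev->func. The shape of the fix as a self-contained C sketch (names are illustrative, not kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { char data[32]; };

/* Processing may swap the buffer; the double pointer lets the caller
 * keep seeing the current one instead of a stale copy.
 */
static int process(struct buf **pbuf)
{
        struct buf *b = *pbuf;

        if (b->data[0] == 'v') {          /* e.g. strip a VLAN header */
                struct buf *nb = malloc(sizeof(*nb));

                if (!nb)
                        return -1;
                memcpy(nb->data, b->data + 1, sizeof(nb->data) - 1);
                nb->data[sizeof(nb->data) - 1] = '\0';
                free(b);
                b = nb;
        }

        *pbuf = b;   /* write back: caller never sees a dangling pointer */
        return 0;
}

int main(void)
{
        struct buf *b = calloc(1, sizeof(*b));

        strcpy(b->data, "vhello");
        if (process(&b) == 0)
                printf("caller now sees: %s\n", b->data);
        free(b);
        return 0;
}
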
5826 +diff --git a/net/dsa/slave.c b/net/dsa/slave.c
5827 +index ddc0f9236928..e2a3d198e8f5 100644
5828 +--- a/net/dsa/slave.c
5829 ++++ b/net/dsa/slave.c
5830 +@@ -1393,6 +1393,7 @@ int dsa_slave_create(struct dsa_port *port)
5831 + if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
5832 + slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
5833 + slave_dev->hw_features |= NETIF_F_HW_TC;
5834 ++ slave_dev->features |= NETIF_F_LLTX;
5835 + slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
5836 + if (!IS_ERR_OR_NULL(port->mac))
5837 + ether_addr_copy(slave_dev->dev_addr, port->mac);
5838 +diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c
5839 +index b5705cba8318..d6619edd53e5 100644
5840 +--- a/net/dsa/tag_mtk.c
5841 ++++ b/net/dsa/tag_mtk.c
5842 +@@ -15,6 +15,7 @@
5843 + #define MTK_HDR_XMIT_TAGGED_TPID_8100 1
5844 + #define MTK_HDR_RECV_SOURCE_PORT_MASK GENMASK(2, 0)
5845 + #define MTK_HDR_XMIT_DP_BIT_MASK GENMASK(5, 0)
5846 ++#define MTK_HDR_XMIT_SA_DIS BIT(6)
5847 +
5848 + static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
5849 + struct net_device *dev)
5850 +@@ -22,6 +23,9 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
5851 + struct dsa_port *dp = dsa_slave_to_port(dev);
5852 + u8 *mtk_tag;
5853 + bool is_vlan_skb = true;
5854 ++ unsigned char *dest = eth_hdr(skb)->h_dest;
5855 ++ bool is_multicast_skb = is_multicast_ether_addr(dest) &&
5856 ++ !is_broadcast_ether_addr(dest);
5857 +
5858 + /* Build the special tag after the MAC Source Address. If VLAN header
5859 + * is present, it's required that VLAN header and special tag is
5860 +@@ -47,6 +51,10 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
5861 + MTK_HDR_XMIT_UNTAGGED;
5862 + mtk_tag[1] = (1 << dp->index) & MTK_HDR_XMIT_DP_BIT_MASK;
5863 +
5864 ++ /* Disable SA learning for multicast frames */
5865 ++ if (unlikely(is_multicast_skb))
5866 ++ mtk_tag[1] |= MTK_HDR_XMIT_SA_DIS;
5867 ++
5868 + /* Tag control information is kept for 802.1Q */
5869 + if (!is_vlan_skb) {
5870 + mtk_tag[2] = 0;
5871 +@@ -61,6 +69,9 @@ static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev,
5872 + {
5873 + int port;
5874 + __be16 *phdr, hdr;
5875 ++ unsigned char *dest = eth_hdr(skb)->h_dest;
5876 ++ bool is_multicast_skb = is_multicast_ether_addr(dest) &&
5877 ++ !is_broadcast_ether_addr(dest);
5878 +
5879 + if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN)))
5880 + return NULL;
5881 +@@ -86,6 +97,10 @@ static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev,
5882 + if (!skb->dev)
5883 + return NULL;
5884 +
5885 ++ /* Only unicast or broadcast frames are offloaded */
5886 ++ if (likely(!is_multicast_skb))
5887 ++ skb->offload_fwd_mark = 1;
5888 ++
5889 + return skb;
5890 + }
5891 +
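
Both tag_mtk hunks classify the destination MAC as "multicast but not broadcast" (broadcast also has the group bit set): only true multicast frames get source-address learning disabled on transmit and are excluded from offload marking on receive. The predicate in isolation (plain C; the kernel versions are is_multicast_ether_addr() and is_broadcast_ether_addr() from etherdevice.h):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Group bit (LSB of the first octet) set => multicast; all-ones => broadcast. */
static bool is_multicast(const unsigned char *a) { return a[0] & 0x01; }

static bool is_broadcast(const unsigned char *a)
{
        static const unsigned char bcast[6] = {0xff,0xff,0xff,0xff,0xff,0xff};
        return memcmp(a, bcast, 6) == 0;
}

static bool is_multicast_not_broadcast(const unsigned char *a)
{
        return is_multicast(a) && !is_broadcast(a);
}

int main(void)
{
        const unsigned char uni[6]   = {0x02,0x00,0x00,0x00,0x00,0x01};
        const unsigned char multi[6] = {0x01,0x00,0x5e,0x00,0x00,0x01};
        const unsigned char bcast[6] = {0xff,0xff,0xff,0xff,0xff,0xff};

        printf("unicast:   %d\n", is_multicast_not_broadcast(uni));   /* 0 */
        printf("multicast: %d\n", is_multicast_not_broadcast(multi)); /* 1 */
        printf("broadcast: %d\n", is_multicast_not_broadcast(bcast)); /* 0 */
        return 0;
}
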
5892 +diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
5893 +index fc9e0b806889..d863dffbe53c 100644
5894 +--- a/net/ethtool/netlink.c
5895 ++++ b/net/ethtool/netlink.c
5896 +@@ -334,7 +334,7 @@ static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info)
5897 + ret = ops->reply_size(req_info, reply_data);
5898 + if (ret < 0)
5899 + goto err_cleanup;
5900 +- reply_len = ret;
5901 ++ reply_len = ret + ethnl_reply_header_size();
5902 + ret = -ENOMEM;
5903 + rskb = ethnl_reply_init(reply_len, req_info->dev, ops->reply_cmd,
5904 + ops->hdr_attr, info, &reply_payload);
5905 +@@ -573,7 +573,7 @@ static void ethnl_default_notify(struct net_device *dev, unsigned int cmd,
5906 + ret = ops->reply_size(req_info, reply_data);
5907 + if (ret < 0)
5908 + goto err_cleanup;
5909 +- reply_len = ret;
5910 ++ reply_len = ret + ethnl_reply_header_size();
5911 + ret = -ENOMEM;
5912 + skb = genlmsg_new(reply_len, GFP_KERNEL);
5913 + if (!skb)
5914 +diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c
5915 +index 8e5911887b4c..fb7b3585458d 100644
5916 +--- a/net/ethtool/strset.c
5917 ++++ b/net/ethtool/strset.c
5918 +@@ -309,7 +309,6 @@ static int strset_reply_size(const struct ethnl_req_info *req_base,
5919 + int len = 0;
5920 + int ret;
5921 +
5922 +- len += ethnl_reply_header_size();
5923 + for (i = 0; i < ETH_SS_COUNT; i++) {
5924 + const struct strset_info *set_info = &data->sets[i];
5925 +
5926 +diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
5927 +index e2e219c7854a..25c8ba6732df 100644
5928 +--- a/net/ipv4/esp4_offload.c
5929 ++++ b/net/ipv4/esp4_offload.c
5930 +@@ -63,10 +63,8 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
5931 + sp->olen++;
5932 +
5933 + xo = xfrm_offload(skb);
5934 +- if (!xo) {
5935 +- xfrm_state_put(x);
5936 ++ if (!xo)
5937 + goto out_reset;
5938 +- }
5939 + }
5940 +
5941 + xo->flags |= XFRM_GRO;
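
This esp4 hunk (and the matching esp6_offload one further down) removes a double release: by this point the state's reference has already been handed to the skb's sec_path, and the out_reset error path tears the sec_path down, which drops that reference, so the explicit xfrm_state_put() released it a second time. A reduced model of the bug class, with a bare counter standing in for the xfrm state:

#include <assert.h>
#include <stdio.h>

struct state { int refcnt; };

static void put(struct state *s)
{
        assert(s->refcnt > 0 && "double put!");
        s->refcnt--;
}

/* The cleanup label owns the reference drop.  The error branch must only
 * jump there, not drop the reference itself first.
 */
static int handle(struct state *s, int have_offload)
{
        if (!have_offload)
                goto out_reset;  /* pre-fix code also called put(s) here */
        return 0;

out_reset:
        put(s);                  /* single, centralized release */
        return -1;
}

int main(void)
{
        struct state s = { .refcnt = 1 };

        handle(&s, 0);
        printf("refcnt after error path: %d\n", s.refcnt);  /* 0, not -1 */
        return 0;
}
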
5942 +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
5943 +index 213be9c050ad..41079490a118 100644
5944 +--- a/net/ipv4/fib_frontend.c
5945 ++++ b/net/ipv4/fib_frontend.c
5946 +@@ -309,17 +309,18 @@ bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev)
5947 + {
5948 + bool dev_match = false;
5949 + #ifdef CONFIG_IP_ROUTE_MULTIPATH
5950 +- int ret;
5951 ++ if (unlikely(fi->nh)) {
5952 ++ dev_match = nexthop_uses_dev(fi->nh, dev);
5953 ++ } else {
5954 ++ int ret;
5955 +
5956 +- for (ret = 0; ret < fib_info_num_path(fi); ret++) {
5957 +- const struct fib_nh_common *nhc = fib_info_nhc(fi, ret);
5958 ++ for (ret = 0; ret < fib_info_num_path(fi); ret++) {
5959 ++ const struct fib_nh_common *nhc = fib_info_nhc(fi, ret);
5960 +
5961 +- if (nhc->nhc_dev == dev) {
5962 +- dev_match = true;
5963 +- break;
5964 +- } else if (l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex) {
5965 +- dev_match = true;
5966 +- break;
5967 ++ if (nhc_l3mdev_matches_dev(nhc, dev)) {
5968 ++ dev_match = true;
5969 ++ break;
5970 ++ }
5971 + }
5972 + }
5973 + #else
5974 +@@ -918,7 +919,6 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
5975 + else
5976 + filter->dump_exceptions = false;
5977 +
5978 +- filter->dump_all_families = (rtm->rtm_family == AF_UNSPEC);
5979 + filter->flags = rtm->rtm_flags;
5980 + filter->protocol = rtm->rtm_protocol;
5981 + filter->rt_type = rtm->rtm_type;
5982 +@@ -990,7 +990,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
5983 + if (filter.table_id) {
5984 + tb = fib_get_table(net, filter.table_id);
5985 + if (!tb) {
5986 +- if (filter.dump_all_families)
5987 ++ if (rtnl_msg_family(cb->nlh) != PF_INET)
5988 + return skb->len;
5989 +
5990 + NL_SET_ERR_MSG(cb->extack, "ipv4: FIB table does not exist");
5991 +diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
5992 +index d545fb99a8a1..76afe93904d5 100644
5993 +--- a/net/ipv4/inet_connection_sock.c
5994 ++++ b/net/ipv4/inet_connection_sock.c
5995 +@@ -24,17 +24,19 @@
5996 + #include <net/addrconf.h>
5997 +
5998 + #if IS_ENABLED(CONFIG_IPV6)
5999 +-/* match_wildcard == true: IPV6_ADDR_ANY equals to any IPv6 addresses if IPv6
6000 +- * only, and any IPv4 addresses if not IPv6 only
6001 +- * match_wildcard == false: addresses must be exactly the same, i.e.
6002 +- * IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
6003 +- * and 0.0.0.0 equals to 0.0.0.0 only
6004 ++/* match_sk*_wildcard == true: IPV6_ADDR_ANY equals to any IPv6 addresses
6005 ++ * if IPv6 only, and any IPv4 addresses
6006 ++ * if not IPv6 only
6007 ++ * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
6008 ++ * IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
6009 ++ * and 0.0.0.0 equals to 0.0.0.0 only
6010 + */
6011 + static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
6012 + const struct in6_addr *sk2_rcv_saddr6,
6013 + __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
6014 + bool sk1_ipv6only, bool sk2_ipv6only,
6015 +- bool match_wildcard)
6016 ++ bool match_sk1_wildcard,
6017 ++ bool match_sk2_wildcard)
6018 + {
6019 + int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
6020 + int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
6021 +@@ -44,8 +46,8 @@ static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
6022 + if (!sk2_ipv6only) {
6023 + if (sk1_rcv_saddr == sk2_rcv_saddr)
6024 + return true;
6025 +- if (!sk1_rcv_saddr || !sk2_rcv_saddr)
6026 +- return match_wildcard;
6027 ++ return (match_sk1_wildcard && !sk1_rcv_saddr) ||
6028 ++ (match_sk2_wildcard && !sk2_rcv_saddr);
6029 + }
6030 + return false;
6031 + }
6032 +@@ -53,11 +55,11 @@ static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
6033 + if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
6034 + return true;
6035 +
6036 +- if (addr_type2 == IPV6_ADDR_ANY && match_wildcard &&
6037 ++ if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
6038 + !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
6039 + return true;
6040 +
6041 +- if (addr_type == IPV6_ADDR_ANY && match_wildcard &&
6042 ++ if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
6043 + !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
6044 + return true;
6045 +
6046 +@@ -69,18 +71,19 @@ static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
6047 + }
6048 + #endif
6049 +
6050 +-/* match_wildcard == true: 0.0.0.0 equals to any IPv4 addresses
6051 +- * match_wildcard == false: addresses must be exactly the same, i.e.
6052 +- * 0.0.0.0 only equals to 0.0.0.0
6053 ++/* match_sk*_wildcard == true: 0.0.0.0 equals to any IPv4 addresses
6054 ++ * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
6055 ++ * 0.0.0.0 only equals to 0.0.0.0
6056 + */
6057 + static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
6058 +- bool sk2_ipv6only, bool match_wildcard)
6059 ++ bool sk2_ipv6only, bool match_sk1_wildcard,
6060 ++ bool match_sk2_wildcard)
6061 + {
6062 + if (!sk2_ipv6only) {
6063 + if (sk1_rcv_saddr == sk2_rcv_saddr)
6064 + return true;
6065 +- if (!sk1_rcv_saddr || !sk2_rcv_saddr)
6066 +- return match_wildcard;
6067 ++ return (match_sk1_wildcard && !sk1_rcv_saddr) ||
6068 ++ (match_sk2_wildcard && !sk2_rcv_saddr);
6069 + }
6070 + return false;
6071 + }
6072 +@@ -96,10 +99,12 @@ bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
6073 + sk2->sk_rcv_saddr,
6074 + ipv6_only_sock(sk),
6075 + ipv6_only_sock(sk2),
6076 ++ match_wildcard,
6077 + match_wildcard);
6078 + #endif
6079 + return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
6080 +- ipv6_only_sock(sk2), match_wildcard);
6081 ++ ipv6_only_sock(sk2), match_wildcard,
6082 ++ match_wildcard);
6083 + }
6084 + EXPORT_SYMBOL(inet_rcv_saddr_equal);
6085 +
6086 +@@ -273,10 +278,10 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
6087 + tb->fast_rcv_saddr,
6088 + sk->sk_rcv_saddr,
6089 + tb->fast_ipv6_only,
6090 +- ipv6_only_sock(sk), true);
6091 ++ ipv6_only_sock(sk), true, false);
6092 + #endif
6093 + return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
6094 +- ipv6_only_sock(sk), true);
6095 ++ ipv6_only_sock(sk), true, false);
6096 + }
6097 +
6098 + /* Obtain a reference to a local port for the given sock,
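
Splitting match_wildcard into per-socket flags lets sk_reuseport_match() treat the cached bind-bucket address as wildcard-matchable while requiring the new socket's address to match exactly (the "true, false" calls above), whereas inet_rcv_saddr_equal() keeps the old symmetric behavior by passing the same value twice. The IPv4 comparison reduces to this (standalone C with plain uint32_t addresses; the ipv6only check is omitted for brevity):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* 0.0.0.0 (value 0) acts as a wildcard only for the side whose
 * match_sk*_wildcard flag is set.
 */
static bool ipv4_rcv_saddr_equal(uint32_t sk1, uint32_t sk2,
                                 bool match_sk1_wildcard,
                                 bool match_sk2_wildcard)
{
        if (sk1 == sk2)
                return true;
        return (match_sk1_wildcard && !sk1) ||
               (match_sk2_wildcard && !sk2);
}

int main(void)
{
        uint32_t any = 0, addr = 0x7f000001;  /* 127.0.0.1 */

        /* symmetric (inet_rcv_saddr_equal): wildcard matches either way */
        printf("%d\n", ipv4_rcv_saddr_equal(any, addr, true, true));  /* 1 */
        /* reuseport fast path: only the cached (sk1) side may be wildcard */
        printf("%d\n", ipv4_rcv_saddr_equal(any, addr, true, false)); /* 1 */
        printf("%d\n", ipv4_rcv_saddr_equal(addr, any, true, false)); /* 0 */
        return 0;
}
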
6099 +diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
6100 +index 1b4e6f298648..1dda7c155c48 100644
6101 +--- a/net/ipv4/ip_vti.c
6102 ++++ b/net/ipv4/ip_vti.c
6103 +@@ -93,7 +93,28 @@ static int vti_rcv_proto(struct sk_buff *skb)
6104 +
6105 + static int vti_rcv_tunnel(struct sk_buff *skb)
6106 + {
6107 +- return vti_rcv(skb, ip_hdr(skb)->saddr, true);
6108 ++ struct ip_tunnel_net *itn = net_generic(dev_net(skb->dev), vti_net_id);
6109 ++ const struct iphdr *iph = ip_hdr(skb);
6110 ++ struct ip_tunnel *tunnel;
6111 ++
6112 ++ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
6113 ++ iph->saddr, iph->daddr, 0);
6114 ++ if (tunnel) {
6115 ++ struct tnl_ptk_info tpi = {
6116 ++ .proto = htons(ETH_P_IP),
6117 ++ };
6118 ++
6119 ++ if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
6120 ++ goto drop;
6121 ++ if (iptunnel_pull_header(skb, 0, tpi.proto, false))
6122 ++ goto drop;
6123 ++ return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, false);
6124 ++ }
6125 ++
6126 ++ return -EINVAL;
6127 ++drop:
6128 ++ kfree_skb(skb);
6129 ++ return 0;
6130 + }
6131 +
6132 + static int vti_rcv_cb(struct sk_buff *skb, int err)
6133 +diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
6134 +index 2f01cf6fa0de..678575adaf3b 100644
6135 +--- a/net/ipv4/ipip.c
6136 ++++ b/net/ipv4/ipip.c
6137 +@@ -698,7 +698,7 @@ out:
6138 +
6139 + rtnl_link_failed:
6140 + #if IS_ENABLED(CONFIG_MPLS)
6141 +- xfrm4_tunnel_deregister(&mplsip_handler, AF_INET);
6142 ++ xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS);
6143 + xfrm_tunnel_mplsip_failed:
6144 +
6145 + #endif
6146 +diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
6147 +index 6e68def66822..2508b4c37af3 100644
6148 +--- a/net/ipv4/ipmr.c
6149 ++++ b/net/ipv4/ipmr.c
6150 +@@ -2611,7 +2611,7 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
6151 +
6152 + mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id);
6153 + if (!mrt) {
6154 +- if (filter.dump_all_families)
6155 ++ if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IPMR)
6156 + return skb->len;
6157 +
6158 + NL_SET_ERR_MSG(cb->extack, "ipv4: MR table does not exist");
6159 +diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
6160 +index b2aeb7bf5dac..2a1e10f4ae93 100644
6161 +--- a/net/ipv4/netfilter/nf_nat_pptp.c
6162 ++++ b/net/ipv4/netfilter/nf_nat_pptp.c
6163 +@@ -166,8 +166,7 @@ pptp_outbound_pkt(struct sk_buff *skb,
6164 + break;
6165 + default:
6166 + pr_debug("unknown outbound packet 0x%04x:%s\n", msg,
6167 +- msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
6168 +- pptp_msg_name[0]);
6169 ++ pptp_msg_name(msg));
6170 + /* fall through */
6171 + case PPTP_SET_LINK_INFO:
6172 + /* only need to NAT in case PAC is behind NAT box */
6173 +@@ -268,9 +267,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
6174 + pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID);
6175 + break;
6176 + default:
6177 +- pr_debug("unknown inbound packet %s\n",
6178 +- msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
6179 +- pptp_msg_name[0]);
6180 ++ pr_debug("unknown inbound packet %s\n", pptp_msg_name(msg));
6181 + /* fall through */
6182 + case PPTP_START_SESSION_REQUEST:
6183 + case PPTP_START_SESSION_REPLY:
6184 +diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
6185 +index d072c326dd64..b6ecb30544f6 100644
6186 +--- a/net/ipv4/nexthop.c
6187 ++++ b/net/ipv4/nexthop.c
6188 +@@ -63,9 +63,16 @@ static void nexthop_free_mpath(struct nexthop *nh)
6189 + int i;
6190 +
6191 + nhg = rcu_dereference_raw(nh->nh_grp);
6192 +- for (i = 0; i < nhg->num_nh; ++i)
6193 +- WARN_ON(nhg->nh_entries[i].nh);
6194 ++ for (i = 0; i < nhg->num_nh; ++i) {
6195 ++ struct nh_grp_entry *nhge = &nhg->nh_entries[i];
6196 ++
6197 ++ WARN_ON(!list_empty(&nhge->nh_list));
6198 ++ nexthop_put(nhge->nh);
6199 ++ }
6200 ++
6201 ++ WARN_ON(nhg->spare == nhg);
6202 +
6203 ++ kfree(nhg->spare);
6204 + kfree(nhg);
6205 + }
6206 +
6207 +@@ -276,6 +283,7 @@ out:
6208 + return 0;
6209 +
6210 + nla_put_failure:
6211 ++ nlmsg_cancel(skb, nlh);
6212 + return -EMSGSIZE;
6213 + }
6214 +
6215 +@@ -433,7 +441,7 @@ static int nh_check_attr_group(struct net *net, struct nlattr *tb[],
6216 + if (!valid_group_nh(nh, len, extack))
6217 + return -EINVAL;
6218 + }
6219 +- for (i = NHA_GROUP + 1; i < __NHA_MAX; ++i) {
6220 ++ for (i = NHA_GROUP_TYPE + 1; i < __NHA_MAX; ++i) {
6221 + if (!tb[i])
6222 + continue;
6223 +
6224 +@@ -693,41 +701,56 @@ static void nh_group_rebalance(struct nh_group *nhg)
6225 + }
6226 + }
6227 +
6228 +-static void remove_nh_grp_entry(struct nh_grp_entry *nhge,
6229 +- struct nh_group *nhg,
6230 ++static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
6231 + struct nl_info *nlinfo)
6232 + {
6233 ++ struct nh_grp_entry *nhges, *new_nhges;
6234 ++ struct nexthop *nhp = nhge->nh_parent;
6235 + struct nexthop *nh = nhge->nh;
6236 +- struct nh_grp_entry *nhges;
6237 +- bool found = false;
6238 +- int i;
6239 ++ struct nh_group *nhg, *newg;
6240 ++ int i, j;
6241 +
6242 + WARN_ON(!nh);
6243 +
6244 +- nhges = nhg->nh_entries;
6245 +- for (i = 0; i < nhg->num_nh; ++i) {
6246 +- if (found) {
6247 +- nhges[i-1].nh = nhges[i].nh;
6248 +- nhges[i-1].weight = nhges[i].weight;
6249 +- list_del(&nhges[i].nh_list);
6250 +- list_add(&nhges[i-1].nh_list, &nhges[i-1].nh->grp_list);
6251 +- } else if (nhg->nh_entries[i].nh == nh) {
6252 +- found = true;
6253 +- }
6254 +- }
6255 ++ nhg = rtnl_dereference(nhp->nh_grp);
6256 ++ newg = nhg->spare;
6257 +
6258 +- if (WARN_ON(!found))
6259 ++ /* last entry, keep it visible and remove the parent */
6260 ++ if (nhg->num_nh == 1) {
6261 ++ remove_nexthop(net, nhp, nlinfo);
6262 + return;
6263 ++ }
6264 ++
6265 ++ newg->has_v4 = nhg->has_v4;
6266 ++ newg->mpath = nhg->mpath;
6267 ++ newg->num_nh = nhg->num_nh;
6268 +
6269 +- nhg->num_nh--;
6270 +- nhg->nh_entries[nhg->num_nh].nh = NULL;
6271 ++ /* copy old entries to new except the one getting removed */
6272 ++ nhges = nhg->nh_entries;
6273 ++ new_nhges = newg->nh_entries;
6274 ++ for (i = 0, j = 0; i < nhg->num_nh; ++i) {
6275 ++ /* current nexthop getting removed */
6276 ++ if (nhg->nh_entries[i].nh == nh) {
6277 ++ newg->num_nh--;
6278 ++ continue;
6279 ++ }
6280 +
6281 +- nh_group_rebalance(nhg);
6282 ++ list_del(&nhges[i].nh_list);
6283 ++ new_nhges[j].nh_parent = nhges[i].nh_parent;
6284 ++ new_nhges[j].nh = nhges[i].nh;
6285 ++ new_nhges[j].weight = nhges[i].weight;
6286 ++ list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
6287 ++ j++;
6288 ++ }
6289 +
6290 +- nexthop_put(nh);
6291 ++ nh_group_rebalance(newg);
6292 ++ rcu_assign_pointer(nhp->nh_grp, newg);
6293 ++
6294 ++ list_del(&nhge->nh_list);
6295 ++ nexthop_put(nhge->nh);
6296 +
6297 + if (nlinfo)
6298 +- nexthop_notify(RTM_NEWNEXTHOP, nhge->nh_parent, nlinfo);
6299 ++ nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
6300 + }
6301 +
6302 + static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
6303 +@@ -735,17 +758,11 @@ static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
6304 + {
6305 + struct nh_grp_entry *nhge, *tmp;
6306 +
6307 +- list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) {
6308 +- struct nh_group *nhg;
6309 +-
6310 +- list_del(&nhge->nh_list);
6311 +- nhg = rtnl_dereference(nhge->nh_parent->nh_grp);
6312 +- remove_nh_grp_entry(nhge, nhg, nlinfo);
6313 ++ list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
6314 ++ remove_nh_grp_entry(net, nhge, nlinfo);
6315 +
6316 +- /* if this group has no more entries then remove it */
6317 +- if (!nhg->num_nh)
6318 +- remove_nexthop(net, nhge->nh_parent, nlinfo);
6319 +- }
6320 ++ /* make sure all see the newly published array before releasing rtnl */
6321 ++ synchronize_rcu();
6322 + }
6323 +
6324 + static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
6325 +@@ -759,10 +776,7 @@ static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
6326 + if (WARN_ON(!nhge->nh))
6327 + continue;
6328 +
6329 +- list_del(&nhge->nh_list);
6330 +- nexthop_put(nhge->nh);
6331 +- nhge->nh = NULL;
6332 +- nhg->num_nh--;
6333 ++ list_del_init(&nhge->nh_list);
6334 + }
6335 + }
6336 +
6337 +@@ -1085,6 +1099,7 @@ static struct nexthop *nexthop_create_group(struct net *net,
6338 + {
6339 + struct nlattr *grps_attr = cfg->nh_grp;
6340 + struct nexthop_grp *entry = nla_data(grps_attr);
6341 ++ u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
6342 + struct nh_group *nhg;
6343 + struct nexthop *nh;
6344 + int i;
6345 +@@ -1095,12 +1110,21 @@ static struct nexthop *nexthop_create_group(struct net *net,
6346 +
6347 + nh->is_group = 1;
6348 +
6349 +- nhg = nexthop_grp_alloc(nla_len(grps_attr) / sizeof(*entry));
6350 ++ nhg = nexthop_grp_alloc(num_nh);
6351 + if (!nhg) {
6352 + kfree(nh);
6353 + return ERR_PTR(-ENOMEM);
6354 + }
6355 +
6356 ++ /* spare group used for removals */
6357 ++ nhg->spare = nexthop_grp_alloc(num_nh);
6358 ++ if (!nhg) {
6359 ++ kfree(nhg);
6360 ++ kfree(nh);
6361 ++ return NULL;
6362 ++ }
6363 ++ nhg->spare->spare = nhg;
6364 ++
6365 + for (i = 0; i < nhg->num_nh; ++i) {
6366 + struct nexthop *nhe;
6367 + struct nh_info *nhi;
6368 +@@ -1132,6 +1156,7 @@ out_no_nh:
6369 + for (; i >= 0; --i)
6370 + nexthop_put(nhg->nh_entries[i].nh);
6371 +
6372 ++ kfree(nhg->spare);
6373 + kfree(nhg);
6374 + kfree(nh);
6375 +
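
The nexthop rework stops editing a published group in place: each group carries a preallocated "spare" twin, removal builds the reduced entry list in the spare, publishes it with rcu_assign_pointer(), and the synchronize_rcu() in the caller guarantees no reader still walks the old array, which then becomes the new group's spare. (In passing: the failure branch after the spare allocation above tests !nhg rather than the freshly allocated nhg->spare, so a failed spare allocation slips through; later kernels test nhg->spare.) A toy single-threaded model of the spare-buffer scheme, with comments marking where the RCU primitives sit in the kernel:

#include <stdio.h>
#include <stdlib.h>

struct group {
        struct group *spare;  /* preallocated twin for the next removal */
        int num;
        int entries[8];
};

static struct group *group_alloc(void)
{
        return calloc(1, sizeof(struct group));
}

static struct group *remove_entry(struct group *published, int victim)
{
        struct group *newg = published->spare;
        int i, j = 0;

        for (i = 0; i < published->num; i++) {
                if (published->entries[i] == victim)
                        continue;
                newg->entries[j++] = published->entries[i];
        }
        newg->num = j;
        newg->spare = published;  /* old array becomes the new spare */

        /* kernel: rcu_assign_pointer(nhp->nh_grp, newg); one
         * synchronize_rcu() covers the whole batch of removals.
         */
        return newg;
}

int main(void)
{
        struct group *g = group_alloc();
        int i;

        g->spare = group_alloc();
        g->num = 3;
        g->entries[0] = 10; g->entries[1] = 20; g->entries[2] = 30;

        g = remove_entry(g, 20);
        for (i = 0; i < g->num; i++)
                printf("%d ", g->entries[i]);
        printf("\n");  /* 10 30 */

        free(g->spare);
        free(g);
        return 0;
}
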
6376 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
6377 +index ef6b70774fe1..fea6a8a11183 100644
6378 +--- a/net/ipv4/route.c
6379 ++++ b/net/ipv4/route.c
6380 +@@ -491,18 +491,16 @@ u32 ip_idents_reserve(u32 hash, int segs)
6381 + atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
6382 + u32 old = READ_ONCE(*p_tstamp);
6383 + u32 now = (u32)jiffies;
6384 +- u32 new, delta = 0;
6385 ++ u32 delta = 0;
6386 +
6387 + if (old != now && cmpxchg(p_tstamp, old, now) == old)
6388 + delta = prandom_u32_max(now - old);
6389 +
6390 +- /* Do not use atomic_add_return() as it makes UBSAN unhappy */
6391 +- do {
6392 +- old = (u32)atomic_read(p_id);
6393 +- new = old + delta + segs;
6394 +- } while (atomic_cmpxchg(p_id, old, new) != old);
6395 +-
6396 +- return new - segs;
6397 ++ /* If UBSAN reports an error there, please make sure your compiler
6398 ++ * supports -fno-strict-overflow before reporting it that was a bug
6399 ++ * in UBSAN, and it has been fixed in GCC-8.
6400 ++ */
6401 ++ return atomic_add_return(segs + delta, p_id) - segs;
6402 + }
6403 + EXPORT_SYMBOL(ip_idents_reserve);
6404 +
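
The route.c hunk replaces an open-coded cmpxchg loop with atomic_add_return(): unsigned wraparound is well defined, and the new comment records that the earlier UBSAN complaints were a tooling problem (resolved once the compiler supports -fno-strict-overflow, GCC 8), not a kernel bug. The same arithmetic in C11 atomics, where fetch-add returns the old value, so old + delta equals atomic_add_return(segs + delta) - segs:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint id;

/* Reserve `segs` consecutive IDs (skipping a random `delta` to
 * decorrelate flows) and return the first one.  Unsigned wraparound is
 * defined, so no cmpxchg loop is needed.
 */
static uint32_t idents_reserve(uint32_t delta, uint32_t segs)
{
        /* fetch_add returns the previous value: the start of our range
         * once delta is skipped.
         */
        return atomic_fetch_add(&id, segs + delta) + delta;
}

int main(void)
{
        printf("%u\n", idents_reserve(0, 4));  /* 0  */
        printf("%u\n", idents_reserve(2, 4));  /* 6  */
        printf("%u\n", idents_reserve(0, 1));  /* 10 */
        return 0;
}
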
6405 +diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
6406 +index fd535053245b..93e086cf058a 100644
6407 +--- a/net/ipv6/esp6_offload.c
6408 ++++ b/net/ipv6/esp6_offload.c
6409 +@@ -85,10 +85,8 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
6410 + sp->olen++;
6411 +
6412 + xo = xfrm_offload(skb);
6413 +- if (!xo) {
6414 +- xfrm_state_put(x);
6415 ++ if (!xo)
6416 + goto out_reset;
6417 +- }
6418 + }
6419 +
6420 + xo->flags |= XFRM_GRO;
6421 +@@ -123,9 +121,16 @@ static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
6422 + struct ip_esp_hdr *esph;
6423 + struct ipv6hdr *iph = ipv6_hdr(skb);
6424 + struct xfrm_offload *xo = xfrm_offload(skb);
6425 +- int proto = iph->nexthdr;
6426 ++ u8 proto = iph->nexthdr;
6427 +
6428 + skb_push(skb, -skb_network_offset(skb));
6429 ++
6430 ++ if (x->outer_mode.encap == XFRM_MODE_TRANSPORT) {
6431 ++ __be16 frag;
6432 ++
6433 ++ ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, &frag);
6434 ++ }
6435 ++
6436 + esph = ip_esp_hdr(skb);
6437 + *skb_mac_header(skb) = IPPROTO_ESP;
6438 +
6439 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
6440 +index 72abf892302f..9a53590ef79c 100644
6441 +--- a/net/ipv6/ip6_fib.c
6442 ++++ b/net/ipv6/ip6_fib.c
6443 +@@ -664,7 +664,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
6444 + if (arg.filter.table_id) {
6445 + tb = fib6_get_table(net, arg.filter.table_id);
6446 + if (!tb) {
6447 +- if (arg.filter.dump_all_families)
6448 ++ if (rtnl_msg_family(cb->nlh) != PF_INET6)
6449 + goto out;
6450 +
6451 + NL_SET_ERR_MSG_MOD(cb->extack, "FIB table does not exist");
6452 +diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
6453 +index bfa49ff70531..2ddb7c513e54 100644
6454 +--- a/net/ipv6/ip6mr.c
6455 ++++ b/net/ipv6/ip6mr.c
6456 +@@ -2501,7 +2501,7 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
6457 +
6458 + mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id);
6459 + if (!mrt) {
6460 +- if (filter.dump_all_families)
6461 ++ if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IP6MR)
6462 + return skb->len;
6463 +
6464 + NL_SET_ERR_MSG_MOD(cb->extack, "MR table does not exist");
6465 +diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
6466 +index 38a0383dfbcf..aa5150929996 100644
6467 +--- a/net/mac80211/mesh_hwmp.c
6468 ++++ b/net/mac80211/mesh_hwmp.c
6469 +@@ -1103,7 +1103,14 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
6470 + mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn,
6471 + target_flags, mpath->dst, mpath->sn, da, 0,
6472 + ttl, lifetime, 0, ifmsh->preq_id++, sdata);
6473 ++
6474 ++ spin_lock_bh(&mpath->state_lock);
6475 ++ if (mpath->flags & MESH_PATH_DELETED) {
6476 ++ spin_unlock_bh(&mpath->state_lock);
6477 ++ goto enddiscovery;
6478 ++ }
6479 + mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
6480 ++ spin_unlock_bh(&mpath->state_lock);
6481 +
6482 + enddiscovery:
6483 + rcu_read_unlock();
6484 +diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
6485 +index cd747c0962fd..5a67f7966574 100644
6486 +--- a/net/netfilter/ipset/ip_set_list_set.c
6487 ++++ b/net/netfilter/ipset/ip_set_list_set.c
6488 +@@ -59,7 +59,7 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
6489 + /* Don't lookup sub-counters at all */
6490 + opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS;
6491 + if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE)
6492 +- opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE;
6493 ++ opt->cmdflags |= IPSET_FLAG_SKIP_COUNTER_UPDATE;
6494 + list_for_each_entry_rcu(e, &map->members, list) {
6495 + ret = ip_set_test(e->id, skb, par, opt);
6496 + if (ret <= 0)
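
The ipset one-liner is a classic inverted-bitop fix: when IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE was requested, the code was clearing IPSET_FLAG_SKIP_COUNTER_UPDATE ("&= ~") where it meant to set it ("|="), so counter updates were skipped or performed exactly backwards. In miniature:

#include <stdio.h>

#define SKIP_COUNTER_UPDATE    (1u << 0)
#define SKIP_SUBCOUNTER_UPDATE (1u << 1)

int main(void)
{
        unsigned int flags = SKIP_SUBCOUNTER_UPDATE;

        /* buggy: flags &= ~SKIP_COUNTER_UPDATE;   clears the bit */
        if (flags & SKIP_SUBCOUNTER_UPDATE)
                flags |= SKIP_COUNTER_UPDATE;   /* fixed: sets it */

        printf("skip counter update: %s\n",
               (flags & SKIP_COUNTER_UPDATE) ? "yes" : "no");
        return 0;
}
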
6497 +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
6498 +index d11a58348133..7c503b4751c4 100644
6499 +--- a/net/netfilter/nf_conntrack_core.c
6500 ++++ b/net/netfilter/nf_conntrack_core.c
6501 +@@ -2014,22 +2014,18 @@ static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
6502 + nf_conntrack_get(skb_nfct(nskb));
6503 + }
6504 +
6505 +-static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
6506 ++static int __nf_conntrack_update(struct net *net, struct sk_buff *skb,
6507 ++ struct nf_conn *ct,
6508 ++ enum ip_conntrack_info ctinfo)
6509 + {
6510 + struct nf_conntrack_tuple_hash *h;
6511 + struct nf_conntrack_tuple tuple;
6512 +- enum ip_conntrack_info ctinfo;
6513 + struct nf_nat_hook *nat_hook;
6514 + unsigned int status;
6515 +- struct nf_conn *ct;
6516 + int dataoff;
6517 + u16 l3num;
6518 + u8 l4num;
6519 +
6520 +- ct = nf_ct_get(skb, &ctinfo);
6521 +- if (!ct || nf_ct_is_confirmed(ct))
6522 +- return 0;
6523 +-
6524 + l3num = nf_ct_l3num(ct);
6525 +
6526 + dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
6527 +@@ -2086,6 +2082,76 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
6528 + return 0;
6529 + }
6530 +
6531 ++/* This packet is coming from userspace via nf_queue, complete the packet
6532 ++ * processing after the helper invocation in nf_confirm().
6533 ++ */
6534 ++static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct,
6535 ++ enum ip_conntrack_info ctinfo)
6536 ++{
6537 ++ const struct nf_conntrack_helper *helper;
6538 ++ const struct nf_conn_help *help;
6539 ++ int protoff;
6540 ++
6541 ++ help = nfct_help(ct);
6542 ++ if (!help)
6543 ++ return 0;
6544 ++
6545 ++ helper = rcu_dereference(help->helper);
6546 ++ if (!(helper->flags & NF_CT_HELPER_F_USERSPACE))
6547 ++ return 0;
6548 ++
6549 ++ switch (nf_ct_l3num(ct)) {
6550 ++ case NFPROTO_IPV4:
6551 ++ protoff = skb_network_offset(skb) + ip_hdrlen(skb);
6552 ++ break;
6553 ++#if IS_ENABLED(CONFIG_IPV6)
6554 ++ case NFPROTO_IPV6: {
6555 ++ __be16 frag_off;
6556 ++ u8 pnum;
6557 ++
6558 ++ pnum = ipv6_hdr(skb)->nexthdr;
6559 ++ protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
6560 ++ &frag_off);
6561 ++ if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
6562 ++ return 0;
6563 ++ break;
6564 ++ }
6565 ++#endif
6566 ++ default:
6567 ++ return 0;
6568 ++ }
6569 ++
6570 ++ if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
6571 ++ !nf_is_loopback_packet(skb)) {
6572 ++ if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
6573 ++ NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
6574 ++ return -1;
6575 ++ }
6576 ++ }
6577 ++
6578 ++ /* We've seen it coming out the other side: confirm it */
6579 ++ return nf_conntrack_confirm(skb) == NF_DROP ? - 1 : 0;
6580 ++}
6581 ++
6582 ++static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
6583 ++{
6584 ++ enum ip_conntrack_info ctinfo;
6585 ++ struct nf_conn *ct;
6586 ++ int err;
6587 ++
6588 ++ ct = nf_ct_get(skb, &ctinfo);
6589 ++ if (!ct)
6590 ++ return 0;
6591 ++
6592 ++ if (!nf_ct_is_confirmed(ct)) {
6593 ++ err = __nf_conntrack_update(net, skb, ct, ctinfo);
6594 ++ if (err < 0)
6595 ++ return err;
6596 ++ }
6597 ++
6598 ++ return nf_confirm_cthelper(skb, ct, ctinfo);
6599 ++}
6600 ++
6601 + static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
6602 + const struct sk_buff *skb)
6603 + {
6604 +diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
6605 +index a971183f11af..1f44d523b512 100644
6606 +--- a/net/netfilter/nf_conntrack_pptp.c
6607 ++++ b/net/netfilter/nf_conntrack_pptp.c
6608 +@@ -72,24 +72,32 @@ EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_expectfn);
6609 +
6610 + #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
6611 + /* PptpControlMessageType names */
6612 +-const char *const pptp_msg_name[] = {
6613 +- "UNKNOWN_MESSAGE",
6614 +- "START_SESSION_REQUEST",
6615 +- "START_SESSION_REPLY",
6616 +- "STOP_SESSION_REQUEST",
6617 +- "STOP_SESSION_REPLY",
6618 +- "ECHO_REQUEST",
6619 +- "ECHO_REPLY",
6620 +- "OUT_CALL_REQUEST",
6621 +- "OUT_CALL_REPLY",
6622 +- "IN_CALL_REQUEST",
6623 +- "IN_CALL_REPLY",
6624 +- "IN_CALL_CONNECT",
6625 +- "CALL_CLEAR_REQUEST",
6626 +- "CALL_DISCONNECT_NOTIFY",
6627 +- "WAN_ERROR_NOTIFY",
6628 +- "SET_LINK_INFO"
6629 ++static const char *const pptp_msg_name_array[PPTP_MSG_MAX + 1] = {
6630 ++ [0] = "UNKNOWN_MESSAGE",
6631 ++ [PPTP_START_SESSION_REQUEST] = "START_SESSION_REQUEST",
6632 ++ [PPTP_START_SESSION_REPLY] = "START_SESSION_REPLY",
6633 ++ [PPTP_STOP_SESSION_REQUEST] = "STOP_SESSION_REQUEST",
6634 ++ [PPTP_STOP_SESSION_REPLY] = "STOP_SESSION_REPLY",
6635 ++ [PPTP_ECHO_REQUEST] = "ECHO_REQUEST",
6636 ++ [PPTP_ECHO_REPLY] = "ECHO_REPLY",
6637 ++ [PPTP_OUT_CALL_REQUEST] = "OUT_CALL_REQUEST",
6638 ++ [PPTP_OUT_CALL_REPLY] = "OUT_CALL_REPLY",
6639 ++ [PPTP_IN_CALL_REQUEST] = "IN_CALL_REQUEST",
6640 ++ [PPTP_IN_CALL_REPLY] = "IN_CALL_REPLY",
6641 ++ [PPTP_IN_CALL_CONNECT] = "IN_CALL_CONNECT",
6642 ++ [PPTP_CALL_CLEAR_REQUEST] = "CALL_CLEAR_REQUEST",
6643 ++ [PPTP_CALL_DISCONNECT_NOTIFY] = "CALL_DISCONNECT_NOTIFY",
6644 ++ [PPTP_WAN_ERROR_NOTIFY] = "WAN_ERROR_NOTIFY",
6645 ++ [PPTP_SET_LINK_INFO] = "SET_LINK_INFO"
6646 + };
6647 ++
6648 ++const char *pptp_msg_name(u_int16_t msg)
6649 ++{
6650 ++ if (msg > PPTP_MSG_MAX)
6651 ++ return pptp_msg_name_array[0];
6652 ++
6653 ++ return pptp_msg_name_array[msg];
6654 ++}
6655 + EXPORT_SYMBOL(pptp_msg_name);
6656 + #endif
6657 +
6658 +@@ -276,7 +284,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
6659 + typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound;
6660 +
6661 + msg = ntohs(ctlh->messageType);
6662 +- pr_debug("inbound control message %s\n", pptp_msg_name[msg]);
6663 ++ pr_debug("inbound control message %s\n", pptp_msg_name(msg));
6664 +
6665 + switch (msg) {
6666 + case PPTP_START_SESSION_REPLY:
6667 +@@ -311,7 +319,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
6668 + pcid = pptpReq->ocack.peersCallID;
6669 + if (info->pns_call_id != pcid)
6670 + goto invalid;
6671 +- pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg],
6672 ++ pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name(msg),
6673 + ntohs(cid), ntohs(pcid));
6674 +
6675 + if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) {
6676 +@@ -328,7 +336,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
6677 + goto invalid;
6678 +
6679 + cid = pptpReq->icreq.callID;
6680 +- pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
6681 ++ pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid));
6682 + info->cstate = PPTP_CALL_IN_REQ;
6683 + info->pac_call_id = cid;
6684 + break;
6685 +@@ -347,7 +355,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
6686 + if (info->pns_call_id != pcid)
6687 + goto invalid;
6688 +
6689 +- pr_debug("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid));
6690 ++ pr_debug("%s, PCID=%X\n", pptp_msg_name(msg), ntohs(pcid));
6691 + info->cstate = PPTP_CALL_IN_CONF;
6692 +
6693 + /* we expect a GRE connection from PAC to PNS */
6694 +@@ -357,7 +365,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
6695 + case PPTP_CALL_DISCONNECT_NOTIFY:
6696 + /* server confirms disconnect */
6697 + cid = pptpReq->disc.callID;
6698 +- pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
6699 ++ pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid));
6700 + info->cstate = PPTP_CALL_NONE;
6701 +
6702 + /* untrack this call id, unexpect GRE packets */
6703 +@@ -384,7 +392,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
6704 + invalid:
6705 + pr_debug("invalid %s: type=%d cid=%u pcid=%u "
6706 + "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
6707 +- msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
6708 ++ pptp_msg_name(msg),
6709 + msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate,
6710 + ntohs(info->pns_call_id), ntohs(info->pac_call_id));
6711 + return NF_ACCEPT;
6712 +@@ -404,7 +412,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
6713 + typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound;
6714 +
6715 + msg = ntohs(ctlh->messageType);
6716 +- pr_debug("outbound control message %s\n", pptp_msg_name[msg]);
6717 ++ pr_debug("outbound control message %s\n", pptp_msg_name(msg));
6718 +
6719 + switch (msg) {
6720 + case PPTP_START_SESSION_REQUEST:
6721 +@@ -426,7 +434,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
6722 + info->cstate = PPTP_CALL_OUT_REQ;
6723 + /* track PNS call id */
6724 + cid = pptpReq->ocreq.callID;
6725 +- pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
6726 ++ pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid));
6727 + info->pns_call_id = cid;
6728 + break;
6729 +
6730 +@@ -440,7 +448,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
6731 + pcid = pptpReq->icack.peersCallID;
6732 + if (info->pac_call_id != pcid)
6733 + goto invalid;
6734 +- pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name[msg],
6735 ++ pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name(msg),
6736 + ntohs(cid), ntohs(pcid));
6737 +
6738 + if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) {
6739 +@@ -480,7 +488,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
6740 + invalid:
6741 + pr_debug("invalid %s: type=%d cid=%u pcid=%u "
6742 + "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
6743 +- msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
6744 ++ pptp_msg_name(msg),
6745 + msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate,
6746 + ntohs(info->pns_call_id), ntohs(info->pac_call_id));
6747 + return NF_ACCEPT;
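
Replacing the bare pptp_msg_name[] array with a pptp_msg_name(msg) accessor moves the "msg <= PPTP_MSG_MAX" bounds check into one place; previously every call site in this file and in nf_nat_pptp.c had to repeat it, and any that forgot read past the array. The pattern, standalone (a shortened table for illustration):

#include <stdint.h>
#include <stdio.h>

#define MSG_MAX 3

static const char *const msg_name_array[MSG_MAX + 1] = {
        [0] = "UNKNOWN_MESSAGE",
        [1] = "START_SESSION_REQUEST",
        [2] = "START_SESSION_REPLY",
        [3] = "STOP_SESSION_REQUEST",
};

/* Single bounds-checked accessor: out-of-range ids map to the "unknown"
 * slot instead of reading past the array.
 */
static const char *msg_name(uint16_t msg)
{
        if (msg > MSG_MAX)
                return msg_name_array[0];
        return msg_name_array[msg];
}

int main(void)
{
        printf("%s\n", msg_name(2));    /* START_SESSION_REPLY */
        printf("%s\n", msg_name(999));  /* UNKNOWN_MESSAGE */
        return 0;
}
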
6748 +diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
6749 +index a5f294aa8e4c..5b0d0a77379c 100644
6750 +--- a/net/netfilter/nfnetlink_cthelper.c
6751 ++++ b/net/netfilter/nfnetlink_cthelper.c
6752 +@@ -103,7 +103,7 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
6753 + if (help->helper->data_len == 0)
6754 + return -EINVAL;
6755 +
6756 +- nla_memcpy(help->data, nla_data(attr), sizeof(help->data));
6757 ++ nla_memcpy(help->data, attr, sizeof(help->data));
6758 + return 0;
6759 + }
6760 +
6761 +@@ -240,6 +240,7 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
6762 + ret = -ENOMEM;
6763 + goto err2;
6764 + }
6765 ++ helper->data_len = size;
6766 +
6767 + helper->flags |= NF_CT_HELPER_F_USERSPACE;
6768 + memcpy(&helper->tuple, tuple, sizeof(struct nf_conntrack_tuple));
6769 +diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
6770 +index b7b854621c26..9d38c14d251a 100644
6771 +--- a/net/qrtr/qrtr.c
6772 ++++ b/net/qrtr/qrtr.c
6773 +@@ -855,7 +855,7 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
6774 + }
6775 + mutex_unlock(&qrtr_node_lock);
6776 +
6777 +- qrtr_local_enqueue(node, skb, type, from, to);
6778 ++ qrtr_local_enqueue(NULL, skb, type, from, to);
6779 +
6780 + return 0;
6781 + }
6782 +diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
6783 +index 2bc29463e1dc..9f36fe911d08 100644
6784 +--- a/net/sctp/sm_sideeffect.c
6785 ++++ b/net/sctp/sm_sideeffect.c
6786 +@@ -1523,9 +1523,17 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
6787 + timeout = asoc->timeouts[cmd->obj.to];
6788 + BUG_ON(!timeout);
6789 +
6790 +- timer->expires = jiffies + timeout;
6791 +- sctp_association_hold(asoc);
6792 +- add_timer(timer);
6793 ++ /*
6794 ++ * SCTP has a hard time with timer starts. Because we process
6795 ++ * timer starts as side effects, it can be hard to tell if we
6796 ++ * have already started a timer or not, which leads to BUG
6797 ++ * halts when we call add_timer. So here, instead of just starting
6798 ++ * a timer, if the timer is already started, and just mod
6799 ++ * the timer with the shorter of the two expiration times
6800 ++ */
6801 ++ if (!timer_pending(timer))
6802 ++ sctp_association_hold(asoc);
6803 ++ timer_reduce(timer, jiffies + timeout);
6804 + break;
6805 +
6806 + case SCTP_CMD_TIMER_RESTART:
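
As the new comment in the hunk explains, SCTP could try to start a timer that was already pending and trip the BUG_ON in add_timer(). The fix takes the association reference only when the timer was not yet armed and uses timer_reduce(), which arms the timer or, if already armed, only moves the expiry earlier. A toy single-threaded model of that semantic:

#include <stdbool.h>
#include <stdio.h>

struct timer { bool pending; long expires; };
struct assoc { int refcnt; };

/* Arm the timer, or pull an already-armed one earlier; never later. */
static void timer_reduce(struct timer *t, long expires)
{
        if (!t->pending || expires < t->expires) {
                t->expires = expires;
                t->pending = true;
        }
}

static void start_timer(struct assoc *a, struct timer *t, long expires)
{
        if (!t->pending)
                a->refcnt++;  /* one reference per armed timer, not per start */
        timer_reduce(t, expires);
}

int main(void)
{
        struct assoc a = { .refcnt = 0 };
        struct timer t = { 0 };

        start_timer(&a, &t, 100);
        start_timer(&a, &t, 50);   /* second start: shorter expiry wins */
        start_timer(&a, &t, 200);  /* longer expiry is ignored */

        printf("expires=%ld refcnt=%d\n", t.expires, a.refcnt);  /* 50, 1 */
        return 0;
}
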
6807 +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
6808 +index 26788f4a3b9e..e86620fbd90f 100644
6809 +--- a/net/sctp/sm_statefuns.c
6810 ++++ b/net/sctp/sm_statefuns.c
6811 +@@ -1856,12 +1856,13 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
6812 + /* Update the content of current association. */
6813 + sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
6814 + sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
6815 +- if (sctp_state(asoc, SHUTDOWN_PENDING) &&
6816 ++ if ((sctp_state(asoc, SHUTDOWN_PENDING) ||
6817 ++ sctp_state(asoc, SHUTDOWN_SENT)) &&
6818 + (sctp_sstate(asoc->base.sk, CLOSING) ||
6819 + sock_flag(asoc->base.sk, SOCK_DEAD))) {
6820 +- /* if were currently in SHUTDOWN_PENDING, but the socket
6821 +- * has been closed by user, don't transition to ESTABLISHED.
6822 +- * Instead trigger SHUTDOWN bundled with COOKIE_ACK.
6823 ++ /* If the socket has been closed by user, don't
6824 ++ * transition to ESTABLISHED. Instead trigger SHUTDOWN
6825 ++ * bundled with COOKIE_ACK.
6826 + */
6827 + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
6828 + return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
6829 +diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
6830 +index d6620ad53546..28a283f26a8d 100644
6831 +--- a/net/tipc/udp_media.c
6832 ++++ b/net/tipc/udp_media.c
6833 +@@ -161,9 +161,11 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
6834 + struct udp_bearer *ub, struct udp_media_addr *src,
6835 + struct udp_media_addr *dst, struct dst_cache *cache)
6836 + {
6837 +- struct dst_entry *ndst = dst_cache_get(cache);
6838 ++ struct dst_entry *ndst;
6839 + int ttl, err = 0;
6840 +
6841 ++ local_bh_disable();
6842 ++ ndst = dst_cache_get(cache);
6843 + if (dst->proto == htons(ETH_P_IP)) {
6844 + struct rtable *rt = (struct rtable *)ndst;
6845 +
6846 +@@ -210,9 +212,11 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
6847 + src->port, dst->port, false);
6848 + #endif
6849 + }
6850 ++ local_bh_enable();
6851 + return err;
6852 +
6853 + tx_error:
6854 ++ local_bh_enable();
6855 + kfree_skb(skb);
6856 + return err;
6857 + }
6858 +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
6859 +index e23f94a5549b..8c2763eb6aae 100644
6860 +--- a/net/tls/tls_sw.c
6861 ++++ b/net/tls/tls_sw.c
6862 +@@ -206,10 +206,12 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
6863 +
6864 + kfree(aead_req);
6865 +
6866 ++ spin_lock_bh(&ctx->decrypt_compl_lock);
6867 + pending = atomic_dec_return(&ctx->decrypt_pending);
6868 +
6869 +- if (!pending && READ_ONCE(ctx->async_notify))
6870 ++ if (!pending && ctx->async_notify)
6871 + complete(&ctx->async_wait.completion);
6872 ++ spin_unlock_bh(&ctx->decrypt_compl_lock);
6873 + }
6874 +
6875 + static int tls_do_decryption(struct sock *sk,
6876 +@@ -467,10 +469,12 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
6877 + ready = true;
6878 + }
6879 +
6880 ++ spin_lock_bh(&ctx->encrypt_compl_lock);
6881 + pending = atomic_dec_return(&ctx->encrypt_pending);
6882 +
6883 +- if (!pending && READ_ONCE(ctx->async_notify))
6884 ++ if (!pending && ctx->async_notify)
6885 + complete(&ctx->async_wait.completion);
6886 ++ spin_unlock_bh(&ctx->encrypt_compl_lock);
6887 +
6888 + if (!ready)
6889 + return;
6890 +@@ -780,7 +784,7 @@ static int tls_push_record(struct sock *sk, int flags,
6891 +
6892 + static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
6893 + bool full_record, u8 record_type,
6894 +- size_t *copied, int flags)
6895 ++ ssize_t *copied, int flags)
6896 + {
6897 + struct tls_context *tls_ctx = tls_get_ctx(sk);
6898 + struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
6899 +@@ -796,9 +800,10 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
6900 + psock = sk_psock_get(sk);
6901 + if (!psock || !policy) {
6902 + err = tls_push_record(sk, flags, record_type);
6903 +- if (err && err != -EINPROGRESS) {
6904 ++ if (err && sk->sk_err == EBADMSG) {
6905 + *copied -= sk_msg_free(sk, msg);
6906 + tls_free_open_rec(sk);
6907 ++ err = -sk->sk_err;
6908 + }
6909 + if (psock)
6910 + sk_psock_put(sk, psock);
6911 +@@ -824,9 +829,10 @@ more_data:
6912 + switch (psock->eval) {
6913 + case __SK_PASS:
6914 + err = tls_push_record(sk, flags, record_type);
6915 +- if (err && err != -EINPROGRESS) {
6916 ++ if (err && sk->sk_err == EBADMSG) {
6917 + *copied -= sk_msg_free(sk, msg);
6918 + tls_free_open_rec(sk);
6919 ++ err = -sk->sk_err;
6920 + goto out_err;
6921 + }
6922 + break;
6923 +@@ -916,7 +922,8 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
6924 + unsigned char record_type = TLS_RECORD_TYPE_DATA;
6925 + bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
6926 + bool eor = !(msg->msg_flags & MSG_MORE);
6927 +- size_t try_to_copy, copied = 0;
6928 ++ size_t try_to_copy;
6929 ++ ssize_t copied = 0;
6930 + struct sk_msg *msg_pl, *msg_en;
6931 + struct tls_rec *rec;
6932 + int required_size;
6933 +@@ -926,6 +933,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
6934 + int num_zc = 0;
6935 + int orig_size;
6936 + int ret = 0;
6937 ++ int pending;
6938 +
6939 + if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
6940 + return -EOPNOTSUPP;
6941 +@@ -1092,13 +1100,19 @@ trim_sgl:
6942 + goto send_end;
6943 + } else if (num_zc) {
6944 + /* Wait for pending encryptions to get completed */
6945 +- smp_store_mb(ctx->async_notify, true);
6946 ++ spin_lock_bh(&ctx->encrypt_compl_lock);
6947 ++ ctx->async_notify = true;
6948 +
6949 +- if (atomic_read(&ctx->encrypt_pending))
6950 ++ pending = atomic_read(&ctx->encrypt_pending);
6951 ++ spin_unlock_bh(&ctx->encrypt_compl_lock);
6952 ++ if (pending)
6953 + crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
6954 + else
6955 + reinit_completion(&ctx->async_wait.completion);
6956 +
6957 ++ /* There can be no concurrent accesses, since we have no
6958 ++ * pending encrypt operations
6959 ++ */
6960 + WRITE_ONCE(ctx->async_notify, false);
6961 +
6962 + if (ctx->async_wait.err) {
6963 +@@ -1118,7 +1132,7 @@ send_end:
6964 +
6965 + release_sock(sk);
6966 + mutex_unlock(&tls_ctx->tx_lock);
6967 +- return copied ? copied : ret;
6968 ++ return copied > 0 ? copied : ret;
6969 + }
6970 +
6971 + static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
6972 +@@ -1132,7 +1146,7 @@ static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
6973 + struct sk_msg *msg_pl;
6974 + struct tls_rec *rec;
6975 + int num_async = 0;
6976 +- size_t copied = 0;
6977 ++ ssize_t copied = 0;
6978 + bool full_record;
6979 + int record_room;
6980 + int ret = 0;
6981 +@@ -1234,7 +1248,7 @@ wait_for_memory:
6982 + }
6983 + sendpage_end:
6984 + ret = sk_stream_error(sk, flags, ret);
6985 +- return copied ? copied : ret;
6986 ++ return copied > 0 ? copied : ret;
6987 + }
6988 +
6989 + int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
6990 +@@ -1729,6 +1743,7 @@ int tls_sw_recvmsg(struct sock *sk,
6991 + bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
6992 + bool is_peek = flags & MSG_PEEK;
6993 + int num_async = 0;
6994 ++ int pending;
6995 +
6996 + flags |= nonblock;
6997 +
6998 +@@ -1891,8 +1906,11 @@ pick_next_record:
6999 + recv_end:
7000 + if (num_async) {
7001 + /* Wait for all previously submitted records to be decrypted */
7002 +- smp_store_mb(ctx->async_notify, true);
7003 +- if (atomic_read(&ctx->decrypt_pending)) {
7004 ++ spin_lock_bh(&ctx->decrypt_compl_lock);
7005 ++ ctx->async_notify = true;
7006 ++ pending = atomic_read(&ctx->decrypt_pending);
7007 ++ spin_unlock_bh(&ctx->decrypt_compl_lock);
7008 ++ if (pending) {
7009 + err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
7010 + if (err) {
7011 + /* one of async decrypt failed */
7012 +@@ -1904,6 +1922,10 @@ recv_end:
7013 + } else {
7014 + reinit_completion(&ctx->async_wait.completion);
7015 + }
7016 ++
7017 ++ /* There can be no concurrent accesses, since we have no
7018 ++ * pending decrypt operations
7019 ++ */
7020 + WRITE_ONCE(ctx->async_notify, false);
7021 +
7022 + /* Drain records from the rx_list & copy if required */
7023 +@@ -2290,6 +2312,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
7024 +
7025 + if (tx) {
7026 + crypto_init_wait(&sw_ctx_tx->async_wait);
7027 ++ spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
7028 + crypto_info = &ctx->crypto_send.info;
7029 + cctx = &ctx->tx;
7030 + aead = &sw_ctx_tx->aead_send;
7031 +@@ -2298,6 +2321,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
7032 + sw_ctx_tx->tx_work.sk = sk;
7033 + } else {
7034 + crypto_init_wait(&sw_ctx_rx->async_wait);
7035 ++ spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
7036 + crypto_info = &ctx->crypto_recv.info;
7037 + cctx = &ctx->rx;
7038 + skb_queue_head_init(&sw_ctx_rx->rx_list);
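
The tls_sw changes close a race: the crypto callback's decrement of the pending counter and its test of async_notify were not atomic against the waiter setting the flag and sampling the counter, so complete() could race with reinit_completion() on the same completion. Taking the new *_compl_lock around both sides makes "decrement + test" and "set flag + read pending" mutually exclusive. A pthread sketch of the fixed ordering (the kernel uses a spinlock and a crypto completion; names here are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t compl_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  compl_cond = PTHREAD_COND_INITIALIZER;
static int  pending = 1;
static bool async_notify;

/* completion side (cf. tls_decrypt_done / tls_encrypt_done) */
static void *worker(void *arg)
{
        pthread_mutex_lock(&compl_lock);
        if (--pending == 0 && async_notify)
                pthread_cond_signal(&compl_cond);  /* complete(&async_wait) */
        pthread_mutex_unlock(&compl_lock);
        return arg;
}

int main(void)
{
        pthread_t tid;

        pthread_create(&tid, NULL, worker, NULL);

        /* waiter side (cf. recv_end / the zerocopy send path) */
        pthread_mutex_lock(&compl_lock);
        async_notify = true;
        while (pending)                 /* sampled under the same lock */
                pthread_cond_wait(&compl_cond, &compl_lock);
        pthread_mutex_unlock(&compl_lock);

        pthread_join(tid, NULL);
        puts("all pending operations completed");
        return 0;
}
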
7039 +diff --git a/net/wireless/core.c b/net/wireless/core.c
7040 +index 3e25229a059d..ee5bb8d8af04 100644
7041 +--- a/net/wireless/core.c
7042 ++++ b/net/wireless/core.c
7043 +@@ -142,7 +142,7 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
7044 + if (result)
7045 + return result;
7046 +
7047 +- if (rdev->wiphy.debugfsdir)
7048 ++ if (!IS_ERR_OR_NULL(rdev->wiphy.debugfsdir))
7049 + debugfs_rename(rdev->wiphy.debugfsdir->d_parent,
7050 + rdev->wiphy.debugfsdir,
7051 + rdev->wiphy.debugfsdir->d_parent, newname);
7052 +diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
7053 +index ed7a6060f73c..3889bd9aec46 100644
7054 +--- a/net/xdp/xdp_umem.c
7055 ++++ b/net/xdp/xdp_umem.c
7056 +@@ -341,8 +341,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
7057 + {
7058 + bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
7059 + u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
7060 ++ u64 npgs, addr = mr->addr, size = mr->len;
7061 + unsigned int chunks, chunks_per_page;
7062 +- u64 addr = mr->addr, size = mr->len;
7063 + int err;
7064 +
7065 + if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
7066 +@@ -372,6 +372,10 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
7067 + if ((addr + size) < addr)
7068 + return -EINVAL;
7069 +
7070 ++ npgs = div_u64(size, PAGE_SIZE);
7071 ++ if (npgs > U32_MAX)
7072 ++ return -EINVAL;
7073 ++
7074 + chunks = (unsigned int)div_u64(size, chunk_size);
7075 + if (chunks == 0)
7076 + return -EINVAL;
7077 +@@ -391,7 +395,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
7078 + umem->size = size;
7079 + umem->headroom = headroom;
7080 + umem->chunk_size_nohr = chunk_size - headroom;
7081 +- umem->npgs = size / PAGE_SIZE;
7082 ++ umem->npgs = (u32)npgs;
7083 + umem->pgs = NULL;
7084 + umem->user = NULL;
7085 + umem->flags = mr->flags;
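
umem->npgs is a u32, but the 64-bit mr->len can describe more than U32_MAX pages; the hunk computes the page count in a u64 first and rejects oversized regions before the narrowing assignment. The check in isolation:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Returns 0 and stores the page count, or -1 if it would not fit in u32. */
static int umem_npgs(uint64_t size, uint32_t *out)
{
        uint64_t npgs = size / PAGE_SIZE;  /* kernel: div_u64(size, PAGE_SIZE) */

        if (npgs > UINT32_MAX)
                return -1;                 /* would be silently truncated */
        *out = (uint32_t)npgs;
        return 0;
}

int main(void)
{
        uint32_t npgs;

        if (umem_npgs(1ull << 20, &npgs) == 0)
                printf("1 MiB -> %u pages\n", npgs);       /* 256 */
        if (umem_npgs(1ull << 45, &npgs) < 0)              /* 2^33 pages */
                printf("32 TiB region rejected\n");
        return 0;
}
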
7086 +diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
7087 +index f15d6a564b0e..36abb6750ffe 100644
7088 +--- a/net/xfrm/espintcp.c
7089 ++++ b/net/xfrm/espintcp.c
7090 +@@ -379,6 +379,7 @@ static void espintcp_destruct(struct sock *sk)
7091 + {
7092 + struct espintcp_ctx *ctx = espintcp_getctx(sk);
7093 +
7094 ++ ctx->saved_destruct(sk);
7095 + kfree(ctx);
7096 + }
7097 +
7098 +@@ -419,6 +420,7 @@ static int espintcp_init_sk(struct sock *sk)
7099 + sk->sk_socket->ops = &espintcp_ops;
7100 + ctx->saved_data_ready = sk->sk_data_ready;
7101 + ctx->saved_write_space = sk->sk_write_space;
7102 ++ ctx->saved_destruct = sk->sk_destruct;
7103 + sk->sk_data_ready = espintcp_data_ready;
7104 + sk->sk_write_space = espintcp_write_space;
7105 + sk->sk_destruct = espintcp_destruct;
7106 +diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
7107 +index e2db468cf50e..4c1b939616b3 100644
7108 +--- a/net/xfrm/xfrm_device.c
7109 ++++ b/net/xfrm/xfrm_device.c
7110 +@@ -25,12 +25,10 @@ static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
7111 + struct xfrm_offload *xo = xfrm_offload(skb);
7112 +
7113 + skb_reset_mac_len(skb);
7114 +- pskb_pull(skb, skb->mac_len + hsize + x->props.header_len);
7115 +-
7116 +- if (xo->flags & XFRM_GSO_SEGMENT) {
7117 +- skb_reset_transport_header(skb);
7118 ++ if (xo->flags & XFRM_GSO_SEGMENT)
7119 + skb->transport_header -= x->props.header_len;
7120 +- }
7121 ++
7122 ++ pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
7123 + }
7124 +
7125 + static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
7126 +diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
7127 +index aa35f23c4912..8a202c44f89a 100644
7128 +--- a/net/xfrm/xfrm_input.c
7129 ++++ b/net/xfrm/xfrm_input.c
7130 +@@ -644,7 +644,7 @@ resume:
7131 + dev_put(skb->dev);
7132 +
7133 + spin_lock(&x->lock);
7134 +- if (nexthdr <= 0) {
7135 ++ if (nexthdr < 0) {
7136 + if (nexthdr == -EBADMSG) {
7137 + xfrm_audit_state_icvfail(x, skb,
7138 + x->type->proto);
7139 +diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
7140 +index 3361e3ac5714..1e115cbf21d3 100644
7141 +--- a/net/xfrm/xfrm_interface.c
7142 ++++ b/net/xfrm/xfrm_interface.c
7143 +@@ -750,7 +750,28 @@ static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
7144 + .get_link_net = xfrmi_get_link_net,
7145 + };
7146 +
7147 ++static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
7148 ++{
7149 ++ struct net *net;
7150 ++ LIST_HEAD(list);
7151 ++
7152 ++ rtnl_lock();
7153 ++ list_for_each_entry(net, net_exit_list, exit_list) {
7154 ++ struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
7155 ++ struct xfrm_if __rcu **xip;
7156 ++ struct xfrm_if *xi;
7157 ++
7158 ++ for (xip = &xfrmn->xfrmi[0];
7159 ++ (xi = rtnl_dereference(*xip)) != NULL;
7160 ++ xip = &xi->next)
7161 ++ unregister_netdevice_queue(xi->dev, &list);
7162 ++ }
7163 ++ unregister_netdevice_many(&list);
7164 ++ rtnl_unlock();
7165 ++}
7166 ++
7167 + static struct pernet_operations xfrmi_net_ops = {
7168 ++ .exit_batch = xfrmi_exit_batch_net,
7169 + .id = &xfrmi_net_id,
7170 + .size = sizeof(struct xfrmi_net),
7171 + };
7172 +diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
7173 +index fafc7aba705f..d5f5a787ebbc 100644
7174 +--- a/net/xfrm/xfrm_output.c
7175 ++++ b/net/xfrm/xfrm_output.c
7176 +@@ -583,18 +583,20 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
7177 + xfrm_state_hold(x);
7178 +
7179 + if (skb_is_gso(skb)) {
7180 +- skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
7181 ++ if (skb->inner_protocol)
7182 ++ return xfrm_output_gso(net, sk, skb);
7183 +
7184 +- return xfrm_output2(net, sk, skb);
7185 ++ skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
7186 ++ goto out;
7187 + }
7188 +
7189 + if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
7190 + goto out;
7191 ++ } else {
7192 ++ if (skb_is_gso(skb))
7193 ++ return xfrm_output_gso(net, sk, skb);
7194 + }
7195 +
7196 +- if (skb_is_gso(skb))
7197 +- return xfrm_output_gso(net, sk, skb);
7198 +-
7199 + if (skb->ip_summed == CHECKSUM_PARTIAL) {
7200 + err = skb_checksum_help(skb);
7201 + if (err) {
7202 +@@ -640,7 +642,8 @@ void xfrm_local_error(struct sk_buff *skb, int mtu)
7203 +
7204 + if (skb->protocol == htons(ETH_P_IP))
7205 + proto = AF_INET;
7206 +- else if (skb->protocol == htons(ETH_P_IPV6))
7207 ++ else if (skb->protocol == htons(ETH_P_IPV6) &&
7208 ++ skb->sk->sk_family == AF_INET6)
7209 + proto = AF_INET6;
7210 + else
7211 + return;
7212 +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
7213 +index 8a4af86a285e..580735652754 100644
7214 +--- a/net/xfrm/xfrm_policy.c
7215 ++++ b/net/xfrm/xfrm_policy.c
7216 +@@ -1436,12 +1436,7 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
7217 + static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
7218 + struct xfrm_policy *pol)
7219 + {
7220 +- u32 mark = policy->mark.v & policy->mark.m;
7221 +-
7222 +- if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
7223 +- return true;
7224 +-
7225 +- if ((mark & pol->mark.m) == pol->mark.v &&
7226 ++ if (policy->mark.v == pol->mark.v &&
7227 + policy->priority == pol->priority)
7228 + return true;
7229 +
7230 +diff --git a/samples/bpf/lwt_len_hist_user.c b/samples/bpf/lwt_len_hist_user.c
7231 +index 587b68b1f8dd..430a4b7e353e 100644
7232 +--- a/samples/bpf/lwt_len_hist_user.c
7233 ++++ b/samples/bpf/lwt_len_hist_user.c
7234 +@@ -15,8 +15,6 @@
7235 + #define MAX_INDEX 64
7236 + #define MAX_STARS 38
7237 +
7238 +-char bpf_log_buf[BPF_LOG_BUF_SIZE];
7239 +-
7240 + static void stars(char *str, long val, long max, int width)
7241 + {
7242 + int i;
7243 +diff --git a/security/commoncap.c b/security/commoncap.c
7244 +index f4ee0ae106b2..0ca31c8bc0b1 100644
7245 +--- a/security/commoncap.c
7246 ++++ b/security/commoncap.c
7247 +@@ -812,6 +812,7 @@ int cap_bprm_set_creds(struct linux_binprm *bprm)
7248 + int ret;
7249 + kuid_t root_uid;
7250 +
7251 ++ new->cap_ambient = old->cap_ambient;
7252 + if (WARN_ON(!cap_ambient_invariant_ok(old)))
7253 + return -EPERM;
7254 +
7255 +diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c
7256 +index b412d3b3d5ff..21edb8ac95eb 100644
7257 +--- a/sound/core/hwdep.c
7258 ++++ b/sound/core/hwdep.c
7259 +@@ -216,12 +216,12 @@ static int snd_hwdep_dsp_load(struct snd_hwdep *hw,
7260 + if (info.index >= 32)
7261 + return -EINVAL;
7262 + /* check whether the dsp was already loaded */
7263 +- if (hw->dsp_loaded & (1 << info.index))
7264 ++ if (hw->dsp_loaded & (1u << info.index))
7265 + return -EBUSY;
7266 + err = hw->ops.dsp_load(hw, &info);
7267 + if (err < 0)
7268 + return err;
7269 +- hw->dsp_loaded |= (1 << info.index);
7270 ++ hw->dsp_loaded |= (1u << info.index);
7271 + return 0;
7272 + }
7273 +
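
The hwdep change matters because info.index can be 31: shifting a signed 1 left by 31 overflows int, which is undefined behavior, and the result also sign-extends if the mask is ever widened. Switching to 1u makes the shift well defined. Demonstrated below (on typical compilers the first line prints the sign-extended value, though it is formally undefined):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int index = 31;

        /* Signed literal: 1 << 31 overflows int (UB); widened to 64 bits
         * it also sign-extends to 0xffffffff80000000 in practice.
         */
        uint64_t bad  = (uint64_t)(1 << index);   /* don't do this */
        uint64_t good = (uint64_t)(1u << index);  /* well defined: 0x80000000 */

        printf("signed:   %#llx\n", (unsigned long long)bad);
        printf("unsigned: %#llx\n", (unsigned long long)good);
        return 0;
}
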
7274 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
7275 +index 041d2a32059b..e62d58872b6e 100644
7276 +--- a/sound/pci/hda/patch_realtek.c
7277 ++++ b/sound/pci/hda/patch_realtek.c
7278 +@@ -384,6 +384,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
7279 + case 0x10ec0282:
7280 + case 0x10ec0283:
7281 + case 0x10ec0286:
7282 ++ case 0x10ec0287:
7283 + case 0x10ec0288:
7284 + case 0x10ec0285:
7285 + case 0x10ec0298:
7286 +@@ -5484,18 +5485,9 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
7287 + { 0x19, 0x21a11010 }, /* dock mic */
7288 + { }
7289 + };
7290 +- /* Assure the speaker pin to be coupled with DAC NID 0x03; otherwise
7291 +- * the speaker output becomes too low by some reason on Thinkpads with
7292 +- * ALC298 codec
7293 +- */
7294 +- static const hda_nid_t preferred_pairs[] = {
7295 +- 0x14, 0x03, 0x17, 0x02, 0x21, 0x02,
7296 +- 0
7297 +- };
7298 + struct alc_spec *spec = codec->spec;
7299 +
7300 + if (action == HDA_FIXUP_ACT_PRE_PROBE) {
7301 +- spec->gen.preferred_dacs = preferred_pairs;
7302 + spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
7303 + snd_hda_apply_pincfgs(codec, pincfgs);
7304 + } else if (action == HDA_FIXUP_ACT_INIT) {
7305 +@@ -5508,6 +5500,23 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
7306 + }
7307 + }
7308 +
7309 ++static void alc_fixup_tpt470_dacs(struct hda_codec *codec,
7310 ++ const struct hda_fixup *fix, int action)
7311 ++{
7312 ++ /* Assure the speaker pin to be coupled with DAC NID 0x03; otherwise
7313 ++ * the speaker output becomes too low by some reason on Thinkpads with
7314 ++ * ALC298 codec
7315 ++ */
7316 ++ static const hda_nid_t preferred_pairs[] = {
7317 ++ 0x14, 0x03, 0x17, 0x02, 0x21, 0x02,
7318 ++ 0
7319 ++ };
7320 ++ struct alc_spec *spec = codec->spec;
7321 ++
7322 ++ if (action == HDA_FIXUP_ACT_PRE_PROBE)
7323 ++ spec->gen.preferred_dacs = preferred_pairs;
7324 ++}
7325 ++
7326 + static void alc_shutup_dell_xps13(struct hda_codec *codec)
7327 + {
7328 + struct alc_spec *spec = codec->spec;
7329 +@@ -6063,6 +6072,7 @@ enum {
7330 + ALC700_FIXUP_INTEL_REFERENCE,
7331 + ALC274_FIXUP_DELL_BIND_DACS,
7332 + ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
7333 ++ ALC298_FIXUP_TPT470_DOCK_FIX,
7334 + ALC298_FIXUP_TPT470_DOCK,
7335 + ALC255_FIXUP_DUMMY_LINEOUT_VERB,
7336 + ALC255_FIXUP_DELL_HEADSET_MIC,
7337 +@@ -6994,12 +7004,18 @@ static const struct hda_fixup alc269_fixups[] = {
7338 + .chained = true,
7339 + .chain_id = ALC274_FIXUP_DELL_BIND_DACS
7340 + },
7341 +- [ALC298_FIXUP_TPT470_DOCK] = {
7342 ++ [ALC298_FIXUP_TPT470_DOCK_FIX] = {
7343 + .type = HDA_FIXUP_FUNC,
7344 + .v.func = alc_fixup_tpt470_dock,
7345 + .chained = true,
7346 + .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE
7347 + },
7348 ++ [ALC298_FIXUP_TPT470_DOCK] = {
7349 ++ .type = HDA_FIXUP_FUNC,
7350 ++ .v.func = alc_fixup_tpt470_dacs,
7351 ++ .chained = true,
7352 ++ .chain_id = ALC298_FIXUP_TPT470_DOCK_FIX
7353 ++ },
7354 + [ALC255_FIXUP_DUMMY_LINEOUT_VERB] = {
7355 + .type = HDA_FIXUP_PINS,
7356 + .v.pins = (const struct hda_pintbl[]) {
7357 +@@ -7638,6 +7654,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
7358 + {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
7359 + {.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
7360 + {.id = ALC292_FIXUP_TPT460, .name = "tpt460"},
7361 ++ {.id = ALC298_FIXUP_TPT470_DOCK_FIX, .name = "tpt470-dock-fix"},
7362 + {.id = ALC298_FIXUP_TPT470_DOCK, .name = "tpt470-dock"},
7363 + {.id = ALC233_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"},
7364 + {.id = ALC700_FIXUP_INTEL_REFERENCE, .name = "alc700-ref"},
7365 +@@ -8276,6 +8293,7 @@ static int patch_alc269(struct hda_codec *codec)
7366 + case 0x10ec0215:
7367 + case 0x10ec0245:
7368 + case 0x10ec0285:
7369 ++ case 0x10ec0287:
7370 + case 0x10ec0289:
7371 + spec->codec_variant = ALC269_TYPE_ALC215;
7372 + spec->shutup = alc225_shutup;
7373 +@@ -9554,6 +9572,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
7374 + HDA_CODEC_ENTRY(0x10ec0284, "ALC284", patch_alc269),
7375 + HDA_CODEC_ENTRY(0x10ec0285, "ALC285", patch_alc269),
7376 + HDA_CODEC_ENTRY(0x10ec0286, "ALC286", patch_alc269),
7377 ++ HDA_CODEC_ENTRY(0x10ec0287, "ALC287", patch_alc269),
7378 + HDA_CODEC_ENTRY(0x10ec0288, "ALC288", patch_alc269),
7379 + HDA_CODEC_ENTRY(0x10ec0289, "ALC289", patch_alc269),
7380 + HDA_CODEC_ENTRY(0x10ec0290, "ALC290", patch_alc269),
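
Two things happen in the patch_realtek.c section above. The new ALC287 codec ID (0x10ec0287) is wired into the EAPD coefficient setup, the ALC215 variant selection, and the device-ID table. Separately, the tpt470 dock fixup is split in two: alc_fixup_tpt470_dacs now installs the preferred-DAC table on its own (model "tpt470-dock") and chains to the pin-config part (model "tpt470-dock-fix"), so the DAC pairing can be skipped independently. The preferred_pairs table is a zero-terminated list of (pin NID, DAC NID) pairs; the sketch below shows one way such a table can be walked. It is illustrative only, not the generic-parser code that actually consumes spec->gen.preferred_dacs:

    #include <stdio.h>

    typedef unsigned short hda_nid_t; /* paraphrase of the HDA NID type */

    /* Same shape as preferred_pairs in alc_fixup_tpt470_dacs above:
     * (pin NID, DAC NID) pairs terminated by a single 0.
     */
    static const hda_nid_t preferred_pairs[] = {
            0x14, 0x03, 0x17, 0x02, 0x21, 0x02,
            0
    };

    /* Return the preferred DAC for a pin, or 0 when the table is silent. */
    static hda_nid_t preferred_dac(const hda_nid_t *pairs, hda_nid_t pin)
    {
            for (; pairs[0]; pairs += 2)
                    if (pairs[0] == pin)
                            return pairs[1];
            return 0;
    }

    int main(void)
    {
            /* Prints 0x03: speaker pin 0x14 is steered to DAC 0x03. */
            printf("0x%02x\n", preferred_dac(preferred_pairs, 0x14));
            return 0;
    }
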
7381 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
7382 +index 7a2961ad60de..68fefe55e5c0 100644
7383 +--- a/sound/usb/mixer.c
7384 ++++ b/sound/usb/mixer.c
7385 +@@ -1171,6 +1171,14 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
7386 + cval->res = 384;
7387 + }
7388 + break;
7389 ++ case USB_ID(0x0495, 0x3042): /* ESS Technology Asus USB DAC */
7390 ++ if ((strstr(kctl->id.name, "Playback Volume") != NULL) ||
7391 ++ strstr(kctl->id.name, "Capture Volume") != NULL) {
7392 ++ cval->min >>= 8;
7393 ++ cval->max = 0;
7394 ++ cval->res = 1;
7395 ++ }
7396 ++ break;
7397 + }
7398 + }
7399 +
7400 +diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
7401 +index 0260c750e156..9af7aa93f6fa 100644
7402 +--- a/sound/usb/mixer_maps.c
7403 ++++ b/sound/usb/mixer_maps.c
7404 +@@ -397,6 +397,21 @@ static const struct usbmix_connector_map trx40_mobo_connector_map[] = {
7405 + {}
7406 + };
7407 +
7408 ++/* Rear panel + front mic on Gigabyte TRX40 Aorus Master with ALC1220-VB */
7409 ++static const struct usbmix_name_map aorus_master_alc1220vb_map[] = {
7410 ++ { 17, NULL }, /* OT, IEC958?, disabled */
7411 ++ { 19, NULL, 12 }, /* FU, Input Gain Pad - broken response, disabled */
7412 ++ { 16, "Line Out" }, /* OT */
7413 ++ { 22, "Line Out Playback" }, /* FU */
7414 ++ { 7, "Line" }, /* IT */
7415 ++ { 19, "Line Capture" }, /* FU */
7416 ++ { 8, "Mic" }, /* IT */
7417 ++ { 20, "Mic Capture" }, /* FU */
7418 ++ { 9, "Front Mic" }, /* IT */
7419 ++ { 21, "Front Mic Capture" }, /* FU */
7420 ++ {}
7421 ++};
7422 ++
7423 + /*
7424 + * Control map entries
7425 + */
7426 +@@ -526,6 +541,10 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
7427 + .id = USB_ID(0x1b1c, 0x0a42),
7428 + .map = corsair_virtuoso_map,
7429 + },
7430 ++ { /* Gigabyte TRX40 Aorus Master (rear panel + front mic) */
7431 ++ .id = USB_ID(0x0414, 0xa001),
7432 ++ .map = aorus_master_alc1220vb_map,
7433 ++ },
7434 + { /* Gigabyte TRX40 Aorus Pro WiFi */
7435 + .id = USB_ID(0x0414, 0xa002),
7436 + .map = trx40_mobo_map,
7437 +@@ -549,6 +568,11 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
7438 + .map = trx40_mobo_map,
7439 + .connector_map = trx40_mobo_connector_map,
7440 + },
7441 ++ { /* Asrock TRX40 Creator */
7442 ++ .id = USB_ID(0x26ce, 0x0a01),
7443 ++ .map = trx40_mobo_map,
7444 ++ .connector_map = trx40_mobo_connector_map,
7445 ++ },
7446 + { 0 } /* terminator */
7447 + };
7448 +
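
In the aorus_master_alc1220vb_map above, each row names or disables one mixer unit: a NULL name drops the control (unit 17, and the broken Input Gain Pad on unit 19), while a string renames it. The third field in { 19, NULL, 12 } appears to narrow the row to a single control selector; that reading is an assumption from the entry's comment, not spelled out in the patch. A small sketch of the lookup convention, with a paraphrased structure rather than the driver's real usbmix_name_map:

    #include <stdio.h>

    struct name_map {
            int id;
            const char *name;
            int control; /* 0: applies to every control on the unit */
    };

    static const struct name_map demo_map[] = {
            { 17, NULL },       /* whole unit disabled */
            { 19, NULL, 12 },   /* only control selector 12 on unit 19 */
            { 16, "Line Out" }, /* unit renamed */
            { 0 }               /* terminator */
    };

    static int map_lookup(const struct name_map *p, int id, int control,
                          const char **name)
    {
            for (; p->id; p++) {
                    if (p->id != id)
                            continue;
                    if (p->control && p->control != control)
                            continue;
                    *name = p->name; /* NULL means: drop this control */
                    return 1;
            }
            return 0;
    }

    int main(void)
    {
            const char *name;

            if (map_lookup(demo_map, 16, 0, &name))
                    printf("unit 16 -> %s\n", name ? name : "(disabled)");
            if (map_lookup(demo_map, 17, 3, &name))
                    printf("unit 17 -> %s\n", name ? name : "(disabled)");
            return 0;
    }
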
7449 +diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
7450 +index 8c2f5c23e1b4..bbae11605a4c 100644
7451 +--- a/sound/usb/quirks-table.h
7452 ++++ b/sound/usb/quirks-table.h
7453 +@@ -3647,6 +3647,32 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
7454 + ALC1220_VB_DESKTOP(0x0414, 0xa002), /* Gigabyte TRX40 Aorus Pro WiFi */
7455 + ALC1220_VB_DESKTOP(0x0db0, 0x0d64), /* MSI TRX40 Creator */
7456 + ALC1220_VB_DESKTOP(0x0db0, 0x543d), /* MSI TRX40 */
7457 ++ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
7458 + #undef ALC1220_VB_DESKTOP
7459 +
7460 ++/* Two entries for Gigabyte TRX40 Aorus Master:
7461 ++ * TRX40 Aorus Master has two USB-audio devices, one for the front headphone
7462 ++ * with ESS SABRE9218 DAC chip, while another for the rest I/O (the rear
7463 ++ * panel and the front mic) with Realtek ALC1220-VB.
7464 ++ * Here we provide two distinct names for making UCM profiles easier.
7465 ++ */
7466 ++{
7467 ++ USB_DEVICE(0x0414, 0xa000),
7468 ++ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
7469 ++ .vendor_name = "Gigabyte",
7470 ++ .product_name = "Aorus Master Front Headphone",
7471 ++ .profile_name = "Gigabyte-Aorus-Master-Front-Headphone",
7472 ++ .ifnum = QUIRK_NO_INTERFACE
7473 ++ }
7474 ++},
7475 ++{
7476 ++ USB_DEVICE(0x0414, 0xa001),
7477 ++ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
7478 ++ .vendor_name = "Gigabyte",
7479 ++ .product_name = "Aorus Master Main Audio",
7480 ++ .profile_name = "Gigabyte-Aorus-Master-Main-Audio",
7481 ++ .ifnum = QUIRK_NO_INTERFACE
7482 ++ }
7483 ++},
7484 ++
7485 + #undef USB_DEVICE_VENDOR_SPEC
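
The quirk entries above give each of the Aorus Master's two USB functions its own product name and UCM profile, keyed by product ID: 0xa000 for the ESS front-headphone DAC and 0xa001 for the ALC1220-VB main device. The tables key devices by a packed vendor/product integer; the USB_ID macro below is reconstructed from that usage and should be read as an assumption rather than a quote from the driver headers:

    #include <stdio.h>

    /* Packed vendor/product key, reconstructed from its use in the tables. */
    #define USB_ID(vendor, product) (((unsigned int)(vendor) << 16) | (product))

    struct demo_quirk {
            unsigned int id;
            const char *profile_name;
    };

    static const struct demo_quirk demo_quirks[] = {
            { USB_ID(0x0414, 0xa000), "Gigabyte-Aorus-Master-Front-Headphone" },
            { USB_ID(0x0414, 0xa001), "Gigabyte-Aorus-Master-Main-Audio" },
    };

    int main(void)
    {
            unsigned int dev = USB_ID(0x0414, 0xa001);
            size_t i;

            /* Prints the main-audio profile: the two functions of one board
             * resolve to distinct names via their distinct product IDs.
             */
            for (i = 0; i < sizeof(demo_quirks) / sizeof(demo_quirks[0]); i++)
                    if (demo_quirks[i].id == dev)
                            printf("profile: %s\n", demo_quirks[i].profile_name);
            return 0;
    }
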
7486 +diff --git a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
7487 +index cd5e1f602ac9..909da9cdda97 100644
7488 +--- a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
7489 ++++ b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
7490 +@@ -351,6 +351,7 @@ static int test_alloc_errors(char *heap_name)
7491 + }
7492 +
7493 + printf("Expected error checking passed\n");
7494 ++ ret = 0;
7495 + out:
7496 + if (dmabuf_fd >= 0)
7497 + close(dmabuf_fd);