
From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:6.1 commit in: /
Date: Wed, 01 Feb 2023 08:05:57
Message-Id: 1675238613.cabf8a4c1fb2af4bb6390d026ade7dc8133df2d5.alicef@gentoo
1 commit: cabf8a4c1fb2af4bb6390d026ade7dc8133df2d5
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Wed Feb 1 08:03:33 2023 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Wed Feb 1 08:03:33 2023 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cabf8a4c
7
8 Linux patch 6.1.9
9
10 Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>
11
12 0000_README | 4 +
13 1008_linux-6.1.9.patch | 11622 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 11626 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 396dd2ee..0965d9a9 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -75,6 +75,10 @@ Patch: 1007_linux-6.1.8.patch
21 From: http://www.kernel.org
22 Desc: Linux 6.1.8
23
24 +Patch: 1008_linux-6.1.9.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 6.1.9
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1008_linux-6.1.9.patch b/1008_linux-6.1.9.patch
33 new file mode 100644
34 index 00000000..f4ffc2a3
35 --- /dev/null
36 +++ b/1008_linux-6.1.9.patch
37 @@ -0,0 +1,11622 @@
38 +diff --git a/Documentation/devicetree/bindings/i2c/renesas,rzv2m.yaml b/Documentation/devicetree/bindings/i2c/renesas,rzv2m.yaml
39 +index c46378efc1239..92e899905ef88 100644
40 +--- a/Documentation/devicetree/bindings/i2c/renesas,rzv2m.yaml
41 ++++ b/Documentation/devicetree/bindings/i2c/renesas,rzv2m.yaml
42 +@@ -16,7 +16,7 @@ properties:
43 + compatible:
44 + items:
45 + - enum:
46 +- - renesas,i2c-r9a09g011 # RZ/V2M
47 ++ - renesas,r9a09g011-i2c # RZ/V2M
48 + - const: renesas,rzv2m-i2c
49 +
50 + reg:
51 +@@ -66,7 +66,7 @@ examples:
52 + #include <dt-bindings/interrupt-controller/arm-gic.h>
53 +
54 + i2c0: i2c@a4030000 {
55 +- compatible = "renesas,i2c-r9a09g011", "renesas,rzv2m-i2c";
56 ++ compatible = "renesas,r9a09g011-i2c", "renesas,rzv2m-i2c";
57 + reg = <0xa4030000 0x80>;
58 + interrupts = <GIC_SPI 232 IRQ_TYPE_EDGE_RISING>,
59 + <GIC_SPI 236 IRQ_TYPE_EDGE_RISING>;
60 +diff --git a/Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml b/Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml
61 +index 01f9d4e236e94..a7feb497eb89b 100644
62 +--- a/Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml
63 ++++ b/Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml
64 +@@ -19,8 +19,8 @@ description: |
65 + additional information and example.
66 +
67 + patternProperties:
68 +- # 25 LDOs
69 +- "^LDO([1-9]|[1][0-9]|2[0-5])$":
70 ++ # 25 LDOs, without LDO10-12
71 ++ "^LDO([1-9]|1[3-9]|2[0-5])$":
72 + type: object
73 + $ref: regulator.yaml#
74 + unevaluatedProperties: false
75 +@@ -30,6 +30,23 @@ patternProperties:
76 + required:
77 + - regulator-name
78 +
79 ++ "^LDO(1[0-2])$":
80 ++ type: object
81 ++ $ref: regulator.yaml#
82 ++ unevaluatedProperties: false
83 ++ description:
84 ++ Properties for single LDO regulator.
85 ++
86 ++ properties:
87 ++ samsung,ext-control-gpios:
88 ++ maxItems: 1
89 ++ description:
90 ++ LDO10, LDO11 and LDO12 can be configured to external control over
91 ++ GPIO.
92 ++
93 ++ required:
94 ++ - regulator-name
95 ++
96 + # 5 bucks
97 + "^BUCK[1-5]$":
98 + type: object
99 +diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml
100 +index 90a7cabf58feb..d4148418350c6 100644
101 +--- a/Documentation/devicetree/bindings/riscv/cpus.yaml
102 ++++ b/Documentation/devicetree/bindings/riscv/cpus.yaml
103 +@@ -80,7 +80,7 @@ properties:
104 + insensitive, letters in the riscv,isa string must be all
105 + lowercase to simplify parsing.
106 + $ref: "/schemas/types.yaml#/definitions/string"
107 +- pattern: ^rv(?:64|32)imaf?d?q?c?b?v?k?h?(?:_[hsxz](?:[a-z])+)*$
108 ++ pattern: ^rv(?:64|32)imaf?d?q?c?b?k?j?p?v?h?(?:[hsxz](?:[a-z])+)?(?:_[hsxz](?:[a-z])+)*$
109 +
110 + # RISC-V requires 'timebase-frequency' in /cpus, so disallow it here
111 + timebase-frequency: false
112 +diff --git a/Documentation/devicetree/bindings/sound/everest,es8326.yaml b/Documentation/devicetree/bindings/sound/everest,es8326.yaml
113 +old mode 100755
114 +new mode 100644
115 +diff --git a/Documentation/x86/amd-memory-encryption.rst b/Documentation/x86/amd-memory-encryption.rst
116 +index a1940ebe7be50..934310ce72582 100644
117 +--- a/Documentation/x86/amd-memory-encryption.rst
118 ++++ b/Documentation/x86/amd-memory-encryption.rst
119 +@@ -95,3 +95,39 @@ by supplying mem_encrypt=on on the kernel command line. However, if BIOS does
120 + not enable SME, then Linux will not be able to activate memory encryption, even
121 + if configured to do so by default or the mem_encrypt=on command line parameter
122 + is specified.
123 ++
124 ++Secure Nested Paging (SNP)
125 ++==========================
126 ++
127 ++SEV-SNP introduces new features (SEV_FEATURES[1:63]) which can be enabled
128 ++by the hypervisor for security enhancements. Some of these features need
129 ++guest side implementation to function correctly. The below table lists the
130 ++expected guest behavior with various possible scenarios of guest/hypervisor
131 ++SNP feature support.
132 ++
133 +++-----------------+---------------+---------------+------------------+
134 ++| Feature Enabled | Guest needs | Guest has | Guest boot |
135 ++| by the HV | implementation| implementation| behaviour |
136 +++=================+===============+===============+==================+
137 ++| No | No | No | Boot |
138 ++| | | | |
139 +++-----------------+---------------+---------------+------------------+
140 ++| No | Yes | No | Boot |
141 ++| | | | |
142 +++-----------------+---------------+---------------+------------------+
143 ++| No | Yes | Yes | Boot |
144 ++| | | | |
145 +++-----------------+---------------+---------------+------------------+
146 ++| Yes | No | No | Boot with |
147 ++| | | | feature enabled |
148 +++-----------------+---------------+---------------+------------------+
149 ++| Yes | Yes | No | Graceful boot |
150 ++| | | | failure |
151 +++-----------------+---------------+---------------+------------------+
152 ++| Yes | Yes | Yes | Boot with |
153 ++| | | | feature enabled |
154 +++-----------------+---------------+---------------+------------------+
155 ++
156 ++More details in AMD64 APM[1] Vol 2: 15.34.10 SEV_STATUS MSR
157 ++
158 ++[1] https://www.amd.com/system/files/TechDocs/40332.pdf
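The "Graceful boot failure" row above is what the snp_check_features() hunk for arch/x86/boot/compressed/sev.c later in this patch implements: mask the hypervisor-enabled features against what the guest supports, and terminate if anything required is missing. A minimal, compilable sketch of that mask check follows; the mask values are illustrative placeholders, not the real SEV_STATUS MSR layout.

#include <stdint.h>
#include <stdio.h>

/* Illustrative placeholders, not the real MSR bit layout. */
#define FEATURES_IMPL_REQ  0xffffULL  /* HV features needing guest support */
#define FEATURES_PRESENT   0x0ULL     /* features this guest implements */

static int snp_features_ok(uint64_t sev_status)
{
	uint64_t unsupported = sev_status & FEATURES_IMPL_REQ & ~FEATURES_PRESENT;

	if (unsupported) {
		/* the kernel terminates via the GHCB protocol instead */
		fprintf(stderr, "unsupported SNP features: %#llx\n",
			(unsigned long long)unsupported);
		return 0;
	}
	return 1;
}

int main(void)
{
	/* e.g. hypervisor enabled bit 3 (vTOM) but the guest lacks it */
	return snp_features_ok(1ULL << 3) ? 0 : 1;
}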
159 +diff --git a/Makefile b/Makefile
160 +index 49261450039a1..3778b422fa113 100644
161 +--- a/Makefile
162 ++++ b/Makefile
163 +@@ -1,7 +1,7 @@
164 + # SPDX-License-Identifier: GPL-2.0
165 + VERSION = 6
166 + PATCHLEVEL = 1
167 +-SUBLEVEL = 8
168 ++SUBLEVEL = 9
169 + EXTRAVERSION =
170 + NAME = Hurr durr I'ma ninja sloth
171 +
172 +@@ -538,7 +538,7 @@ LDFLAGS_MODULE =
173 + CFLAGS_KERNEL =
174 + RUSTFLAGS_KERNEL =
175 + AFLAGS_KERNEL =
176 +-export LDFLAGS_vmlinux =
177 ++LDFLAGS_vmlinux =
178 +
179 + # Use USERINCLUDE when you must reference the UAPI directories only.
180 + USERINCLUDE := \
181 +@@ -1232,6 +1232,18 @@ vmlinux.o modules.builtin.modinfo modules.builtin: vmlinux_o
182 + @:
183 +
184 + PHONY += vmlinux
185 ++# LDFLAGS_vmlinux in the top Makefile defines linker flags for the top vmlinux,
186 ++# not for decompressors. LDFLAGS_vmlinux in arch/*/boot/compressed/Makefile is
187 ++# unrelated; the decompressors just happen to have the same base name,
188 ++# arch/*/boot/compressed/vmlinux.
189 ++# Export LDFLAGS_vmlinux only to scripts/Makefile.vmlinux.
190 ++#
191 ++# _LDFLAGS_vmlinux is a workaround for the 'private export' bug:
192 ++# https://savannah.gnu.org/bugs/?61463
193 ++# For Make > 4.4, the following simple code will work:
194 ++# vmlinux: private export LDFLAGS_vmlinux := $(LDFLAGS_vmlinux)
195 ++vmlinux: private _LDFLAGS_vmlinux := $(LDFLAGS_vmlinux)
196 ++vmlinux: export LDFLAGS_vmlinux = $(_LDFLAGS_vmlinux)
197 + vmlinux: vmlinux.o $(KBUILD_LDS) modpost
198 + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.vmlinux
199 +
200 +@@ -1517,6 +1529,7 @@ endif
201 + # *.ko are usually independent of vmlinux, but CONFIG_DEBUG_INFOBTF_MODULES
202 + # is an exception.
203 + ifdef CONFIG_DEBUG_INFO_BTF_MODULES
204 ++KBUILD_BUILTIN := 1
205 + modules: vmlinux
206 + endif
207 +
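The LDFLAGS_vmlinux comment above compresses a subtle GNU Make detail: a plain target-specific "export" is inherited by the target's prerequisites, and the one-line "private export" form that would prevent that is broken before Make 4.4 (bug 61463). A standalone sketch of the two-variable workaround, with hypothetical variable names (recipe lines must start with a tab):

# Make > 4.4 could use the single line:  demo: private export VAR := $(VAR)
VAR = outer

demo: private _VAR := inner
demo: export VAR = $(_VAR)
demo: child
	@echo "demo sees VAR=$$VAR"    # prints "inner"

child:
	@echo "child sees VAR=$$VAR"   # private copy not inherited: empty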
208 +diff --git a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
209 +index 4bc4371e6bae5..4b81a975c979d 100644
210 +--- a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
211 ++++ b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
212 +@@ -632,7 +632,6 @@
213 + &uart1 {
214 + pinctrl-names = "default";
215 + pinctrl-0 = <&pinctrl_uart1>;
216 +- uart-has-rtscts;
217 + rts-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
218 + status = "okay";
219 + };
220 +diff --git a/arch/arm/boot/dts/imx6ul-pico-dwarf.dts b/arch/arm/boot/dts/imx6ul-pico-dwarf.dts
221 +index 162dc259edc8c..5a74c7f68eb62 100644
222 +--- a/arch/arm/boot/dts/imx6ul-pico-dwarf.dts
223 ++++ b/arch/arm/boot/dts/imx6ul-pico-dwarf.dts
224 +@@ -32,7 +32,7 @@
225 + };
226 +
227 + &i2c2 {
228 +- clock_frequency = <100000>;
229 ++ clock-frequency = <100000>;
230 + pinctrl-names = "default";
231 + pinctrl-0 = <&pinctrl_i2c2>;
232 + status = "okay";
233 +diff --git a/arch/arm/boot/dts/imx7d-pico-dwarf.dts b/arch/arm/boot/dts/imx7d-pico-dwarf.dts
234 +index 5162fe227d1ea..fdc10563f1473 100644
235 +--- a/arch/arm/boot/dts/imx7d-pico-dwarf.dts
236 ++++ b/arch/arm/boot/dts/imx7d-pico-dwarf.dts
237 +@@ -32,7 +32,7 @@
238 + };
239 +
240 + &i2c1 {
241 +- clock_frequency = <100000>;
242 ++ clock-frequency = <100000>;
243 + pinctrl-names = "default";
244 + pinctrl-0 = <&pinctrl_i2c1>;
245 + status = "okay";
246 +@@ -52,7 +52,7 @@
247 + };
248 +
249 + &i2c4 {
250 +- clock_frequency = <100000>;
251 ++ clock-frequency = <100000>;
252 + pinctrl-names = "default";
253 + pinctrl-0 = <&pinctrl_i2c1>;
254 + status = "okay";
255 +diff --git a/arch/arm/boot/dts/imx7d-pico-nymph.dts b/arch/arm/boot/dts/imx7d-pico-nymph.dts
256 +index 104a85254adbb..5afb1674e0125 100644
257 +--- a/arch/arm/boot/dts/imx7d-pico-nymph.dts
258 ++++ b/arch/arm/boot/dts/imx7d-pico-nymph.dts
259 +@@ -43,7 +43,7 @@
260 + };
261 +
262 + &i2c1 {
263 +- clock_frequency = <100000>;
264 ++ clock-frequency = <100000>;
265 + pinctrl-names = "default";
266 + pinctrl-0 = <&pinctrl_i2c1>;
267 + status = "okay";
268 +@@ -64,7 +64,7 @@
269 + };
270 +
271 + &i2c2 {
272 +- clock_frequency = <100000>;
273 ++ clock-frequency = <100000>;
274 + pinctrl-names = "default";
275 + pinctrl-0 = <&pinctrl_i2c2>;
276 + status = "okay";
277 +diff --git a/arch/arm/boot/dts/sam9x60.dtsi b/arch/arm/boot/dts/sam9x60.dtsi
278 +index 8f5477e307dd4..37a5d96aaf642 100644
279 +--- a/arch/arm/boot/dts/sam9x60.dtsi
280 ++++ b/arch/arm/boot/dts/sam9x60.dtsi
281 +@@ -564,7 +564,7 @@
282 + mpddrc: mpddrc@ffffe800 {
283 + compatible = "microchip,sam9x60-ddramc", "atmel,sama5d3-ddramc";
284 + reg = <0xffffe800 0x200>;
285 +- clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_CORE PMC_MCK>;
286 ++ clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_PERIPHERAL 49>;
287 + clock-names = "ddrck", "mpddr";
288 + };
289 +
290 +diff --git a/arch/arm/boot/dts/stm32mp151a-prtt1l.dtsi b/arch/arm/boot/dts/stm32mp151a-prtt1l.dtsi
291 +index d865ab5d866b9..dd23de85100c4 100644
292 +--- a/arch/arm/boot/dts/stm32mp151a-prtt1l.dtsi
293 ++++ b/arch/arm/boot/dts/stm32mp151a-prtt1l.dtsi
294 +@@ -101,8 +101,12 @@
295 +
296 + &qspi {
297 + pinctrl-names = "default", "sleep";
298 +- pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
299 +- pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
300 ++ pinctrl-0 = <&qspi_clk_pins_a
301 ++ &qspi_bk1_pins_a
302 ++ &qspi_cs1_pins_a>;
303 ++ pinctrl-1 = <&qspi_clk_sleep_pins_a
304 ++ &qspi_bk1_sleep_pins_a
305 ++ &qspi_cs1_sleep_pins_a>;
306 + reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
307 + #address-cells = <1>;
308 + #size-cells = <0>;
309 +diff --git a/arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi b/arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi
310 +index 30156b7546ed6..d540550f7da26 100644
311 +--- a/arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi
312 ++++ b/arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi
313 +@@ -391,8 +391,12 @@
314 +
315 + &qspi {
316 + pinctrl-names = "default", "sleep";
317 +- pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
318 +- pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
319 ++ pinctrl-0 = <&qspi_clk_pins_a
320 ++ &qspi_bk1_pins_a
321 ++ &qspi_cs1_pins_a>;
322 ++ pinctrl-1 = <&qspi_clk_sleep_pins_a
323 ++ &qspi_bk1_sleep_pins_a
324 ++ &qspi_cs1_sleep_pins_a>;
325 + reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
326 + #address-cells = <1>;
327 + #size-cells = <0>;
328 +diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
329 +index 238a611192e72..d3b85a8764d74 100644
330 +--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
331 ++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
332 +@@ -428,8 +428,12 @@
333 +
334 + &qspi {
335 + pinctrl-names = "default", "sleep";
336 +- pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
337 +- pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
338 ++ pinctrl-0 = <&qspi_clk_pins_a
339 ++ &qspi_bk1_pins_a
340 ++ &qspi_cs1_pins_a>;
341 ++ pinctrl-1 = <&qspi_clk_sleep_pins_a
342 ++ &qspi_bk1_sleep_pins_a
343 ++ &qspi_cs1_sleep_pins_a>;
344 + reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
345 + #address-cells = <1>;
346 + #size-cells = <0>;
347 +diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
348 +index 134a798ad3f23..bb40fb46da81d 100644
349 +--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
350 ++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
351 +@@ -247,8 +247,12 @@
352 +
353 + &qspi {
354 + pinctrl-names = "default", "sleep";
355 +- pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
356 +- pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
357 ++ pinctrl-0 = <&qspi_clk_pins_a
358 ++ &qspi_bk1_pins_a
359 ++ &qspi_cs1_pins_a>;
360 ++ pinctrl-1 = <&qspi_clk_sleep_pins_a
361 ++ &qspi_bk1_sleep_pins_a
362 ++ &qspi_cs1_sleep_pins_a>;
363 + reg = <0x58003000 0x1000>, <0x70000000 0x200000>;
364 + #address-cells = <1>;
365 + #size-cells = <0>;
366 +diff --git a/arch/arm/mach-imx/cpu-imx25.c b/arch/arm/mach-imx/cpu-imx25.c
367 +index 3e63445cde062..cc86977d0a340 100644
368 +--- a/arch/arm/mach-imx/cpu-imx25.c
369 ++++ b/arch/arm/mach-imx/cpu-imx25.c
370 +@@ -23,6 +23,7 @@ static int mx25_read_cpu_rev(void)
371 +
372 + np = of_find_compatible_node(NULL, NULL, "fsl,imx25-iim");
373 + iim_base = of_iomap(np, 0);
374 ++ of_node_put(np);
375 + BUG_ON(!iim_base);
376 + rev = readl(iim_base + MXC_IIMSREV);
377 + iounmap(iim_base);
378 +diff --git a/arch/arm/mach-imx/cpu-imx27.c b/arch/arm/mach-imx/cpu-imx27.c
379 +index bf70e13bbe9ee..1d28939083683 100644
380 +--- a/arch/arm/mach-imx/cpu-imx27.c
381 ++++ b/arch/arm/mach-imx/cpu-imx27.c
382 +@@ -28,6 +28,7 @@ static int mx27_read_cpu_rev(void)
383 +
384 + np = of_find_compatible_node(NULL, NULL, "fsl,imx27-ccm");
385 + ccm_base = of_iomap(np, 0);
386 ++ of_node_put(np);
387 + BUG_ON(!ccm_base);
388 + /*
389 + * now we have access to the IO registers. As we need
390 +diff --git a/arch/arm/mach-imx/cpu-imx31.c b/arch/arm/mach-imx/cpu-imx31.c
391 +index b9c24b851d1ab..35c544924e509 100644
392 +--- a/arch/arm/mach-imx/cpu-imx31.c
393 ++++ b/arch/arm/mach-imx/cpu-imx31.c
394 +@@ -39,6 +39,7 @@ static int mx31_read_cpu_rev(void)
395 +
396 + np = of_find_compatible_node(NULL, NULL, "fsl,imx31-iim");
397 + iim_base = of_iomap(np, 0);
398 ++ of_node_put(np);
399 + BUG_ON(!iim_base);
400 +
401 + /* read SREV register from IIM module */
402 +diff --git a/arch/arm/mach-imx/cpu-imx35.c b/arch/arm/mach-imx/cpu-imx35.c
403 +index 80e7d8ab9f1bb..1fe75b39c2d99 100644
404 +--- a/arch/arm/mach-imx/cpu-imx35.c
405 ++++ b/arch/arm/mach-imx/cpu-imx35.c
406 +@@ -21,6 +21,7 @@ static int mx35_read_cpu_rev(void)
407 +
408 + np = of_find_compatible_node(NULL, NULL, "fsl,imx35-iim");
409 + iim_base = of_iomap(np, 0);
410 ++ of_node_put(np);
411 + BUG_ON(!iim_base);
412 +
413 + rev = imx_readl(iim_base + MXC_IIMSREV);
414 +diff --git a/arch/arm/mach-imx/cpu-imx5.c b/arch/arm/mach-imx/cpu-imx5.c
415 +index ad56263778f93..a67c89bf155dd 100644
416 +--- a/arch/arm/mach-imx/cpu-imx5.c
417 ++++ b/arch/arm/mach-imx/cpu-imx5.c
418 +@@ -28,6 +28,7 @@ static u32 imx5_read_srev_reg(const char *compat)
419 +
420 + np = of_find_compatible_node(NULL, NULL, compat);
421 + iim_base = of_iomap(np, 0);
422 ++ of_node_put(np);
423 + WARN_ON(!iim_base);
424 +
425 + srev = readl(iim_base + IIM_SREV) & 0xff;
426 +diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
427 +index c1494a4dee25b..53f2d8774fdb9 100644
428 +--- a/arch/arm/mm/nommu.c
429 ++++ b/arch/arm/mm/nommu.c
430 +@@ -161,7 +161,7 @@ void __init paging_init(const struct machine_desc *mdesc)
431 + mpu_setup();
432 +
433 + /* allocate the zero page. */
434 +- zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
435 ++ zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
436 + if (!zero_page)
437 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
438 + __func__, PAGE_SIZE, PAGE_SIZE);
439 +diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
440 +index 03266bd90a06b..169f047fbca50 100644
441 +--- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
442 ++++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
443 +@@ -120,7 +120,7 @@
444 + &ecspi2 {
445 + pinctrl-names = "default";
446 + pinctrl-0 = <&pinctrl_espi2>;
447 +- cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
448 ++ cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>;
449 + status = "okay";
450 +
451 + eeprom@0 {
452 +@@ -316,7 +316,7 @@
453 + MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x82
454 + MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x82
455 + MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x82
456 +- MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x41
457 ++ MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x41
458 + >;
459 + };
460 +
461 +diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
462 +index d3ee6fc4baabd..72311b55f06da 100644
463 +--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
464 ++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
465 +@@ -759,6 +759,7 @@
466 + &usbotg2 {
467 + dr_mode = "host";
468 + vbus-supply = <&reg_usb2_vbus>;
469 ++ over-current-active-low;
470 + status = "okay";
471 + };
472 +
473 +diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
474 +index c2a5c2f7b204b..7c3f5c54f0400 100644
475 +--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
476 ++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
477 +@@ -9,6 +9,7 @@
478 + simple-audio-card,bitclock-master = <&dailink_master>;
479 + simple-audio-card,format = "i2s";
480 + simple-audio-card,frame-master = <&dailink_master>;
481 ++ simple-audio-card,mclk-fs = <256>;
482 + simple-audio-card,name = "imx8mm-wm8904";
483 + simple-audio-card,routing =
484 + "Headphone Jack", "HPOUTL",
485 +diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi
486 +index 73cc3fafa0180..b2bcd22821702 100644
487 +--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi
488 ++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi
489 +@@ -11,6 +11,7 @@
490 + simple-audio-card,bitclock-master = <&dailink_master>;
491 + simple-audio-card,format = "i2s";
492 + simple-audio-card,frame-master = <&dailink_master>;
493 ++ simple-audio-card,mclk-fs = <256>;
494 + simple-audio-card,name = "imx8mm-nau8822";
495 + simple-audio-card,routing =
496 + "Headphones", "LHP",
497 +diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
498 +index b4c1ef2559f20..126c839b45f2d 100644
499 +--- a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
500 ++++ b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
501 +@@ -36,8 +36,8 @@
502 +
503 + pcie0_refclk: pcie0-refclk {
504 + compatible = "fixed-clock";
505 +- #clock-cells = <0>;
506 +- clock-frequency = <100000000>;
507 ++ #clock-cells = <0>;
508 ++ clock-frequency = <100000000>;
509 + };
510 +
511 + reg_can1_stby: regulator-can1-stby {
512 +diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
513 +index 79b290a002c19..ecc4bce6db97c 100644
514 +--- a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
515 ++++ b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
516 +@@ -99,7 +99,6 @@
517 +
518 + regulators {
519 + buck1: BUCK1 {
520 +- regulator-compatible = "BUCK1";
521 + regulator-min-microvolt = <600000>;
522 + regulator-max-microvolt = <2187500>;
523 + regulator-boot-on;
524 +@@ -108,7 +107,6 @@
525 + };
526 +
527 + buck2: BUCK2 {
528 +- regulator-compatible = "BUCK2";
529 + regulator-min-microvolt = <600000>;
530 + regulator-max-microvolt = <2187500>;
531 + regulator-boot-on;
532 +@@ -119,7 +117,6 @@
533 + };
534 +
535 + buck4: BUCK4 {
536 +- regulator-compatible = "BUCK4";
537 + regulator-min-microvolt = <600000>;
538 + regulator-max-microvolt = <3400000>;
539 + regulator-boot-on;
540 +@@ -127,7 +124,6 @@
541 + };
542 +
543 + buck5: BUCK5 {
544 +- regulator-compatible = "BUCK5";
545 + regulator-min-microvolt = <600000>;
546 + regulator-max-microvolt = <3400000>;
547 + regulator-boot-on;
548 +@@ -135,7 +131,6 @@
549 + };
550 +
551 + buck6: BUCK6 {
552 +- regulator-compatible = "BUCK6";
553 + regulator-min-microvolt = <600000>;
554 + regulator-max-microvolt = <3400000>;
555 + regulator-boot-on;
556 +@@ -143,7 +138,6 @@
557 + };
558 +
559 + ldo1: LDO1 {
560 +- regulator-compatible = "LDO1";
561 + regulator-min-microvolt = <1600000>;
562 + regulator-max-microvolt = <3300000>;
563 + regulator-boot-on;
564 +@@ -151,7 +145,6 @@
565 + };
566 +
567 + ldo2: LDO2 {
568 +- regulator-compatible = "LDO2";
569 + regulator-min-microvolt = <800000>;
570 + regulator-max-microvolt = <1150000>;
571 + regulator-boot-on;
572 +@@ -159,7 +152,6 @@
573 + };
574 +
575 + ldo3: LDO3 {
576 +- regulator-compatible = "LDO3";
577 + regulator-min-microvolt = <800000>;
578 + regulator-max-microvolt = <3300000>;
579 + regulator-boot-on;
580 +@@ -167,13 +159,11 @@
581 + };
582 +
583 + ldo4: LDO4 {
584 +- regulator-compatible = "LDO4";
585 + regulator-min-microvolt = <800000>;
586 + regulator-max-microvolt = <3300000>;
587 + };
588 +
589 + ldo5: LDO5 {
590 +- regulator-compatible = "LDO5";
591 + regulator-min-microvolt = <1800000>;
592 + regulator-max-microvolt = <3300000>;
593 + regulator-boot-on;
594 +diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
595 +index d944ecca1b3c2..47fd6a0ba05ad 100644
596 +--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
597 ++++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
598 +@@ -523,6 +523,7 @@
599 + compatible = "fsl,imx8mp-gpc";
600 + reg = <0x303a0000 0x1000>;
601 + interrupt-parent = <&gic>;
602 ++ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
603 + interrupt-controller;
604 + #interrupt-cells = <3>;
605 +
606 +@@ -589,7 +590,7 @@
607 + reg = <IMX8MP_POWER_DOMAIN_MIPI_PHY2>;
608 + };
609 +
610 +- pgc_hsiomix: power-domains@17 {
611 ++ pgc_hsiomix: power-domain@17 {
612 + #power-domain-cells = <0>;
613 + reg = <IMX8MP_POWER_DOMAIN_HSIOMIX>;
614 + clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
615 +diff --git a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
616 +index 69786c326db00..27f9a9f331346 100644
617 +--- a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
618 ++++ b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
619 +@@ -74,7 +74,7 @@
620 +
621 + pinctrl_usdhc1: usdhc1grp {
622 + fsl,pins = <
623 +- MX93_PAD_SD1_CLK__USDHC1_CLK 0x17fe
624 ++ MX93_PAD_SD1_CLK__USDHC1_CLK 0x15fe
625 + MX93_PAD_SD1_CMD__USDHC1_CMD 0x13fe
626 + MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x13fe
627 + MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x13fe
628 +@@ -84,7 +84,7 @@
629 + MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x13fe
630 + MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x13fe
631 + MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x13fe
632 +- MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x17fe
633 ++ MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x15fe
634 + >;
635 + };
636 +
637 +@@ -102,7 +102,7 @@
638 +
639 + pinctrl_usdhc2: usdhc2grp {
640 + fsl,pins = <
641 +- MX93_PAD_SD2_CLK__USDHC2_CLK 0x17fe
642 ++ MX93_PAD_SD2_CLK__USDHC2_CLK 0x15fe
643 + MX93_PAD_SD2_CMD__USDHC2_CMD 0x13fe
644 + MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x13fe
645 + MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x13fe
646 +diff --git a/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi b/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi
647 +index 44ed6f963b75a..8e2ac687a410b 100644
648 +--- a/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi
649 ++++ b/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi
650 +@@ -97,7 +97,7 @@
651 +
652 + uart1: serial@12100 {
653 + compatible = "snps,dw-apb-uart";
654 +- reg = <0x11000 0x100>;
655 ++ reg = <0x12100 0x100>;
656 + reg-shift = <2>;
657 + interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
658 + reg-io-width = <1>;
659 +diff --git a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
660 +index c4e87d0aec42f..3ab0ad14e8704 100644
661 +--- a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
662 ++++ b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
663 +@@ -11,6 +11,12 @@
664 + #include <dt-bindings/gpio/gpio.h>
665 + #include <dt-bindings/input/gpio-keys.h>
666 +
667 ++/delete-node/ &adsp_mem;
668 ++/delete-node/ &audio_mem;
669 ++/delete-node/ &mpss_mem;
670 ++/delete-node/ &peripheral_region;
671 ++/delete-node/ &rmtfs_mem;
672 ++
673 + / {
674 + model = "Xiaomi Mi 4C";
675 + compatible = "xiaomi,libra", "qcom,msm8992";
676 +@@ -70,25 +76,67 @@
677 + #size-cells = <2>;
678 + ranges;
679 +
680 +- /* This is for getting crash logs using Android downstream kernels */
681 +- ramoops@dfc00000 {
682 +- compatible = "ramoops";
683 +- reg = <0x0 0xdfc00000 0x0 0x40000>;
684 +- console-size = <0x10000>;
685 +- record-size = <0x10000>;
686 +- ftrace-size = <0x10000>;
687 +- pmsg-size = <0x20000>;
688 ++ memory_hole: hole@6400000 {
689 ++ reg = <0 0x06400000 0 0x600000>;
690 ++ no-map;
691 ++ };
692 ++
693 ++ memory_hole2: hole2@6c00000 {
694 ++ reg = <0 0x06c00000 0 0x2400000>;
695 ++ no-map;
696 ++ };
697 ++
698 ++ mpss_mem: mpss@9000000 {
699 ++ reg = <0 0x09000000 0 0x5a00000>;
700 ++ no-map;
701 ++ };
702 ++
703 ++ tzapp: tzapp@ea00000 {
704 ++ reg = <0 0x0ea00000 0 0x1900000>;
705 ++ no-map;
706 ++ };
707 ++
708 ++ mdm_rfsa_mem: mdm-rfsa@ca0b0000 {
709 ++ reg = <0 0xca0b0000 0 0x10000>;
710 ++ no-map;
711 ++ };
712 ++
713 ++ rmtfs_mem: rmtfs@ca100000 {
714 ++ compatible = "qcom,rmtfs-mem";
715 ++ reg = <0 0xca100000 0 0x180000>;
716 ++ no-map;
717 ++
718 ++ qcom,client-id = <1>;
719 + };
720 +
721 +- modem_region: modem_region@9000000 {
722 +- reg = <0x0 0x9000000 0x0 0x5a00000>;
723 ++ audio_mem: audio@cb400000 {
724 ++ reg = <0 0xcb000000 0 0x400000>;
725 ++ no-mem;
726 ++ };
727 ++
728 ++ qseecom_mem: qseecom@cb400000 {
729 ++ reg = <0 0xcb400000 0 0x1c00000>;
730 ++ no-mem;
731 ++ };
732 ++
733 ++ adsp_rfsa_mem: adsp-rfsa@cd000000 {
734 ++ reg = <0 0xcd000000 0 0x10000>;
735 + no-map;
736 + };
737 +
738 +- tzapp: modem_region@ea00000 {
739 +- reg = <0x0 0xea00000 0x0 0x1900000>;
740 ++ sensor_rfsa_mem: sensor-rfsa@cd010000 {
741 ++ reg = <0 0xcd010000 0 0x10000>;
742 + no-map;
743 + };
744 ++
745 ++ ramoops@dfc00000 {
746 ++ compatible = "ramoops";
747 ++ reg = <0 0xdfc00000 0 0x40000>;
748 ++ console-size = <0x10000>;
749 ++ record-size = <0x10000>;
750 ++ ftrace-size = <0x10000>;
751 ++ pmsg-size = <0x20000>;
752 ++ };
753 + };
754 + };
755 +
756 +@@ -130,11 +178,6 @@
757 + status = "okay";
758 + };
759 +
760 +-&peripheral_region {
761 +- reg = <0x0 0x7400000 0x0 0x1c00000>;
762 +- no-map;
763 +-};
764 +-
765 + &pm8994_spmi_regulators {
766 + VDD_APC0: s8 {
767 + regulator-min-microvolt = <680000>;
768 +diff --git a/arch/arm64/boot/dts/qcom/msm8992.dtsi b/arch/arm64/boot/dts/qcom/msm8992.dtsi
769 +index 750643763a760..f4be09fc1b151 100644
770 +--- a/arch/arm64/boot/dts/qcom/msm8992.dtsi
771 ++++ b/arch/arm64/boot/dts/qcom/msm8992.dtsi
772 +@@ -36,10 +36,6 @@
773 + compatible = "qcom,rpmcc-msm8992", "qcom,rpmcc";
774 + };
775 +
776 +-&tcsr_mutex {
777 +- compatible = "qcom,sfpb-mutex";
778 +-};
779 +-
780 + &timer {
781 + interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
782 + <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
783 +diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
784 +index 9f2a136d5cbc5..146a4285c3952 100644
785 +--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
786 ++++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
787 +@@ -1173,7 +1173,7 @@
788 + clock-names = "aux", "ref_clk_src", "ref", "com_aux";
789 +
790 + resets = <&gcc GCC_USB3_PHY_PRIM_BCR>,
791 +- <&gcc GCC_USB3_DP_PHY_PRIM_BCR>;
792 ++ <&gcc GCC_USB4_DP_PHY_PRIM_BCR>;
793 + reset-names = "phy", "common";
794 +
795 + power-domains = <&gcc USB30_PRIM_GDSC>;
796 +diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
797 +index b9f3165075c9d..b13c22046de58 100644
798 +--- a/arch/arm64/include/asm/efi.h
799 ++++ b/arch/arm64/include/asm/efi.h
800 +@@ -14,8 +14,16 @@
801 +
802 + #ifdef CONFIG_EFI
803 + extern void efi_init(void);
804 ++
805 ++bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg);
806 + #else
807 + #define efi_init()
808 ++
809 ++static inline
810 ++bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg)
811 ++{
812 ++ return false;
813 ++}
814 + #endif
815 +
816 + int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
817 +@@ -40,8 +48,17 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
818 + })
819 +
820 + extern spinlock_t efi_rt_lock;
821 ++extern u64 *efi_rt_stack_top;
822 + efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
823 +
824 ++/*
825 ++ * efi_rt_stack_top[-1] contains the value the stack pointer had before
826 ++ * switching to the EFI runtime stack.
827 ++ */
828 ++#define current_in_efi() \
829 ++ (!preemptible() && efi_rt_stack_top != NULL && \
830 ++ on_task_stack(current, READ_ONCE(efi_rt_stack_top[-1]), 1))
831 ++
832 + #define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
833 +
834 + /*
835 +diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
836 +index 5a0edb064ea47..327cdcfcb1db0 100644
837 +--- a/arch/arm64/include/asm/stacktrace.h
838 ++++ b/arch/arm64/include/asm/stacktrace.h
839 +@@ -104,4 +104,19 @@ static inline struct stack_info stackinfo_get_sdei_critical(void)
840 + #define stackinfo_get_sdei_critical() stackinfo_get_unknown()
841 + #endif
842 +
843 ++#ifdef CONFIG_EFI
844 ++extern u64 *efi_rt_stack_top;
845 ++
846 ++static inline struct stack_info stackinfo_get_efi(void)
847 ++{
848 ++ unsigned long high = (u64)efi_rt_stack_top;
849 ++ unsigned long low = high - THREAD_SIZE;
850 ++
851 ++ return (struct stack_info) {
852 ++ .low = low,
853 ++ .high = high,
854 ++ };
855 ++}
856 ++#endif
857 ++
858 + #endif /* __ASM_STACKTRACE_H */
859 +diff --git a/arch/arm64/kernel/efi-rt-wrapper.S b/arch/arm64/kernel/efi-rt-wrapper.S
860 +index 2d3c4b02393e4..e8ae803662cf1 100644
861 +--- a/arch/arm64/kernel/efi-rt-wrapper.S
862 ++++ b/arch/arm64/kernel/efi-rt-wrapper.S
863 +@@ -7,7 +7,7 @@
864 + #include <asm/assembler.h>
865 +
866 + SYM_FUNC_START(__efi_rt_asm_wrapper)
867 +- stp x29, x30, [sp, #-32]!
868 ++ stp x29, x30, [sp, #-112]!
869 + mov x29, sp
870 +
871 + /*
872 +@@ -17,11 +17,21 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
873 + */
874 + stp x1, x18, [sp, #16]
875 +
876 ++ /*
877 ++ * Preserve all callee saved registers and preserve the stack pointer
878 ++ * value at the base of the EFI runtime stack so we can recover from
879 ++ * synchronous exceptions occurring while executing the firmware
880 ++ * routines.
881 ++ */
882 ++ stp x19, x20, [sp, #32]
883 ++ stp x21, x22, [sp, #48]
884 ++ stp x23, x24, [sp, #64]
885 ++ stp x25, x26, [sp, #80]
886 ++ stp x27, x28, [sp, #96]
887 ++
888 + ldr_l x16, efi_rt_stack_top
889 + mov sp, x16
890 +-#ifdef CONFIG_SHADOW_CALL_STACK
891 +- str x18, [sp, #-16]!
892 +-#endif
893 ++ stp x18, x29, [sp, #-16]!
894 +
895 + /*
896 + * We are lucky enough that no EFI runtime services take more than
897 +@@ -36,10 +46,13 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
898 + mov x4, x6
899 + blr x8
900 +
901 ++ mov x16, sp
902 + mov sp, x29
903 ++ str xzr, [x16, #8] // clear recorded task SP value
904 ++
905 + ldp x1, x2, [sp, #16]
906 + cmp x2, x18
907 +- ldp x29, x30, [sp], #32
908 ++ ldp x29, x30, [sp], #112
909 + b.ne 0f
910 + ret
911 + 0:
912 +@@ -57,3 +70,18 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
913 +
914 + b efi_handle_corrupted_x18 // tail call
915 + SYM_FUNC_END(__efi_rt_asm_wrapper)
916 ++
917 ++SYM_CODE_START(__efi_rt_asm_recover)
918 ++ mov sp, x30
919 ++
920 ++ ldr_l x16, efi_rt_stack_top // clear recorded task SP value
921 ++ str xzr, [x16, #-8]
922 ++
923 ++ ldp x19, x20, [sp, #32]
924 ++ ldp x21, x22, [sp, #48]
925 ++ ldp x23, x24, [sp, #64]
926 ++ ldp x25, x26, [sp, #80]
927 ++ ldp x27, x28, [sp, #96]
928 ++ ldp x29, x30, [sp], #112
929 ++ ret
930 ++SYM_CODE_END(__efi_rt_asm_recover)
931 +diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
932 +index 386bd81ca12bb..b273900f45668 100644
933 +--- a/arch/arm64/kernel/efi.c
934 ++++ b/arch/arm64/kernel/efi.c
935 +@@ -11,6 +11,7 @@
936 + #include <linux/init.h>
937 +
938 + #include <asm/efi.h>
939 ++#include <asm/stacktrace.h>
940 +
941 + static bool region_is_misaligned(const efi_memory_desc_t *md)
942 + {
943 +@@ -149,6 +150,28 @@ DEFINE_SPINLOCK(efi_rt_lock);
944 +
945 + asmlinkage u64 *efi_rt_stack_top __ro_after_init;
946 +
947 ++asmlinkage efi_status_t __efi_rt_asm_recover(void);
948 ++
949 ++bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg)
950 ++{
951 ++ /* Check whether the exception occurred while running the firmware */
952 ++ if (!current_in_efi() || regs->pc >= TASK_SIZE_64)
953 ++ return false;
954 ++
955 ++ pr_err(FW_BUG "Unable to handle %s in EFI runtime service\n", msg);
956 ++ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
957 ++ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
958 ++
959 ++ regs->regs[0] = EFI_ABORTED;
960 ++ regs->regs[30] = efi_rt_stack_top[-1];
961 ++ regs->pc = (u64)__efi_rt_asm_recover;
962 ++
963 ++ if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
964 ++ regs->regs[18] = efi_rt_stack_top[-2];
965 ++
966 ++ return true;
967 ++}
968 ++
969 + /* EFI requires 8 KiB of stack space for runtime services */
970 + static_assert(THREAD_SIZE >= SZ_8K);
971 +
972 +diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
973 +index 117e2c180f3c7..83154303e682c 100644
974 +--- a/arch/arm64/kernel/stacktrace.c
975 ++++ b/arch/arm64/kernel/stacktrace.c
976 +@@ -5,6 +5,7 @@
977 + * Copyright (C) 2012 ARM Ltd.
978 + */
979 + #include <linux/kernel.h>
980 ++#include <linux/efi.h>
981 + #include <linux/export.h>
982 + #include <linux/ftrace.h>
983 + #include <linux/sched.h>
984 +@@ -12,6 +13,7 @@
985 + #include <linux/sched/task_stack.h>
986 + #include <linux/stacktrace.h>
987 +
988 ++#include <asm/efi.h>
989 + #include <asm/irq.h>
990 + #include <asm/stack_pointer.h>
991 + #include <asm/stacktrace.h>
992 +@@ -186,6 +188,13 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
993 + : stackinfo_get_unknown(); \
994 + })
995 +
996 ++#define STACKINFO_EFI \
997 ++ ({ \
998 ++ ((task == current) && current_in_efi()) \
999 ++ ? stackinfo_get_efi() \
1000 ++ : stackinfo_get_unknown(); \
1001 ++ })
1002 ++
1003 + noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
1004 + void *cookie, struct task_struct *task,
1005 + struct pt_regs *regs)
1006 +@@ -199,6 +208,9 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
1007 + #if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
1008 + STACKINFO_SDEI(normal),
1009 + STACKINFO_SDEI(critical),
1010 ++#endif
1011 ++#ifdef CONFIG_EFI
1012 ++ STACKINFO_EFI,
1013 + #endif
1014 + };
1015 + struct unwind_state state = {
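For context on the STACKINFO_EFI plumbing above: a stack_info record just hands the unwinder a [low, high) address range, and a frame is trusted only if its stack pointer falls inside one of the registered ranges. A simplified sketch of that bounds check, with assumed, simplified types:

#include <stdbool.h>
#include <stdint.h>

struct stack_info {
	uintptr_t low;	/* lowest valid address, inclusive */
	uintptr_t high;	/* highest valid address, exclusive */
};

/* A frame pointer is followed only if it lies on a known stack. */
static bool on_stack(uintptr_t sp, const struct stack_info *info)
{
	return info->low <= sp && sp < info->high;
}

int main(void)
{
	/* EFI range as built by stackinfo_get_efi(): top minus stack size */
	struct stack_info efi = { .low = 0x9000 - 0x2000, .high = 0x9000 };
	return on_stack(0x8f00, &efi) ? 0 : 1;
}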
1016 +diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
1017 +index 826ff6f2a4e7b..5bdada3137287 100644
1018 +--- a/arch/arm64/kvm/vgic/vgic-v3.c
1019 ++++ b/arch/arm64/kvm/vgic/vgic-v3.c
1020 +@@ -350,26 +350,23 @@ retry:
1021 + * The deactivation of the doorbell interrupt will trigger the
1022 + * unmapping of the associated vPE.
1023 + */
1024 +-static void unmap_all_vpes(struct vgic_dist *dist)
1025 ++static void unmap_all_vpes(struct kvm *kvm)
1026 + {
1027 +- struct irq_desc *desc;
1028 ++ struct vgic_dist *dist = &kvm->arch.vgic;
1029 + int i;
1030 +
1031 +- for (i = 0; i < dist->its_vm.nr_vpes; i++) {
1032 +- desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
1033 +- irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
1034 +- }
1035 ++ for (i = 0; i < dist->its_vm.nr_vpes; i++)
1036 ++ free_irq(dist->its_vm.vpes[i]->irq, kvm_get_vcpu(kvm, i));
1037 + }
1038 +
1039 +-static void map_all_vpes(struct vgic_dist *dist)
1040 ++static void map_all_vpes(struct kvm *kvm)
1041 + {
1042 +- struct irq_desc *desc;
1043 ++ struct vgic_dist *dist = &kvm->arch.vgic;
1044 + int i;
1045 +
1046 +- for (i = 0; i < dist->its_vm.nr_vpes; i++) {
1047 +- desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
1048 +- irq_domain_activate_irq(irq_desc_get_irq_data(desc), false);
1049 +- }
1050 ++ for (i = 0; i < dist->its_vm.nr_vpes; i++)
1051 ++ WARN_ON(vgic_v4_request_vpe_irq(kvm_get_vcpu(kvm, i),
1052 ++ dist->its_vm.vpes[i]->irq));
1053 + }
1054 +
1055 + /**
1056 +@@ -394,7 +391,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
1057 + * and enabling of the doorbells have already been done.
1058 + */
1059 + if (kvm_vgic_global_state.has_gicv4_1) {
1060 +- unmap_all_vpes(dist);
1061 ++ unmap_all_vpes(kvm);
1062 + vlpi_avail = true;
1063 + }
1064 +
1065 +@@ -444,7 +441,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
1066 +
1067 + out:
1068 + if (vlpi_avail)
1069 +- map_all_vpes(dist);
1070 ++ map_all_vpes(kvm);
1071 +
1072 + return ret;
1073 + }
1074 +diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
1075 +index ad06ba6c9b009..a413718be92b8 100644
1076 +--- a/arch/arm64/kvm/vgic/vgic-v4.c
1077 ++++ b/arch/arm64/kvm/vgic/vgic-v4.c
1078 +@@ -222,6 +222,11 @@ void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val)
1079 + *val = !!(*ptr & mask);
1080 + }
1081 +
1082 ++int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
1083 ++{
1084 ++ return request_irq(irq, vgic_v4_doorbell_handler, 0, "vcpu", vcpu);
1085 ++}
1086 ++
1087 + /**
1088 + * vgic_v4_init - Initialize the GICv4 data structures
1089 + * @kvm: Pointer to the VM being initialized
1090 +@@ -283,8 +288,7 @@ int vgic_v4_init(struct kvm *kvm)
1091 + irq_flags &= ~IRQ_NOAUTOEN;
1092 + irq_set_status_flags(irq, irq_flags);
1093 +
1094 +- ret = request_irq(irq, vgic_v4_doorbell_handler,
1095 +- 0, "vcpu", vcpu);
1096 ++ ret = vgic_v4_request_vpe_irq(vcpu, irq);
1097 + if (ret) {
1098 + kvm_err("failed to allocate vcpu IRQ%d\n", irq);
1099 + /*
1100 +diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
1101 +index 0c8da72953f06..23e280fa0a16f 100644
1102 +--- a/arch/arm64/kvm/vgic/vgic.h
1103 ++++ b/arch/arm64/kvm/vgic/vgic.h
1104 +@@ -331,5 +331,6 @@ int vgic_v4_init(struct kvm *kvm);
1105 + void vgic_v4_teardown(struct kvm *kvm);
1106 + void vgic_v4_configure_vsgis(struct kvm *kvm);
1107 + void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
1108 ++int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);
1109 +
1110 + #endif
1111 +diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
1112 +index 74f76514a48d0..3eb2825d08cff 100644
1113 +--- a/arch/arm64/mm/fault.c
1114 ++++ b/arch/arm64/mm/fault.c
1115 +@@ -30,6 +30,7 @@
1116 + #include <asm/bug.h>
1117 + #include <asm/cmpxchg.h>
1118 + #include <asm/cpufeature.h>
1119 ++#include <asm/efi.h>
1120 + #include <asm/exception.h>
1121 + #include <asm/daifflags.h>
1122 + #include <asm/debug-monitors.h>
1123 +@@ -397,6 +398,9 @@ static void __do_kernel_fault(unsigned long addr, unsigned long esr,
1124 + msg = "paging request";
1125 + }
1126 +
1127 ++ if (efi_runtime_fixup_exception(regs, msg))
1128 ++ return;
1129 ++
1130 + die_kernel_fault(msg, addr, esr, regs);
1131 + }
1132 +
1133 +diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
1134 +index b865046e4dbbc..4bf6c449d78b6 100644
1135 +--- a/arch/riscv/kernel/head.S
1136 ++++ b/arch/riscv/kernel/head.S
1137 +@@ -326,7 +326,7 @@ clear_bss_done:
1138 + call soc_early_init
1139 + tail start_kernel
1140 +
1141 +-#if CONFIG_RISCV_BOOT_SPINWAIT
1142 ++#ifdef CONFIG_RISCV_BOOT_SPINWAIT
1143 + .Lsecondary_start:
1144 + /* Set trap vector to spin forever to help debug */
1145 + la a3, .Lsecondary_park
1146 +diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c
1147 +index d73e96f6ed7c5..a20568bd1f1a8 100644
1148 +--- a/arch/riscv/kernel/probes/simulate-insn.c
1149 ++++ b/arch/riscv/kernel/probes/simulate-insn.c
1150 +@@ -71,11 +71,11 @@ bool __kprobes simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *reg
1151 + u32 rd_index = (opcode >> 7) & 0x1f;
1152 + u32 rs1_index = (opcode >> 15) & 0x1f;
1153 +
1154 +- ret = rv_insn_reg_set_val(regs, rd_index, addr + 4);
1155 ++ ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr);
1156 + if (!ret)
1157 + return ret;
1158 +
1159 +- ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr);
1160 ++ ret = rv_insn_reg_set_val(regs, rd_index, addr + 4);
1161 + if (!ret)
1162 + return ret;
1163 +
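The reordering in simulate_jalr() above matters when the destination and base registers alias, e.g. "jalr ra, 0(ra)": writing the link value into rd first would clobber the jump base that rs1 still has to supply. A simplified, compilable illustration, with a plain array standing in for the kernel's pt_regs accessors:

#include <assert.h>

static unsigned long regs[32];

static unsigned long simulate_jalr(unsigned rd, unsigned rs1, unsigned long pc)
{
	unsigned long base = regs[rs1];	/* read rs1 first ... */
	regs[rd] = pc + 4;		/* ... then write the link register */
	return base;			/* jump target (immediate omitted) */
}

int main(void)
{
	regs[1] = 0x1000;			/* ra holds the jump base */
	unsigned long t = simulate_jalr(1, 1, 0x2000);	/* jalr ra, 0(ra) */
	assert(t == 0x1000);	/* with the writes swapped, t would be 0x2004 */
	return 0;
}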
1164 +diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
1165 +index 3373df413c882..ddb2afba6d255 100644
1166 +--- a/arch/riscv/kernel/smpboot.c
1167 ++++ b/arch/riscv/kernel/smpboot.c
1168 +@@ -39,7 +39,6 @@ static DECLARE_COMPLETION(cpu_running);
1169 +
1170 + void __init smp_prepare_boot_cpu(void)
1171 + {
1172 +- init_cpu_topology();
1173 + }
1174 +
1175 + void __init smp_prepare_cpus(unsigned int max_cpus)
1176 +@@ -48,6 +47,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
1177 + int ret;
1178 + unsigned int curr_cpuid;
1179 +
1180 ++ init_cpu_topology();
1181 ++
1182 + curr_cpuid = smp_processor_id();
1183 + store_cpu_topology(curr_cpuid);
1184 + numa_store_cpu_info(curr_cpuid);
1185 +diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
1186 +index 77f24262c25c1..ac665b9670c5d 100644
1187 +--- a/arch/s390/include/asm/debug.h
1188 ++++ b/arch/s390/include/asm/debug.h
1189 +@@ -4,8 +4,8 @@
1190 + *
1191 + * Copyright IBM Corp. 1999, 2020
1192 + */
1193 +-#ifndef DEBUG_H
1194 +-#define DEBUG_H
1195 ++#ifndef _ASM_S390_DEBUG_H
1196 ++#define _ASM_S390_DEBUG_H
1197 +
1198 + #include <linux/string.h>
1199 + #include <linux/spinlock.h>
1200 +@@ -487,4 +487,4 @@ void debug_register_static(debug_info_t *id, int pages_per_area, int nr_areas);
1201 +
1202 + #endif /* MODULE */
1203 +
1204 +-#endif /* DEBUG_H */
1205 ++#endif /* _ASM_S390_DEBUG_H */
1206 +diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
1207 +index 5ea3830af0ccf..f81d96710595a 100644
1208 +--- a/arch/s390/kernel/vmlinux.lds.S
1209 ++++ b/arch/s390/kernel/vmlinux.lds.S
1210 +@@ -79,6 +79,7 @@ SECTIONS
1211 + _end_amode31_refs = .;
1212 + }
1213 +
1214 ++ . = ALIGN(PAGE_SIZE);
1215 + _edata = .; /* End of data section */
1216 +
1217 + /* will be freed after init */
1218 +@@ -193,6 +194,7 @@ SECTIONS
1219 +
1220 + BSS_SECTION(PAGE_SIZE, 4 * PAGE_SIZE, PAGE_SIZE)
1221 +
1222 ++ . = ALIGN(PAGE_SIZE);
1223 + _end = . ;
1224 +
1225 + /*
1226 +diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
1227 +index ab569faf0df24..6d74acea5e859 100644
1228 +--- a/arch/s390/kvm/interrupt.c
1229 ++++ b/arch/s390/kvm/interrupt.c
1230 +@@ -83,8 +83,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
1231 + struct esca_block *sca = vcpu->kvm->arch.sca;
1232 + union esca_sigp_ctrl *sigp_ctrl =
1233 + &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
1234 +- union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
1235 ++ union esca_sigp_ctrl new_val = {0}, old_val;
1236 +
1237 ++ old_val = READ_ONCE(*sigp_ctrl);
1238 + new_val.scn = src_id;
1239 + new_val.c = 1;
1240 + old_val.c = 0;
1241 +@@ -95,8 +96,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
1242 + struct bsca_block *sca = vcpu->kvm->arch.sca;
1243 + union bsca_sigp_ctrl *sigp_ctrl =
1244 + &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
1245 +- union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
1246 ++ union bsca_sigp_ctrl new_val = {0}, old_val;
1247 +
1248 ++ old_val = READ_ONCE(*sigp_ctrl);
1249 + new_val.scn = src_id;
1250 + new_val.c = 1;
1251 + old_val.c = 0;
1252 +@@ -126,16 +128,18 @@ static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
1253 + struct esca_block *sca = vcpu->kvm->arch.sca;
1254 + union esca_sigp_ctrl *sigp_ctrl =
1255 + &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
1256 +- union esca_sigp_ctrl old = *sigp_ctrl;
1257 ++ union esca_sigp_ctrl old;
1258 +
1259 ++ old = READ_ONCE(*sigp_ctrl);
1260 + expect = old.value;
1261 + rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
1262 + } else {
1263 + struct bsca_block *sca = vcpu->kvm->arch.sca;
1264 + union bsca_sigp_ctrl *sigp_ctrl =
1265 + &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
1266 +- union bsca_sigp_ctrl old = *sigp_ctrl;
1267 ++ union bsca_sigp_ctrl old;
1268 +
1269 ++ old = READ_ONCE(*sigp_ctrl);
1270 + expect = old.value;
1271 + rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
1272 + }
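The READ_ONCE() changes above all close the same hole: the sigp_ctrl word is updated concurrently by other CPUs, and a plain structure assignment lets the compiler load it piecemeal or more than once, so the "expected" value later fed to cmpxchg() may never have existed in memory as a whole. A user-space sketch of the corrected pattern, using C11 atomics in place of the kernel's READ_ONCE() and cmpxchg():

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint16_t sigp_ctrl;	/* concurrently written control word */

static int clear_ext_call(void)
{
	/* single load, analogous to old_val = READ_ONCE(*sigp_ctrl) */
	uint16_t expect = atomic_load_explicit(&sigp_ctrl, memory_order_relaxed);

	/* clears the word only if it still holds the snapshot value */
	return atomic_compare_exchange_strong(&sigp_ctrl, &expect, 0);
}

int main(void)
{
	atomic_store(&sigp_ctrl, 0x8001);
	return clear_ext_call() ? 0 : 1;
}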
1273 +diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
1274 +index d4a314cc50d6e..321a5011042d4 100644
1275 +--- a/arch/x86/boot/compressed/ident_map_64.c
1276 ++++ b/arch/x86/boot/compressed/ident_map_64.c
1277 +@@ -180,6 +180,12 @@ void initialize_identity_maps(void *rmode)
1278 +
1279 + /* Load the new page-table. */
1280 + write_cr3(top_level_pgt);
1281 ++
1282 ++ /*
1283 ++ * Now that the required page table mappings are established and a
1284 ++ * GHCB can be used, check for SNP guest/HV feature compatibility.
1285 ++ */
1286 ++ snp_check_features();
1287 + }
1288 +
1289 + static pte_t *split_large_pmd(struct x86_mapping_info *info,
1290 +diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
1291 +index 62208ec04ca4b..20118fb7c53bb 100644
1292 +--- a/arch/x86/boot/compressed/misc.h
1293 ++++ b/arch/x86/boot/compressed/misc.h
1294 +@@ -126,6 +126,7 @@ static inline void console_init(void)
1295 +
1296 + #ifdef CONFIG_AMD_MEM_ENCRYPT
1297 + void sev_enable(struct boot_params *bp);
1298 ++void snp_check_features(void);
1299 + void sev_es_shutdown_ghcb(void);
1300 + extern bool sev_es_check_ghcb_fault(unsigned long address);
1301 + void snp_set_page_private(unsigned long paddr);
1302 +@@ -143,6 +144,7 @@ static inline void sev_enable(struct boot_params *bp)
1303 + if (bp)
1304 + bp->cc_blob_address = 0;
1305 + }
1306 ++static inline void snp_check_features(void) { }
1307 + static inline void sev_es_shutdown_ghcb(void) { }
1308 + static inline bool sev_es_check_ghcb_fault(unsigned long address)
1309 + {
1310 +diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
1311 +index c93930d5ccbd0..d63ad8f99f83a 100644
1312 +--- a/arch/x86/boot/compressed/sev.c
1313 ++++ b/arch/x86/boot/compressed/sev.c
1314 +@@ -208,6 +208,23 @@ void sev_es_shutdown_ghcb(void)
1315 + error("Can't unmap GHCB page");
1316 + }
1317 +
1318 ++static void __noreturn sev_es_ghcb_terminate(struct ghcb *ghcb, unsigned int set,
1319 ++ unsigned int reason, u64 exit_info_2)
1320 ++{
1321 ++ u64 exit_info_1 = SVM_VMGEXIT_TERM_REASON(set, reason);
1322 ++
1323 ++ vc_ghcb_invalidate(ghcb);
1324 ++ ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_TERM_REQUEST);
1325 ++ ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
1326 ++ ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
1327 ++
1328 ++ sev_es_wr_ghcb_msr(__pa(ghcb));
1329 ++ VMGEXIT();
1330 ++
1331 ++ while (true)
1332 ++ asm volatile("hlt\n" : : : "memory");
1333 ++}
1334 ++
1335 + bool sev_es_check_ghcb_fault(unsigned long address)
1336 + {
1337 + /* Check whether the fault was on the GHCB page */
1338 +@@ -270,6 +287,59 @@ static void enforce_vmpl0(void)
1339 + sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_NOT_VMPL0);
1340 + }
1341 +
1342 ++/*
1343 ++ * SNP_FEATURES_IMPL_REQ is the mask of SNP features that will need
1344 ++ * guest side implementation for proper functioning of the guest. If any
1345 ++ * of these features are enabled in the hypervisor but are lacking guest
1346 ++ * side implementation, the behavior of the guest will be undefined. The
1347 ++ * guest could fail in non-obvious way making it difficult to debug.
1348 ++ *
1349 ++ * As the behavior of reserved feature bits is unknown to be on the
1350 ++ * safe side add them to the required features mask.
1351 ++ */
1352 ++#define SNP_FEATURES_IMPL_REQ (MSR_AMD64_SNP_VTOM | \
1353 ++ MSR_AMD64_SNP_REFLECT_VC | \
1354 ++ MSR_AMD64_SNP_RESTRICTED_INJ | \
1355 ++ MSR_AMD64_SNP_ALT_INJ | \
1356 ++ MSR_AMD64_SNP_DEBUG_SWAP | \
1357 ++ MSR_AMD64_SNP_VMPL_SSS | \
1358 ++ MSR_AMD64_SNP_SECURE_TSC | \
1359 ++ MSR_AMD64_SNP_VMGEXIT_PARAM | \
1360 ++ MSR_AMD64_SNP_VMSA_REG_PROTECTION | \
1361 ++ MSR_AMD64_SNP_RESERVED_BIT13 | \
1362 ++ MSR_AMD64_SNP_RESERVED_BIT15 | \
1363 ++ MSR_AMD64_SNP_RESERVED_MASK)
1364 ++
1365 ++/*
1366 ++ * SNP_FEATURES_PRESENT is the mask of SNP features that are implemented
1367 ++ * by the guest kernel. As and when a new feature is implemented in the
1368 ++ * guest kernel, a corresponding bit should be added to the mask.
1369 ++ */
1370 ++#define SNP_FEATURES_PRESENT (0)
1371 ++
1372 ++void snp_check_features(void)
1373 ++{
1374 ++ u64 unsupported;
1375 ++
1376 ++ if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
1377 ++ return;
1378 ++
1379 ++ /*
1380 ++ * Terminate the boot if hypervisor has enabled any feature lacking
1381 ++ * guest side implementation. Pass on the unsupported features mask through
1382 ++ * EXIT_INFO_2 of the GHCB protocol so that those features can be reported
1383 ++ * as part of the guest boot failure.
1384 ++ */
1385 ++ unsupported = sev_status & SNP_FEATURES_IMPL_REQ & ~SNP_FEATURES_PRESENT;
1386 ++ if (unsupported) {
1387 ++ if (ghcb_version < 2 || (!boot_ghcb && !early_setup_ghcb()))
1388 ++ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
1389 ++
1390 ++ sev_es_ghcb_terminate(boot_ghcb, SEV_TERM_SET_GEN,
1391 ++ GHCB_SNP_UNSUPPORTED, unsupported);
1392 ++ }
1393 ++}
1394 ++
1395 + void sev_enable(struct boot_params *bp)
1396 + {
1397 + unsigned int eax, ebx, ecx, edx;
1398 +diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
1399 +index d6f3703e41194..4386b10682ce4 100644
1400 +--- a/arch/x86/events/amd/core.c
1401 ++++ b/arch/x86/events/amd/core.c
1402 +@@ -1387,7 +1387,7 @@ static int __init amd_core_pmu_init(void)
1403 + * numbered counter following it.
1404 + */
1405 + for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
1406 +- even_ctr_mask |= 1 << i;
1407 ++ even_ctr_mask |= BIT_ULL(i);
1408 +
1409 + pair_constraint = (struct event_constraint)
1410 + __EVENT_CONSTRAINT(0, even_ctr_mask, 0,
1411 +diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
1412 +index a2834bc93149a..3019fb1926e35 100644
1413 +--- a/arch/x86/events/intel/cstate.c
1414 ++++ b/arch/x86/events/intel/cstate.c
1415 +@@ -41,6 +41,7 @@
1416 + * MSR_CORE_C1_RES: CORE C1 Residency Counter
1417 + * perf code: 0x00
1418 + * Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL
1419 ++ * MTL
1420 + * Scope: Core (each processor core has a MSR)
1421 + * MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
1422 + * perf code: 0x01
1423 +@@ -51,50 +52,50 @@
1424 + * perf code: 0x02
1425 + * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
1426 + * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
1427 +- * TGL,TNT,RKL,ADL,RPL,SPR
1428 ++ * TGL,TNT,RKL,ADL,RPL,SPR,MTL
1429 + * Scope: Core
1430 + * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
1431 + * perf code: 0x03
1432 + * Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
1433 +- * ICL,TGL,RKL,ADL,RPL
1434 ++ * ICL,TGL,RKL,ADL,RPL,MTL
1435 + * Scope: Core
1436 + * MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
1437 + * perf code: 0x00
1438 + * Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
1439 + * KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
1440 +- * RPL,SPR
1441 ++ * RPL,SPR,MTL
1442 + * Scope: Package (physical package)
1443 + * MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
1444 + * perf code: 0x01
1445 + * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
1446 + * GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL,
1447 +- * ADL,RPL
1448 ++ * ADL,RPL,MTL
1449 + * Scope: Package (physical package)
1450 + * MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
1451 + * perf code: 0x02
1452 + * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
1453 + * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
1454 +- * TGL,TNT,RKL,ADL,RPL,SPR
1455 ++ * TGL,TNT,RKL,ADL,RPL,SPR,MTL
1456 + * Scope: Package (physical package)
1457 + * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
1458 + * perf code: 0x03
1459 + * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
1460 +- * KBL,CML,ICL,TGL,RKL,ADL,RPL
1461 ++ * KBL,CML,ICL,TGL,RKL,ADL,RPL,MTL
1462 + * Scope: Package (physical package)
1463 + * MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
1464 + * perf code: 0x04
1465 + * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
1466 +- * ADL,RPL
1467 ++ * ADL,RPL,MTL
1468 + * Scope: Package (physical package)
1469 + * MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
1470 + * perf code: 0x05
1471 + * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
1472 +- * ADL,RPL
1473 ++ * ADL,RPL,MTL
1474 + * Scope: Package (physical package)
1475 + * MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
1476 + * perf code: 0x06
1477 + * Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
1478 +- * TNT,RKL,ADL,RPL
1479 ++ * TNT,RKL,ADL,RPL,MTL
1480 + * Scope: Package (physical package)
1481 + *
1482 + */
1483 +@@ -686,6 +687,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
1484 + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &adl_cstates),
1485 + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &adl_cstates),
1486 + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &adl_cstates),
1487 ++ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &adl_cstates),
1488 ++ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &adl_cstates),
1489 + { },
1490 + };
1491 + MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
1492 +diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
1493 +index 6f1ccc57a6921..459b1aafd4d4a 100644
1494 +--- a/arch/x86/events/intel/uncore.c
1495 ++++ b/arch/x86/events/intel/uncore.c
1496 +@@ -1833,6 +1833,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
1497 + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &adl_uncore_init),
1498 + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &adl_uncore_init),
1499 + X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &spr_uncore_init),
1500 ++ X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &spr_uncore_init),
1501 + X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init),
1502 + {},
1503 + };
1504 +diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
1505 +index ecced3a52668a..c65d8906cbcf4 100644
1506 +--- a/arch/x86/events/msr.c
1507 ++++ b/arch/x86/events/msr.c
1508 +@@ -69,6 +69,7 @@ static bool test_intel(int idx, void *data)
1509 + case INTEL_FAM6_BROADWELL_G:
1510 + case INTEL_FAM6_BROADWELL_X:
1511 + case INTEL_FAM6_SAPPHIRERAPIDS_X:
1512 ++ case INTEL_FAM6_EMERALDRAPIDS_X:
1513 +
1514 + case INTEL_FAM6_ATOM_SILVERMONT:
1515 + case INTEL_FAM6_ATOM_SILVERMONT_D:
1516 +@@ -107,6 +108,8 @@ static bool test_intel(int idx, void *data)
1517 + case INTEL_FAM6_RAPTORLAKE:
1518 + case INTEL_FAM6_RAPTORLAKE_P:
1519 + case INTEL_FAM6_RAPTORLAKE_S:
1520 ++ case INTEL_FAM6_METEORLAKE:
1521 ++ case INTEL_FAM6_METEORLAKE_L:
1522 + if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
1523 + return true;
1524 + break;
1525 +diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
1526 +index 65064d9f7fa6e..8eb74cf386dbe 100644
1527 +--- a/arch/x86/include/asm/acpi.h
1528 ++++ b/arch/x86/include/asm/acpi.h
1529 +@@ -14,6 +14,7 @@
1530 + #include <asm/mmu.h>
1531 + #include <asm/mpspec.h>
1532 + #include <asm/x86_init.h>
1533 ++#include <asm/cpufeature.h>
1534 +
1535 + #ifdef CONFIG_ACPI_APEI
1536 + # include <asm/pgtable_types.h>
1537 +@@ -63,6 +64,13 @@ extern int (*acpi_suspend_lowlevel)(void);
1538 + /* Physical address to resume after wakeup */
1539 + unsigned long acpi_get_wakeup_address(void);
1540 +
1541 ++static inline bool acpi_skip_set_wakeup_address(void)
1542 ++{
1543 ++ return cpu_feature_enabled(X86_FEATURE_XENPV);
1544 ++}
1545 ++
1546 ++#define acpi_skip_set_wakeup_address acpi_skip_set_wakeup_address
1547 ++
1548 + /*
1549 + * Check if the CPU can handle C2 and deeper
1550 + */
1551 +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
1552 +index 4a2af82553e4f..91447f018f6e4 100644
1553 +--- a/arch/x86/include/asm/msr-index.h
1554 ++++ b/arch/x86/include/asm/msr-index.h
1555 +@@ -571,6 +571,26 @@
1556 + #define MSR_AMD64_SEV_ES_ENABLED BIT_ULL(MSR_AMD64_SEV_ES_ENABLED_BIT)
1557 + #define MSR_AMD64_SEV_SNP_ENABLED BIT_ULL(MSR_AMD64_SEV_SNP_ENABLED_BIT)
1558 +
1559 ++/* SNP feature bits enabled by the hypervisor */
1560 ++#define MSR_AMD64_SNP_VTOM BIT_ULL(3)
1561 ++#define MSR_AMD64_SNP_REFLECT_VC BIT_ULL(4)
1562 ++#define MSR_AMD64_SNP_RESTRICTED_INJ BIT_ULL(5)
1563 ++#define MSR_AMD64_SNP_ALT_INJ BIT_ULL(6)
1564 ++#define MSR_AMD64_SNP_DEBUG_SWAP BIT_ULL(7)
1565 ++#define MSR_AMD64_SNP_PREVENT_HOST_IBS BIT_ULL(8)
1566 ++#define MSR_AMD64_SNP_BTB_ISOLATION BIT_ULL(9)
1567 ++#define MSR_AMD64_SNP_VMPL_SSS BIT_ULL(10)
1568 ++#define MSR_AMD64_SNP_SECURE_TSC BIT_ULL(11)
1569 ++#define MSR_AMD64_SNP_VMGEXIT_PARAM BIT_ULL(12)
1570 ++#define MSR_AMD64_SNP_IBS_VIRT BIT_ULL(14)
1571 ++#define MSR_AMD64_SNP_VMSA_REG_PROTECTION BIT_ULL(16)
1572 ++#define MSR_AMD64_SNP_SMT_PROTECTION BIT_ULL(17)
1573 ++
1574 ++/* SNP feature bits reserved for future use. */
1575 ++#define MSR_AMD64_SNP_RESERVED_BIT13 BIT_ULL(13)
1576 ++#define MSR_AMD64_SNP_RESERVED_BIT15 BIT_ULL(15)
1577 ++#define MSR_AMD64_SNP_RESERVED_MASK GENMASK_ULL(63, 18)
1578 ++
1579 + #define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
1580 +
1581 + /* AMD Collaborative Processor Performance Control MSRs */
1582 +diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
1583 +index f69c168391aa5..80e1df482337d 100644
1584 +--- a/arch/x86/include/uapi/asm/svm.h
1585 ++++ b/arch/x86/include/uapi/asm/svm.h
1586 +@@ -116,6 +116,12 @@
1587 + #define SVM_VMGEXIT_AP_CREATE 1
1588 + #define SVM_VMGEXIT_AP_DESTROY 2
1589 + #define SVM_VMGEXIT_HV_FEATURES 0x8000fffd
1590 ++#define SVM_VMGEXIT_TERM_REQUEST 0x8000fffe
1591 ++#define SVM_VMGEXIT_TERM_REASON(reason_set, reason_code) \
1592 ++ /* SW_EXITINFO1[3:0] */ \
1593 ++ (((((u64)reason_set) & 0xf)) | \
1594 ++ /* SW_EXITINFO1[11:4] */ \
1595 ++ ((((u64)reason_code) & 0xff) << 4))
1596 + #define SVM_VMGEXIT_UNSUPPORTED_EVENT 0x8000ffff
1597 +
1598 + /* Exit code reserved for hypervisor/software use */
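The new SVM_VMGEXIT_TERM_REASON() macro packs a guest termination reason into the low bits of SW_EXITINFO1: the reason set into bits [3:0] and the reason code into bits [11:4]. A minimal userspace sketch of the same encoding (the input values are illustrative, not taken from the GHCB spec):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the macro above: reason_set -> SW_EXITINFO1[3:0],
 * reason_code -> SW_EXITINFO1[11:4]. */
static uint64_t term_reason(uint64_t reason_set, uint64_t reason_code)
{
    return (reason_set & 0xf) | ((reason_code & 0xff) << 4);
}

int main(void)
{
    /* Reason set 0, code 2 encodes as 0x20. */
    printf("SW_EXITINFO1 = 0x%llx\n",
           (unsigned long long)term_reason(0, 2));
    return 0;
}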
1599 +diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
1600 +index 15aefa3f3e18e..f91e5e31aa4f0 100644
1601 +--- a/arch/x86/kernel/i8259.c
1602 ++++ b/arch/x86/kernel/i8259.c
1603 +@@ -114,6 +114,7 @@ static void make_8259A_irq(unsigned int irq)
1604 + disable_irq_nosync(irq);
1605 + io_apic_irqs &= ~(1<<irq);
1606 + irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
1607 ++ irq_set_status_flags(irq, IRQ_LEVEL);
1608 + enable_irq(irq);
1609 + lapic_assign_legacy_vector(irq, true);
1610 + }
1611 +diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
1612 +index beb1bada1b0ab..c683666876f1c 100644
1613 +--- a/arch/x86/kernel/irqinit.c
1614 ++++ b/arch/x86/kernel/irqinit.c
1615 +@@ -65,8 +65,10 @@ void __init init_ISA_irqs(void)
1616 +
1617 + legacy_pic->init(0);
1618 +
1619 +- for (i = 0; i < nr_legacy_irqs(); i++)
1620 ++ for (i = 0; i < nr_legacy_irqs(); i++) {
1621 + irq_set_chip_and_handler(i, chip, handle_level_irq);
1622 ++ irq_set_status_flags(i, IRQ_LEVEL);
1623 ++ }
1624 + }
1625 +
1626 + void __init init_IRQ(void)
1627 +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
1628 +index 63247c57c72cc..4ae248e87f5ed 100644
1629 +--- a/arch/x86/kvm/vmx/vmx.c
1630 ++++ b/arch/x86/kvm/vmx/vmx.c
1631 +@@ -3412,18 +3412,15 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var)
1632 + {
1633 + u32 ar;
1634 +
1635 +- if (var->unusable || !var->present)
1636 +- ar = 1 << 16;
1637 +- else {
1638 +- ar = var->type & 15;
1639 +- ar |= (var->s & 1) << 4;
1640 +- ar |= (var->dpl & 3) << 5;
1641 +- ar |= (var->present & 1) << 7;
1642 +- ar |= (var->avl & 1) << 12;
1643 +- ar |= (var->l & 1) << 13;
1644 +- ar |= (var->db & 1) << 14;
1645 +- ar |= (var->g & 1) << 15;
1646 +- }
1647 ++ ar = var->type & 15;
1648 ++ ar |= (var->s & 1) << 4;
1649 ++ ar |= (var->dpl & 3) << 5;
1650 ++ ar |= (var->present & 1) << 7;
1651 ++ ar |= (var->avl & 1) << 12;
1652 ++ ar |= (var->l & 1) << 13;
1653 ++ ar |= (var->db & 1) << 14;
1654 ++ ar |= (var->g & 1) << 15;
1655 ++ ar |= (var->unusable || !var->present) << 16;
1656 +
1657 + return ar;
1658 + }
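The vmx.c hunk drops the early special case for unusable segments and builds the access-rights word unconditionally, folding "unusable or not present" into bit 16; the practical effect is that the remaining attribute bits of an unusable segment are preserved instead of being reported as a bare 1 << 16. A standalone sketch of the resulting packing, following the VMCS segment AR layout the code encodes (type in bits 0-3, S in 4, DPL in 5-6, P in 7, AVL in 12, L in 13, D/B in 14, G in 15, unusable in 16):

#include <stdint.h>
#include <stdbool.h>

struct seg {
    uint8_t type, s, dpl, present, avl, l, db, g;
    bool unusable;
};

static uint32_t seg_access_rights(const struct seg *var)
{
    uint32_t ar;

    ar  = var->type & 15;
    ar |= (uint32_t)(var->s & 1) << 4;
    ar |= (uint32_t)(var->dpl & 3) << 5;
    ar |= (uint32_t)(var->present & 1) << 7;
    ar |= (uint32_t)(var->avl & 1) << 12;
    ar |= (uint32_t)(var->l & 1) << 13;
    ar |= (uint32_t)(var->db & 1) << 14;
    ar |= (uint32_t)(var->g & 1) << 15;
    /* One branchless expression replaces the old if/else: an unusable
     * or not-present segment sets the unusable bit on top of the
     * otherwise-valid attribute bits. */
    ar |= (uint32_t)(var->unusable || !var->present) << 16;

    return ar;
}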
1659 +diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
1660 +index 16dcd31d124fe..192d1784e409b 100644
1661 +--- a/drivers/acpi/resource.c
1662 ++++ b/drivers/acpi/resource.c
1663 +@@ -432,6 +432,13 @@ static const struct dmi_system_id asus_laptop[] = {
1664 + DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
1665 + },
1666 + },
1667 ++ {
1668 ++ .ident = "Asus ExpertBook B2402CBA",
1669 ++ .matches = {
1670 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
1671 ++ DMI_MATCH(DMI_BOARD_NAME, "B2402CBA"),
1672 ++ },
1673 ++ },
1674 + {
1675 + .ident = "Asus ExpertBook B2502",
1676 + .matches = {
1677 +diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
1678 +index 0b557c0d405ef..4ca6672512722 100644
1679 +--- a/drivers/acpi/sleep.c
1680 ++++ b/drivers/acpi/sleep.c
1681 +@@ -60,13 +60,17 @@ static struct notifier_block tts_notifier = {
1682 + .priority = 0,
1683 + };
1684 +
1685 ++#ifndef acpi_skip_set_wakeup_address
1686 ++#define acpi_skip_set_wakeup_address() false
1687 ++#endif
1688 ++
1689 + static int acpi_sleep_prepare(u32 acpi_state)
1690 + {
1691 + #ifdef CONFIG_ACPI_SLEEP
1692 + unsigned long acpi_wakeup_address;
1693 +
1694 + /* do we have a wakeup address for S2 and S3? */
1695 +- if (acpi_state == ACPI_STATE_S3) {
1696 ++ if (acpi_state == ACPI_STATE_S3 && !acpi_skip_set_wakeup_address()) {
1697 + acpi_wakeup_address = acpi_get_wakeup_address();
1698 + if (!acpi_wakeup_address)
1699 + return -EFAULT;
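Paired with the asm/acpi.h hunk above, this uses a common kernel idiom for per-arch overrides: the arch header defines a helper plus a same-named macro, and the generic code falls back to false via #ifndef, so only Xen PV guests (which do not go through the firmware wakeup vector) skip programming the S3 wakeup address. A compilable sketch of the idiom; the "return true" stands in for cpu_feature_enabled(X86_FEATURE_XENPV):

#include <stdbool.h>
#include <stdio.h>

/* "Arch header": define the override plus a macro of the same name so
 * generic code can detect its presence at preprocessing time. */
static inline bool acpi_skip_set_wakeup_address(void)
{
    return true; /* stand-in for the XENPV feature test */
}
#define acpi_skip_set_wakeup_address acpi_skip_set_wakeup_address

/* "Generic code": compile-time fallback when no arch override exists. */
#ifndef acpi_skip_set_wakeup_address
#define acpi_skip_set_wakeup_address() false
#endif

int main(void)
{
    if (acpi_skip_set_wakeup_address())
        printf("skipping wakeup address (Xen PV path)\n");
    else
        printf("would program the S3 wakeup address\n");
    return 0;
}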
1700 +diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
1701 +index 1db8e68cd8bce..5c32b318c173d 100644
1702 +--- a/drivers/acpi/video_detect.c
1703 ++++ b/drivers/acpi/video_detect.c
1704 +@@ -110,26 +110,6 @@ static bool nvidia_wmi_ec_supported(void)
1705 + }
1706 + #endif
1707 +
1708 +-static bool apple_gmux_backlight_present(void)
1709 +-{
1710 +- struct acpi_device *adev;
1711 +- struct device *dev;
1712 +-
1713 +- adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1);
1714 +- if (!adev)
1715 +- return false;
1716 +-
1717 +- dev = acpi_get_first_physical_node(adev);
1718 +- if (!dev)
1719 +- return false;
1720 +-
1721 +- /*
1722 +- * drivers/platform/x86/apple-gmux.c only supports old style
1723 +- * Apple GMUX with an IO-resource.
1724 +- */
1725 +- return pnp_get_resource(to_pnp_dev(dev), IORESOURCE_IO, 0) != NULL;
1726 +-}
1727 +-
1728 + /* Force to use vendor driver when the ACPI device is known to be
1729 + * buggy */
1730 + static int video_detect_force_vendor(const struct dmi_system_id *d)
1731 +@@ -600,6 +580,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
1732 + DMI_MATCH(DMI_PRODUCT_NAME, "GA503"),
1733 + },
1734 + },
1735 ++ {
1736 ++ .callback = video_detect_force_native,
1737 ++ /* Asus U46E */
1738 ++ .matches = {
1739 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
1740 ++ DMI_MATCH(DMI_PRODUCT_NAME, "U46E"),
1741 ++ },
1742 ++ },
1743 + {
1744 + .callback = video_detect_force_native,
1745 + /* Asus UX303UB */
1746 +@@ -608,6 +596,23 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
1747 + DMI_MATCH(DMI_PRODUCT_NAME, "UX303UB"),
1748 + },
1749 + },
1750 ++ {
1751 ++ .callback = video_detect_force_native,
1752 ++ /* HP EliteBook 8460p */
1753 ++ .matches = {
1754 ++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1755 ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8460p"),
1756 ++ },
1757 ++ },
1758 ++ {
1759 ++ .callback = video_detect_force_native,
1760 ++ /* HP Pavilion g6-1d80nr / B4U19UA */
1761 ++ .matches = {
1762 ++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1763 ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
1764 ++ DMI_MATCH(DMI_PRODUCT_SKU, "B4U19UA"),
1765 ++ },
1766 ++ },
1767 + {
1768 + .callback = video_detect_force_native,
1769 + /* Samsung N150P */
1770 +@@ -756,6 +761,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
1771 + {
1772 + static DEFINE_MUTEX(init_mutex);
1773 + static bool nvidia_wmi_ec_present;
1774 ++ static bool apple_gmux_present;
1775 + static bool native_available;
1776 + static bool init_done;
1777 + static long video_caps;
1778 +@@ -769,6 +775,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
1779 + ACPI_UINT32_MAX, find_video, NULL,
1780 + &video_caps, NULL);
1781 + nvidia_wmi_ec_present = nvidia_wmi_ec_supported();
1782 ++ apple_gmux_present = apple_gmux_detect(NULL, NULL);
1783 + init_done = true;
1784 + }
1785 + if (native)
1786 +@@ -790,7 +797,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
1787 + if (nvidia_wmi_ec_present)
1788 + return acpi_backlight_nvidia_wmi_ec;
1789 +
1790 +- if (apple_gmux_backlight_present())
1791 ++ if (apple_gmux_present)
1792 + return acpi_backlight_apple_gmux;
1793 +
1794 + /* Use ACPI video if available, except when native should be preferred. */
1795 +diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
1796 +index 36833a8629980..d9b305a3427f7 100644
1797 +--- a/drivers/ata/Kconfig
1798 ++++ b/drivers/ata/Kconfig
1799 +@@ -650,6 +650,7 @@ config PATA_CS5530
1800 + config PATA_CS5535
1801 + tristate "CS5535 PATA support (Experimental)"
1802 + depends on PCI && (X86_32 || (X86_64 && COMPILE_TEST))
1803 ++ depends on !UML
1804 + help
1805 + This option enables support for the NatSemi/AMD CS5535
1806 + companion chip used with the Geode processor family.
1807 +diff --git a/drivers/base/property.c b/drivers/base/property.c
1808 +index 2a5a37fcd9987..7f338cb4fb7b8 100644
1809 +--- a/drivers/base/property.c
1810 ++++ b/drivers/base/property.c
1811 +@@ -989,26 +989,32 @@ struct fwnode_handle *
1812 + fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
1813 + struct fwnode_handle *prev)
1814 + {
1815 ++ struct fwnode_handle *ep, *port_parent = NULL;
1816 + const struct fwnode_handle *parent;
1817 +- struct fwnode_handle *ep;
1818 +
1819 + /*
1820 + * If this function is in a loop and the previous iteration returned
1821 + * an endpoint from fwnode->secondary, then we need to use the secondary
1822 + * as parent rather than @fwnode.
1823 + */
1824 +- if (prev)
1825 +- parent = fwnode_graph_get_port_parent(prev);
1826 +- else
1827 ++ if (prev) {
1828 ++ port_parent = fwnode_graph_get_port_parent(prev);
1829 ++ parent = port_parent;
1830 ++ } else {
1831 + parent = fwnode;
1832 ++ }
1833 + if (IS_ERR_OR_NULL(parent))
1834 + return NULL;
1835 +
1836 + ep = fwnode_call_ptr_op(parent, graph_get_next_endpoint, prev);
1837 + if (ep)
1838 +- return ep;
1839 ++ goto out_put_port_parent;
1840 ++
1841 ++ ep = fwnode_graph_get_next_endpoint(parent->secondary, NULL);
1842 +
1843 +- return fwnode_graph_get_next_endpoint(parent->secondary, NULL);
1844 ++out_put_port_parent:
1845 ++ fwnode_handle_put(port_parent);
1846 ++ return ep;
1847 + }
1848 + EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint);
1849 +
1850 +diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
1851 +index 4d1976ca50727..929410d0dd6fe 100644
1852 +--- a/drivers/base/test/test_async_driver_probe.c
1853 ++++ b/drivers/base/test/test_async_driver_probe.c
1854 +@@ -145,7 +145,7 @@ static int __init test_async_probe_init(void)
1855 + calltime = ktime_get();
1856 + for_each_online_cpu(cpu) {
1857 + nid = cpu_to_node(cpu);
1858 +- pdev = &sync_dev[sync_id];
1859 ++ pdev = &async_dev[async_id];
1860 +
1861 + *pdev = test_platform_device_register_node("test_async_driver",
1862 + async_id,
1863 +diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
1864 +index 78334da74d8bf..5eb8c7855970d 100644
1865 +--- a/drivers/block/rnbd/rnbd-clt.c
1866 ++++ b/drivers/block/rnbd/rnbd-clt.c
1867 +@@ -1440,7 +1440,7 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
1868 + goto out_alloc;
1869 + }
1870 +
1871 +- ret = ida_alloc_max(&index_ida, 1 << (MINORBITS - RNBD_PART_BITS),
1872 ++ ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
1873 + GFP_KERNEL);
1874 + if (ret < 0) {
1875 + pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
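The rnbd change is an inclusive-bound fix: ida_alloc_max() takes the largest id it may return, so passing 1 << (MINORBITS - RNBD_PART_BITS) permitted one id too many and could overflow the minor-number space. A kernel-context sketch of the corrected pattern (not a standalone program):

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

/* Keep ids strictly inside [0, n): the *inclusive* maximum is n - 1. */
static int alloc_id_below(unsigned int n)
{
    return ida_alloc_max(&example_ida, n - 1, GFP_KERNEL);
}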
1876 +diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
1877 +index 17b677b5d3b22..e546932046309 100644
1878 +--- a/drivers/block/ublk_drv.c
1879 ++++ b/drivers/block/ublk_drv.c
1880 +@@ -2092,13 +2092,12 @@ static void __exit ublk_exit(void)
1881 + struct ublk_device *ub;
1882 + int id;
1883 +
1884 +- class_destroy(ublk_chr_class);
1885 +-
1886 +- misc_deregister(&ublk_misc);
1887 +-
1888 + idr_for_each_entry(&ublk_index_idr, ub, id)
1889 + ublk_remove(ub);
1890 +
1891 ++ class_destroy(ublk_chr_class);
1892 ++ misc_deregister(&ublk_misc);
1893 ++
1894 + idr_destroy(&ublk_index_idr);
1895 + unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
1896 + }
1897 +diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
1898 +index c10fc33b29b18..b74289a95a171 100644
1899 +--- a/drivers/cpufreq/armada-37xx-cpufreq.c
1900 ++++ b/drivers/cpufreq/armada-37xx-cpufreq.c
1901 +@@ -445,7 +445,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
1902 + return -ENODEV;
1903 + }
1904 +
1905 +- clk = clk_get(cpu_dev, 0);
1906 ++ clk = clk_get(cpu_dev, NULL);
1907 + if (IS_ERR(clk)) {
1908 + dev_err(cpu_dev, "Cannot get clock for CPU0\n");
1909 + return PTR_ERR(clk);
1910 +diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
1911 +index 432dfb4e8027e..022e3555407c8 100644
1912 +--- a/drivers/cpufreq/cppc_cpufreq.c
1913 ++++ b/drivers/cpufreq/cppc_cpufreq.c
1914 +@@ -487,7 +487,8 @@ static unsigned int get_perf_level_count(struct cpufreq_policy *policy)
1915 + cpu_data = policy->driver_data;
1916 + perf_caps = &cpu_data->perf_caps;
1917 + max_cap = arch_scale_cpu_capacity(cpu);
1918 +- min_cap = div_u64(max_cap * perf_caps->lowest_perf, perf_caps->highest_perf);
1919 ++ min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
1920 ++ perf_caps->highest_perf);
1921 + if ((min_cap == 0) || (max_cap < min_cap))
1922 + return 0;
1923 + return 1 + max_cap / CPPC_EM_CAP_STEP - min_cap / CPPC_EM_CAP_STEP;
1924 +@@ -519,10 +520,10 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
1925 + cpu_data = policy->driver_data;
1926 + perf_caps = &cpu_data->perf_caps;
1927 + max_cap = arch_scale_cpu_capacity(cpu_dev->id);
1928 +- min_cap = div_u64(max_cap * perf_caps->lowest_perf,
1929 +- perf_caps->highest_perf);
1930 +-
1931 +- perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
1932 ++ min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
1933 ++ perf_caps->highest_perf);
1934 ++ perf_step = div_u64((u64)CPPC_EM_CAP_STEP * perf_caps->highest_perf,
1935 ++ max_cap);
1936 + min_step = min_cap / CPPC_EM_CAP_STEP;
1937 + max_step = max_cap / CPPC_EM_CAP_STEP;
1938 +
1939 +diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
1940 +index 6ac3800db4508..69a8742c0a7a3 100644
1941 +--- a/drivers/cpufreq/cpufreq-dt-platdev.c
1942 ++++ b/drivers/cpufreq/cpufreq-dt-platdev.c
1943 +@@ -135,6 +135,7 @@ static const struct of_device_id blocklist[] __initconst = {
1944 + { .compatible = "nvidia,tegra30", },
1945 + { .compatible = "nvidia,tegra124", },
1946 + { .compatible = "nvidia,tegra210", },
1947 ++ { .compatible = "nvidia,tegra234", },
1948 +
1949 + { .compatible = "qcom,apq8096", },
1950 + { .compatible = "qcom,msm8996", },
1951 +@@ -148,6 +149,7 @@ static const struct of_device_id blocklist[] __initconst = {
1952 + { .compatible = "qcom,sdm845", },
1953 + { .compatible = "qcom,sm6115", },
1954 + { .compatible = "qcom,sm6350", },
1955 ++ { .compatible = "qcom,sm6375", },
1956 + { .compatible = "qcom,sm8150", },
1957 + { .compatible = "qcom,sm8250", },
1958 + { .compatible = "qcom,sm8350", },
1959 +diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
1960 +index c741b6431958c..8a6e6b60d66f3 100644
1961 +--- a/drivers/dma/dmaengine.c
1962 ++++ b/drivers/dma/dmaengine.c
1963 +@@ -451,7 +451,8 @@ static int dma_chan_get(struct dma_chan *chan)
1964 + /* The channel is already in use, update client count */
1965 + if (chan->client_count) {
1966 + __module_get(owner);
1967 +- goto out;
1968 ++ chan->client_count++;
1969 ++ return 0;
1970 + }
1971 +
1972 + if (!try_module_get(owner))
1973 +@@ -470,11 +471,11 @@ static int dma_chan_get(struct dma_chan *chan)
1974 + goto err_out;
1975 + }
1976 +
1977 ++ chan->client_count++;
1978 ++
1979 + if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
1980 + balance_ref_count(chan);
1981 +
1982 +-out:
1983 +- chan->client_count++;
1984 + return 0;
1985 +
1986 + err_out:
1987 +diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c
1988 +index 377da23012ac2..a2bf13ff18b6d 100644
1989 +--- a/drivers/dma/ptdma/ptdma-dev.c
1990 ++++ b/drivers/dma/ptdma/ptdma-dev.c
1991 +@@ -71,12 +71,13 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd
1992 + bool soc = FIELD_GET(DWORD0_SOC, desc->dw0);
1993 + u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
1994 + u32 tail;
1995 ++ unsigned long flags;
1996 +
1997 + if (soc) {
1998 + desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0);
1999 + desc->dw0 &= ~DWORD0_SOC;
2000 + }
2001 +- mutex_lock(&cmd_q->q_mutex);
2002 ++ spin_lock_irqsave(&cmd_q->q_lock, flags);
2003 +
2004 + /* Copy 32-byte command descriptor to hw queue. */
2005 + memcpy(q_desc, desc, 32);
2006 +@@ -91,7 +92,7 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd
2007 +
2008 + /* Turn the queue back on using our cached control register */
2009 + pt_start_queue(cmd_q);
2010 +- mutex_unlock(&cmd_q->q_mutex);
2011 ++ spin_unlock_irqrestore(&cmd_q->q_lock, flags);
2012 +
2013 + return 0;
2014 + }
2015 +@@ -199,7 +200,7 @@ int pt_core_init(struct pt_device *pt)
2016 +
2017 + cmd_q->pt = pt;
2018 + cmd_q->dma_pool = dma_pool;
2019 +- mutex_init(&cmd_q->q_mutex);
2020 ++ spin_lock_init(&cmd_q->q_lock);
2021 +
2022 + /* Page alignment satisfies our needs for N <= 128 */
2023 + cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
2024 +diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/ptdma/ptdma.h
2025 +index d093c43b7d134..21b4bf895200b 100644
2026 +--- a/drivers/dma/ptdma/ptdma.h
2027 ++++ b/drivers/dma/ptdma/ptdma.h
2028 +@@ -196,7 +196,7 @@ struct pt_cmd_queue {
2029 + struct ptdma_desc *qbase;
2030 +
2031 + /* Aligned queue start address (per requirement) */
2032 +- struct mutex q_mutex ____cacheline_aligned;
2033 ++ spinlock_t q_lock ____cacheline_aligned;
2034 + unsigned int qidx;
2035 +
2036 + unsigned int qsize;
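The ptdma hunks swap the queue mutex for a spinlock because descriptor submission can be reached from atomic context, where sleeping locks such as mutexes are not allowed; spin_lock_irqsave() also masks local interrupts so an IRQ handler on the same CPU cannot deadlock on the lock. A kernel-context sketch of the pattern (the submit path below is a stand-in, not the driver's actual code):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(q_lock);

static void submit_desc(void)
{
    unsigned long flags;

    /* Safe from both process and atomic context. */
    spin_lock_irqsave(&q_lock, flags);
    /* copy the 32-byte descriptor, bump the tail, ring the doorbell */
    spin_unlock_irqrestore(&q_lock, flags);
}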
2037 +diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
2038 +index 3f56514bbef8f..98d45ee4b4e34 100644
2039 +--- a/drivers/dma/qcom/gpi.c
2040 ++++ b/drivers/dma/qcom/gpi.c
2041 +@@ -1756,6 +1756,7 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
2042 + tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
2043 + if (spi->cmd == SPI_RX) {
2044 + tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
2045 ++ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
2046 + } else if (spi->cmd == SPI_TX) {
2047 + tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
2048 + } else { /* SPI_DUPLEX */
2049 +diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
2050 +index fa9bda4a2bc6f..75af3488a3baf 100644
2051 +--- a/drivers/dma/tegra186-gpc-dma.c
2052 ++++ b/drivers/dma/tegra186-gpc-dma.c
2053 +@@ -707,6 +707,7 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
2054 + return err;
2055 + }
2056 +
2057 ++ vchan_terminate_vdesc(&tdc->dma_desc->vd);
2058 + tegra_dma_disable(tdc);
2059 + tdc->dma_desc = NULL;
2060 + }
2061 +diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
2062 +index 7b5081989b3d6..b86b809eb1f7e 100644
2063 +--- a/drivers/dma/ti/k3-udma.c
2064 ++++ b/drivers/dma/ti/k3-udma.c
2065 +@@ -761,11 +761,12 @@ static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
2066 + if (uc->desc->dir == DMA_DEV_TO_MEM) {
2067 + udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
2068 + udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
2069 +- udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
2070 ++ if (uc->config.ep_type != PSIL_EP_NATIVE)
2071 ++ udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
2072 + } else {
2073 + udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
2074 + udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
2075 +- if (!uc->bchan)
2076 ++ if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE)
2077 + udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
2078 + }
2079 + }
2080 +diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
2081 +index 8cd4e69dc7b4c..7660175704883 100644
2082 +--- a/drivers/dma/xilinx/xilinx_dma.c
2083 ++++ b/drivers/dma/xilinx/xilinx_dma.c
2084 +@@ -3141,8 +3141,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
2085 + /* Initialize the channels */
2086 + for_each_child_of_node(node, child) {
2087 + err = xilinx_dma_child_probe(xdev, child);
2088 +- if (err < 0)
2089 ++ if (err < 0) {
2090 ++ of_node_put(child);
2091 + goto error;
2092 ++ }
2093 + }
2094 +
2095 + if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
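The xilinx_dma hunk plugs an of_node refcount leak: for_each_child_of_node() takes a reference on each child and drops it when advancing to the next one, so leaving the loop early via goto keeps the current child pinned unless the error path releases it. A kernel-context sketch of the rule; probe_one() is a hypothetical per-child helper:

#include <linux/of.h>

int probe_one(struct device_node *child); /* hypothetical helper */

static int probe_children(struct device_node *node)
{
    struct device_node *child;
    int err;

    for_each_child_of_node(node, child) {
        err = probe_one(child);
        if (err < 0) {
            /* drop the reference the iterator still holds */
            of_node_put(child);
            return err;
        }
    }
    return 0;
}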
2096 +diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
2097 +index 878deb4880cdb..0689e15107213 100644
2098 +--- a/drivers/edac/edac_device.c
2099 ++++ b/drivers/edac/edac_device.c
2100 +@@ -34,6 +34,9 @@
2101 + static DEFINE_MUTEX(device_ctls_mutex);
2102 + static LIST_HEAD(edac_device_list);
2103 +
2104 ++/* Default workqueue processing interval on this instance, in msecs */
2105 ++#define DEFAULT_POLL_INTERVAL 1000
2106 ++
2107 + #ifdef CONFIG_EDAC_DEBUG
2108 + static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
2109 + {
2110 +@@ -336,7 +339,7 @@ static void edac_device_workq_function(struct work_struct *work_req)
2111 + * whole one second to save timers firing all over the period
2112 + * between integral seconds
2113 + */
2114 +- if (edac_dev->poll_msec == 1000)
2115 ++ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
2116 + edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
2117 + else
2118 + edac_queue_work(&edac_dev->work, edac_dev->delay);
2119 +@@ -366,7 +369,7 @@ static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
2120 + * timers firing on sub-second basis, while they are happy
2121 + * to fire together on the 1 second exactly
2122 + */
2123 +- if (edac_dev->poll_msec == 1000)
2124 ++ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
2125 + edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
2126 + else
2127 + edac_queue_work(&edac_dev->work, edac_dev->delay);
2128 +@@ -400,7 +403,7 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
2129 + edac_dev->delay = msecs_to_jiffies(msec);
2130 +
2131 + /* See comment in edac_device_workq_setup() above */
2132 +- if (edac_dev->poll_msec == 1000)
2133 ++ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
2134 + edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
2135 + else
2136 + edac_mod_work(&edac_dev->work, edac_dev->delay);
2137 +@@ -442,11 +445,7 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
2138 + /* This instance is NOW RUNNING */
2139 + edac_dev->op_state = OP_RUNNING_POLL;
2140 +
2141 +- /*
2142 +- * enable workq processing on this instance,
2143 +- * default = 1000 msec
2144 +- */
2145 +- edac_device_workq_setup(edac_dev, 1000);
2146 ++ edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL);
2147 + } else {
2148 + edac_dev->op_state = OP_RUNNING_INTERRUPT;
2149 + }
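Besides replacing the magic 1000 with DEFAULT_POLL_INTERVAL, the last edac_device.c hunk uses the GNU "?:" conditional with an omitted middle operand: x ?: y evaluates x once and yields it when nonzero, otherwise y, i.e. "use the configured interval or fall back to the default". A two-line illustration (this is a GNU C extension, so gcc/clang only):

#include <stdio.h>

int main(void)
{
    int poll_msec = 0;                 /* nothing configured */
    int effective = poll_msec ?: 1000; /* GNU extension: x ?: y */

    printf("effective poll interval: %d ms\n", effective);
    return 0;
}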
2150 +diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
2151 +index 61b76ec226af1..19fba258ae108 100644
2152 +--- a/drivers/edac/highbank_mc_edac.c
2153 ++++ b/drivers/edac/highbank_mc_edac.c
2154 +@@ -174,8 +174,10 @@ static int highbank_mc_probe(struct platform_device *pdev)
2155 + drvdata = mci->pvt_info;
2156 + platform_set_drvdata(pdev, mci);
2157 +
2158 +- if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
2159 +- return -ENOMEM;
2160 ++ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
2161 ++ res = -ENOMEM;
2162 ++ goto free;
2163 ++ }
2164 +
2165 + r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2166 + if (!r) {
2167 +@@ -243,6 +245,7 @@ err2:
2168 + edac_mc_del_mc(&pdev->dev);
2169 + err:
2170 + devres_release_group(&pdev->dev, NULL);
2171 ++free:
2172 + edac_mc_free(mci);
2173 + return res;
2174 + }
2175 +diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
2176 +index 97a27e42dd610..c45519f59dc11 100644
2177 +--- a/drivers/edac/qcom_edac.c
2178 ++++ b/drivers/edac/qcom_edac.c
2179 +@@ -252,7 +252,7 @@ clear:
2180 + static int
2181 + dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank)
2182 + {
2183 +- struct llcc_drv_data *drv = edev_ctl->pvt_info;
2184 ++ struct llcc_drv_data *drv = edev_ctl->dev->platform_data;
2185 + int ret;
2186 +
2187 + ret = dump_syn_reg_values(drv, bank, err_type);
2188 +@@ -289,7 +289,7 @@ static irqreturn_t
2189 + llcc_ecc_irq_handler(int irq, void *edev_ctl)
2190 + {
2191 + struct edac_device_ctl_info *edac_dev_ctl = edev_ctl;
2192 +- struct llcc_drv_data *drv = edac_dev_ctl->pvt_info;
2193 ++ struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data;
2194 + irqreturn_t irq_rc = IRQ_NONE;
2195 + u32 drp_error, trp_error, i;
2196 + int ret;
2197 +@@ -358,7 +358,6 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
2198 + edev_ctl->dev_name = dev_name(dev);
2199 + edev_ctl->ctl_name = "llcc";
2200 + edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE;
2201 +- edev_ctl->pvt_info = llcc_driv_data;
2202 +
2203 + rc = edac_device_add_device(edev_ctl);
2204 + if (rc)
2205 +diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
2206 +index 1dfe534b85184..87b4f4d35f062 100644
2207 +--- a/drivers/firmware/arm_scmi/shmem.c
2208 ++++ b/drivers/firmware/arm_scmi/shmem.c
2209 +@@ -81,10 +81,11 @@ u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
2210 + void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
2211 + struct scmi_xfer *xfer)
2212 + {
2213 ++ size_t len = ioread32(&shmem->length);
2214 ++
2215 + xfer->hdr.status = ioread32(shmem->msg_payload);
2216 + /* Skip the length of header and status in shmem area, i.e. 8 bytes */
2217 +- xfer->rx.len = min_t(size_t, xfer->rx.len,
2218 +- ioread32(&shmem->length) - 8);
2219 ++ xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0);
2220 +
2221 + /* Take a copy to the rx buffer.. */
2222 + memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
2223 +@@ -93,8 +94,10 @@ void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
2224 + void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
2225 + size_t max_len, struct scmi_xfer *xfer)
2226 + {
2227 ++ size_t len = ioread32(&shmem->length);
2228 ++
2229 + /* Skip only the length of header in shmem area, i.e. 4 bytes */
2230 +- xfer->rx.len = min_t(size_t, max_len, ioread32(&shmem->length) - 4);
2231 ++ xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0);
2232 +
2233 + /* Take a copy to the rx buffer.. */
2234 + memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
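Both shmem hunks defend against an untrusted length field: shmem->length is written by the firmware side, and with size_t arithmetic len - 8 (or len - 4) underflows to a huge value whenever the reported length is shorter than the header, so the payload is now clamped to zero instead. A small demonstration of the failure mode, with min_size() standing in for min_t(size_t, ...):

#include <stddef.h>
#include <stdio.h>

static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
    size_t rx_cap = 128; /* caller's rx buffer size */
    size_t len = 4;      /* malformed: shorter than the 8-byte header */

    /* len - 8 wraps to SIZE_MAX - 3, so min() keeps rx_cap and 128
     * stale bytes would be copied out of the shared memory area. */
    size_t unguarded = min_size(rx_cap, len - 8);
    /* The patched form clamps the payload length to zero instead. */
    size_t guarded = min_size(rx_cap, len > 8 ? len - 8 : 0);

    printf("unguarded = %zu, guarded = %zu\n", unguarded, guarded);
    return 0;
}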
2235 +diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
2236 +index 33c9b81a55cd1..1db975c088969 100644
2237 +--- a/drivers/firmware/arm_scmi/virtio.c
2238 ++++ b/drivers/firmware/arm_scmi/virtio.c
2239 +@@ -160,7 +160,6 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
2240 + }
2241 +
2242 + vioch->shutdown_done = &vioch_shutdown_done;
2243 +- virtio_break_device(vioch->vqueue->vdev);
2244 + if (!vioch->is_rx && vioch->deferred_tx_wq)
2245 + /* Cannot be kicked anymore after this...*/
2246 + vioch->deferred_tx_wq = NULL;
2247 +@@ -482,6 +481,12 @@ static int virtio_chan_free(int id, void *p, void *data)
2248 + struct scmi_chan_info *cinfo = p;
2249 + struct scmi_vio_channel *vioch = cinfo->transport_info;
2250 +
2251 ++ /*
2252 ++ * Break device to inhibit further traffic flowing while shutting down
2253 ++ * the channels: doing it later holding vioch->lock creates unsafe
2254 ++ * locking dependency chains as reported by LOCKDEP.
2255 ++ */
2256 ++ virtio_break_device(vioch->vqueue->vdev);
2257 + scmi_vio_channel_cleanup_sync(vioch);
2258 +
2259 + scmi_free_channel(cinfo, data, id);
2260 +diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
2261 +index 60075e0e4943a..1fba4e09cdcff 100644
2262 +--- a/drivers/firmware/efi/runtime-wrappers.c
2263 ++++ b/drivers/firmware/efi/runtime-wrappers.c
2264 +@@ -84,6 +84,7 @@ struct efi_runtime_work efi_rts_work;
2265 + else \
2266 + pr_err("Failed to queue work to efi_rts_wq.\n"); \
2267 + \
2268 ++ WARN_ON_ONCE(efi_rts_work.status == EFI_ABORTED); \
2269 + exit: \
2270 + efi_rts_work.efi_rts_id = EFI_NONE; \
2271 + efi_rts_work.status; \
2272 +diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
2273 +index 9ca21feb9d454..f3694d3478019 100644
2274 +--- a/drivers/firmware/google/coreboot_table.c
2275 ++++ b/drivers/firmware/google/coreboot_table.c
2276 +@@ -93,7 +93,12 @@ static int coreboot_table_populate(struct device *dev, void *ptr)
2277 + for (i = 0; i < header->table_entries; i++) {
2278 + entry = ptr_entry;
2279 +
2280 +- device = kzalloc(sizeof(struct device) + entry->size, GFP_KERNEL);
2281 ++ if (entry->size < sizeof(*entry)) {
2282 ++ dev_warn(dev, "coreboot table entry too small!\n");
2283 ++ return -EINVAL;
2284 ++ }
2285 ++
2286 ++ device = kzalloc(sizeof(device->dev) + entry->size, GFP_KERNEL);
2287 + if (!device)
2288 + return -ENOMEM;
2289 +
2290 +@@ -101,7 +106,7 @@ static int coreboot_table_populate(struct device *dev, void *ptr)
2291 + device->dev.parent = dev;
2292 + device->dev.bus = &coreboot_bus_type;
2293 + device->dev.release = coreboot_device_release;
2294 +- memcpy(&device->entry, ptr_entry, entry->size);
2295 ++ memcpy(device->raw, ptr_entry, entry->size);
2296 +
2297 + ret = device_register(&device->dev);
2298 + if (ret) {
2299 +diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h
2300 +index beb778674acdc..4a89277b99a39 100644
2301 +--- a/drivers/firmware/google/coreboot_table.h
2302 ++++ b/drivers/firmware/google/coreboot_table.h
2303 +@@ -66,6 +66,7 @@ struct coreboot_device {
2304 + struct coreboot_table_entry entry;
2305 + struct lb_cbmem_ref cbmem_ref;
2306 + struct lb_framebuffer framebuffer;
2307 ++ DECLARE_FLEX_ARRAY(u8, raw);
2308 + };
2309 + };
2310 +
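The two coreboot hunks exist to keep a bounds-checked memcpy() happy: copying entry->size bytes into the fixed-size 'entry' member can exceed that member's bounds, so the allocation is sized as header plus entry->size and the copy goes through a flexible-array alias of the same storage. DECLARE_FLEX_ARRAY() is needed because C does not allow a bare flexible array member directly inside a union. A userspace analogue of the layout, using the same GCC empty-struct trick the kernel macro expands to (the names are made up for the demo):

#include <stdlib.h>
#include <string.h>

struct box {
    int id; /* fixed header */
    union {
        struct { int tag; int size; } entry; /* typed view */
        struct {
            struct { } __empty_raw; /* GCC extension, size zero */
            unsigned char raw[];    /* flexible view of the tail */
        };
    };
};

int main(void)
{
    unsigned char payload[64] = { 0 };
    /* Allocate header plus variable tail, then copy through the
     * flexible view so the destination really spans the whole tail. */
    struct box *b = calloc(1, sizeof(*b) + sizeof(payload));

    if (!b)
        return 1;
    memcpy(b->raw, payload, sizeof(payload));
    free(b);
    return 0;
}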
2311 +diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
2312 +index 2e17797091133..7edcdc5750802 100644
2313 +--- a/drivers/gpio/gpio-ep93xx.c
2314 ++++ b/drivers/gpio/gpio-ep93xx.c
2315 +@@ -148,7 +148,7 @@ static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc)
2316 + */
2317 + struct irq_chip *irqchip = irq_desc_get_chip(desc);
2318 + unsigned int irq = irq_desc_get_irq(desc);
2319 +- int port_f_idx = ((irq + 1) & 7) ^ 4; /* {19..22,47..50} -> {0..7} */
2320 ++ int port_f_idx = (irq & 7) ^ 4; /* {20..23,48..51} -> {0..7} */
2321 + int gpio_irq = EP93XX_GPIO_F_IRQ_BASE + port_f_idx;
2322 +
2323 + chained_irq_enter(irqchip, desc);
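The ep93xx change fixes an off-by-one in the port-F index math, and the mapping claimed in the comment is easy to verify: irq & 7 keeps the low three bits, and the ^ 4 swaps the two four-entry halves, taking hardware IRQs {20..23, 48..51} to indices {0..7}. A quick check:

#include <stdio.h>

int main(void)
{
    const int irqs[] = { 20, 21, 22, 23, 48, 49, 50, 51 };

    for (int i = 0; i < 8; i++)
        printf("irq %2d -> port F index %d\n",
               irqs[i], (irqs[i] & 7) ^ 4);
    /* Prints 20->0 ... 23->3 and 48->4 ... 51->7. */
    return 0;
}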
2324 +diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
2325 +index c871602fc5ba9..853d9aa6b3b1f 100644
2326 +--- a/drivers/gpio/gpio-mxc.c
2327 ++++ b/drivers/gpio/gpio-mxc.c
2328 +@@ -18,6 +18,7 @@
2329 + #include <linux/module.h>
2330 + #include <linux/platform_device.h>
2331 + #include <linux/slab.h>
2332 ++#include <linux/spinlock.h>
2333 + #include <linux/syscore_ops.h>
2334 + #include <linux/gpio/driver.h>
2335 + #include <linux/of.h>
2336 +@@ -147,6 +148,7 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
2337 + {
2338 + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
2339 + struct mxc_gpio_port *port = gc->private;
2340 ++ unsigned long flags;
2341 + u32 bit, val;
2342 + u32 gpio_idx = d->hwirq;
2343 + int edge;
2344 +@@ -185,6 +187,8 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
2345 + return -EINVAL;
2346 + }
2347 +
2348 ++ raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags);
2349 ++
2350 + if (GPIO_EDGE_SEL >= 0) {
2351 + val = readl(port->base + GPIO_EDGE_SEL);
2352 + if (edge == GPIO_INT_BOTH_EDGES)
2353 +@@ -204,15 +208,20 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
2354 +
2355 + writel(1 << gpio_idx, port->base + GPIO_ISR);
2356 +
2357 +- return 0;
2358 ++ raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
2359 ++
2360 ++ return port->gc.direction_input(&port->gc, gpio_idx);
2361 + }
2362 +
2363 + static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
2364 + {
2365 + void __iomem *reg = port->base;
2366 ++ unsigned long flags;
2367 + u32 bit, val;
2368 + int edge;
2369 +
2370 ++ raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags);
2371 ++
2372 + reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
2373 + bit = gpio & 0xf;
2374 + val = readl(reg);
2375 +@@ -227,9 +236,12 @@ static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
2376 + } else {
2377 + pr_err("mxc: invalid configuration for GPIO %d: %x\n",
2378 + gpio, edge);
2379 +- return;
2380 ++ goto unlock;
2381 + }
2382 + writel(val | (edge << (bit << 1)), reg);
2383 ++
2384 ++unlock:
2385 ++ raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
2386 + }
2387 +
2388 + /* handle 32 interrupts in one status register */
2389 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2390 +index 29f045079a3e1..404c839683b1c 100644
2391 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2392 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2393 +@@ -2130,7 +2130,7 @@ int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_b
2394 + }
2395 +
2396 + amdgpu_amdkfd_remove_eviction_fence(
2397 +- bo, bo->kfd_bo->process_info->eviction_fence);
2398 ++ bo, bo->vm_bo->vm->process_info->eviction_fence);
2399 +
2400 + amdgpu_bo_unreserve(bo);
2401 +
2402 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
2403 +index 99f5e38c4835e..3380daf42da8a 100644
2404 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
2405 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
2406 +@@ -586,10 +586,14 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
2407 + if (adev->gfx.gfx_off_req_count == 0 &&
2408 + !adev->gfx.gfx_off_state) {
2409 + /* If going to s2idle, no need to wait */
2410 +- if (adev->in_s0ix)
2411 +- delay = GFX_OFF_NO_DELAY;
2412 +- schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
2413 ++ if (adev->in_s0ix) {
2414 ++ if (!amdgpu_dpm_set_powergating_by_smu(adev,
2415 ++ AMD_IP_BLOCK_TYPE_GFX, true))
2416 ++ adev->gfx.gfx_off_state = true;
2417 ++ } else {
2418 ++ schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
2419 + delay);
2420 ++ }
2421 + }
2422 + } else {
2423 + if (adev->gfx.gfx_off_req_count == 0) {
2424 +diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
2425 +index 725876b4f02ed..32b0ea8757fa5 100644
2426 +--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
2427 ++++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
2428 +@@ -192,7 +192,6 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
2429 + mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
2430 + mes_add_queue_pkt.tma_addr = input->tma_addr;
2431 + mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
2432 +- mes_add_queue_pkt.trap_en = 1;
2433 +
2434 + /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
2435 + mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
2436 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2437 +index ecb4c3abc6297..c06ada0844ba1 100644
2438 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2439 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2440 +@@ -200,7 +200,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
2441 + queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
2442 +
2443 + if (q->wptr_bo) {
2444 +- wptr_addr_off = (uint64_t)q->properties.write_ptr - (uint64_t)q->wptr_bo->kfd_bo->va;
2445 ++ wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
2446 + queue_input.wptr_mc_addr = ((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;
2447 + }
2448 +
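The amdkfd hunk computes the write pointer's in-page offset with a mask instead of subtracting a buffer base address: addr & (PAGE_SIZE - 1) keeps the low 12 bits, which is the offset within a 4 KiB page no matter which virtual mapping the address came from. A one-line check with an illustrative address:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

int main(void)
{
    uint64_t write_ptr = 0x7f12345678abcULL; /* illustrative */

    /* Low 12 bits = offset inside the 4 KiB page -> 0xabc. */
    printf("page offset = 0x%llx\n",
           (unsigned long long)(write_ptr & (PAGE_SIZE - 1)));
    return 0;
}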
2449 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2450 +index 64fdf63093a00..63feea08904cb 100644
2451 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2452 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2453 +@@ -570,6 +570,15 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
2454 + goto reserve_bo_failed;
2455 + }
2456 +
2457 ++ if (clear) {
2458 ++ r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
2459 ++ if (r) {
2460 ++ pr_debug("failed %d to sync bo\n", r);
2461 ++ amdgpu_bo_unreserve(bo);
2462 ++ goto reserve_bo_failed;
2463 ++ }
2464 ++ }
2465 ++
2466 + r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
2467 + if (r) {
2468 + pr_debug("failed %d to reserve bo\n", r);
2469 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2470 +index e10f1f15c9c43..85bd1f18259c7 100644
2471 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2472 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2473 +@@ -1737,10 +1737,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
2474 + adev->dm.vblank_control_workqueue = NULL;
2475 + }
2476 +
2477 +- for (i = 0; i < adev->dm.display_indexes_num; i++) {
2478 +- drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
2479 +- }
2480 +-
2481 + amdgpu_dm_destroy_drm_device(&adev->dm);
2482 +
2483 + #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
2484 +@@ -9404,6 +9400,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
2485 + bool lock_and_validation_needed = false;
2486 + struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
2487 + #if defined(CONFIG_DRM_AMD_DC_DCN)
2488 ++ struct drm_dp_mst_topology_mgr *mgr;
2489 ++ struct drm_dp_mst_topology_state *mst_state;
2490 + struct dsc_mst_fairness_vars vars[MAX_PIPES];
2491 + #endif
2492 +
2493 +@@ -9652,6 +9650,28 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
2494 + lock_and_validation_needed = true;
2495 + }
2496 +
2497 ++#if defined(CONFIG_DRM_AMD_DC_DCN)
2498 ++ /* set the slot info for each mst_state based on the link encoding format */
2499 ++ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
2500 ++ struct amdgpu_dm_connector *aconnector;
2501 ++ struct drm_connector *connector;
2502 ++ struct drm_connector_list_iter iter;
2503 ++ u8 link_coding_cap;
2504 ++
2505 ++ drm_connector_list_iter_begin(dev, &iter);
2506 ++ drm_for_each_connector_iter(connector, &iter) {
2507 ++ if (connector->index == mst_state->mgr->conn_base_id) {
2508 ++ aconnector = to_amdgpu_dm_connector(connector);
2509 ++ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
2510 ++ drm_dp_mst_update_slots(mst_state, link_coding_cap);
2511 ++
2512 ++ break;
2513 ++ }
2514 ++ }
2515 ++ drm_connector_list_iter_end(&iter);
2516 ++ }
2517 ++#endif
2518 ++
2519 + /**
2520 + * Streams and planes are reset when there are changes that affect
2521 + * bandwidth. Anything that affects bandwidth needs to go through
2522 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
2523 +index f72c013d3a5b0..16623f73ddbe6 100644
2524 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
2525 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
2526 +@@ -120,23 +120,50 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
2527 + }
2528 +
2529 + static void
2530 +-fill_dc_mst_payload_table_from_drm(struct drm_dp_mst_topology_state *mst_state,
2531 +- struct amdgpu_dm_connector *aconnector,
2532 ++fill_dc_mst_payload_table_from_drm(struct dc_link *link,
2533 ++ bool enable,
2534 ++ struct drm_dp_mst_atomic_payload *target_payload,
2535 + struct dc_dp_mst_stream_allocation_table *table)
2536 + {
2537 + struct dc_dp_mst_stream_allocation_table new_table = { 0 };
2538 + struct dc_dp_mst_stream_allocation *sa;
2539 +- struct drm_dp_mst_atomic_payload *payload;
2540 ++ struct link_mst_stream_allocation_table copy_of_link_table =
2541 ++ link->mst_stream_alloc_table;
2542 ++
2543 ++ int i;
2544 ++ int current_hw_table_stream_cnt = copy_of_link_table.stream_count;
2545 ++ struct link_mst_stream_allocation *dc_alloc;
2546 ++
2547 ++ /* TODO: refactor to set link->mst_stream_alloc_table directly if possible.*/
2548 ++ if (enable) {
2549 ++ dc_alloc =
2550 ++ &copy_of_link_table.stream_allocations[current_hw_table_stream_cnt];
2551 ++ dc_alloc->vcp_id = target_payload->vcpi;
2552 ++ dc_alloc->slot_count = target_payload->time_slots;
2553 ++ } else {
2554 ++ for (i = 0; i < copy_of_link_table.stream_count; i++) {
2555 ++ dc_alloc =
2556 ++ &copy_of_link_table.stream_allocations[i];
2557 ++
2558 ++ if (dc_alloc->vcp_id == target_payload->vcpi) {
2559 ++ dc_alloc->vcp_id = 0;
2560 ++ dc_alloc->slot_count = 0;
2561 ++ break;
2562 ++ }
2563 ++ }
2564 ++ ASSERT(i != copy_of_link_table.stream_count);
2565 ++ }
2566 +
2567 + /* Fill payload info*/
2568 +- list_for_each_entry(payload, &mst_state->payloads, next) {
2569 +- if (payload->delete)
2570 +- continue;
2571 +-
2572 +- sa = &new_table.stream_allocations[new_table.stream_count];
2573 +- sa->slot_count = payload->time_slots;
2574 +- sa->vcp_id = payload->vcpi;
2575 +- new_table.stream_count++;
2576 ++ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
2577 ++ dc_alloc =
2578 ++ &copy_of_link_table.stream_allocations[i];
2579 ++ if (dc_alloc->vcp_id > 0 && dc_alloc->slot_count > 0) {
2580 ++ sa = &new_table.stream_allocations[new_table.stream_count];
2581 ++ sa->slot_count = dc_alloc->slot_count;
2582 ++ sa->vcp_id = dc_alloc->vcp_id;
2583 ++ new_table.stream_count++;
2584 ++ }
2585 + }
2586 +
2587 + /* Overwrite the old table */
2588 +@@ -185,7 +212,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
2589 + * AUX message. Slots 1-63 are allocated in sequence for each
2590 + * stream, and AMD ASIC stream slot allocation should follow the
2591 + * same sequence. Copy the DRM MST allocation to dc. */
2592 +- fill_dc_mst_payload_table_from_drm(mst_state, aconnector, proposed_table);
2593 ++ fill_dc_mst_payload_table_from_drm(stream->link, enable, payload, proposed_table);
2594 +
2595 + return true;
2596 + }
2597 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
2598 +index 6483ba266893d..8561e9b017a2e 100644
2599 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
2600 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
2601 +@@ -468,7 +468,6 @@ static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs
2602 + static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
2603 + {
2604 + drm_encoder_cleanup(encoder);
2605 +- kfree(encoder);
2606 + }
2607 +
2608 + static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
2609 +@@ -897,11 +896,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
2610 + if (IS_ERR(mst_state))
2611 + return PTR_ERR(mst_state);
2612 +
2613 +- mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link);
2614 +-#if defined(CONFIG_DRM_AMD_DC_DCN)
2615 +- drm_dp_mst_update_slots(mst_state, dc_link_dp_mst_decide_link_encoding_format(dc_link));
2616 +-#endif
2617 +-
2618 + /* Set up params */
2619 + for (i = 0; i < dc_state->stream_count; i++) {
2620 + struct dc_dsc_policy dsc_policy = {0};
2621 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
2622 +index d7b1ace6328a0..40b9d2ce08e66 100644
2623 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
2624 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
2625 +@@ -3995,10 +3995,13 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
2626 + struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
2627 + int i;
2628 + bool mst_mode = (link->type == dc_connection_mst_branch);
2629 ++ /* adjust for drm changes */
2630 ++ bool update_drm_mst_state = true;
2631 + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
2632 + const struct dc_link_settings empty_link_settings = {0};
2633 + DC_LOGGER_INIT(link->ctx->logger);
2634 +
2635 ++
2636 + /* deallocate_mst_payload is called before disable link. When mode or
2637 + * disable/enable monitor, new stream is created which is not in link
2638 + * stream[] yet. For this, payload is not allocated yet, so de-alloc
2639 +@@ -4014,7 +4017,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
2640 + &empty_link_settings,
2641 + avg_time_slots_per_mtp);
2642 +
2643 +- if (mst_mode) {
2644 ++ if (mst_mode || update_drm_mst_state) {
2645 + /* when link is in mst mode, rely on mst manager to remove
2646 + * payload
2647 + */
2648 +@@ -4077,11 +4080,18 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
2649 + stream->ctx,
2650 + stream);
2651 +
2652 ++ if (!update_drm_mst_state)
2653 ++ dm_helpers_dp_mst_send_payload_allocation(
2654 ++ stream->ctx,
2655 ++ stream,
2656 ++ false);
2657 ++ }
2658 ++
2659 ++ if (update_drm_mst_state)
2660 + dm_helpers_dp_mst_send_payload_allocation(
2661 + stream->ctx,
2662 + stream,
2663 + false);
2664 +- }
2665 +
2666 + return DC_OK;
2667 + }
2668 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
2669 +index 4c20d17e7416e..cf96c3f2affe4 100644
2670 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
2671 ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
2672 +@@ -145,6 +145,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
2673 + MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
2674 + PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0),
2675 + MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
2676 ++ MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
2677 + };
2678 +
2679 + static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
2680 +diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
2681 +index 51a46689cda70..4ca37261584a9 100644
2682 +--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
2683 ++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
2684 +@@ -3372,6 +3372,9 @@ void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
2685 +
2686 + mgr->payload_count--;
2687 + mgr->next_start_slot -= payload->time_slots;
2688 ++
2689 ++ if (payload->delete)
2690 ++ drm_dp_mst_put_port_malloc(payload->port);
2691 + }
2692 + EXPORT_SYMBOL(drm_dp_remove_payload);
2693 +
2694 +@@ -4327,7 +4330,6 @@ int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
2695 +
2696 + drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots);
2697 + if (!payload->delete) {
2698 +- drm_dp_mst_put_port_malloc(port);
2699 + payload->pbn = 0;
2700 + payload->delete = true;
2701 + topology_state->payload_mask &= ~BIT(payload->vcpi - 1);
2702 +diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
2703 +index 52d8800a8ab86..3659f0465a724 100644
2704 +--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
2705 ++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
2706 +@@ -304,6 +304,12 @@ static const struct dmi_system_id orientation_data[] = {
2707 + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
2708 + },
2709 + .driver_data = (void *)&lcd1200x1920_rightside_up,
2710 ++ }, { /* Lenovo Ideapad D330-10IGL (HD) */
2711 ++ .matches = {
2712 ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2713 ++ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"),
2714 ++ },
2715 ++ .driver_data = (void *)&lcd800x1280_rightside_up,
2716 + }, { /* Lenovo Yoga Book X90F / X91F / X91L */
2717 + .matches = {
2718 + /* Non exact match to match all versions */
2719 +diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
2720 +index 7de37f8c68fd0..83229a031af0f 100644
2721 +--- a/drivers/gpu/drm/drm_vma_manager.c
2722 ++++ b/drivers/gpu/drm/drm_vma_manager.c
2723 +@@ -240,27 +240,8 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
2724 + }
2725 + EXPORT_SYMBOL(drm_vma_offset_remove);
2726 +
2727 +-/**
2728 +- * drm_vma_node_allow - Add open-file to list of allowed users
2729 +- * @node: Node to modify
2730 +- * @tag: Tag of file to remove
2731 +- *
2732 +- * Add @tag to the list of allowed open-files for this node. If @tag is
2733 +- * already on this list, the ref-count is incremented.
2734 +- *
2735 +- * The list of allowed-users is preserved across drm_vma_offset_add() and
2736 +- * drm_vma_offset_remove() calls. You may even call it if the node is currently
2737 +- * not added to any offset-manager.
2738 +- *
2739 +- * You must remove all open-files the same number of times as you added them
2740 +- * before destroying the node. Otherwise, you will leak memory.
2741 +- *
2742 +- * This is locked against concurrent access internally.
2743 +- *
2744 +- * RETURNS:
2745 +- * 0 on success, negative error code on internal failure (out-of-mem)
2746 +- */
2747 +-int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
2748 ++static int vma_node_allow(struct drm_vma_offset_node *node,
2749 ++ struct drm_file *tag, bool ref_counted)
2750 + {
2751 + struct rb_node **iter;
2752 + struct rb_node *parent = NULL;
2753 +@@ -282,7 +263,8 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
2754 + entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
2755 +
2756 + if (tag == entry->vm_tag) {
2757 +- entry->vm_count++;
2758 ++ if (ref_counted)
2759 ++ entry->vm_count++;
2760 + goto unlock;
2761 + } else if (tag > entry->vm_tag) {
2762 + iter = &(*iter)->rb_right;
2763 +@@ -307,8 +289,58 @@ unlock:
2764 + kfree(new);
2765 + return ret;
2766 + }
2767 ++
2768 ++/**
2769 ++ * drm_vma_node_allow - Add open-file to list of allowed users
2770 ++ * @node: Node to modify
2771 ++ * @tag: Tag of file to remove
2772 ++ *
2773 ++ * Add @tag to the list of allowed open-files for this node. If @tag is
2774 ++ * already on this list, the ref-count is incremented.
2775 ++ *
2776 ++ * The list of allowed-users is preserved across drm_vma_offset_add() and
2777 ++ * drm_vma_offset_remove() calls. You may even call it if the node is currently
2778 ++ * not added to any offset-manager.
2779 ++ *
2780 ++ * You must remove all open-files the same number of times as you added them
2781 ++ * before destroying the node. Otherwise, you will leak memory.
2782 ++ *
2783 ++ * This is locked against concurrent access internally.
2784 ++ *
2785 ++ * RETURNS:
2786 ++ * 0 on success, negative error code on internal failure (out-of-mem)
2787 ++ */
2788 ++int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
2789 ++{
2790 ++ return vma_node_allow(node, tag, true);
2791 ++}
2792 + EXPORT_SYMBOL(drm_vma_node_allow);
2793 +
2794 ++/**
2795 ++ * drm_vma_node_allow_once - Add open-file to list of allowed users
2796 ++ * @node: Node to modify
2797 ++ * @tag: Tag of file to remove
2798 ++ *
2799 ++ * Add @tag to the list of allowed open-files for this node.
2800 ++ *
2801 ++ * The list of allowed-users is preserved across drm_vma_offset_add() and
2802 ++ * drm_vma_offset_remove() calls. You may even call it if the node is currently
2803 ++ * not added to any offset-manager.
2804 ++ *
2805 ++ * This is not ref-counted unlike drm_vma_node_allow() hence drm_vma_node_revoke()
2806 ++ * should only be called once after this.
2807 ++ *
2808 ++ * This is locked against concurrent access internally.
2809 ++ *
2810 ++ * RETURNS:
2811 ++ * 0 on success, negative error code on internal failure (out-of-mem)
2812 ++ */
2813 ++int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag)
2814 ++{
2815 ++ return vma_node_allow(node, tag, false);
2816 ++}
2817 ++EXPORT_SYMBOL(drm_vma_node_allow_once);
2818 ++
2819 + /**
2820 + * drm_vma_node_revoke - Remove open-file from list of allowed users
2821 + * @node: Node to modify
2822 +diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
2823 +index 78b3427471bd7..b94bcceeff705 100644
2824 +--- a/drivers/gpu/drm/i915/display/intel_dp.c
2825 ++++ b/drivers/gpu/drm/i915/display/intel_dp.c
2826 +@@ -5216,9 +5216,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
2827 + intel_bios_init_panel(dev_priv, &intel_connector->panel,
2828 + encoder->devdata, IS_ERR(edid) ? NULL : edid);
2829 +
2830 +- intel_panel_add_edid_fixed_modes(intel_connector,
2831 +- intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE ||
2832 +- intel_vrr_is_capable(intel_connector));
2833 ++ intel_panel_add_edid_fixed_modes(intel_connector, true);
2834 +
2835 + /* MSO requires information from the EDID */
2836 + intel_edp_mso_init(intel_dp);
2837 +diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
2838 +index 41cec9dc42231..f72f4646c0d70 100644
2839 +--- a/drivers/gpu/drm/i915/display/intel_panel.c
2840 ++++ b/drivers/gpu/drm/i915/display/intel_panel.c
2841 +@@ -85,9 +85,10 @@ static bool is_alt_drrs_mode(const struct drm_display_mode *mode,
2842 + static bool is_alt_fixed_mode(const struct drm_display_mode *mode,
2843 + const struct drm_display_mode *preferred_mode)
2844 + {
2845 +- return drm_mode_match(mode, preferred_mode,
2846 +- DRM_MODE_MATCH_FLAGS |
2847 +- DRM_MODE_MATCH_3D_FLAGS) &&
2848 ++ u32 sync_flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC |
2849 ++ DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC;
2850 ++
2851 ++ return (mode->flags & ~sync_flags) == (preferred_mode->flags & ~sync_flags) &&
2852 + mode->hdisplay == preferred_mode->hdisplay &&
2853 + mode->vdisplay == preferred_mode->vdisplay;
2854 + }
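The intel_panel hunk relaxes alternate-fixed-mode matching so that only the sync-polarity bits are ignored: masking both flag words with ~sync_flags before comparing makes PHSYNC/NHSYNC and PVSYNC/NVSYNC variants of one timing compare equal, while every other flag still has to match. The masking idiom, with bit assignments invented for the demo (they are not the real DRM_MODE_FLAG_* values):

#include <stdint.h>
#include <stdio.h>

#define FLAG_PHSYNC (1u << 0) /* illustrative bit assignments */
#define FLAG_NHSYNC (1u << 1)
#define FLAG_PVSYNC (1u << 2)
#define FLAG_NVSYNC (1u << 3)

int main(void)
{
    uint32_t sync = FLAG_PHSYNC | FLAG_NHSYNC | FLAG_PVSYNC | FLAG_NVSYNC;
    uint32_t a = FLAG_PHSYNC | FLAG_PVSYNC; /* positive polarity */
    uint32_t b = FLAG_NHSYNC | FLAG_NVSYNC; /* negative polarity */

    /* Equal once the don't-care sync bits are masked out. */
    printf("match = %d\n", (a & ~sync) == (b & ~sync));
    return 0;
}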
2855 +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
2856 +index 354c1d6dab846..d445e2d63c9c8 100644
2857 +--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
2858 ++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
2859 +@@ -697,7 +697,7 @@ insert:
2860 + GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
2861 + out:
2862 + if (file)
2863 +- drm_vma_node_allow(&mmo->vma_node, file);
2864 ++ drm_vma_node_allow_once(&mmo->vma_node, file);
2865 + return mmo;
2866 +
2867 + err:
2868 +diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
2869 +index c570cf780079a..436598f19522c 100644
2870 +--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
2871 ++++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
2872 +@@ -1697,7 +1697,7 @@ static int igt_shrink_thp(void *arg)
2873 + I915_SHRINK_ACTIVE);
2874 + i915_vma_unpin(vma);
2875 + if (err)
2876 +- goto out_put;
2877 ++ goto out_wf;
2878 +
2879 + /*
2880 + * Now that the pages are *unpinned* shrinking should invoke
2881 +@@ -1713,19 +1713,19 @@ static int igt_shrink_thp(void *arg)
2882 + pr_err("unexpected pages mismatch, should_swap=%s\n",
2883 + str_yes_no(should_swap));
2884 + err = -EINVAL;
2885 +- goto out_put;
2886 ++ goto out_wf;
2887 + }
2888 +
2889 + if (should_swap == (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys)) {
2890 + pr_err("unexpected residual page-size bits, should_swap=%s\n",
2891 + str_yes_no(should_swap));
2892 + err = -EINVAL;
2893 +- goto out_put;
2894 ++ goto out_wf;
2895 + }
2896 +
2897 + err = i915_vma_pin(vma, 0, 0, flags);
2898 + if (err)
2899 +- goto out_put;
2900 ++ goto out_wf;
2901 +
2902 + while (n--) {
2903 + err = cpu_check(obj, n, 0xdeadbeaf);
2904 +diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
2905 +index 310fb83c527eb..2990dd4d4a0d8 100644
2906 +--- a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
2907 ++++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
2908 +@@ -28,8 +28,7 @@ struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt)
2909 +
2910 + int intel_selftest_modify_policy(struct intel_engine_cs *engine,
2911 + struct intel_selftest_saved_policy *saved,
2912 +- u32 modify_type)
2913 +-
2914 ++ enum selftest_scheduler_modify modify_type)
2915 + {
2916 + int err;
2917 +
2918 +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
2919 +index e033d6a67a20c..870252bef23f3 100644
2920 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
2921 ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
2922 +@@ -876,7 +876,8 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
2923 + #define GBIF_CLIENT_HALT_MASK BIT(0)
2924 + #define GBIF_ARB_HALT_MASK BIT(1)
2925 +
2926 +-static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
2927 ++static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu,
2928 ++ bool gx_off)
2929 + {
2930 + struct msm_gpu *gpu = &adreno_gpu->base;
2931 +
2932 +@@ -889,9 +890,11 @@ static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
2933 + return;
2934 + }
2935 +
2936 +- /* Halt the gx side of GBIF */
2937 +- gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
2938 +- spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
2939 ++ if (gx_off) {
2940 ++ /* Halt the gx side of GBIF */
2941 ++ gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
2942 ++ spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
2943 ++ }
2944 +
2945 + /* Halt new client requests on GBIF */
2946 + gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
2947 +@@ -929,7 +932,7 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
2948 + /* Halt the gmu cm3 core */
2949 + gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
2950 +
2951 +- a6xx_bus_clear_pending_transactions(adreno_gpu);
2952 ++ a6xx_bus_clear_pending_transactions(adreno_gpu, true);
2953 +
2954 + /* Reset GPU core blocks */
2955 + gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, 1);
2956 +@@ -1083,7 +1086,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
2957 + return;
2958 + }
2959 +
2960 +- a6xx_bus_clear_pending_transactions(adreno_gpu);
2961 ++ a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
2962 +
2963 + /* tell the GMU we want to slumber */
2964 + ret = a6xx_gmu_notify_slumber(gmu);
2965 +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
2966 +index e846e629c00d8..9d7fc44c1e2a9 100644
2967 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
2968 ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
2969 +@@ -1277,6 +1277,12 @@ static void a6xx_recover(struct msm_gpu *gpu)
2970 + if (hang_debug)
2971 + a6xx_dump(gpu);
2972 +
2973 ++ /*
2974 ++ * To handle recovery-specific sequences during the runtime PM suspend
2975 ++ * we are about to trigger
2976 ++ */
2977 ++ a6xx_gpu->hung = true;
2978 ++
2979 + /* Halt SQE first */
2980 + gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
2981 +
2982 +@@ -1319,6 +1325,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
2983 + mutex_unlock(&gpu->active_lock);
2984 +
2985 + msm_gpu_hw_init(gpu);
2986 ++ a6xx_gpu->hung = false;
2987 + }
2988 +
2989 + static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid)
2990 +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
2991 +index ab853f61db632..eea2e60ce3b7b 100644
2992 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
2993 ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
2994 +@@ -32,6 +32,7 @@ struct a6xx_gpu {
2995 + void *llc_slice;
2996 + void *htw_llc_slice;
2997 + bool have_mmu500;
2998 ++ bool hung;
2999 + };
3000 +
3001 + #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
3002 +diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
3003 +index 5a0e8491cd3a0..2e7531d2a5d6e 100644
3004 +--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
3005 ++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
3006 +@@ -351,6 +351,8 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
3007 + /* Ensure string is null terminated: */
3008 + str[len] = '\0';
3009 +
3010 ++ mutex_lock(&gpu->lock);
3011 ++
3012 + if (param == MSM_PARAM_COMM) {
3013 + paramp = &ctx->comm;
3014 + } else {
3015 +@@ -360,6 +362,8 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
3016 + kfree(*paramp);
3017 + *paramp = str;
3018 +
3019 ++ mutex_unlock(&gpu->lock);
3020 ++
3021 + return 0;
3022 + }
3023 + case MSM_PARAM_SYSPROF:
3024 +diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
3025 +index 021f4e29b613b..4f495eecc34ba 100644
3026 +--- a/drivers/gpu/drm/msm/msm_gpu.c
3027 ++++ b/drivers/gpu/drm/msm/msm_gpu.c
3028 +@@ -335,6 +335,8 @@ static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **
3029 + struct msm_file_private *ctx = submit->queue->ctx;
3030 + struct task_struct *task;
3031 +
3032 ++ WARN_ON(!mutex_is_locked(&submit->gpu->lock));
3033 ++
3034 + /* Note that kstrdup will return NULL if argument is NULL: */
3035 + *comm = kstrdup(ctx->comm, GFP_KERNEL);
3036 + *cmd = kstrdup(ctx->cmdline, GFP_KERNEL);
3037 +diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
3038 +index 58a72e6b14008..a89bfdc3d7f90 100644
3039 +--- a/drivers/gpu/drm/msm/msm_gpu.h
3040 ++++ b/drivers/gpu/drm/msm/msm_gpu.h
3041 +@@ -366,10 +366,18 @@ struct msm_file_private {
3042 + */
3043 + int sysprof;
3044 +
3045 +- /** comm: Overridden task comm, see MSM_PARAM_COMM */
3046 ++ /**
3047 ++ * comm: Overridden task comm, see MSM_PARAM_COMM
3048 ++ *
3049 ++ * Accessed under msm_gpu::lock
3050 ++ */
3051 + char *comm;
3052 +
3053 +- /** cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE */
3054 ++ /**
3055 ++ * cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE
3056 ++ *
3057 ++ * Accessed under msm_gpu::lock
3058 ++ */
3059 + char *cmdline;
3060 +
3061 + /**
3062 +diff --git a/drivers/gpu/drm/panfrost/Kconfig b/drivers/gpu/drm/panfrost/Kconfig
3063 +index 079600328be18..e6403a9d66ade 100644
3064 +--- a/drivers/gpu/drm/panfrost/Kconfig
3065 ++++ b/drivers/gpu/drm/panfrost/Kconfig
3066 +@@ -3,7 +3,8 @@
3067 + config DRM_PANFROST
3068 + tristate "Panfrost (DRM support for ARM Mali Midgard/Bifrost GPUs)"
3069 + depends on DRM
3070 +- depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
3071 ++ depends on ARM || ARM64 || COMPILE_TEST
3072 ++ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
3073 + depends on MMU
3074 + select DRM_SCHED
3075 + select IOMMU_SUPPORT
3076 +diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
3077 +index 231add8b8e127..ce0ea446bd707 100644
3078 +--- a/drivers/gpu/drm/vc4/vc4_bo.c
3079 ++++ b/drivers/gpu/drm/vc4/vc4_bo.c
3080 +@@ -179,6 +179,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
3081 + bo->validated_shader = NULL;
3082 + }
3083 +
3084 ++ mutex_destroy(&bo->madv_lock);
3085 + drm_gem_dma_free(&bo->base);
3086 + }
3087 +
3088 +@@ -394,7 +395,6 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
3089 + {
3090 + struct vc4_dev *vc4 = to_vc4_dev(dev);
3091 + struct vc4_bo *bo;
3092 +- int ret;
3093 +
3094 + if (WARN_ON_ONCE(vc4->is_vc5))
3095 + return ERR_PTR(-ENODEV);
3096 +@@ -406,9 +406,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
3097 + bo->madv = VC4_MADV_WILLNEED;
3098 + refcount_set(&bo->usecnt, 0);
3099 +
3100 +- ret = drmm_mutex_init(dev, &bo->madv_lock);
3101 +- if (ret)
3102 +- return ERR_PTR(ret);
3103 ++ mutex_init(&bo->madv_lock);
3104 +
3105 + mutex_lock(&vc4->bo_lock);
3106 + bo->label = VC4_BO_TYPE_KERNEL;
3107 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
3108 +old mode 100755
3109 +new mode 100644
3110 +diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
3111 +index ab125f79408f2..1fb0f7105fb21 100644
3112 +--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
3113 ++++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
3114 +@@ -282,7 +282,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
3115 + }
3116 + rc = mp2_ops->get_rep_desc(cl_idx, cl_data->report_descr[i]);
3117 + if (rc)
3118 +- return rc;
3119 ++ goto cleanup;
3120 + mp2_ops->start(privdata, info);
3121 + status = amd_sfh_wait_for_response
3122 + (privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
3123 +diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
3124 +index 4da2f9f62aba3..a1d6e08fab7d4 100644
3125 +--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
3126 ++++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
3127 +@@ -160,7 +160,7 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
3128 + }
3129 + rc = mp2_ops->get_rep_desc(cl_idx, cl_data->report_descr[i]);
3130 + if (rc)
3131 +- return rc;
3132 ++ goto cleanup;
3133 +
3134 + writel(0, privdata->mmio + AMD_P2C_MSG(0));
3135 + mp2_ops->start(privdata, info);
3136 +diff --git a/drivers/hid/hid-betopff.c b/drivers/hid/hid-betopff.c
3137 +index 467d789f9bc2d..25ed7b9a917e4 100644
3138 +--- a/drivers/hid/hid-betopff.c
3139 ++++ b/drivers/hid/hid-betopff.c
3140 +@@ -60,7 +60,6 @@ static int betopff_init(struct hid_device *hid)
3141 + struct list_head *report_list =
3142 + &hid->report_enum[HID_OUTPUT_REPORT].report_list;
3143 + struct input_dev *dev;
3144 +- int field_count = 0;
3145 + int error;
3146 + int i, j;
3147 +
3148 +@@ -86,19 +85,21 @@ static int betopff_init(struct hid_device *hid)
3149 + * -----------------------------------------
3150 + * Initialize them with default values.
3151 + */
3152 ++ if (report->maxfield < 4) {
3153 ++ hid_err(hid, "not enough fields in the report: %d\n",
3154 ++ report->maxfield);
3155 ++ return -ENODEV;
3156 ++ }
3157 + for (i = 0; i < report->maxfield; i++) {
3158 ++ if (report->field[i]->report_count < 1) {
3159 ++ hid_err(hid, "no values in the field\n");
3160 ++ return -ENODEV;
3161 ++ }
3162 + for (j = 0; j < report->field[i]->report_count; j++) {
3163 + report->field[i]->value[j] = 0x00;
3164 +- field_count++;
3165 + }
3166 + }
3167 +
3168 +- if (field_count < 4) {
3169 +- hid_err(hid, "not enough fields in the report: %d\n",
3170 +- field_count);
3171 +- return -ENODEV;
3172 +- }
3173 +-
3174 + betopff = kzalloc(sizeof(*betopff), GFP_KERNEL);
3175 + if (!betopff)
3176 + return -ENOMEM;
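The reshuffled checks above reject a malformed report descriptor before anything is written: report->maxfield and each field's report_count are validated first, rather than counting fields while already storing into field->value[]. A stand-alone sketch of that validate-before-use pattern (hypothetical structs, not the kernel's struct hid_report):

#include <stdio.h>

struct field { int report_count; int value[8]; };
struct report { int maxfield; struct field *field[8]; };

static int init_report(struct report *r)
{
	int i, j;

	if (r->maxfield < 4)
		return -1;			/* not enough fields */
	for (i = 0; i < r->maxfield; i++) {
		if (r->field[i]->report_count < 1)
			return -1;		/* no values in the field */
		for (j = 0; j < r->field[i]->report_count; j++)
			r->field[i]->value[j] = 0; /* safe: checked above */
	}
	return 0;
}

int main(void)
{
	struct field empty = { .report_count = 0 };
	struct report bad = { .maxfield = 4,
			      .field = { &empty, &empty, &empty, &empty } };

	printf("malformed report -> %d\n", init_report(&bad));
	return 0;
}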
3177 +diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c
3178 +index e8c5e3ac9fff1..e8b16665860d6 100644
3179 +--- a/drivers/hid/hid-bigbenff.c
3180 ++++ b/drivers/hid/hid-bigbenff.c
3181 +@@ -344,6 +344,11 @@ static int bigben_probe(struct hid_device *hid,
3182 + }
3183 +
3184 + report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
3185 ++ if (list_empty(report_list)) {
3186 ++ hid_err(hid, "no output report found\n");
3187 ++ error = -ENODEV;
3188 ++ goto error_hw_stop;
3189 ++ }
3190 + bigben->report = list_entry(report_list->next,
3191 + struct hid_report, list);
3192 +
3193 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
3194 +index bd47628da6be0..3e1803592bd4a 100644
3195 +--- a/drivers/hid/hid-core.c
3196 ++++ b/drivers/hid/hid-core.c
3197 +@@ -993,8 +993,8 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
3198 + * Validating on id 0 means we should examine the first
3199 + * report in the list.
3200 + */
3201 +- report = list_entry(
3202 +- hid->report_enum[type].report_list.next,
3203 ++ report = list_first_entry_or_null(
3204 ++ &hid->report_enum[type].report_list,
3205 + struct hid_report, list);
3206 + } else {
3207 + report = hid->report_enum[type].report_id_hash[id];
3208 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
3209 +index 82713ef3aaa64..c3735848ed5db 100644
3210 +--- a/drivers/hid/hid-ids.h
3211 ++++ b/drivers/hid/hid-ids.h
3212 +@@ -274,7 +274,6 @@
3213 + #define USB_DEVICE_ID_CH_AXIS_295 0x001c
3214 +
3215 + #define USB_VENDOR_ID_CHERRY 0x046a
3216 +-#define USB_DEVICE_ID_CHERRY_MOUSE_000C 0x000c
3217 + #define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023
3218 + #define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR 0x0027
3219 +
3220 +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
3221 +index 0e9702c7f7d6c..be3ad02573de8 100644
3222 +--- a/drivers/hid/hid-quirks.c
3223 ++++ b/drivers/hid/hid-quirks.c
3224 +@@ -54,7 +54,6 @@ static const struct hid_device_id hid_quirks[] = {
3225 + { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET },
3226 + { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET },
3227 + { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET },
3228 +- { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_MOUSE_000C), HID_QUIRK_ALWAYS_POLL },
3229 + { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS },
3230 + { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
3231 + { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS },
3232 +diff --git a/drivers/hid/intel-ish-hid/ishtp/dma-if.c b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
3233 +index 40554c8daca07..00046cbfd4ed0 100644
3234 +--- a/drivers/hid/intel-ish-hid/ishtp/dma-if.c
3235 ++++ b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
3236 +@@ -104,6 +104,11 @@ void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
3237 + int required_slots = (size / DMA_SLOT_SIZE)
3238 + + 1 * (size % DMA_SLOT_SIZE != 0);
3239 +
3240 ++ if (!dev->ishtp_dma_tx_map) {
3241 ++ dev_err(dev->devc, "Fail to allocate Tx map\n");
3242 ++ return NULL;
3243 ++ }
3244 ++
3245 + spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
3246 + for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) {
3247 + free = 1;
3248 +@@ -150,6 +155,11 @@ void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
3249 + return;
3250 + }
3251 +
3252 ++ if (!dev->ishtp_dma_tx_map) {
3253 ++ dev_err(dev->devc, "Fail to allocate Tx map\n");
3254 ++ return;
3255 ++ }
3256 ++
3257 + i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE;
3258 + spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
3259 + for (j = 0; j < acked_slots; j++) {
3260 +diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
3261 +index c023b691441ea..bceaf70f4e237 100644
3262 +--- a/drivers/i2c/busses/i2c-designware-common.c
3263 ++++ b/drivers/i2c/busses/i2c-designware-common.c
3264 +@@ -351,7 +351,8 @@ u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
3265 + *
3266 + * If your hardware is free from tHD;STA issue, try this one.
3267 + */
3268 +- return DIV_ROUND_CLOSEST(ic_clk * tSYMBOL, MICRO) - 8 + offset;
3269 ++ return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * tSYMBOL, MICRO) -
3270 ++ 8 + offset;
3271 + else
3272 + /*
3273 + * Conditional expression:
3274 +@@ -367,7 +368,8 @@ u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
3275 + * The reason why we need to take into account "tf" here,
3276 + * is the same as described in i2c_dw_scl_lcnt().
3277 + */
3278 +- return DIV_ROUND_CLOSEST(ic_clk * (tSYMBOL + tf), MICRO) - 3 + offset;
3279 ++ return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tSYMBOL + tf), MICRO) -
3280 ++ 3 + offset;
3281 + }
3282 +
3283 + u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
3284 +@@ -383,7 +385,8 @@ u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
3285 + * account the fall time of SCL signal (tf). Default tf value
3286 + * should be 0.3 us, for safety.
3287 + */
3288 +- return DIV_ROUND_CLOSEST(ic_clk * (tLOW + tf), MICRO) - 1 + offset;
3289 ++ return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tLOW + tf), MICRO) -
3290 ++ 1 + offset;
3291 + }
3292 +
3293 + int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev)
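Both count helpers multiply a clock rate by a timing parameter before dividing by MICRO, and with 32-bit operands that product can exceed U32_MAX and silently wrap; the (u64) cast plus DIV_ROUND_CLOSEST_ULL keeps the intermediate wide. A user-space demonstration of the wrap (the driver works in kHz and ns, but these exact values are hypothetical):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ic_clk = 100000;	/* 100 MHz expressed in kHz */
	uint32_t tsymbol = 47000;	/* 47 us expressed in ns (made up) */

	uint32_t narrow = ic_clk * tsymbol;		/* wraps past 2^32 */
	uint64_t wide = (uint64_t)ic_clk * tsymbol;	/* what the fix computes */

	printf("32-bit product: %u (wrapped)\n", narrow);
	printf("64-bit product: %llu\n", (unsigned long long)wide);
	return 0;
}

Any cycle count derived from the wrapped product would be far too small, so the bug would show up as SCL timings much tighter than requested.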
3294 +diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
3295 +index ba043b5473936..74182db03a88b 100644
3296 +--- a/drivers/i2c/busses/i2c-designware-platdrv.c
3297 ++++ b/drivers/i2c/busses/i2c-designware-platdrv.c
3298 +@@ -351,13 +351,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
3299 +
3300 + if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
3301 + dev_pm_set_driver_flags(&pdev->dev,
3302 +- DPM_FLAG_SMART_PREPARE |
3303 +- DPM_FLAG_MAY_SKIP_RESUME);
3304 ++ DPM_FLAG_SMART_PREPARE);
3305 + } else {
3306 + dev_pm_set_driver_flags(&pdev->dev,
3307 + DPM_FLAG_SMART_PREPARE |
3308 +- DPM_FLAG_SMART_SUSPEND |
3309 +- DPM_FLAG_MAY_SKIP_RESUME);
3310 ++ DPM_FLAG_SMART_SUSPEND);
3311 + }
3312 +
3313 + device_enable_async_suspend(&pdev->dev);
3314 +@@ -419,21 +417,8 @@ static int dw_i2c_plat_prepare(struct device *dev)
3315 + */
3316 + return !has_acpi_companion(dev);
3317 + }
3318 +-
3319 +-static void dw_i2c_plat_complete(struct device *dev)
3320 +-{
3321 +- /*
3322 +- * The device can only be in runtime suspend at this point if it has not
3323 +- * been resumed throughout the ending system suspend/resume cycle, so if
3324 +- * the platform firmware might mess up with it, request the runtime PM
3325 +- * framework to resume it.
3326 +- */
3327 +- if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
3328 +- pm_request_resume(dev);
3329 +-}
3330 + #else
3331 + #define dw_i2c_plat_prepare NULL
3332 +-#define dw_i2c_plat_complete NULL
3333 + #endif
3334 +
3335 + #ifdef CONFIG_PM
3336 +@@ -483,7 +468,6 @@ static int __maybe_unused dw_i2c_plat_resume(struct device *dev)
3337 +
3338 + static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
3339 + .prepare = dw_i2c_plat_prepare,
3340 +- .complete = dw_i2c_plat_complete,
3341 + SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
3342 + SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend, dw_i2c_plat_runtime_resume, NULL)
3343 + };
3344 +diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
3345 +index 26b021f43ba40..11b1c1603aeb4 100644
3346 +--- a/drivers/infiniband/core/verbs.c
3347 ++++ b/drivers/infiniband/core/verbs.c
3348 +@@ -2957,15 +2957,18 @@ EXPORT_SYMBOL(__rdma_block_iter_start);
3349 + bool __rdma_block_iter_next(struct ib_block_iter *biter)
3350 + {
3351 + unsigned int block_offset;
3352 ++ unsigned int sg_delta;
3353 +
3354 + if (!biter->__sg_nents || !biter->__sg)
3355 + return false;
3356 +
3357 + biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
3358 + block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
3359 +- biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;
3360 ++ sg_delta = BIT_ULL(biter->__pg_bit) - block_offset;
3361 +
3362 +- if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
3363 ++ if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) {
3364 ++ biter->__sg_advance += sg_delta;
3365 ++ } else {
3366 + biter->__sg_advance = 0;
3367 + biter->__sg = sg_next(biter->__sg);
3368 + biter->__sg_nents--;
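The iterator rewrite compares the bytes remaining in the current sg entry with the next block step before advancing: the old unconditional `__sg_advance += delta` could wrap the 32-bit counter on a large, badly aligned entry, after which the `>= sg_dma_len()` exit test never fired and the loop spun forever. A user-space sketch of both schemes (values hypothetical):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t len = 0xffff0000u;	/* big sg entry */
	uint32_t advance = 0xc0000000u;	/* progress so far */
	uint32_t delta = 0x40001000u;	/* next aligned block step */

	/* old scheme: the addition wraps and the exit test never fires */
	uint32_t wrapped = advance + delta;
	printf("old: advance wraps to 0x%x (< len, keeps looping)\n",
	       (unsigned)wrapped);

	/* fixed scheme: compare the remaining bytes first, no overflow */
	if (len - advance > delta)
		printf("fixed: still inside the entry, advance by delta\n");
	else
		printf("fixed: entry exhausted, move to the next sg\n");
	return 0;
}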
3369 +diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
3370 +index 186d302912606..b02f2f0809c81 100644
3371 +--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
3372 ++++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
3373 +@@ -23,18 +23,25 @@ static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
3374 + static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
3375 + const struct mmu_notifier_range *range,
3376 + unsigned long cur_seq);
3377 ++static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
3378 ++ const struct mmu_notifier_range *range,
3379 ++ unsigned long cur_seq);
3380 + static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *,
3381 + struct tid_group *grp,
3382 + unsigned int start, u16 count,
3383 + u32 *tidlist, unsigned int *tididx,
3384 + unsigned int *pmapped);
3385 +-static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
3386 +- struct tid_group **grp);
3387 ++static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo);
3388 ++static void __clear_tid_node(struct hfi1_filedata *fd,
3389 ++ struct tid_rb_node *node);
3390 + static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);
3391 +
3392 + static const struct mmu_interval_notifier_ops tid_mn_ops = {
3393 + .invalidate = tid_rb_invalidate,
3394 + };
3395 ++static const struct mmu_interval_notifier_ops tid_cover_ops = {
3396 ++ .invalidate = tid_cover_invalidate,
3397 ++};
3398 +
3399 + /*
3400 + * Initialize context and file private data needed for Expected
3401 +@@ -253,53 +260,65 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
3402 + tididx = 0, mapped, mapped_pages = 0;
3403 + u32 *tidlist = NULL;
3404 + struct tid_user_buf *tidbuf;
3405 ++ unsigned long mmu_seq = 0;
3406 +
3407 + if (!PAGE_ALIGNED(tinfo->vaddr))
3408 + return -EINVAL;
3409 ++ if (tinfo->length == 0)
3410 ++ return -EINVAL;
3411 +
3412 + tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
3413 + if (!tidbuf)
3414 + return -ENOMEM;
3415 +
3416 ++ mutex_init(&tidbuf->cover_mutex);
3417 + tidbuf->vaddr = tinfo->vaddr;
3418 + tidbuf->length = tinfo->length;
3419 + tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
3420 + GFP_KERNEL);
3421 + if (!tidbuf->psets) {
3422 +- kfree(tidbuf);
3423 +- return -ENOMEM;
3424 ++ ret = -ENOMEM;
3425 ++ goto fail_release_mem;
3426 ++ }
3427 ++
3428 ++ if (fd->use_mn) {
3429 ++ ret = mmu_interval_notifier_insert(
3430 ++ &tidbuf->notifier, current->mm,
3431 ++ tidbuf->vaddr, tidbuf->npages * PAGE_SIZE,
3432 ++ &tid_cover_ops);
3433 ++ if (ret)
3434 ++ goto fail_release_mem;
3435 ++ mmu_seq = mmu_interval_read_begin(&tidbuf->notifier);
3436 + }
3437 +
3438 + pinned = pin_rcv_pages(fd, tidbuf);
3439 + if (pinned <= 0) {
3440 +- kfree(tidbuf->psets);
3441 +- kfree(tidbuf);
3442 +- return pinned;
3443 ++ ret = (pinned < 0) ? pinned : -ENOSPC;
3444 ++ goto fail_unpin;
3445 + }
3446 +
3447 + /* Find sets of physically contiguous pages */
3448 + tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);
3449 +
3450 +- /*
3451 +- * We don't need to access this under a lock since tid_used is per
3452 +- * process and the same process cannot be in hfi1_user_exp_rcv_clear()
3453 +- * and hfi1_user_exp_rcv_setup() at the same time.
3454 +- */
3455 ++ /* Reserve the number of expected tids to be used. */
3456 + spin_lock(&fd->tid_lock);
3457 + if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
3458 + pageset_count = fd->tid_limit - fd->tid_used;
3459 + else
3460 + pageset_count = tidbuf->n_psets;
3461 ++ fd->tid_used += pageset_count;
3462 + spin_unlock(&fd->tid_lock);
3463 +
3464 +- if (!pageset_count)
3465 +- goto bail;
3466 ++ if (!pageset_count) {
3467 ++ ret = -ENOSPC;
3468 ++ goto fail_unreserve;
3469 ++ }
3470 +
3471 + ngroups = pageset_count / dd->rcv_entries.group_size;
3472 + tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
3473 + if (!tidlist) {
3474 + ret = -ENOMEM;
3475 +- goto nomem;
3476 ++ goto fail_unreserve;
3477 + }
3478 +
3479 + tididx = 0;
3480 +@@ -395,43 +414,78 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
3481 + }
3482 + unlock:
3483 + mutex_unlock(&uctxt->exp_mutex);
3484 +-nomem:
3485 + hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
3486 + mapped_pages, ret);
3487 +- if (tididx) {
3488 +- spin_lock(&fd->tid_lock);
3489 +- fd->tid_used += tididx;
3490 +- spin_unlock(&fd->tid_lock);
3491 +- tinfo->tidcnt = tididx;
3492 +- tinfo->length = mapped_pages * PAGE_SIZE;
3493 +-
3494 +- if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
3495 +- tidlist, sizeof(tidlist[0]) * tididx)) {
3496 +- /*
3497 +- * On failure to copy to the user level, we need to undo
3498 +- * everything done so far so we don't leak resources.
3499 +- */
3500 +- tinfo->tidlist = (unsigned long)&tidlist;
3501 +- hfi1_user_exp_rcv_clear(fd, tinfo);
3502 +- tinfo->tidlist = 0;
3503 +- ret = -EFAULT;
3504 +- goto bail;
3505 ++
3506 ++ /* fail if nothing was programmed; if no error was set, use -ENOSPC */
3507 ++ if (tididx == 0) {
3508 ++ if (ret >= 0)
3509 ++ ret = -ENOSPC;
3510 ++ goto fail_unreserve;
3511 ++ }
3512 ++
3513 ++ /* adjust reserved tid_used to actual count */
3514 ++ spin_lock(&fd->tid_lock);
3515 ++ fd->tid_used -= pageset_count - tididx;
3516 ++ spin_unlock(&fd->tid_lock);
3517 ++
3518 ++ /* unpin all pages not covered by a TID */
3519 ++ unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, pinned - mapped_pages,
3520 ++ false);
3521 ++
3522 ++ if (fd->use_mn) {
3523 ++ /* check for an invalidate during setup */
3524 ++ bool fail = false;
3525 ++
3526 ++ mutex_lock(&tidbuf->cover_mutex);
3527 ++ fail = mmu_interval_read_retry(&tidbuf->notifier, mmu_seq);
3528 ++ mutex_unlock(&tidbuf->cover_mutex);
3529 ++
3530 ++ if (fail) {
3531 ++ ret = -EBUSY;
3532 ++ goto fail_unprogram;
3533 + }
3534 + }
3535 +
3536 +- /*
3537 +- * If not everything was mapped (due to insufficient RcvArray entries,
3538 +- * for example), unpin all unmapped pages so we can pin them next time.
3539 +- */
3540 +- if (mapped_pages != pinned)
3541 +- unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
3542 +- (pinned - mapped_pages), false);
3543 +-bail:
3544 ++ tinfo->tidcnt = tididx;
3545 ++ tinfo->length = mapped_pages * PAGE_SIZE;
3546 ++
3547 ++ if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
3548 ++ tidlist, sizeof(tidlist[0]) * tididx)) {
3549 ++ ret = -EFAULT;
3550 ++ goto fail_unprogram;
3551 ++ }
3552 ++
3553 ++ if (fd->use_mn)
3554 ++ mmu_interval_notifier_remove(&tidbuf->notifier);
3555 ++ kfree(tidbuf->pages);
3556 + kfree(tidbuf->psets);
3557 ++ kfree(tidbuf);
3558 + kfree(tidlist);
3559 ++ return 0;
3560 ++
3561 ++fail_unprogram:
3562 ++ /* unprogram, unmap, and unpin all allocated TIDs */
3563 ++ tinfo->tidlist = (unsigned long)tidlist;
3564 ++ hfi1_user_exp_rcv_clear(fd, tinfo);
3565 ++ tinfo->tidlist = 0;
3566 ++ pinned = 0; /* nothing left to unpin */
3567 ++ pageset_count = 0; /* nothing left reserved */
3568 ++fail_unreserve:
3569 ++ spin_lock(&fd->tid_lock);
3570 ++ fd->tid_used -= pageset_count;
3571 ++ spin_unlock(&fd->tid_lock);
3572 ++fail_unpin:
3573 ++ if (fd->use_mn)
3574 ++ mmu_interval_notifier_remove(&tidbuf->notifier);
3575 ++ if (pinned > 0)
3576 ++ unpin_rcv_pages(fd, tidbuf, NULL, 0, pinned, false);
3577 ++fail_release_mem:
3578 + kfree(tidbuf->pages);
3579 ++ kfree(tidbuf->psets);
3580 + kfree(tidbuf);
3581 +- return ret > 0 ? 0 : ret;
3582 ++ kfree(tidlist);
3583 ++ return ret;
3584 + }
3585 +
3586 + int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
3587 +@@ -452,7 +506,7 @@ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
3588 +
3589 + mutex_lock(&uctxt->exp_mutex);
3590 + for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
3591 +- ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL);
3592 ++ ret = unprogram_rcvarray(fd, tidinfo[tididx]);
3593 + if (ret) {
3594 + hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
3595 + ret);
3596 +@@ -706,6 +760,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
3597 + }
3598 +
3599 + node->fdata = fd;
3600 ++ mutex_init(&node->invalidate_mutex);
3601 + node->phys = page_to_phys(pages[0]);
3602 + node->npages = npages;
3603 + node->rcventry = rcventry;
3604 +@@ -721,11 +776,6 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
3605 + &tid_mn_ops);
3606 + if (ret)
3607 + goto out_unmap;
3608 +- /*
3609 +- * FIXME: This is in the wrong order, the notifier should be
3610 +- * established before the pages are pinned by pin_rcv_pages.
3611 +- */
3612 +- mmu_interval_read_begin(&node->notifier);
3613 + }
3614 + fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node;
3615 +
3616 +@@ -745,8 +795,7 @@ out_unmap:
3617 + return -EFAULT;
3618 + }
3619 +
3620 +-static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
3621 +- struct tid_group **grp)
3622 ++static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo)
3623 + {
3624 + struct hfi1_ctxtdata *uctxt = fd->uctxt;
3625 + struct hfi1_devdata *dd = uctxt->dd;
3626 +@@ -769,9 +818,6 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
3627 + if (!node || node->rcventry != (uctxt->expected_base + rcventry))
3628 + return -EBADF;
3629 +
3630 +- if (grp)
3631 +- *grp = node->grp;
3632 +-
3633 + if (fd->use_mn)
3634 + mmu_interval_notifier_remove(&node->notifier);
3635 + cacheless_tid_rb_remove(fd, node);
3636 +@@ -779,23 +825,34 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
3637 + return 0;
3638 + }
3639 +
3640 +-static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
3641 ++static void __clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
3642 + {
3643 + struct hfi1_ctxtdata *uctxt = fd->uctxt;
3644 + struct hfi1_devdata *dd = uctxt->dd;
3645 +
3646 ++ mutex_lock(&node->invalidate_mutex);
3647 ++ if (node->freed)
3648 ++ goto done;
3649 ++ node->freed = true;
3650 ++
3651 + trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
3652 + node->npages,
3653 + node->notifier.interval_tree.start, node->phys,
3654 + node->dma_addr);
3655 +
3656 +- /*
3657 +- * Make sure device has seen the write before we unpin the
3658 +- * pages.
3659 +- */
3660 ++ /* Make sure device has seen the write before pages are unpinned */
3661 + hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);
3662 +
3663 + unpin_rcv_pages(fd, NULL, node, 0, node->npages, true);
3664 ++done:
3665 ++ mutex_unlock(&node->invalidate_mutex);
3666 ++}
3667 ++
3668 ++static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
3669 ++{
3670 ++ struct hfi1_ctxtdata *uctxt = fd->uctxt;
3671 ++
3672 ++ __clear_tid_node(fd, node);
3673 +
3674 + node->grp->used--;
3675 + node->grp->map &= ~(1 << (node->rcventry - node->grp->base));
3676 +@@ -854,10 +911,16 @@ static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
3677 + if (node->freed)
3678 + return true;
3679 +
3680 ++ /* take action only if unmapping */
3681 ++ if (range->event != MMU_NOTIFY_UNMAP)
3682 ++ return true;
3683 ++
3684 + trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt,
3685 + node->notifier.interval_tree.start,
3686 + node->rcventry, node->npages, node->dma_addr);
3687 +- node->freed = true;
3688 ++
3689 ++ /* clear the hardware rcvarray entry */
3690 ++ __clear_tid_node(fdata, node);
3691 +
3692 + spin_lock(&fdata->invalid_lock);
3693 + if (fdata->invalid_tid_idx < uctxt->expected_count) {
3694 +@@ -887,6 +950,23 @@ static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
3695 + return true;
3696 + }
3697 +
3698 ++static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
3699 ++ const struct mmu_notifier_range *range,
3700 ++ unsigned long cur_seq)
3701 ++{
3702 ++ struct tid_user_buf *tidbuf =
3703 ++ container_of(mni, struct tid_user_buf, notifier);
3704 ++
3705 ++ /* take action only if unmapping */
3706 ++ if (range->event == MMU_NOTIFY_UNMAP) {
3707 ++ mutex_lock(&tidbuf->cover_mutex);
3708 ++ mmu_interval_set_seq(mni, cur_seq);
3709 ++ mutex_unlock(&tidbuf->cover_mutex);
3710 ++ }
3711 ++
3712 ++ return true;
3713 ++}
3714 ++
3715 + static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
3716 + struct tid_rb_node *tnode)
3717 + {
3718 +diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
3719 +index 8c53e416bf843..f8ee997d0050e 100644
3720 +--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h
3721 ++++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
3722 +@@ -16,6 +16,8 @@ struct tid_pageset {
3723 + };
3724 +
3725 + struct tid_user_buf {
3726 ++ struct mmu_interval_notifier notifier;
3727 ++ struct mutex cover_mutex;
3728 + unsigned long vaddr;
3729 + unsigned long length;
3730 + unsigned int npages;
3731 +@@ -27,6 +29,7 @@ struct tid_user_buf {
3732 + struct tid_rb_node {
3733 + struct mmu_interval_notifier notifier;
3734 + struct hfi1_filedata *fdata;
3735 ++ struct mutex invalidate_mutex; /* covers hw removal */
3736 + unsigned long phys;
3737 + struct tid_group *grp;
3738 + u32 rcventry;
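The rewritten hfi1_user_exp_rcv_setup() above adopts the kernel's ordered goto-unwind idiom: the labels fail_unprogram, fail_unreserve, fail_unpin and fail_release_mem each undo exactly the state acquired before the failing step, so a single exit path stays correct as setup steps are added. A minimal stand-alone sketch of the idiom (hypothetical resources, not the hfi1 objects):

#include <stdio.h>
#include <stdlib.h>

static int setup(char **out_buf, char **out_table)
{
	char *buf, *table;
	int ret;

	buf = malloc(64);
	if (!buf) {
		ret = -1;
		goto fail_none;
	}
	table = malloc(64);
	if (!table) {
		ret = -1;
		goto fail_buf;
	}
	*out_buf = buf;		/* success: caller now owns both */
	*out_table = table;
	return 0;

fail_buf:
	free(buf);		/* undo only what was acquired */
fail_none:
	return ret;
}

int main(void)
{
	char *buf, *table;
	int ret = setup(&buf, &table);

	printf("setup -> %d\n", ret);
	if (!ret) {
		free(table);
		free(buf);
	}
	return 0;
}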
3739 +diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
3740 +index 86c7a8bf3cbbd..fa41009ce8a9d 100644
3741 +--- a/drivers/infiniband/sw/rxe/rxe_param.h
3742 ++++ b/drivers/infiniband/sw/rxe/rxe_param.h
3743 +@@ -91,11 +91,11 @@ enum rxe_device_param {
3744 + RXE_MAX_SRQ = DEFAULT_MAX_VALUE - RXE_MIN_SRQ_INDEX,
3745 +
3746 + RXE_MIN_MR_INDEX = 0x00000001,
3747 +- RXE_MAX_MR_INDEX = DEFAULT_MAX_VALUE,
3748 +- RXE_MAX_MR = DEFAULT_MAX_VALUE - RXE_MIN_MR_INDEX,
3749 +- RXE_MIN_MW_INDEX = 0x00010001,
3750 +- RXE_MAX_MW_INDEX = 0x00020000,
3751 +- RXE_MAX_MW = 0x00001000,
3752 ++ RXE_MAX_MR_INDEX = DEFAULT_MAX_VALUE >> 1,
3753 ++ RXE_MAX_MR = RXE_MAX_MR_INDEX - RXE_MIN_MR_INDEX,
3754 ++ RXE_MIN_MW_INDEX = RXE_MAX_MR_INDEX + 1,
3755 ++ RXE_MAX_MW_INDEX = DEFAULT_MAX_VALUE,
3756 ++ RXE_MAX_MW = RXE_MAX_MW_INDEX - RXE_MIN_MW_INDEX,
3757 +
3758 + RXE_MAX_PKT_PER_ACK = 64,
3759 +
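The new constants carve the former shared index space into two disjoint halves, MR keys in [RXE_MIN_MR_INDEX, DEFAULT_MAX_VALUE >> 1] and MW keys in the upper half, so the two object types can no longer be handed colliding keys. A quick sketch of the arithmetic (the DEFAULT_MAX_VALUE here is a hypothetical stand-in):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t default_max = 0x00ffffff;	/* hypothetical */
	uint32_t min_mr = 0x00000001;
	uint32_t max_mr = default_max >> 1;	/* lower half: MRs */
	uint32_t min_mw = max_mr + 1;		/* upper half: MWs */
	uint32_t max_mw = default_max;

	printf("MR keys: [0x%08x, 0x%08x], max_elem %u\n",
	       min_mr, max_mr, max_mr - min_mr);
	printf("MW keys: [0x%08x, 0x%08x], max_elem %u\n",
	       min_mw, max_mw, max_mw - min_mw);
	return 0;
}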
3760 +diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
3761 +index f50620f5a0a14..1151c0b5cceab 100644
3762 +--- a/drivers/infiniband/sw/rxe/rxe_pool.c
3763 ++++ b/drivers/infiniband/sw/rxe/rxe_pool.c
3764 +@@ -23,16 +23,16 @@ static const struct rxe_type_info {
3765 + .size = sizeof(struct rxe_ucontext),
3766 + .elem_offset = offsetof(struct rxe_ucontext, elem),
3767 + .min_index = 1,
3768 +- .max_index = UINT_MAX,
3769 +- .max_elem = UINT_MAX,
3770 ++ .max_index = RXE_MAX_UCONTEXT,
3771 ++ .max_elem = RXE_MAX_UCONTEXT,
3772 + },
3773 + [RXE_TYPE_PD] = {
3774 + .name = "pd",
3775 + .size = sizeof(struct rxe_pd),
3776 + .elem_offset = offsetof(struct rxe_pd, elem),
3777 + .min_index = 1,
3778 +- .max_index = UINT_MAX,
3779 +- .max_elem = UINT_MAX,
3780 ++ .max_index = RXE_MAX_PD,
3781 ++ .max_elem = RXE_MAX_PD,
3782 + },
3783 + [RXE_TYPE_AH] = {
3784 + .name = "ah",
3785 +@@ -40,7 +40,7 @@ static const struct rxe_type_info {
3786 + .elem_offset = offsetof(struct rxe_ah, elem),
3787 + .min_index = RXE_MIN_AH_INDEX,
3788 + .max_index = RXE_MAX_AH_INDEX,
3789 +- .max_elem = RXE_MAX_AH_INDEX - RXE_MIN_AH_INDEX + 1,
3790 ++ .max_elem = RXE_MAX_AH,
3791 + },
3792 + [RXE_TYPE_SRQ] = {
3793 + .name = "srq",
3794 +@@ -49,7 +49,7 @@ static const struct rxe_type_info {
3795 + .cleanup = rxe_srq_cleanup,
3796 + .min_index = RXE_MIN_SRQ_INDEX,
3797 + .max_index = RXE_MAX_SRQ_INDEX,
3798 +- .max_elem = RXE_MAX_SRQ_INDEX - RXE_MIN_SRQ_INDEX + 1,
3799 ++ .max_elem = RXE_MAX_SRQ,
3800 + },
3801 + [RXE_TYPE_QP] = {
3802 + .name = "qp",
3803 +@@ -58,7 +58,7 @@ static const struct rxe_type_info {
3804 + .cleanup = rxe_qp_cleanup,
3805 + .min_index = RXE_MIN_QP_INDEX,
3806 + .max_index = RXE_MAX_QP_INDEX,
3807 +- .max_elem = RXE_MAX_QP_INDEX - RXE_MIN_QP_INDEX + 1,
3808 ++ .max_elem = RXE_MAX_QP,
3809 + },
3810 + [RXE_TYPE_CQ] = {
3811 + .name = "cq",
3812 +@@ -66,8 +66,8 @@ static const struct rxe_type_info {
3813 + .elem_offset = offsetof(struct rxe_cq, elem),
3814 + .cleanup = rxe_cq_cleanup,
3815 + .min_index = 1,
3816 +- .max_index = UINT_MAX,
3817 +- .max_elem = UINT_MAX,
3818 ++ .max_index = RXE_MAX_CQ,
3819 ++ .max_elem = RXE_MAX_CQ,
3820 + },
3821 + [RXE_TYPE_MR] = {
3822 + .name = "mr",
3823 +@@ -76,7 +76,7 @@ static const struct rxe_type_info {
3824 + .cleanup = rxe_mr_cleanup,
3825 + .min_index = RXE_MIN_MR_INDEX,
3826 + .max_index = RXE_MAX_MR_INDEX,
3827 +- .max_elem = RXE_MAX_MR_INDEX - RXE_MIN_MR_INDEX + 1,
3828 ++ .max_elem = RXE_MAX_MR,
3829 + },
3830 + [RXE_TYPE_MW] = {
3831 + .name = "mw",
3832 +@@ -85,7 +85,7 @@ static const struct rxe_type_info {
3833 + .cleanup = rxe_mw_cleanup,
3834 + .min_index = RXE_MIN_MW_INDEX,
3835 + .max_index = RXE_MAX_MW_INDEX,
3836 +- .max_elem = RXE_MAX_MW_INDEX - RXE_MIN_MW_INDEX + 1,
3837 ++ .max_elem = RXE_MAX_MW,
3838 + },
3839 + };
3840 +
3841 +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
3842 +index b0f776448a1cd..fa021af8506e4 100644
3843 +--- a/drivers/input/mouse/synaptics.c
3844 ++++ b/drivers/input/mouse/synaptics.c
3845 +@@ -192,7 +192,6 @@ static const char * const smbus_pnp_ids[] = {
3846 + "SYN3221", /* HP 15-ay000 */
3847 + "SYN323d", /* HP Spectre X360 13-w013dx */
3848 + "SYN3257", /* HP Envy 13-ad105ng */
3849 +- "SYN3286", /* HP Laptop 15-da3001TU */
3850 + NULL
3851 + };
3852 +
3853 +diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
3854 +index 46f8a694291ed..efc61736099b9 100644
3855 +--- a/drivers/input/serio/i8042-acpipnpio.h
3856 ++++ b/drivers/input/serio/i8042-acpipnpio.h
3857 +@@ -1238,6 +1238,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
3858 + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
3859 + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
3860 + },
3861 ++ {
3862 ++ .matches = {
3863 ++ DMI_MATCH(DMI_BOARD_NAME, "PCX0DX"),
3864 ++ },
3865 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
3866 ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
3867 ++ },
3868 + {
3869 + .matches = {
3870 + DMI_MATCH(DMI_BOARD_NAME, "X170SM"),
3871 +diff --git a/drivers/interconnect/qcom/msm8996.c b/drivers/interconnect/qcom/msm8996.c
3872 +index c2903ae3b3bc3..25a1a32bc611f 100644
3873 +--- a/drivers/interconnect/qcom/msm8996.c
3874 ++++ b/drivers/interconnect/qcom/msm8996.c
3875 +@@ -33,6 +33,13 @@ static const char * const bus_a0noc_clocks[] = {
3876 + "aggre0_noc_mpu_cfg"
3877 + };
3878 +
3879 ++static const char * const bus_a2noc_clocks[] = {
3880 ++ "bus",
3881 ++ "bus_a",
3882 ++ "aggre2_ufs_axi",
3883 ++ "ufs_axi"
3884 ++};
3885 ++
3886 + static const u16 mas_a0noc_common_links[] = {
3887 + MSM8996_SLAVE_A0NOC_SNOC
3888 + };
3889 +@@ -1806,7 +1813,7 @@ static const struct regmap_config msm8996_a0noc_regmap_config = {
3890 + .reg_bits = 32,
3891 + .reg_stride = 4,
3892 + .val_bits = 32,
3893 +- .max_register = 0x9000,
3894 ++ .max_register = 0x6000,
3895 + .fast_io = true
3896 + };
3897 +
3898 +@@ -1830,7 +1837,7 @@ static const struct regmap_config msm8996_a1noc_regmap_config = {
3899 + .reg_bits = 32,
3900 + .reg_stride = 4,
3901 + .val_bits = 32,
3902 +- .max_register = 0x7000,
3903 ++ .max_register = 0x5000,
3904 + .fast_io = true
3905 + };
3906 +
3907 +@@ -1851,7 +1858,7 @@ static const struct regmap_config msm8996_a2noc_regmap_config = {
3908 + .reg_bits = 32,
3909 + .reg_stride = 4,
3910 + .val_bits = 32,
3911 +- .max_register = 0xa000,
3912 ++ .max_register = 0x7000,
3913 + .fast_io = true
3914 + };
3915 +
3916 +@@ -1859,6 +1866,8 @@ static const struct qcom_icc_desc msm8996_a2noc = {
3917 + .type = QCOM_ICC_NOC,
3918 + .nodes = a2noc_nodes,
3919 + .num_nodes = ARRAY_SIZE(a2noc_nodes),
3920 ++ .clocks = bus_a2noc_clocks,
3921 ++ .num_clocks = ARRAY_SIZE(bus_a2noc_clocks),
3922 + .regmap_cfg = &msm8996_a2noc_regmap_config
3923 + };
3924 +
3925 +@@ -1877,7 +1886,7 @@ static const struct regmap_config msm8996_bimc_regmap_config = {
3926 + .reg_bits = 32,
3927 + .reg_stride = 4,
3928 + .val_bits = 32,
3929 +- .max_register = 0x62000,
3930 ++ .max_register = 0x5a000,
3931 + .fast_io = true
3932 + };
3933 +
3934 +@@ -1988,7 +1997,7 @@ static const struct regmap_config msm8996_mnoc_regmap_config = {
3935 + .reg_bits = 32,
3936 + .reg_stride = 4,
3937 + .val_bits = 32,
3938 +- .max_register = 0x20000,
3939 ++ .max_register = 0x1c000,
3940 + .fast_io = true
3941 + };
3942 +
3943 +diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c
3944 +index 9c49d00c2a966..ea6e9e1eaf046 100644
3945 +--- a/drivers/memory/atmel-sdramc.c
3946 ++++ b/drivers/memory/atmel-sdramc.c
3947 +@@ -47,19 +47,17 @@ static int atmel_ramc_probe(struct platform_device *pdev)
3948 + caps = of_device_get_match_data(&pdev->dev);
3949 +
3950 + if (caps->has_ddrck) {
3951 +- clk = devm_clk_get(&pdev->dev, "ddrck");
3952 ++ clk = devm_clk_get_enabled(&pdev->dev, "ddrck");
3953 + if (IS_ERR(clk))
3954 + return PTR_ERR(clk);
3955 +- clk_prepare_enable(clk);
3956 + }
3957 +
3958 + if (caps->has_mpddr_clk) {
3959 +- clk = devm_clk_get(&pdev->dev, "mpddr");
3960 ++ clk = devm_clk_get_enabled(&pdev->dev, "mpddr");
3961 + if (IS_ERR(clk)) {
3962 + pr_err("AT91 RAMC: couldn't get mpddr clock\n");
3963 + return PTR_ERR(clk);
3964 + }
3965 +- clk_prepare_enable(clk);
3966 + }
3967 +
3968 + return 0;
3969 +diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c
3970 +index 8450638e86700..efc6c08db2b70 100644
3971 +--- a/drivers/memory/mvebu-devbus.c
3972 ++++ b/drivers/memory/mvebu-devbus.c
3973 +@@ -280,10 +280,9 @@ static int mvebu_devbus_probe(struct platform_device *pdev)
3974 + if (IS_ERR(devbus->base))
3975 + return PTR_ERR(devbus->base);
3976 +
3977 +- clk = devm_clk_get(&pdev->dev, NULL);
3978 ++ clk = devm_clk_get_enabled(&pdev->dev, NULL);
3979 + if (IS_ERR(clk))
3980 + return PTR_ERR(clk);
3981 +- clk_prepare_enable(clk);
3982 +
3983 + /*
3984 + * Obtain clock period in picoseconds,
3985 +diff --git a/drivers/memory/tegra/tegra186.c b/drivers/memory/tegra/tegra186.c
3986 +index 62477e592bf5f..7bb73f06fad3e 100644
3987 +--- a/drivers/memory/tegra/tegra186.c
3988 ++++ b/drivers/memory/tegra/tegra186.c
3989 +@@ -22,32 +22,6 @@
3990 + #define MC_SID_STREAMID_SECURITY_WRITE_ACCESS_DISABLED BIT(16)
3991 + #define MC_SID_STREAMID_SECURITY_OVERRIDE BIT(8)
3992 +
3993 +-static void tegra186_mc_program_sid(struct tegra_mc *mc)
3994 +-{
3995 +- unsigned int i;
3996 +-
3997 +- for (i = 0; i < mc->soc->num_clients; i++) {
3998 +- const struct tegra_mc_client *client = &mc->soc->clients[i];
3999 +- u32 override, security;
4000 +-
4001 +- override = readl(mc->regs + client->regs.sid.override);
4002 +- security = readl(mc->regs + client->regs.sid.security);
4003 +-
4004 +- dev_dbg(mc->dev, "client %s: override: %x security: %x\n",
4005 +- client->name, override, security);
4006 +-
4007 +- dev_dbg(mc->dev, "setting SID %u for %s\n", client->sid,
4008 +- client->name);
4009 +- writel(client->sid, mc->regs + client->regs.sid.override);
4010 +-
4011 +- override = readl(mc->regs + client->regs.sid.override);
4012 +- security = readl(mc->regs + client->regs.sid.security);
4013 +-
4014 +- dev_dbg(mc->dev, "client %s: override: %x security: %x\n",
4015 +- client->name, override, security);
4016 +- }
4017 +-}
4018 +-
4019 + static int tegra186_mc_probe(struct tegra_mc *mc)
4020 + {
4021 + struct platform_device *pdev = to_platform_device(mc->dev);
4022 +@@ -85,8 +59,6 @@ populate:
4023 + if (err < 0)
4024 + return err;
4025 +
4026 +- tegra186_mc_program_sid(mc);
4027 +-
4028 + return 0;
4029 + }
4030 +
4031 +@@ -95,13 +67,6 @@ static void tegra186_mc_remove(struct tegra_mc *mc)
4032 + of_platform_depopulate(mc->dev);
4033 + }
4034 +
4035 +-static int tegra186_mc_resume(struct tegra_mc *mc)
4036 +-{
4037 +- tegra186_mc_program_sid(mc);
4038 +-
4039 +- return 0;
4040 +-}
4041 +-
4042 + #if IS_ENABLED(CONFIG_IOMMU_API)
4043 + static void tegra186_mc_client_sid_override(struct tegra_mc *mc,
4044 + const struct tegra_mc_client *client,
4045 +@@ -173,7 +138,6 @@ static int tegra186_mc_probe_device(struct tegra_mc *mc, struct device *dev)
4046 + const struct tegra_mc_ops tegra186_mc_ops = {
4047 + .probe = tegra186_mc_probe,
4048 + .remove = tegra186_mc_remove,
4049 +- .resume = tegra186_mc_resume,
4050 + .probe_device = tegra186_mc_probe_device,
4051 + .handle_irq = tegra30_mc_handle_irq,
4052 + };
4053 +diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
4054 +index a6a0321a89310..a736971470534 100644
4055 +--- a/drivers/net/dsa/microchip/ksz9477.c
4056 ++++ b/drivers/net/dsa/microchip/ksz9477.c
4057 +@@ -548,10 +548,10 @@ int ksz9477_fdb_del(struct ksz_device *dev, int port,
4058 + ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
4059 +
4060 + /* clear forwarding port */
4061 +- alu_table[2] &= ~BIT(port);
4062 ++ alu_table[1] &= ~BIT(port);
4063 +
4064 + /* if there is no port to forward, clear table */
4065 +- if ((alu_table[2] & ALU_V_PORT_MAP) == 0) {
4066 ++ if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
4067 + alu_table[0] = 0;
4068 + alu_table[1] = 0;
4069 + alu_table[2] = 0;
4070 +diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
4071 +index 3763930dc6fc4..aae1dadef882d 100644
4072 +--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
4073 ++++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
4074 +@@ -105,7 +105,7 @@ static const struct of_device_id ksz9477_dt_ids[] = {
4075 + },
4076 + {
4077 + .compatible = "microchip,ksz8563",
4078 +- .data = &ksz_switch_chips[KSZ9893]
4079 ++ .data = &ksz_switch_chips[KSZ8563]
4080 + },
4081 + {
4082 + .compatible = "microchip,ksz9567",
4083 +diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
4084 +index 9d8dfe1729948..ecce5f7a549f2 100644
4085 +--- a/drivers/net/ethernet/adi/adin1110.c
4086 ++++ b/drivers/net/ethernet/adi/adin1110.c
4087 +@@ -356,7 +356,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
4088 +
4089 + if ((port_priv->flags & IFF_ALLMULTI && rxb->pkt_type == PACKET_MULTICAST) ||
4090 + (port_priv->flags & IFF_BROADCAST && rxb->pkt_type == PACKET_BROADCAST))
4091 +- rxb->offload_fwd_mark = 1;
4092 ++ rxb->offload_fwd_mark = port_priv->priv->forwarding;
4093 +
4094 + netif_rx(rxb);
4095 +
4096 +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
4097 +index 3936543a74d8f..4030d619e84f5 100644
4098 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
4099 ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
4100 +@@ -524,19 +524,28 @@ static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
4101 + netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
4102 + }
4103 +
4104 ++static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata)
4105 ++{
4106 ++ unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
4107 ++
4108 ++ /* From MAC ver 30H the TFCR is per priority, instead of per queue */
4109 ++ if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
4110 ++ return max_q_count;
4111 ++ else
4112 ++ return min_t(unsigned int, pdata->tx_q_count, max_q_count);
4113 ++}
4114 ++
4115 + static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
4116 + {
4117 +- unsigned int max_q_count, q_count;
4118 + unsigned int reg, reg_val;
4119 +- unsigned int i;
4120 ++ unsigned int i, q_count;
4121 +
4122 + /* Clear MTL flow control */
4123 + for (i = 0; i < pdata->rx_q_count; i++)
4124 + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
4125 +
4126 + /* Clear MAC flow control */
4127 +- max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
4128 +- q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
4129 ++ q_count = xgbe_get_fc_queue_count(pdata);
4130 + reg = MAC_Q0TFCR;
4131 + for (i = 0; i < q_count; i++) {
4132 + reg_val = XGMAC_IOREAD(pdata, reg);
4133 +@@ -553,9 +562,8 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
4134 + {
4135 + struct ieee_pfc *pfc = pdata->pfc;
4136 + struct ieee_ets *ets = pdata->ets;
4137 +- unsigned int max_q_count, q_count;
4138 + unsigned int reg, reg_val;
4139 +- unsigned int i;
4140 ++ unsigned int i, q_count;
4141 +
4142 + /* Set MTL flow control */
4143 + for (i = 0; i < pdata->rx_q_count; i++) {
4144 +@@ -579,8 +587,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
4145 + }
4146 +
4147 + /* Set MAC flow control */
4148 +- max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
4149 +- q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
4150 ++ q_count = xgbe_get_fc_queue_count(pdata);
4151 + reg = MAC_Q0TFCR;
4152 + for (i = 0; i < q_count; i++) {
4153 + reg_val = XGMAC_IOREAD(pdata, reg);
4154 +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
4155 +index 0c5c1b1556830..43fdd111235a6 100644
4156 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
4157 ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
4158 +@@ -496,6 +496,7 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
4159 + reg |= XGBE_KR_TRAINING_ENABLE;
4160 + reg |= XGBE_KR_TRAINING_START;
4161 + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
4162 ++ pdata->kr_start_time = jiffies;
4163 +
4164 + netif_dbg(pdata, link, pdata->netdev,
4165 + "KR training initiated\n");
4166 +@@ -632,6 +633,8 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
4167 +
4168 + xgbe_switch_mode(pdata);
4169 +
4170 ++ pdata->an_result = XGBE_AN_READY;
4171 ++
4172 + xgbe_an_restart(pdata);
4173 +
4174 + return XGBE_AN_INCOMPAT_LINK;
4175 +@@ -1275,9 +1278,30 @@ static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
4176 + static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
4177 + {
4178 + unsigned long link_timeout;
4179 ++ unsigned long kr_time;
4180 ++ int wait;
4181 +
4182 + link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
4183 + if (time_after(jiffies, link_timeout)) {
4184 ++ if ((xgbe_cur_mode(pdata) == XGBE_MODE_KR) &&
4185 ++ pdata->phy.autoneg == AUTONEG_ENABLE) {
4186 ++ /* AN restart should not happen while KR training is in progress.
4187 ++ * The while loop ensures no AN restart during KR training,
4188 ++ * waits up to 500ms, and AN restart is triggered only if KR
4189 ++ * training has failed.
4190 ++ */
4191 ++ wait = XGBE_KR_TRAINING_WAIT_ITER;
4192 ++ while (wait--) {
4193 ++ kr_time = pdata->kr_start_time +
4194 ++ msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
4195 ++ if (time_after(jiffies, kr_time))
4196 ++ break;
4197 ++ /* AN restart is not required, if AN result is COMPLETE */
4198 ++ if (pdata->an_result == XGBE_AN_COMPLETE)
4199 ++ return;
4200 ++ usleep_range(10000, 11000);
4201 ++ }
4202 ++ }
4203 + netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
4204 + xgbe_phy_config_aneg(pdata);
4205 + }
4206 +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
4207 +index 71f24cb479355..7a41367c437dd 100644
4208 +--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
4209 ++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
4210 +@@ -290,6 +290,7 @@
4211 + /* Auto-negotiation */
4212 + #define XGBE_AN_MS_TIMEOUT 500
4213 + #define XGBE_LINK_TIMEOUT 5
4214 ++#define XGBE_KR_TRAINING_WAIT_ITER 50
4215 +
4216 + #define XGBE_SGMII_AN_LINK_STATUS BIT(1)
4217 + #define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
4218 +@@ -1280,6 +1281,7 @@ struct xgbe_prv_data {
4219 + unsigned int parallel_detect;
4220 + unsigned int fec_ability;
4221 + unsigned long an_start;
4222 ++ unsigned long kr_start_time;
4223 + enum xgbe_an_mode an_mode;
4224 +
4225 + /* I2C support */
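The kr_start_time stamp recorded when training starts lets the link-timeout path above wait for KR training with two bounds, an iteration budget (XGBE_KR_TRAINING_WAIT_ITER) and a 500ms deadline, and restart autonegotiation only after both give up. A user-space sketch of that bounded-poll pattern, using clock_gettime() where the driver uses jiffies:

#include <stdio.h>
#include <stdbool.h>
#include <time.h>
#include <unistd.h>

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* hypothetical stand-in for checking pdata->an_result */
static bool training_complete(void) { return false; }

int main(void)
{
	long start = now_ms();
	int wait = 50;			/* XGBE_KR_TRAINING_WAIT_ITER */

	while (wait--) {
		if (now_ms() - start > 500)	/* XGBE_AN_MS_TIMEOUT */
			break;
		if (training_complete())
			return 0;	/* no restart needed */
		usleep(10000);		/* ~10ms, as in the driver */
	}
	printf("KR training did not complete; restarting autoneg\n");
	return 1;
}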
4226 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
4227 +index 8cad15c458b39..703fc163235f9 100644
4228 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
4229 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
4230 +@@ -3865,7 +3865,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
4231 + test_info->timeout = HWRM_CMD_TIMEOUT;
4232 + for (i = 0; i < bp->num_tests; i++) {
4233 + char *str = test_info->string[i];
4234 +- char *fw_str = resp->test0_name + i * 32;
4235 ++ char *fw_str = resp->test_name[i];
4236 +
4237 + if (i == BNXT_MACLPBK_TEST_IDX) {
4238 + strcpy(str, "Mac loopback test (offline)");
4239 +@@ -3876,14 +3876,9 @@ void bnxt_ethtool_init(struct bnxt *bp)
4240 + } else if (i == BNXT_IRQ_TEST_IDX) {
4241 + strcpy(str, "Interrupt_test (offline)");
4242 + } else {
4243 +- strscpy(str, fw_str, ETH_GSTRING_LEN);
4244 +- strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
4245 +- if (test_info->offline_mask & (1 << i))
4246 +- strncat(str, " (offline)",
4247 +- ETH_GSTRING_LEN - strlen(str));
4248 +- else
4249 +- strncat(str, " (online)",
4250 +- ETH_GSTRING_LEN - strlen(str));
4251 ++ snprintf(str, ETH_GSTRING_LEN, "%s test (%s)",
4252 ++ fw_str, test_info->offline_mask & (1 << i) ?
4253 ++ "offline" : "online");
4254 + }
4255 + }
4256 +
4257 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
4258 +index b753032a10474..fb78fc38530da 100644
4259 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
4260 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
4261 +@@ -10099,14 +10099,7 @@ struct hwrm_selftest_qlist_output {
4262 + u8 unused_0;
4263 + __le16 test_timeout;
4264 + u8 unused_1[2];
4265 +- char test0_name[32];
4266 +- char test1_name[32];
4267 +- char test2_name[32];
4268 +- char test3_name[32];
4269 +- char test4_name[32];
4270 +- char test5_name[32];
4271 +- char test6_name[32];
4272 +- char test7_name[32];
4273 ++ char test_name[8][32];
4274 + u8 eyescope_target_BER_support;
4275 + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL
4276 + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL
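Collapsing test0_name…test7_name into test_name[8][32] lets the ethtool code index firmware-provided names directly, and the single snprintf() replaces a strscpy()/strncat() chain whose length argument (ETH_GSTRING_LEN - strlen(str)) still allows strncat() to write its terminating NUL one byte past the buffer when the appended text fills it. A small user-space sketch of the safer composition (ETH_GSTRING_LEN redefined locally for the demo):

#include <stdio.h>

#define ETH_GSTRING_LEN 32

int main(void)
{
	char str[ETH_GSTRING_LEN];
	const char *fw_str = "NVM";	/* hypothetical firmware test name */
	int offline = 1;

	snprintf(str, ETH_GSTRING_LEN, "%s test (%s)",
		 fw_str, offline ? "offline" : "online");
	printf("%s\n", str);		/* -> "NVM test (offline)" */
	return 0;
}

snprintf() both bounds and NUL-terminates in one call, and an over-long firmware string is silently truncated rather than overflowing the buffer.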
4277 +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
4278 +index 4179a12fc8819..af9ea5e4371b3 100644
4279 +--- a/drivers/net/ethernet/broadcom/tg3.c
4280 ++++ b/drivers/net/ethernet/broadcom/tg3.c
4281 +@@ -11174,7 +11174,7 @@ static void tg3_reset_task(struct work_struct *work)
4282 + rtnl_lock();
4283 + tg3_full_lock(tp, 0);
4284 +
4285 +- if (!netif_running(tp->dev)) {
4286 ++ if (tp->pcierr_recovery || !netif_running(tp->dev)) {
4287 + tg3_flag_clear(tp, RESET_TASK_PENDING);
4288 + tg3_full_unlock(tp);
4289 + rtnl_unlock();
4290 +@@ -18109,6 +18109,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
4291 +
4292 + netdev_info(netdev, "PCI I/O error detected\n");
4293 +
4294 ++ /* Want to make sure that the reset task doesn't run */
4295 ++ tg3_reset_task_cancel(tp);
4296 ++
4297 + rtnl_lock();
4298 +
4299 + /* Could be second call or maybe we don't have netdev yet */
4300 +@@ -18125,9 +18128,6 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
4301 +
4302 + tg3_timer_stop(tp);
4303 +
4304 +- /* Want to make sure that the reset task doesn't run */
4305 +- tg3_reset_task_cancel(tp);
4306 +-
4307 + netif_device_detach(netdev);
4308 +
4309 + /* Clean up software state, even if MMIO is blocked */
4310 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
4311 +index 4f63f1ba3161c..300f47ca42e3e 100644
4312 +--- a/drivers/net/ethernet/cadence/macb_main.c
4313 ++++ b/drivers/net/ethernet/cadence/macb_main.c
4314 +@@ -2188,7 +2188,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
4315 + bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
4316 + skb_is_nonlinear(*skb);
4317 + int padlen = ETH_ZLEN - (*skb)->len;
4318 +- int headroom = skb_headroom(*skb);
4319 + int tailroom = skb_tailroom(*skb);
4320 + struct sk_buff *nskb;
4321 + u32 fcs;
4322 +@@ -2202,9 +2201,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
4323 + /* FCS could be appended to tailroom. */
4324 + if (tailroom >= ETH_FCS_LEN)
4325 + goto add_fcs;
4326 +- /* FCS could be appended by moving data to headroom. */
4327 +- else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
4328 +- padlen = 0;
4329 + /* No room for FCS, need to reallocate skb. */
4330 + else
4331 + padlen = ETH_FCS_LEN;
4332 +@@ -2213,10 +2209,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
4333 + padlen += ETH_FCS_LEN;
4334 + }
4335 +
4336 +- if (!cloned && headroom + tailroom >= padlen) {
4337 +- (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
4338 +- skb_set_tail_pointer(*skb, (*skb)->len);
4339 +- } else {
4340 ++ if (cloned || tailroom < padlen) {
4341 + nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
4342 + if (!nskb)
4343 + return -ENOMEM;
4344 +diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
4345 +index 13d5ff4e0e020..6bf3cc11d2121 100644
4346 +--- a/drivers/net/ethernet/engleder/tsnep_main.c
4347 ++++ b/drivers/net/ethernet/engleder/tsnep_main.c
4348 +@@ -419,7 +419,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
4349 + /* ring full, shall not happen because queue is stopped if full
4350 + * below
4351 + */
4352 +- netif_stop_queue(tx->adapter->netdev);
4353 ++ netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
4354 +
4355 + spin_unlock_irqrestore(&tx->lock, flags);
4356 +
4357 +@@ -462,7 +462,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
4358 +
4359 + if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
4360 + /* ring can get full with next frame */
4361 +- netif_stop_queue(tx->adapter->netdev);
4362 ++ netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
4363 + }
4364 +
4365 + spin_unlock_irqrestore(&tx->lock, flags);
4366 +@@ -472,11 +472,14 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
4367 +
4368 + static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
4369 + {
4370 ++ struct tsnep_tx_entry *entry;
4371 ++ struct netdev_queue *nq;
4372 + unsigned long flags;
4373 + int budget = 128;
4374 +- struct tsnep_tx_entry *entry;
4375 +- int count;
4376 + int length;
4377 ++ int count;
4378 ++
4379 ++ nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
4380 +
4381 + spin_lock_irqsave(&tx->lock, flags);
4382 +
4383 +@@ -533,8 +536,8 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
4384 + } while (likely(budget));
4385 +
4386 + if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
4387 +- netif_queue_stopped(tx->adapter->netdev)) {
4388 +- netif_wake_queue(tx->adapter->netdev);
4389 ++ netif_tx_queue_stopped(nq)) {
4390 ++ netif_tx_wake_queue(nq);
4391 + }
4392 +
4393 + spin_unlock_irqrestore(&tx->lock, flags);
4394 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
4395 +index 3a79ead5219ae..e96449eedfb54 100644
4396 +--- a/drivers/net/ethernet/freescale/enetc/enetc.c
4397 ++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
4398 +@@ -2290,14 +2290,14 @@ static void enetc_tx_onestep_tstamp(struct work_struct *work)
4399 +
4400 + priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp);
4401 +
4402 +- netif_tx_lock(priv->ndev);
4403 ++ netif_tx_lock_bh(priv->ndev);
4404 +
4405 + clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags);
4406 + skb = skb_dequeue(&priv->tx_skbs);
4407 + if (skb)
4408 + enetc_start_xmit(skb, priv->ndev);
4409 +
4410 +- netif_tx_unlock(priv->ndev);
4411 ++ netif_tx_unlock_bh(priv->ndev);
4412 + }
4413 +
4414 + static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv)
4415 +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
4416 +index f250b0df27fbb..6f914180f4797 100644
4417 +--- a/drivers/net/ethernet/freescale/fec_main.c
4418 ++++ b/drivers/net/ethernet/freescale/fec_main.c
4419 +@@ -3089,7 +3089,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
4420 + for (q = 0; q < fep->num_rx_queues; q++) {
4421 + rxq = fep->rx_queue[q];
4422 + for (i = 0; i < rxq->bd.ring_size; i++)
4423 +- page_pool_release_page(rxq->page_pool, rxq->rx_skb_info[i].page);
4424 ++ page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false);
4425 +
4426 + if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
4427 + xdp_rxq_info_unreg(&rxq->xdp_rxq);
4428 +diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
4429 +index 0d1bab4ac1b07..2a9f1eeeb7015 100644
4430 +--- a/drivers/net/ethernet/intel/iavf/iavf.h
4431 ++++ b/drivers/net/ethernet/intel/iavf/iavf.h
4432 +@@ -249,6 +249,7 @@ struct iavf_cloud_filter {
4433 +
4434 + /* board specific private data structure */
4435 + struct iavf_adapter {
4436 ++ struct workqueue_struct *wq;
4437 + struct work_struct reset_task;
4438 + struct work_struct adminq_task;
4439 + struct delayed_work client_task;
4440 +@@ -459,7 +460,6 @@ struct iavf_device {
4441 +
4442 + /* needed by iavf_ethtool.c */
4443 + extern char iavf_driver_name[];
4444 +-extern struct workqueue_struct *iavf_wq;
4445 +
4446 + static inline const char *iavf_state_str(enum iavf_state_t state)
4447 + {
4448 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
4449 +index a056e15456153..83cfc54a47062 100644
4450 +--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
4451 ++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
4452 +@@ -532,7 +532,7 @@ static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
4453 + if (changed_flags & IAVF_FLAG_LEGACY_RX) {
4454 + if (netif_running(netdev)) {
4455 + adapter->flags |= IAVF_FLAG_RESET_NEEDED;
4456 +- queue_work(iavf_wq, &adapter->reset_task);
4457 ++ queue_work(adapter->wq, &adapter->reset_task);
4458 + }
4459 + }
4460 +
4461 +@@ -672,7 +672,7 @@ static int iavf_set_ringparam(struct net_device *netdev,
4462 +
4463 + if (netif_running(netdev)) {
4464 + adapter->flags |= IAVF_FLAG_RESET_NEEDED;
4465 +- queue_work(iavf_wq, &adapter->reset_task);
4466 ++ queue_work(adapter->wq, &adapter->reset_task);
4467 + }
4468 +
4469 + return 0;
4470 +@@ -1433,7 +1433,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
4471 + adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
4472 + spin_unlock_bh(&adapter->fdir_fltr_lock);
4473 +
4474 +- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
4475 ++ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
4476 +
4477 + ret:
4478 + if (err && fltr)
4479 +@@ -1474,7 +1474,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
4480 + spin_unlock_bh(&adapter->fdir_fltr_lock);
4481 +
4482 + if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
4483 +- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
4484 ++ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
4485 +
4486 + return err;
4487 + }
4488 +@@ -1658,7 +1658,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
4489 + spin_unlock_bh(&adapter->adv_rss_lock);
4490 +
4491 + if (!err)
4492 +- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
4493 ++ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
4494 +
4495 + mutex_unlock(&adapter->crit_lock);
4496 +
4497 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
4498 +index 260c55951c287..3dad834b9b8e5 100644
4499 +--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
4500 ++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
4501 +@@ -49,7 +49,6 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver")
4502 + MODULE_LICENSE("GPL v2");
4503 +
4504 + static const struct net_device_ops iavf_netdev_ops;
4505 +-struct workqueue_struct *iavf_wq;
4506 +
4507 + int iavf_status_to_errno(enum iavf_status status)
4508 + {
4509 +@@ -277,7 +276,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
4510 + if (!(adapter->flags &
4511 + (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
4512 + adapter->flags |= IAVF_FLAG_RESET_NEEDED;
4513 +- queue_work(iavf_wq, &adapter->reset_task);
4514 ++ queue_work(adapter->wq, &adapter->reset_task);
4515 + }
4516 + }
4517 +
4518 +@@ -291,7 +290,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
4519 + void iavf_schedule_request_stats(struct iavf_adapter *adapter)
4520 + {
4521 + adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
4522 +- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
4523 ++ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
4524 + }
4525 +
4526 + /**
4527 +@@ -411,7 +410,7 @@ static irqreturn_t iavf_msix_aq(int irq, void *data)
4528 +
4529 + if (adapter->state != __IAVF_REMOVE)
4530 + /* schedule work on the private workqueue */
4531 +- queue_work(iavf_wq, &adapter->adminq_task);
4532 ++ queue_work(adapter->wq, &adapter->adminq_task);
4533 +
4534 + return IRQ_HANDLED;
4535 + }
4536 +@@ -1034,7 +1033,7 @@ int iavf_replace_primary_mac(struct iavf_adapter *adapter,
4537 +
4538 + /* schedule the watchdog task to immediately process the request */
4539 + if (f) {
4540 +- queue_work(iavf_wq, &adapter->watchdog_task.work);
4541 ++ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
4542 + return 0;
4543 + }
4544 + return -ENOMEM;
4545 +@@ -1257,7 +1256,7 @@ static void iavf_up_complete(struct iavf_adapter *adapter)
4546 + adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
4547 + if (CLIENT_ENABLED(adapter))
4548 + adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
4549 +- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
4550 ++ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
4551 + }
4552 +
4553 + /**
4554 +@@ -1414,7 +1413,7 @@ void iavf_down(struct iavf_adapter *adapter)
4555 + adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
4556 + }
4557 +
4558 +- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
4559 ++ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
4560 + }
4561 +
4562 + /**
4563 +@@ -2248,7 +2247,7 @@ iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
4564 +
4565 + if (aq_required) {
4566 + adapter->aq_required |= aq_required;
4567 +- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
4568 ++ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
4569 + }
4570 + }
4571 +
4572 +@@ -2700,7 +2699,7 @@ static void iavf_watchdog_task(struct work_struct *work)
4573 + adapter->aq_required = 0;
4574 + adapter->current_op = VIRTCHNL_OP_UNKNOWN;
4575 + mutex_unlock(&adapter->crit_lock);
4576 +- queue_work(iavf_wq, &adapter->reset_task);
4577 ++ queue_work(adapter->wq, &adapter->reset_task);
4578 + return;
4579 + }
4580 +
4581 +@@ -2708,31 +2707,31 @@ static void iavf_watchdog_task(struct work_struct *work)
4582 + case __IAVF_STARTUP:
4583 + iavf_startup(adapter);
4584 + mutex_unlock(&adapter->crit_lock);
4585 +- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
4586 ++ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
4587 + msecs_to_jiffies(30));
4588 + return;
4589 + case __IAVF_INIT_VERSION_CHECK:
4590 + iavf_init_version_check(adapter);
4591 + mutex_unlock(&adapter->crit_lock);
4592 +- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
4593 ++ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
4594 + msecs_to_jiffies(30));
4595 + return;
4596 + case __IAVF_INIT_GET_RESOURCES:
4597 + iavf_init_get_resources(adapter);
4598 + mutex_unlock(&adapter->crit_lock);
4599 +- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
4600 ++ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
4601 + msecs_to_jiffies(1));
4602 + return;
4603 + case __IAVF_INIT_EXTENDED_CAPS:
4604 + iavf_init_process_extended_caps(adapter);
4605 + mutex_unlock(&adapter->crit_lock);
4606 +- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
4607 ++ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
4608 + msecs_to_jiffies(1));
4609 + return;
4610 + case __IAVF_INIT_CONFIG_ADAPTER:
4611 + iavf_init_config_adapter(adapter);
4612 + mutex_unlock(&adapter->crit_lock);
4613 +- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
4614 ++ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
4615 + msecs_to_jiffies(1));
4616 + return;
4617 + case __IAVF_INIT_FAILED:
4618 +@@ -2751,14 +2750,14 @@ static void iavf_watchdog_task(struct work_struct *work)
4619 + adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
4620 + iavf_shutdown_adminq(hw);
4621 + mutex_unlock(&adapter->crit_lock);
4622 +- queue_delayed_work(iavf_wq,
4623 ++ queue_delayed_work(adapter->wq,
4624 + &adapter->watchdog_task, (5 * HZ));
4625 + return;
4626 + }
4627 + /* Try again from failed step */
4628 + iavf_change_state(adapter, adapter->last_state);
4629 + mutex_unlock(&adapter->crit_lock);
4630 +- queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
4631 ++ queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
4632 + return;
4633 + case __IAVF_COMM_FAILED:
4634 + if (test_bit(__IAVF_IN_REMOVE_TASK,
4635 +@@ -2789,13 +2788,14 @@ static void iavf_watchdog_task(struct work_struct *work)
4636 + adapter->aq_required = 0;
4637 + adapter->current_op = VIRTCHNL_OP_UNKNOWN;
4638 + mutex_unlock(&adapter->crit_lock);
4639 +- queue_delayed_work(iavf_wq,
4640 ++ queue_delayed_work(adapter->wq,
4641 + &adapter->watchdog_task,
4642 + msecs_to_jiffies(10));
4643 + return;
4644 + case __IAVF_RESETTING:
4645 + mutex_unlock(&adapter->crit_lock);
4646 +- queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
4647 ++ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
4648 ++ HZ * 2);
4649 + return;
4650 + case __IAVF_DOWN:
4651 + case __IAVF_DOWN_PENDING:
4652 +@@ -2834,9 +2834,9 @@ static void iavf_watchdog_task(struct work_struct *work)
4653 + adapter->aq_required = 0;
4654 + adapter->current_op = VIRTCHNL_OP_UNKNOWN;
4655 + dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
4656 +- queue_work(iavf_wq, &adapter->reset_task);
4657 ++ queue_work(adapter->wq, &adapter->reset_task);
4658 + mutex_unlock(&adapter->crit_lock);
4659 +- queue_delayed_work(iavf_wq,
4660 ++ queue_delayed_work(adapter->wq,
4661 + &adapter->watchdog_task, HZ * 2);
4662 + return;
4663 + }
4664 +@@ -2845,12 +2845,13 @@ static void iavf_watchdog_task(struct work_struct *work)
4665 + mutex_unlock(&adapter->crit_lock);
4666 + restart_watchdog:
4667 + if (adapter->state >= __IAVF_DOWN)
4668 +- queue_work(iavf_wq, &adapter->adminq_task);
4669 ++ queue_work(adapter->wq, &adapter->adminq_task);
4670 + if (adapter->aq_required)
4671 +- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
4672 ++ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
4673 + msecs_to_jiffies(20));
4674 + else
4675 +- queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
4676 ++ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
4677 ++ HZ * 2);
4678 + }
4679 +
4680 + /**
4681 +@@ -2952,7 +2953,7 @@ static void iavf_reset_task(struct work_struct *work)
4682 + */
4683 + if (!mutex_trylock(&adapter->crit_lock)) {
4684 + if (adapter->state != __IAVF_REMOVE)
4685 +- queue_work(iavf_wq, &adapter->reset_task);
4686 ++ queue_work(adapter->wq, &adapter->reset_task);
4687 +
4688 + goto reset_finish;
4689 + }
4690 +@@ -3116,7 +3117,7 @@ continue_reset:
4691 + bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
4692 + bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
4693 +
4694 +- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
4695 ++ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
4696 +
4697 + /* We were running when the reset started, so we need to restore some
4698 + * state here.
4699 +@@ -3208,7 +3209,7 @@ static void iavf_adminq_task(struct work_struct *work)
4700 + if (adapter->state == __IAVF_REMOVE)
4701 + return;
4702 +
4703 +- queue_work(iavf_wq, &adapter->adminq_task);
4704 ++ queue_work(adapter->wq, &adapter->adminq_task);
4705 + goto out;
4706 + }
4707 +
4708 +@@ -4349,7 +4350,7 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
4709 +
4710 + if (netif_running(netdev)) {
4711 + adapter->flags |= IAVF_FLAG_RESET_NEEDED;
4712 +- queue_work(iavf_wq, &adapter->reset_task);
4713 ++ queue_work(adapter->wq, &adapter->reset_task);
4714 + }
4715 +
4716 + return 0;
4717 +@@ -4898,6 +4899,13 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4718 + hw = &adapter->hw;
4719 + hw->back = adapter;
4720 +
4721 ++ adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
4722 ++ iavf_driver_name);
4723 ++ if (!adapter->wq) {
4724 ++ err = -ENOMEM;
4725 ++ goto err_alloc_wq;
4726 ++ }
4727 ++
4728 + adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
4729 + iavf_change_state(adapter, __IAVF_STARTUP);
4730 +
4731 +@@ -4942,7 +4950,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4732 + INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
4733 + INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
4734 + INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
4735 +- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
4736 ++ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
4737 + msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
4738 +
4739 + /* Setup the wait queue for indicating transition to down status */
4740 +@@ -4954,6 +4962,8 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4741 + return 0;
4742 +
4743 + err_ioremap:
4744 ++ destroy_workqueue(adapter->wq);
4745 ++err_alloc_wq:
4746 + free_netdev(netdev);
4747 + err_alloc_etherdev:
4748 + pci_disable_pcie_error_reporting(pdev);
4749 +@@ -5023,7 +5033,7 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
4750 + return err;
4751 + }
4752 +
4753 +- queue_work(iavf_wq, &adapter->reset_task);
4754 ++ queue_work(adapter->wq, &adapter->reset_task);
4755 +
4756 + netif_device_attach(adapter->netdev);
4757 +
4758 +@@ -5170,6 +5180,8 @@ static void iavf_remove(struct pci_dev *pdev)
4759 + }
4760 + spin_unlock_bh(&adapter->adv_rss_lock);
4761 +
4762 ++ destroy_workqueue(adapter->wq);
4763 ++
4764 + free_netdev(netdev);
4765 +
4766 + pci_disable_pcie_error_reporting(pdev);
4767 +@@ -5196,24 +5208,11 @@ static struct pci_driver iavf_driver = {
4768 + **/
4769 + static int __init iavf_init_module(void)
4770 + {
4771 +- int ret;
4772 +-
4773 + pr_info("iavf: %s\n", iavf_driver_string);
4774 +
4775 + pr_info("%s\n", iavf_copyright);
4776 +
4777 +- iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
4778 +- iavf_driver_name);
4779 +- if (!iavf_wq) {
4780 +- pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
4781 +- return -ENOMEM;
4782 +- }
4783 +-
4784 +- ret = pci_register_driver(&iavf_driver);
4785 +- if (ret)
4786 +- destroy_workqueue(iavf_wq);
4787 +-
4788 +- return ret;
4789 ++ return pci_register_driver(&iavf_driver);
4790 + }
4791 +
4792 + module_init(iavf_init_module);
4793 +@@ -5227,7 +5226,6 @@ module_init(iavf_init_module);
4794 + static void __exit iavf_exit_module(void)
4795 + {
4796 + pci_unregister_driver(&iavf_driver);
4797 +- destroy_workqueue(iavf_wq);
4798 + }
4799 +
4800 + module_exit(iavf_exit_module);
4801 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
4802 +index 24a701fd140e9..0752fd67c96e5 100644
4803 +--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
4804 ++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
4805 +@@ -1952,7 +1952,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
4806 + if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
4807 + adapter->flags |= IAVF_FLAG_RESET_PENDING;
4808 + dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
4809 +- queue_work(iavf_wq, &adapter->reset_task);
4810 ++ queue_work(adapter->wq, &adapter->reset_task);
4811 + }
4812 + break;
4813 + default:
4814 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
4815 +index 6dac76fa58a3f..09d441ecb9f6d 100644
4816 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
4817 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
4818 +@@ -637,7 +637,7 @@ mlx5e_htb_update_children(struct mlx5e_htb *htb, struct mlx5e_qos_node *node,
4819 + if (child->bw_share == old_bw_share)
4820 + continue;
4821 +
4822 +- err_one = mlx5_qos_update_node(htb->mdev, child->hw_id, child->bw_share,
4823 ++ err_one = mlx5_qos_update_node(htb->mdev, child->bw_share,
4824 + child->max_average_bw, child->hw_id);
4825 + if (!err && err_one) {
4826 + err = err_one;
4827 +@@ -671,7 +671,7 @@ mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,
4828 + mlx5e_htb_convert_rate(htb, rate, node->parent, &bw_share);
4829 + mlx5e_htb_convert_ceil(htb, ceil, &max_average_bw);
4830 +
4831 +- err = mlx5_qos_update_node(htb->mdev, node->parent->hw_id, bw_share,
4832 ++ err = mlx5_qos_update_node(htb->mdev, bw_share,
4833 + max_average_bw, node->hw_id);
4834 + if (err) {
4835 + NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
4836 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
4837 +index 1cbd2eb9d04f9..f2c2c752bd1c3 100644
4838 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
4839 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
4840 +@@ -477,7 +477,6 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
4841 + struct mlx5e_sample_flow *sample_flow;
4842 + struct mlx5e_sample_attr *sample_attr;
4843 + struct mlx5_flow_attr *pre_attr;
4844 +- u32 tunnel_id = attr->tunnel_id;
4845 + struct mlx5_eswitch *esw;
4846 + u32 default_tbl_id;
4847 + u32 obj_id;
4848 +@@ -522,7 +521,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
4849 + restore_obj.sample.group_id = sample_attr->group_num;
4850 + restore_obj.sample.rate = sample_attr->rate;
4851 + restore_obj.sample.trunc_size = sample_attr->trunc_size;
4852 +- restore_obj.sample.tunnel_id = tunnel_id;
4853 ++ restore_obj.sample.tunnel_id = attr->tunnel_id;
4854 + err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id);
4855 + if (err)
4856 + goto err_obj_id;
4857 +@@ -548,7 +547,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
4858 + /* For decap action, do decap in the original flow table instead of the
4859 + * default flow table.
4860 + */
4861 +- if (tunnel_id)
4862 ++ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
4863 + pre_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
4864 + pre_attr->modify_hdr = sample_flow->restore->modify_hdr;
4865 + pre_attr->flags = MLX5_ATTR_FLAG_SAMPLE;
4866 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
4867 +index 4c313b7424bf5..c1cf3917baa43 100644
4868 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
4869 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
4870 +@@ -157,6 +157,7 @@ struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc)
4871 + * it's different than the ht->mutex here.
4872 + */
4873 + static struct lock_class_key tc_ht_lock_key;
4874 ++static struct lock_class_key tc_ht_wq_key;
4875 +
4876 + static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
4877 + static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
4878 +@@ -4971,6 +4972,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
4879 + return err;
4880 +
4881 + lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
4882 ++ lockdep_init_map(&tc->ht.run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);
4883 +
4884 + mapping_id = mlx5_query_nic_system_image_guid(dev);
4885 +
4886 +@@ -5077,6 +5079,7 @@ int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
4887 + return err;
4888 +
4889 + lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
4890 ++ lockdep_init_map(&tc_ht->run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);
4891 +
4892 + return 0;
4893 + }
4894 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
4895 +index 4f8a24d84a86a..75015d370922e 100644
4896 +--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
4897 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
4898 +@@ -22,15 +22,13 @@ struct mlx5_esw_rate_group {
4899 + };
4900 +
4901 + static int esw_qos_tsar_config(struct mlx5_core_dev *dev, u32 *sched_ctx,
4902 +- u32 parent_ix, u32 tsar_ix,
4903 +- u32 max_rate, u32 bw_share)
4904 ++ u32 tsar_ix, u32 max_rate, u32 bw_share)
4905 + {
4906 + u32 bitmask = 0;
4907 +
4908 + if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
4909 + return -EOPNOTSUPP;
4910 +
4911 +- MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_ix);
4912 + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
4913 + MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
4914 + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
4915 +@@ -51,7 +49,7 @@ static int esw_qos_group_config(struct mlx5_eswitch *esw, struct mlx5_esw_rate_g
4916 + int err;
4917 +
4918 + err = esw_qos_tsar_config(dev, sched_ctx,
4919 +- esw->qos.root_tsar_ix, group->tsar_ix,
4920 ++ group->tsar_ix,
4921 + max_rate, bw_share);
4922 + if (err)
4923 + NL_SET_ERR_MSG_MOD(extack, "E-Switch modify group TSAR element failed");
4924 +@@ -67,23 +65,13 @@ static int esw_qos_vport_config(struct mlx5_eswitch *esw,
4925 + struct netlink_ext_ack *extack)
4926 + {
4927 + u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
4928 +- struct mlx5_esw_rate_group *group = vport->qos.group;
4929 + struct mlx5_core_dev *dev = esw->dev;
4930 +- u32 parent_tsar_ix;
4931 +- void *vport_elem;
4932 + int err;
4933 +
4934 + if (!vport->qos.enabled)
4935 + return -EIO;
4936 +
4937 +- parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix;
4938 +- MLX5_SET(scheduling_context, sched_ctx, element_type,
4939 +- SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
4940 +- vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
4941 +- element_attributes);
4942 +- MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
4943 +-
4944 +- err = esw_qos_tsar_config(dev, sched_ctx, parent_tsar_ix, vport->qos.esw_tsar_ix,
4945 ++ err = esw_qos_tsar_config(dev, sched_ctx, vport->qos.esw_tsar_ix,
4946 + max_rate, bw_share);
4947 + if (err) {
4948 + esw_warn(esw->dev,
4949 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
4950 +index 788a6ab5c4636..43ba00d5e36ec 100644
4951 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
4952 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
4953 +@@ -1421,6 +1421,7 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
4954 + mlx5_lag_disable_change(esw->dev);
4955 + down_write(&esw->mode_lock);
4956 + mlx5_eswitch_disable_locked(esw);
4957 ++ esw->mode = MLX5_ESWITCH_LEGACY;
4958 + up_write(&esw->mode_lock);
4959 + mlx5_lag_enable_change(esw->dev);
4960 + }
4961 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
4962 +index 00758312df065..d4db1adae3e3d 100644
4963 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
4964 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
4965 +@@ -2082,7 +2082,7 @@ static void mlx5_core_verify_params(void)
4966 + }
4967 + }
4968 +
4969 +-static int __init init(void)
4970 ++static int __init mlx5_init(void)
4971 + {
4972 + int err;
4973 +
4974 +@@ -2117,7 +2117,7 @@ err_debug:
4975 + return err;
4976 + }
4977 +
4978 +-static void __exit cleanup(void)
4979 ++static void __exit mlx5_cleanup(void)
4980 + {
4981 + mlx5e_cleanup();
4982 + mlx5_sf_driver_unregister();
4983 +@@ -2125,5 +2125,5 @@ static void __exit cleanup(void)
4984 + mlx5_unregister_debugfs();
4985 + }
4986 +
4987 +-module_init(init);
4988 +-module_exit(cleanup);
4989 ++module_init(mlx5_init);
4990 ++module_exit(mlx5_cleanup);
4991 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
4992 +index 0777be24a3074..8bce730b5c5be 100644
4993 +--- a/drivers/net/ethernet/mellanox/mlx5/core/qos.c
4994 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
4995 +@@ -62,13 +62,12 @@ int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id)
4996 + return mlx5_qos_create_inner_node(mdev, MLX5_QOS_DEFAULT_DWRR_UID, 0, 0, id);
4997 + }
4998 +
4999 +-int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id,
5000 ++int mlx5_qos_update_node(struct mlx5_core_dev *mdev,
5001 + u32 bw_share, u32 max_avg_bw, u32 id)
5002 + {
5003 + u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
5004 + u32 bitmask = 0;
5005 +
5006 +- MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
5007 + MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
5008 + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);
5009 +
5010 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/qos.h
5011 +index 125e4e47e6f71..624ce822b7f59 100644
5012 +--- a/drivers/net/ethernet/mellanox/mlx5/core/qos.h
5013 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.h
5014 +@@ -23,7 +23,7 @@ int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
5015 + int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
5016 + u32 bw_share, u32 max_avg_bw, u32 *id);
5017 + int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id);
5018 +-int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id, u32 bw_share,
5019 ++int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 bw_share,
5020 + u32 max_avg_bw, u32 id);
5021 + int mlx5_qos_destroy_node(struct mlx5_core_dev *mdev, u32 id);
5022 +
5023 +diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
5024 +index 20ee5b28f70a5..569108c49cbc5 100644
5025 +--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
5026 ++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
5027 +@@ -1022,11 +1022,6 @@ static int lan966x_probe(struct platform_device *pdev)
5028 + lan966x->base_mac[5] &= 0xf0;
5029 + }
5030 +
5031 +- ports = device_get_named_child_node(&pdev->dev, "ethernet-ports");
5032 +- if (!ports)
5033 +- return dev_err_probe(&pdev->dev, -ENODEV,
5034 +- "no ethernet-ports child found\n");
5035 +-
5036 + err = lan966x_create_targets(pdev, lan966x);
5037 + if (err)
5038 + return dev_err_probe(&pdev->dev, err,
5039 +@@ -1104,6 +1099,11 @@ static int lan966x_probe(struct platform_device *pdev)
5040 + }
5041 + }
5042 +
5043 ++ ports = device_get_named_child_node(&pdev->dev, "ethernet-ports");
5044 ++ if (!ports)
5045 ++ return dev_err_probe(&pdev->dev, -ENODEV,
5046 ++ "no ethernet-ports child found\n");
5047 ++
5048 + /* init switch */
5049 + lan966x_init(lan966x);
5050 + lan966x_stats_init(lan966x);
5051 +@@ -1138,6 +1138,8 @@ static int lan966x_probe(struct platform_device *pdev)
5052 + lan966x_port_init(lan966x->ports[p]);
5053 + }
5054 +
5055 ++ fwnode_handle_put(ports);
5056 ++
5057 + lan966x_mdb_init(lan966x);
5058 + err = lan966x_fdb_init(lan966x);
5059 + if (err)
5060 +@@ -1160,6 +1162,7 @@ cleanup_fdb:
5061 + lan966x_fdb_deinit(lan966x);
5062 +
5063 + cleanup_ports:
5064 ++ fwnode_handle_put(ports);
5065 + fwnode_handle_put(portnp);
5066 +
5067 + lan966x_cleanup_ports(lan966x);
5068 +diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h
5069 +index 65c24ee49efd9..48b0ab56bdb0a 100644
5070 +--- a/drivers/net/ethernet/microsoft/mana/gdma.h
5071 ++++ b/drivers/net/ethernet/microsoft/mana/gdma.h
5072 +@@ -324,9 +324,12 @@ struct gdma_queue_spec {
5073 + };
5074 + };
5075 +
5076 ++#define MANA_IRQ_NAME_SZ 32
5077 ++
5078 + struct gdma_irq_context {
5079 + void (*handler)(void *arg);
5080 + void *arg;
5081 ++ char name[MANA_IRQ_NAME_SZ];
5082 + };
5083 +
5084 + struct gdma_context {
5085 +diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
5086 +index a6f99b4344d93..d674ebda2053d 100644
5087 +--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
5088 ++++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
5089 +@@ -1233,13 +1233,20 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
5090 + gic->handler = NULL;
5091 + gic->arg = NULL;
5092 +
5093 ++ if (!i)
5094 ++ snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s",
5095 ++ pci_name(pdev));
5096 ++ else
5097 ++ snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
5098 ++ i - 1, pci_name(pdev));
5099 ++
5100 + irq = pci_irq_vector(pdev, i);
5101 + if (irq < 0) {
5102 + err = irq;
5103 + goto free_irq;
5104 + }
5105 +
5106 +- err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic);
5107 ++ err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
5108 + if (err)
5109 + goto free_irq;
5110 + }
5111 +diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
5112 +index b4e0fc7f65bdf..0f54849a38235 100644
5113 +--- a/drivers/net/ethernet/renesas/ravb_main.c
5114 ++++ b/drivers/net/ethernet/renesas/ravb_main.c
5115 +@@ -1101,14 +1101,14 @@ static void ravb_error_interrupt(struct net_device *ndev)
5116 + ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
5117 + if (eis & EIS_QFS) {
5118 + ris2 = ravb_read(ndev, RIS2);
5119 +- ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
5120 ++ ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
5121 + RIS2);
5122 +
5123 + /* Receive Descriptor Empty int */
5124 + if (ris2 & RIS2_QFF0)
5125 + priv->stats[RAVB_BE].rx_over_errors++;
5126 +
5127 +- /* Receive Descriptor Empty int */
5128 ++ /* Receive Descriptor Empty int */
5129 + if (ris2 & RIS2_QFF1)
5130 + priv->stats[RAVB_NC].rx_over_errors++;
5131 +
5132 +@@ -2973,6 +2973,9 @@ static int __maybe_unused ravb_suspend(struct device *dev)
5133 + else
5134 + ret = ravb_close(ndev);
5135 +
5136 ++ if (priv->info->ccc_gac)
5137 ++ ravb_ptp_stop(ndev);
5138 ++
5139 + return ret;
5140 + }
5141 +
5142 +@@ -3011,6 +3014,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
5143 + /* Restore descriptor base address table */
5144 + ravb_write(ndev, priv->desc_bat_dma, DBAT);
5145 +
5146 ++ if (priv->info->ccc_gac)
5147 ++ ravb_ptp_init(ndev, priv->pdev);
5148 ++
5149 + if (netif_running(ndev)) {
5150 + if (priv->wol_enabled) {
5151 + ret = ravb_wol_restore(ndev);
5152 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
5153 +index 9c2d40f853ed0..413f660172199 100644
5154 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
5155 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
5156 +@@ -186,11 +186,25 @@ static void dwmac5_handle_dma_err(struct net_device *ndev,
5157 + int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
5158 + struct stmmac_safety_feature_cfg *safety_feat_cfg)
5159 + {
5160 ++ struct stmmac_safety_feature_cfg all_safety_feats = {
5161 ++ .tsoee = 1,
5162 ++ .mrxpee = 1,
5163 ++ .mestee = 1,
5164 ++ .mrxee = 1,
5165 ++ .mtxee = 1,
5166 ++ .epsi = 1,
5167 ++ .edpp = 1,
5168 ++ .prtyen = 1,
5169 ++ .tmouten = 1,
5170 ++ };
5171 + u32 value;
5172 +
5173 + if (!asp)
5174 + return -EINVAL;
5175 +
5176 ++ if (!safety_feat_cfg)
5177 ++ safety_feat_cfg = &all_safety_feats;
5178 ++
5179 + /* 1. Enable Safety Features */
5180 + value = readl(ioaddr + MTL_ECC_CONTROL);
5181 + value |= MEEAO; /* MTL ECC Error Addr Status Override */
5182 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
5183 +index f453b0d093663..35c8dd92d3692 100644
5184 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
5185 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
5186 +@@ -551,16 +551,16 @@ static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
5187 + p = (char *)priv + offsetof(struct stmmac_priv,
5188 + xstats.txq_stats[q].tx_pkt_n);
5189 + for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
5190 +- *data++ = (*(u64 *)p);
5191 +- p += sizeof(u64 *);
5192 ++ *data++ = (*(unsigned long *)p);
5193 ++ p += sizeof(unsigned long);
5194 + }
5195 + }
5196 + for (q = 0; q < rx_cnt; q++) {
5197 + p = (char *)priv + offsetof(struct stmmac_priv,
5198 + xstats.rxq_stats[q].rx_pkt_n);
5199 + for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
5200 +- *data++ = (*(u64 *)p);
5201 +- p += sizeof(u64 *);
5202 ++ *data++ = (*(unsigned long *)p);
5203 ++ p += sizeof(unsigned long);
5204 + }
5205 + }
5206 + }
5207 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
5208 +index feb209d4b991e..4bba0444c764a 100644
5209 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
5210 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
5211 +@@ -1148,6 +1148,11 @@ static int stmmac_init_phy(struct net_device *dev)
5212 + int addr = priv->plat->phy_addr;
5213 + struct phy_device *phydev;
5214 +
5215 ++ if (addr < 0) {
5216 ++ netdev_err(priv->dev, "no phy found\n");
5217 ++ return -ENODEV;
5218 ++ }
5219 ++
5220 + phydev = mdiobus_get_phy(priv->mii, addr);
5221 + if (!phydev) {
5222 + netdev_err(priv->dev, "no phy at addr %d\n", addr);
5223 +diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
5224 +index c269432f9c2ee..a89a3e3ff81c3 100644
5225 +--- a/drivers/net/ipa/ipa_interrupt.c
5226 ++++ b/drivers/net/ipa/ipa_interrupt.c
5227 +@@ -127,6 +127,16 @@ out_power_put:
5228 + return IRQ_HANDLED;
5229 + }
5230 +
5231 ++void ipa_interrupt_irq_disable(struct ipa *ipa)
5232 ++{
5233 ++ disable_irq(ipa->interrupt->irq);
5234 ++}
5235 ++
5236 ++void ipa_interrupt_irq_enable(struct ipa *ipa)
5237 ++{
5238 ++ enable_irq(ipa->interrupt->irq);
5239 ++}
5240 ++
5241 + /* Common function used to enable/disable TX_SUSPEND for an endpoint */
5242 + static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
5243 + u32 endpoint_id, bool enable)
5244 +diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
5245 +index f31fd9965fdc6..8a1bd5b893932 100644
5246 +--- a/drivers/net/ipa/ipa_interrupt.h
5247 ++++ b/drivers/net/ipa/ipa_interrupt.h
5248 +@@ -85,6 +85,22 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt);
5249 + */
5250 + void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt);
5251 +
5252 ++/**
5253 ++ * ipa_interrupt_irq_enable() - Enable IPA interrupts
5254 ++ * @ipa: IPA pointer
5255 ++ *
5256 ++ * This enables the IPA interrupt line
5257 ++ */
5258 ++void ipa_interrupt_irq_enable(struct ipa *ipa);
5259 ++
5260 ++/**
5261 ++ * ipa_interrupt_irq_disable() - Disable IPA interrupts
5262 ++ * @ipa: IPA pointer
5263 ++ *
5264 ++ * This disables the IPA interrupt line
5265 ++ */
5266 ++void ipa_interrupt_irq_disable(struct ipa *ipa);
5267 ++
5268 + /**
5269 + * ipa_interrupt_config() - Configure the IPA interrupt framework
5270 + * @ipa: IPA pointer
5271 +diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
5272 +index 8420f93128a26..8057be8cda801 100644
5273 +--- a/drivers/net/ipa/ipa_power.c
5274 ++++ b/drivers/net/ipa/ipa_power.c
5275 +@@ -181,6 +181,17 @@ static int ipa_suspend(struct device *dev)
5276 +
5277 + __set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
5278 +
5279 ++ /* Increment the disable depth to ensure that the IRQ won't
5280 ++ * be re-enabled until the matching _enable call in
5281 ++ * ipa_resume(). We do this to ensure that the interrupt
5282 ++ * handler won't run whilst PM runtime is disabled.
5283 ++ *
5284 ++ * Note that disabling the IRQ is NOT the same as disabling
5285 ++ * irq wake. If wakeup is enabled for the IPA then the IRQ
5286 ++ * will still cause the system to wake up, see irq_set_irq_wake().
5287 ++ */
5288 ++ ipa_interrupt_irq_disable(ipa);
5289 ++
5290 + return pm_runtime_force_suspend(dev);
5291 + }
5292 +
5293 +@@ -193,6 +204,12 @@ static int ipa_resume(struct device *dev)
5294 +
5295 + __clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
5296 +
5297 ++ /* Now that PM runtime is enabled again it's safe
5298 ++ * to turn the IRQ back on and process any data
5299 ++ * that was received during suspend.
5300 ++ */
5301 ++ ipa_interrupt_irq_enable(ipa);
5302 ++
5303 + return ret;
5304 + }
5305 +
5306 +diff --git a/drivers/net/mdio/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c
5307 +index 4a2e94faf57e2..c4542ecf56230 100644
5308 +--- a/drivers/net/mdio/mdio-mux-meson-g12a.c
5309 ++++ b/drivers/net/mdio/mdio-mux-meson-g12a.c
5310 +@@ -4,6 +4,7 @@
5311 + */
5312 +
5313 + #include <linux/bitfield.h>
5314 ++#include <linux/delay.h>
5315 + #include <linux/clk.h>
5316 + #include <linux/clk-provider.h>
5317 + #include <linux/device.h>
5318 +@@ -150,6 +151,7 @@ static const struct clk_ops g12a_ephy_pll_ops = {
5319 +
5320 + static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
5321 + {
5322 ++ u32 value;
5323 + int ret;
5324 +
5325 + /* Enable the phy clock */
5326 +@@ -163,18 +165,25 @@ static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
5327 +
5328 + /* Initialize ephy control */
5329 + writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0);
5330 +- writel(FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
5331 +- FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
5332 +- FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
5333 +- PHY_CNTL1_CLK_EN |
5334 +- PHY_CNTL1_CLKFREQ |
5335 +- PHY_CNTL1_PHY_ENB,
5336 +- priv->regs + ETH_PHY_CNTL1);
5337 ++
5338 ++ /* Make sure we get a 0 -> 1 transition on the enable bit */
5339 ++ value = FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
5340 ++ FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
5341 ++ FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
5342 ++ PHY_CNTL1_CLK_EN |
5343 ++ PHY_CNTL1_CLKFREQ;
5344 ++ writel(value, priv->regs + ETH_PHY_CNTL1);
5345 + writel(PHY_CNTL2_USE_INTERNAL |
5346 + PHY_CNTL2_SMI_SRC_MAC |
5347 + PHY_CNTL2_RX_CLK_EPHY,
5348 + priv->regs + ETH_PHY_CNTL2);
5349 +
5350 ++ value |= PHY_CNTL1_PHY_ENB;
5351 ++ writel(value, priv->regs + ETH_PHY_CNTL1);
5352 ++
5353 ++ /* The phy needs a bit of time to power up */
5354 ++ mdelay(10);
5355 ++
5356 + return 0;
5357 + }
5358 +
5359 +diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
5360 +index 1cd604cd1fa1b..16e021b477f06 100644
5361 +--- a/drivers/net/phy/mdio_bus.c
5362 ++++ b/drivers/net/phy/mdio_bus.c
5363 +@@ -108,7 +108,12 @@ EXPORT_SYMBOL(mdiobus_unregister_device);
5364 +
5365 + struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr)
5366 + {
5367 +- struct mdio_device *mdiodev = bus->mdio_map[addr];
5368 ++ struct mdio_device *mdiodev;
5369 ++
5370 ++ if (addr < 0 || addr >= ARRAY_SIZE(bus->mdio_map))
5371 ++ return NULL;
5372 ++
5373 ++ mdiodev = bus->mdio_map[addr];
5374 +
5375 + if (!mdiodev)
5376 + return NULL;
5377 +diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
5378 +index e11f70911acc1..fb5f59d0d55d7 100644
5379 +--- a/drivers/net/usb/cdc_ether.c
5380 ++++ b/drivers/net/usb/cdc_ether.c
5381 +@@ -1001,6 +1001,12 @@ static const struct usb_device_id products[] = {
5382 + USB_CDC_SUBCLASS_ETHERNET,
5383 + USB_CDC_PROTO_NONE),
5384 + .driver_info = (unsigned long)&wwan_info,
5385 ++}, {
5386 ++ /* Cinterion PLS62-W modem by GEMALTO/THALES */
5387 ++ USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x005b, USB_CLASS_COMM,
5388 ++ USB_CDC_SUBCLASS_ETHERNET,
5389 ++ USB_CDC_PROTO_NONE),
5390 ++ .driver_info = (unsigned long)&wwan_info,
5391 + }, {
5392 + /* Cinterion PLS83/PLS63 modem by GEMALTO/THALES */
5393 + USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0069, USB_CLASS_COMM,
5394 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
5395 +index a481a1d831e2f..23da1d9dafd1f 100644
5396 +--- a/drivers/net/usb/r8152.c
5397 ++++ b/drivers/net/usb/r8152.c
5398 +@@ -9836,6 +9836,7 @@ static const struct usb_device_id rtl8152_table[] = {
5399 + REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab),
5400 + REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6),
5401 + REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927),
5402 ++ REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e),
5403 + REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101),
5404 + REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f),
5405 + REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054),
5406 +diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
5407 +index 5a53e63d33a60..3164451e1010c 100644
5408 +--- a/drivers/net/usb/sr9700.c
5409 ++++ b/drivers/net/usb/sr9700.c
5410 +@@ -413,7 +413,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
5411 + /* ignore the CRC length */
5412 + len = (skb->data[1] | (skb->data[2] << 8)) - 4;
5413 +
5414 +- if (len > ETH_FRAME_LEN || len > skb->len)
5415 ++ if (len > ETH_FRAME_LEN || len > skb->len || len < 0)
5416 + return 0;
5417 +
5418 + /* the last packet of current skb */
5419 +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
5420 +index 86e52454b5b5c..3cd15f16090f1 100644
5421 +--- a/drivers/net/virtio_net.c
5422 ++++ b/drivers/net/virtio_net.c
5423 +@@ -1873,8 +1873,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
5424 + */
5425 + if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
5426 + netif_stop_subqueue(dev, qnum);
5427 +- if (!use_napi &&
5428 +- unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
5429 ++ if (use_napi) {
5430 ++ if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
5431 ++ virtqueue_napi_schedule(&sq->napi, sq->vq);
5432 ++ } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
5433 + /* More just got used, free them then recheck. */
5434 + free_old_xmit_skbs(sq, false);
5435 + if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
5436 +diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
5437 +index 22edea6ca4b81..1c53b55469270 100644
5438 +--- a/drivers/net/wan/fsl_ucc_hdlc.c
5439 ++++ b/drivers/net/wan/fsl_ucc_hdlc.c
5440 +@@ -1243,9 +1243,11 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
5441 + free_dev:
5442 + free_netdev(dev);
5443 + undo_uhdlc_init:
5444 +- iounmap(utdm->siram);
5445 ++ if (utdm)
5446 ++ iounmap(utdm->siram);
5447 + unmap_si_regs:
5448 +- iounmap(utdm->si_regs);
5449 ++ if (utdm)
5450 ++ iounmap(utdm->si_regs);
5451 + free_utdm:
5452 + if (uhdlc_priv->tsa)
5453 + kfree(utdm);
5454 +diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
5455 +index 82a7458e01aec..bf72e5fd39cf4 100644
5456 +--- a/drivers/net/wireless/rndis_wlan.c
5457 ++++ b/drivers/net/wireless/rndis_wlan.c
5458 +@@ -696,8 +696,8 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
5459 + struct rndis_query *get;
5460 + struct rndis_query_c *get_c;
5461 + } u;
5462 +- int ret, buflen;
5463 +- int resplen, respoffs, copylen;
5464 ++ int ret;
5465 ++ size_t buflen, resplen, respoffs, copylen;
5466 +
5467 + buflen = *len + sizeof(*u.get);
5468 + if (buflen < CONTROL_BUFFER_SIZE)
5469 +@@ -732,22 +732,15 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
5470 +
5471 + if (respoffs > buflen) {
5472 + /* Device returned data offset outside buffer, error. */
5473 +- netdev_dbg(dev->net, "%s(%s): received invalid "
5474 +- "data offset: %d > %d\n", __func__,
5475 +- oid_to_string(oid), respoffs, buflen);
5476 ++ netdev_dbg(dev->net,
5477 ++ "%s(%s): received invalid data offset: %zu > %zu\n",
5478 ++ __func__, oid_to_string(oid), respoffs, buflen);
5479 +
5480 + ret = -EINVAL;
5481 + goto exit_unlock;
5482 + }
5483 +
5484 +- if ((resplen + respoffs) > buflen) {
5485 +- /* Device would have returned more data if buffer would
5486 +- * have been big enough. Copy just the bits that we got.
5487 +- */
5488 +- copylen = buflen - respoffs;
5489 +- } else {
5490 +- copylen = resplen;
5491 +- }
5492 ++ copylen = min(resplen, buflen - respoffs);
5493 +
5494 + if (copylen > *len)
5495 + copylen = *len;
5496 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
5497 +index 1ded96d1bfd21..25ade4ce8e0a7 100644
5498 +--- a/drivers/nvme/host/core.c
5499 ++++ b/drivers/nvme/host/core.c
5500 +@@ -1088,7 +1088,7 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
5501 + if (ns) {
5502 + if (ns->head->effects)
5503 + effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
5504 +- if (ns->head->ids.csi == NVME_CAP_CSS_NVM)
5505 ++ if (ns->head->ids.csi == NVME_CSI_NVM)
5506 + effects |= nvme_known_nvm_effects(opcode);
5507 + if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
5508 + dev_warn_once(ctrl->device,
5509 +@@ -3903,10 +3903,11 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
5510 + return a->mode;
5511 + }
5512 +
5513 +-static const struct attribute_group nvme_dev_attrs_group = {
5514 ++const struct attribute_group nvme_dev_attrs_group = {
5515 + .attrs = nvme_dev_attrs,
5516 + .is_visible = nvme_dev_attrs_are_visible,
5517 + };
5518 ++EXPORT_SYMBOL_GPL(nvme_dev_attrs_group);
5519 +
5520 + static const struct attribute_group *nvme_dev_attr_groups[] = {
5521 + &nvme_dev_attrs_group,
5522 +@@ -4839,8 +4840,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
5523 + EXPORT_SYMBOL_GPL(nvme_complete_async_event);
5524 +
5525 + int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
5526 +- const struct blk_mq_ops *ops, unsigned int flags,
5527 +- unsigned int cmd_size)
5528 ++ const struct blk_mq_ops *ops, unsigned int cmd_size)
5529 + {
5530 + int ret;
5531 +
5532 +@@ -4850,7 +4850,9 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
5533 + if (ctrl->ops->flags & NVME_F_FABRICS)
5534 + set->reserved_tags = NVMF_RESERVED_TAGS;
5535 + set->numa_node = ctrl->numa_node;
5536 +- set->flags = flags;
5537 ++ set->flags = BLK_MQ_F_NO_SCHED;
5538 ++ if (ctrl->ops->flags & NVME_F_BLOCKING)
5539 ++ set->flags |= BLK_MQ_F_BLOCKING;
5540 + set->cmd_size = cmd_size;
5541 + set->driver_data = ctrl;
5542 + set->nr_hw_queues = 1;
5543 +@@ -4894,8 +4896,8 @@ void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
5544 + EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
5545 +
5546 + int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
5547 +- const struct blk_mq_ops *ops, unsigned int flags,
5548 +- unsigned int nr_maps, unsigned int cmd_size)
5549 ++ const struct blk_mq_ops *ops, unsigned int nr_maps,
5550 ++ unsigned int cmd_size)
5551 + {
5552 + int ret;
5553 +
5554 +@@ -4904,7 +4906,9 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
5555 + set->queue_depth = ctrl->sqsize + 1;
5556 + set->reserved_tags = NVMF_RESERVED_TAGS;
5557 + set->numa_node = ctrl->numa_node;
5558 +- set->flags = flags;
5559 ++ set->flags = BLK_MQ_F_SHOULD_MERGE;
5560 ++ if (ctrl->ops->flags & NVME_F_BLOCKING)
5561 ++ set->flags |= BLK_MQ_F_BLOCKING;
5562 + set->cmd_size = cmd_size,
5563 + set->driver_data = ctrl;
5564 + set->nr_hw_queues = ctrl->queue_count - 1;
5565 +@@ -5080,7 +5084,10 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
5566 + ctrl->instance);
5567 + ctrl->device->class = nvme_class;
5568 + ctrl->device->parent = ctrl->dev;
5569 +- ctrl->device->groups = nvme_dev_attr_groups;
5570 ++ if (ops->dev_attr_groups)
5571 ++ ctrl->device->groups = ops->dev_attr_groups;
5572 ++ else
5573 ++ ctrl->device->groups = nvme_dev_attr_groups;
5574 + ctrl->device->release = nvme_free_ctrl;
5575 + dev_set_drvdata(ctrl->device, ctrl);
5576 + ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
5577 +diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
5578 +index 20b0c29a9a341..6c3d469eed7e3 100644
5579 +--- a/drivers/nvme/host/fc.c
5580 ++++ b/drivers/nvme/host/fc.c
5581 +@@ -2903,7 +2903,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
5582 + nvme_fc_init_io_queues(ctrl);
5583 +
5584 + ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
5585 +- &nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE, 1,
5586 ++ &nvme_fc_mq_ops, 1,
5587 + struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
5588 + ctrl->lport->ops->fcprqst_priv_sz));
5589 + if (ret)
5590 +@@ -3508,13 +3508,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
5591 +
5592 + nvme_fc_init_queue(ctrl, 0);
5593 +
5594 +- ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
5595 +- &nvme_fc_admin_mq_ops, BLK_MQ_F_NO_SCHED,
5596 +- struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
5597 +- ctrl->lport->ops->fcprqst_priv_sz));
5598 +- if (ret)
5599 +- goto out_free_queues;
5600 +-
5601 + /*
5602 + * Would have been nice to init io queues tag set as well.
5603 + * However, we require interaction from the controller
5604 +@@ -3524,10 +3517,17 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
5605 +
5606 + ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
5607 + if (ret)
5608 +- goto out_cleanup_tagset;
5609 ++ goto out_free_queues;
5610 +
5611 + /* at this point, teardown path changes to ref counting on nvme ctrl */
5612 +
5613 ++ ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
5614 ++ &nvme_fc_admin_mq_ops,
5615 ++ struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
5616 ++ ctrl->lport->ops->fcprqst_priv_sz));
5617 ++ if (ret)
5618 ++ goto fail_ctrl;
5619 ++
5620 + spin_lock_irqsave(&rport->lock, flags);
5621 + list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
5622 + spin_unlock_irqrestore(&rport->lock, flags);
5623 +@@ -3579,8 +3579,6 @@ fail_ctrl:
5624 +
5625 + return ERR_PTR(-EIO);
5626 +
5627 +-out_cleanup_tagset:
5628 +- nvme_remove_admin_tag_set(&ctrl->ctrl);
5629 + out_free_queues:
5630 + kfree(ctrl->queues);
5631 + out_free_ida:
5632 +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
5633 +index cbda8a19409bf..01d90424af534 100644
5634 +--- a/drivers/nvme/host/nvme.h
5635 ++++ b/drivers/nvme/host/nvme.h
5636 +@@ -508,6 +508,9 @@ struct nvme_ctrl_ops {
5637 + unsigned int flags;
5638 + #define NVME_F_FABRICS (1 << 0)
5639 + #define NVME_F_METADATA_SUPPORTED (1 << 1)
5640 ++#define NVME_F_BLOCKING (1 << 2)
5641 ++
5642 ++ const struct attribute_group **dev_attr_groups;
5643 + int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
5644 + int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
5645 + int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
5646 +@@ -738,12 +741,11 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl);
5647 + void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
5648 + int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);
5649 + int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
5650 +- const struct blk_mq_ops *ops, unsigned int flags,
5651 +- unsigned int cmd_size);
5652 ++ const struct blk_mq_ops *ops, unsigned int cmd_size);
5653 + void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
5654 + int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
5655 +- const struct blk_mq_ops *ops, unsigned int flags,
5656 +- unsigned int nr_maps, unsigned int cmd_size);
5657 ++ const struct blk_mq_ops *ops, unsigned int nr_maps,
5658 ++ unsigned int cmd_size);
5659 + void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);
5660 +
5661 + void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
5662 +@@ -857,6 +859,7 @@ int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
5663 + extern const struct attribute_group *nvme_ns_id_attr_groups[];
5664 + extern const struct pr_ops nvme_pr_ops;
5665 + extern const struct block_device_operations nvme_ns_head_ops;
5666 ++extern const struct attribute_group nvme_dev_attrs_group;
5667 +
5668 + struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
5669 + #ifdef CONFIG_NVME_MULTIPATH
5670 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
5671 +index 115d81def5671..d839689af17ce 100644
5672 +--- a/drivers/nvme/host/pci.c
5673 ++++ b/drivers/nvme/host/pci.c
5674 +@@ -158,8 +158,6 @@ struct nvme_dev {
5675 + unsigned int nr_allocated_queues;
5676 + unsigned int nr_write_queues;
5677 + unsigned int nr_poll_queues;
5678 +-
5679 +- bool attrs_added;
5680 + };
5681 +
5682 + static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
5683 +@@ -1367,7 +1365,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
5684 + else
5685 + nvme_poll_irqdisable(nvmeq);
5686 +
5687 +- if (blk_mq_request_completed(req)) {
5688 ++ if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
5689 + dev_warn(dev->ctrl.device,
5690 + "I/O %d QID %d timeout, completion polled\n",
5691 + req->tag, nvmeq->qid);
5692 +@@ -2241,11 +2239,17 @@ static struct attribute *nvme_pci_attrs[] = {
5693 + NULL,
5694 + };
5695 +
5696 +-static const struct attribute_group nvme_pci_attr_group = {
5697 ++static const struct attribute_group nvme_pci_dev_attrs_group = {
5698 + .attrs = nvme_pci_attrs,
5699 + .is_visible = nvme_pci_attrs_are_visible,
5700 + };
5701 +
5702 ++static const struct attribute_group *nvme_pci_dev_attr_groups[] = {
5703 ++ &nvme_dev_attrs_group,
5704 ++ &nvme_pci_dev_attrs_group,
5705 ++ NULL,
5706 ++};
5707 ++
5708 + /*
5709 + * nirqs is the number of interrupts available for write and read
5710 + * queues. The core already reserved an interrupt for the admin queue.
5711 +@@ -2935,10 +2939,6 @@ static void nvme_reset_work(struct work_struct *work)
5712 + goto out;
5713 + }
5714 +
5715 +- if (!dev->attrs_added && !sysfs_create_group(&dev->ctrl.device->kobj,
5716 +- &nvme_pci_attr_group))
5717 +- dev->attrs_added = true;
5718 +-
5719 + nvme_start_ctrl(&dev->ctrl);
5720 + return;
5721 +
5722 +@@ -3011,6 +3011,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
5723 + .name = "pcie",
5724 + .module = THIS_MODULE,
5725 + .flags = NVME_F_METADATA_SUPPORTED,
5726 ++ .dev_attr_groups = nvme_pci_dev_attr_groups,
5727 + .reg_read32 = nvme_pci_reg_read32,
5728 + .reg_write32 = nvme_pci_reg_write32,
5729 + .reg_read64 = nvme_pci_reg_read64,
5730 +@@ -3209,13 +3210,6 @@ static void nvme_shutdown(struct pci_dev *pdev)
5731 + nvme_disable_prepare_reset(dev, true);
5732 + }
5733 +
5734 +-static void nvme_remove_attrs(struct nvme_dev *dev)
5735 +-{
5736 +- if (dev->attrs_added)
5737 +- sysfs_remove_group(&dev->ctrl.device->kobj,
5738 +- &nvme_pci_attr_group);
5739 +-}
5740 +-
5741 + /*
5742 + * The driver's remove may be called on a device in a partially initialized
5743 + * state. This function must not have any dependencies on the device state in
5744 +@@ -3237,7 +3231,6 @@ static void nvme_remove(struct pci_dev *pdev)
5745 + nvme_stop_ctrl(&dev->ctrl);
5746 + nvme_remove_namespaces(&dev->ctrl);
5747 + nvme_dev_disable(dev, true);
5748 +- nvme_remove_attrs(dev);
5749 + nvme_free_host_mem(dev);
5750 + nvme_dev_remove_admin(dev);
5751 + nvme_free_queues(dev, 0);
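
The pci.c hunks above replace the driver's manual sysfs_create_group()/sysfs_remove_group() calls (and the attrs_added bookkeeping) with a NULL-terminated attribute-group list handed to the core via .dev_attr_groups, so the groups are registered and torn down with the device itself. A small sketch of that pattern, with simplified stand-in types:

#include <stdio.h>

struct attribute_group { const char *name; };

static const struct attribute_group dev_attrs_group     = { "core" };
static const struct attribute_group pci_dev_attrs_group = { "pci" };

/* NULL-terminated list, as nvme_pci_dev_attr_groups is in the hunk; the
 * device core walks it at registration time. */
static const struct attribute_group *dev_attr_groups[] = {
	&dev_attrs_group,
	&pci_dev_attrs_group,
	NULL,
};

int main(void)
{
	for (const struct attribute_group **g = dev_attr_groups; *g; g++)
		printf("registering group: %s\n", (*g)->name);
	return 0;
}
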
5752 +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
5753 +index a55d3e8b607d5..6f918e61b6aef 100644
5754 +--- a/drivers/nvme/host/rdma.c
5755 ++++ b/drivers/nvme/host/rdma.c
5756 +@@ -798,7 +798,7 @@ static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
5757 + NVME_RDMA_METADATA_SGL_SIZE;
5758 +
5759 + return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
5760 +- &nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE,
5761 ++ &nvme_rdma_mq_ops,
5762 + ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
5763 + cmd_size);
5764 + }
5765 +@@ -848,7 +848,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
5766 + if (new) {
5767 + error = nvme_alloc_admin_tag_set(&ctrl->ctrl,
5768 + &ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops,
5769 +- BLK_MQ_F_NO_SCHED,
5770 + sizeof(struct nvme_rdma_request) +
5771 + NVME_RDMA_DATA_SGL_SIZE);
5772 + if (error)
5773 +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
5774 +index 83735c52d34a0..eacd445b5333f 100644
5775 +--- a/drivers/nvme/host/tcp.c
5776 ++++ b/drivers/nvme/host/tcp.c
5777 +@@ -1867,7 +1867,6 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
5778 + if (new) {
5779 + ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
5780 + &nvme_tcp_mq_ops,
5781 +- BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING,
5782 + ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
5783 + sizeof(struct nvme_tcp_request));
5784 + if (ret)
5785 +@@ -1943,7 +1942,7 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
5786 + if (new) {
5787 + error = nvme_alloc_admin_tag_set(ctrl,
5788 + &to_tcp_ctrl(ctrl)->admin_tag_set,
5789 +- &nvme_tcp_admin_mq_ops, BLK_MQ_F_BLOCKING,
5790 ++ &nvme_tcp_admin_mq_ops,
5791 + sizeof(struct nvme_tcp_request));
5792 + if (error)
5793 + goto out_free_queue;
5794 +@@ -2524,7 +2523,7 @@ static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
5795 + static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
5796 + .name = "tcp",
5797 + .module = THIS_MODULE,
5798 +- .flags = NVME_F_FABRICS,
5799 ++ .flags = NVME_F_FABRICS | NVME_F_BLOCKING,
5800 + .reg_read32 = nvmf_reg_read32,
5801 + .reg_read64 = nvmf_reg_read64,
5802 + .reg_write32 = nvmf_reg_write32,
5803 +diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
5804 +index 08c583258e90f..c864e902e91e2 100644
5805 +--- a/drivers/nvme/target/loop.c
5806 ++++ b/drivers/nvme/target/loop.c
5807 +@@ -353,7 +353,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
5808 + ctrl->ctrl.queue_count = 1;
5809 +
5810 + error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
5811 +- &nvme_loop_admin_mq_ops, BLK_MQ_F_NO_SCHED,
5812 ++ &nvme_loop_admin_mq_ops,
5813 + sizeof(struct nvme_loop_iod) +
5814 + NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
5815 + if (error)
5816 +@@ -494,7 +494,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
5817 + return ret;
5818 +
5819 + ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
5820 +- &nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE, 1,
5821 ++ &nvme_loop_mq_ops, 1,
5822 + sizeof(struct nvme_loop_iod) +
5823 + NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
5824 + if (ret)
5825 +diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
5826 +index b80a9b74662b1..1deb61b22bc76 100644
5827 +--- a/drivers/perf/arm-cmn.c
5828 ++++ b/drivers/perf/arm-cmn.c
5829 +@@ -1576,7 +1576,6 @@ static int arm_cmn_event_init(struct perf_event *event)
5830 + hw->dn++;
5831 + continue;
5832 + }
5833 +- hw->dtcs_used |= arm_cmn_node_to_xp(cmn, dn)->dtc;
5834 + hw->num_dns++;
5835 + if (bynodeid)
5836 + break;
5837 +@@ -1589,6 +1588,12 @@ static int arm_cmn_event_init(struct perf_event *event)
5838 + nodeid, nid.x, nid.y, nid.port, nid.dev, type);
5839 + return -EINVAL;
5840 + }
5841 ++ /*
5842 ++ * Keep assuming non-cycles events count in all DTC domains; turns out
5843 ++ * it's hard to make a worthwhile optimisation around this, short of
5844 ++ * going all-in with domain-local counter allocation as well.
5845 ++ */
5846 ++ hw->dtcs_used = (1U << cmn->num_dtcs) - 1;
5847 +
5848 + return arm_cmn_validate_group(cmn, event);
5849 + }
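
The arm-cmn hunk above stops OR-ing in DTC domains node by node and instead marks every domain as used: (1U << num_dtcs) - 1 builds a mask with the low num_dtcs bits set. A tiny sketch of that arithmetic, outside any driver context:

#include <stdio.h>

/* (1U << n) - 1 sets the low n bits, i.e. "all DTC domains" for
 * n = num_dtcs, which is what the hunk assigns to hw->dtcs_used. */
static unsigned int all_dtcs(unsigned int num_dtcs)
{
	return (1U << num_dtcs) - 1;
}

int main(void)
{
	for (unsigned int n = 1; n <= 4; n++)
		printf("num_dtcs=%u -> dtcs_used=%#x\n", n, all_dtcs(n));
	return 0;
}
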
5850 +diff --git a/drivers/phy/phy-can-transceiver.c b/drivers/phy/phy-can-transceiver.c
5851 +index 95c6dbb52da72..ce511ad5d3690 100644
5852 +--- a/drivers/phy/phy-can-transceiver.c
5853 ++++ b/drivers/phy/phy-can-transceiver.c
5854 +@@ -99,6 +99,7 @@ static int can_transceiver_phy_probe(struct platform_device *pdev)
5855 + struct gpio_desc *standby_gpio;
5856 + struct gpio_desc *enable_gpio;
5857 + u32 max_bitrate = 0;
5858 ++ int err;
5859 +
5860 + can_transceiver_phy = devm_kzalloc(dev, sizeof(struct can_transceiver_phy), GFP_KERNEL);
5861 + if (!can_transceiver_phy)
5862 +@@ -124,8 +125,8 @@ static int can_transceiver_phy_probe(struct platform_device *pdev)
5863 + return PTR_ERR(phy);
5864 + }
5865 +
5866 +- device_property_read_u32(dev, "max-bitrate", &max_bitrate);
5867 +- if (!max_bitrate)
5868 ++ err = device_property_read_u32(dev, "max-bitrate", &max_bitrate);
5869 ++ if ((err != -EINVAL) && !max_bitrate)
5870 + dev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit\n");
5871 + phy->attrs.max_link_rate = max_bitrate;
5872 +
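
The phy-can-transceiver hunk above keys the warning on the read's return value: an absent optional property reports -EINVAL and should stay silent, while a present-but-zero "max-bitrate" still warrants the warning. A userspace model of the fixed check (read_u32_prop is a hypothetical stand-in for device_property_read_u32):

#include <errno.h>
#include <stdio.h>

static int read_u32_prop(int present, unsigned int raw, unsigned int *out)
{
	if (!present)
		return -EINVAL; /* property simply missing */
	*out = raw;
	return 0;
}

int main(void)
{
	unsigned int max_bitrate = 0;
	int err = read_u32_prop(0, 0, &max_bitrate); /* absent property */

	if (err != -EINVAL && !max_bitrate)
		printf("warn: invalid max bitrate, ignoring limit\n");
	else
		printf("no warning: property absent, not invalid\n");
	return 0;
}
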
5873 +diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
5874 +index e6ededc515239..a0bc10aa79618 100644
5875 +--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
5876 ++++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
5877 +@@ -485,8 +485,10 @@ static int rockchip_usb2phy_power_on(struct phy *phy)
5878 + return ret;
5879 +
5880 + ret = property_enable(base, &rport->port_cfg->phy_sus, false);
5881 +- if (ret)
5882 ++ if (ret) {
5883 ++ clk_disable_unprepare(rphy->clk480m);
5884 + return ret;
5885 ++ }
5886 +
5887 + /* waiting for the utmi_clk to become stable */
5888 + usleep_range(1500, 2000);
5889 +diff --git a/drivers/phy/sunplus/phy-sunplus-usb2.c b/drivers/phy/sunplus/phy-sunplus-usb2.c
5890 +index e827b79f6d493..56de41091d639 100644
5891 +--- a/drivers/phy/sunplus/phy-sunplus-usb2.c
5892 ++++ b/drivers/phy/sunplus/phy-sunplus-usb2.c
5893 +@@ -254,6 +254,9 @@ static int sp_usb_phy_probe(struct platform_device *pdev)
5894 + return PTR_ERR(usbphy->phy_regs);
5895 +
5896 + usbphy->moon4_res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "moon4");
5897 ++ if (!usbphy->moon4_res_mem)
5898 ++ return -EINVAL;
5899 ++
5900 + usbphy->moon4_regs = devm_ioremap(&pdev->dev, usbphy->moon4_res_mem->start,
5901 + resource_size(usbphy->moon4_res_mem));
5902 + if (!usbphy->moon4_regs)
5903 +diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
5904 +index 15a3bcf323086..b905902d57508 100644
5905 +--- a/drivers/phy/ti/Kconfig
5906 ++++ b/drivers/phy/ti/Kconfig
5907 +@@ -23,7 +23,7 @@ config PHY_DM816X_USB
5908 +
5909 + config PHY_AM654_SERDES
5910 + tristate "TI AM654 SERDES support"
5911 +- depends on OF && ARCH_K3 || COMPILE_TEST
5912 ++ depends on OF && (ARCH_K3 || COMPILE_TEST)
5913 + depends on COMMON_CLK
5914 + select GENERIC_PHY
5915 + select MULTIPLEXER
5916 +@@ -35,7 +35,7 @@ config PHY_AM654_SERDES
5917 +
5918 + config PHY_J721E_WIZ
5919 + tristate "TI J721E WIZ (SERDES Wrapper) support"
5920 +- depends on OF && ARCH_K3 || COMPILE_TEST
5921 ++ depends on OF && (ARCH_K3 || COMPILE_TEST)
5922 + depends on HAS_IOMEM && OF_ADDRESS
5923 + depends on COMMON_CLK
5924 + select GENERIC_PHY
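
Both Kconfig hunks above are precedence fixes: in Kconfig expressions, as in C, && binds tighter than ||, so "OF && ARCH_K3 || COMPILE_TEST" parsed as "(OF && ARCH_K3) || COMPILE_TEST" and let the symbols be enabled without OF under COMPILE_TEST. A C sketch of the same precedence, purely to show the two parses:

#include <stdio.h>

int main(void)
{
	int OF = 0, ARCH_K3 = 0, COMPILE_TEST = 1;

	int old_dep = (OF && ARCH_K3) || COMPILE_TEST; /* 1: OF not required */
	int new_dep = OF && (ARCH_K3 || COMPILE_TEST); /* 0: OF required */

	printf("old=%d new=%d\n", old_dep, new_dep);
	return 0;
}
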
5925 +diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
5926 +index da974ff2d75d0..5eeac92f610a0 100644
5927 +--- a/drivers/pinctrl/pinctrl-rockchip.c
5928 ++++ b/drivers/pinctrl/pinctrl-rockchip.c
5929 +@@ -926,19 +926,19 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
5930 + RK_MUXROUTE_PMU(0, RK_PB5, 4, 0x0110, WRITE_MASK_VAL(3, 2, 1)), /* PWM1 IO mux M1 */
5931 + RK_MUXROUTE_PMU(0, RK_PC1, 1, 0x0110, WRITE_MASK_VAL(5, 4, 0)), /* PWM2 IO mux M0 */
5932 + RK_MUXROUTE_PMU(0, RK_PB6, 4, 0x0110, WRITE_MASK_VAL(5, 4, 1)), /* PWM2 IO mux M1 */
5933 +- RK_MUXROUTE_PMU(0, RK_PB3, 2, 0x0300, WRITE_MASK_VAL(0, 0, 0)), /* CAN0 IO mux M0 */
5934 ++ RK_MUXROUTE_GRF(0, RK_PB3, 2, 0x0300, WRITE_MASK_VAL(0, 0, 0)), /* CAN0 IO mux M0 */
5935 + RK_MUXROUTE_GRF(2, RK_PA1, 4, 0x0300, WRITE_MASK_VAL(0, 0, 1)), /* CAN0 IO mux M1 */
5936 + RK_MUXROUTE_GRF(1, RK_PA1, 3, 0x0300, WRITE_MASK_VAL(2, 2, 0)), /* CAN1 IO mux M0 */
5937 + RK_MUXROUTE_GRF(4, RK_PC3, 3, 0x0300, WRITE_MASK_VAL(2, 2, 1)), /* CAN1 IO mux M1 */
5938 + RK_MUXROUTE_GRF(4, RK_PB5, 3, 0x0300, WRITE_MASK_VAL(4, 4, 0)), /* CAN2 IO mux M0 */
5939 + RK_MUXROUTE_GRF(2, RK_PB2, 4, 0x0300, WRITE_MASK_VAL(4, 4, 1)), /* CAN2 IO mux M1 */
5940 + RK_MUXROUTE_GRF(4, RK_PC4, 1, 0x0300, WRITE_MASK_VAL(6, 6, 0)), /* HPDIN IO mux M0 */
5941 +- RK_MUXROUTE_PMU(0, RK_PC2, 2, 0x0300, WRITE_MASK_VAL(6, 6, 1)), /* HPDIN IO mux M1 */
5942 ++ RK_MUXROUTE_GRF(0, RK_PC2, 2, 0x0300, WRITE_MASK_VAL(6, 6, 1)), /* HPDIN IO mux M1 */
5943 + RK_MUXROUTE_GRF(3, RK_PB1, 3, 0x0300, WRITE_MASK_VAL(8, 8, 0)), /* GMAC1 IO mux M0 */
5944 + RK_MUXROUTE_GRF(4, RK_PA7, 3, 0x0300, WRITE_MASK_VAL(8, 8, 1)), /* GMAC1 IO mux M1 */
5945 + RK_MUXROUTE_GRF(4, RK_PD1, 1, 0x0300, WRITE_MASK_VAL(10, 10, 0)), /* HDMITX IO mux M0 */
5946 +- RK_MUXROUTE_PMU(0, RK_PC7, 1, 0x0300, WRITE_MASK_VAL(10, 10, 1)), /* HDMITX IO mux M1 */
5947 +- RK_MUXROUTE_PMU(0, RK_PB6, 1, 0x0300, WRITE_MASK_VAL(14, 14, 0)), /* I2C2 IO mux M0 */
5948 ++ RK_MUXROUTE_GRF(0, RK_PC7, 1, 0x0300, WRITE_MASK_VAL(10, 10, 1)), /* HDMITX IO mux M1 */
5949 ++ RK_MUXROUTE_GRF(0, RK_PB6, 1, 0x0300, WRITE_MASK_VAL(14, 14, 0)), /* I2C2 IO mux M0 */
5950 + RK_MUXROUTE_GRF(4, RK_PB4, 1, 0x0300, WRITE_MASK_VAL(14, 14, 1)), /* I2C2 IO mux M1 */
5951 + RK_MUXROUTE_GRF(1, RK_PA0, 1, 0x0304, WRITE_MASK_VAL(0, 0, 0)), /* I2C3 IO mux M0 */
5952 + RK_MUXROUTE_GRF(3, RK_PB6, 4, 0x0304, WRITE_MASK_VAL(0, 0, 1)), /* I2C3 IO mux M1 */
5953 +@@ -964,7 +964,7 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
5954 + RK_MUXROUTE_GRF(4, RK_PC3, 1, 0x0308, WRITE_MASK_VAL(12, 12, 1)), /* PWM15 IO mux M1 */
5955 + RK_MUXROUTE_GRF(3, RK_PD2, 3, 0x0308, WRITE_MASK_VAL(14, 14, 0)), /* SDMMC2 IO mux M0 */
5956 + RK_MUXROUTE_GRF(3, RK_PA5, 5, 0x0308, WRITE_MASK_VAL(14, 14, 1)), /* SDMMC2 IO mux M1 */
5957 +- RK_MUXROUTE_PMU(0, RK_PB5, 2, 0x030c, WRITE_MASK_VAL(0, 0, 0)), /* SPI0 IO mux M0 */
5958 ++ RK_MUXROUTE_GRF(0, RK_PB5, 2, 0x030c, WRITE_MASK_VAL(0, 0, 0)), /* SPI0 IO mux M0 */
5959 + RK_MUXROUTE_GRF(2, RK_PD3, 3, 0x030c, WRITE_MASK_VAL(0, 0, 1)), /* SPI0 IO mux M1 */
5960 + RK_MUXROUTE_GRF(2, RK_PB5, 3, 0x030c, WRITE_MASK_VAL(2, 2, 0)), /* SPI1 IO mux M0 */
5961 + RK_MUXROUTE_GRF(3, RK_PC3, 3, 0x030c, WRITE_MASK_VAL(2, 2, 1)), /* SPI1 IO mux M1 */
5962 +@@ -973,8 +973,8 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
5963 + RK_MUXROUTE_GRF(4, RK_PB3, 4, 0x030c, WRITE_MASK_VAL(6, 6, 0)), /* SPI3 IO mux M0 */
5964 + RK_MUXROUTE_GRF(4, RK_PC2, 2, 0x030c, WRITE_MASK_VAL(6, 6, 1)), /* SPI3 IO mux M1 */
5965 + RK_MUXROUTE_GRF(2, RK_PB4, 2, 0x030c, WRITE_MASK_VAL(8, 8, 0)), /* UART1 IO mux M0 */
5966 +- RK_MUXROUTE_PMU(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(8, 8, 1)), /* UART1 IO mux M1 */
5967 +- RK_MUXROUTE_PMU(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(10, 10, 0)), /* UART2 IO mux M0 */
5968 ++ RK_MUXROUTE_GRF(3, RK_PD6, 4, 0x030c, WRITE_MASK_VAL(8, 8, 1)), /* UART1 IO mux M1 */
5969 ++ RK_MUXROUTE_GRF(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(10, 10, 0)), /* UART2 IO mux M0 */
5970 + RK_MUXROUTE_GRF(1, RK_PD5, 2, 0x030c, WRITE_MASK_VAL(10, 10, 1)), /* UART2 IO mux M1 */
5971 + RK_MUXROUTE_GRF(1, RK_PA1, 2, 0x030c, WRITE_MASK_VAL(12, 12, 0)), /* UART3 IO mux M0 */
5972 + RK_MUXROUTE_GRF(3, RK_PB7, 4, 0x030c, WRITE_MASK_VAL(12, 12, 1)), /* UART3 IO mux M1 */
5973 +@@ -1004,13 +1004,13 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
5974 + RK_MUXROUTE_GRF(3, RK_PD6, 5, 0x0314, WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */
5975 + RK_MUXROUTE_GRF(4, RK_PA0, 4, 0x0314, WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */
5976 + RK_MUXROUTE_GRF(3, RK_PC4, 5, 0x0314, WRITE_MASK_VAL(1, 0, 2)), /* PDM IO mux M2 */
5977 +- RK_MUXROUTE_PMU(0, RK_PA5, 3, 0x0314, WRITE_MASK_VAL(3, 2, 0)), /* PCIE20 IO mux M0 */
5978 ++ RK_MUXROUTE_GRF(0, RK_PA5, 3, 0x0314, WRITE_MASK_VAL(3, 2, 0)), /* PCIE20 IO mux M0 */
5979 + RK_MUXROUTE_GRF(2, RK_PD0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 1)), /* PCIE20 IO mux M1 */
5980 + RK_MUXROUTE_GRF(1, RK_PB0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 2)), /* PCIE20 IO mux M2 */
5981 +- RK_MUXROUTE_PMU(0, RK_PA4, 3, 0x0314, WRITE_MASK_VAL(5, 4, 0)), /* PCIE30X1 IO mux M0 */
5982 ++ RK_MUXROUTE_GRF(0, RK_PA4, 3, 0x0314, WRITE_MASK_VAL(5, 4, 0)), /* PCIE30X1 IO mux M0 */
5983 + RK_MUXROUTE_GRF(2, RK_PD2, 4, 0x0314, WRITE_MASK_VAL(5, 4, 1)), /* PCIE30X1 IO mux M1 */
5984 + RK_MUXROUTE_GRF(1, RK_PA5, 4, 0x0314, WRITE_MASK_VAL(5, 4, 2)), /* PCIE30X1 IO mux M2 */
5985 +- RK_MUXROUTE_PMU(0, RK_PA6, 2, 0x0314, WRITE_MASK_VAL(7, 6, 0)), /* PCIE30X2 IO mux M0 */
5986 ++ RK_MUXROUTE_GRF(0, RK_PA6, 2, 0x0314, WRITE_MASK_VAL(7, 6, 0)), /* PCIE30X2 IO mux M0 */
5987 + RK_MUXROUTE_GRF(2, RK_PD4, 4, 0x0314, WRITE_MASK_VAL(7, 6, 1)), /* PCIE30X2 IO mux M1 */
5988 + RK_MUXROUTE_GRF(4, RK_PC2, 4, 0x0314, WRITE_MASK_VAL(7, 6, 2)), /* PCIE30X2 IO mux M2 */
5989 + };
5990 +@@ -2436,10 +2436,19 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
5991 + case RK3308:
5992 + case RK3368:
5993 + case RK3399:
5994 ++ case RK3568:
5995 + case RK3588:
5996 + pull_type = bank->pull_type[pin_num / 8];
5997 + data >>= bit;
5998 + data &= (1 << RK3188_PULL_BITS_PER_PIN) - 1;
5999 ++ /*
6000 ++ * In the TRM, pull-up being 1 for everything except the GPIO0_D3-D6,
6001 ++ * where that pull up value becomes 3.
6002 ++ */
6003 ++ if (ctrl->type == RK3568 && bank->bank_num == 0 && pin_num >= 27 && pin_num <= 30) {
6004 ++ if (data == 3)
6005 ++ data = 1;
6006 ++ }
6007 +
6008 + return rockchip_pull_list[pull_type][data];
6009 + default:
6010 +@@ -2497,7 +2506,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
6011 + }
6012 + }
6013 + /*
6014 +- * In the TRM, pull-up being 1 for everything except the GPIO0_D0-D6,
6015 ++ * In the TRM, pull-up being 1 for everything except the GPIO0_D3-D6,
6016 + * where that pull up value becomes 3.
6017 + */
6018 + if (ctrl->type == RK3568 && bank->bank_num == 0 && pin_num >= 27 && pin_num <= 30) {
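
The pinctrl hunk above adds the read-side mirror of a remap the driver already did on writes: on RK3568 bank 0, pins 27-30 (GPIO0_D3-D6) encode pull-up as 3 rather than 1, so the raw register value 3 must map back to the generic pull-up index before the rockchip_pull_list lookup. A minimal sketch of the decode:

#include <stdio.h>

/* Model of the RK3568 special case on reads; values are the two-bit
 * pull field extracted from the register. */
static unsigned int rk3568_pull_decode(int bank, int pin, unsigned int data)
{
	if (bank == 0 && pin >= 27 && pin <= 30 && data == 3)
		return 1; /* 3 means pull-up on GPIO0_D3-D6 */
	return data;
}

int main(void)
{
	printf("bank0 pin28 raw 3 -> %u\n", rk3568_pull_decode(0, 28, 3));
	printf("bank1 pin28 raw 3 -> %u\n", rk3568_pull_decode(1, 28, 3));
	return 0;
}
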
6019 +diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
6020 +index ca33df7ea550e..9333f82cfa8a0 100644
6021 +--- a/drivers/platform/x86/apple-gmux.c
6022 ++++ b/drivers/platform/x86/apple-gmux.c
6023 +@@ -64,29 +64,6 @@ struct apple_gmux_data {
6024 +
6025 + static struct apple_gmux_data *apple_gmux_data;
6026 +
6027 +-/*
6028 +- * gmux port offsets. Many of these are not yet used, but may be in the
6029 +- * future, and it's useful to have them documented here anyhow.
6030 +- */
6031 +-#define GMUX_PORT_VERSION_MAJOR 0x04
6032 +-#define GMUX_PORT_VERSION_MINOR 0x05
6033 +-#define GMUX_PORT_VERSION_RELEASE 0x06
6034 +-#define GMUX_PORT_SWITCH_DISPLAY 0x10
6035 +-#define GMUX_PORT_SWITCH_GET_DISPLAY 0x11
6036 +-#define GMUX_PORT_INTERRUPT_ENABLE 0x14
6037 +-#define GMUX_PORT_INTERRUPT_STATUS 0x16
6038 +-#define GMUX_PORT_SWITCH_DDC 0x28
6039 +-#define GMUX_PORT_SWITCH_EXTERNAL 0x40
6040 +-#define GMUX_PORT_SWITCH_GET_EXTERNAL 0x41
6041 +-#define GMUX_PORT_DISCRETE_POWER 0x50
6042 +-#define GMUX_PORT_MAX_BRIGHTNESS 0x70
6043 +-#define GMUX_PORT_BRIGHTNESS 0x74
6044 +-#define GMUX_PORT_VALUE 0xc2
6045 +-#define GMUX_PORT_READ 0xd0
6046 +-#define GMUX_PORT_WRITE 0xd4
6047 +-
6048 +-#define GMUX_MIN_IO_LEN (GMUX_PORT_BRIGHTNESS + 4)
6049 +-
6050 + #define GMUX_INTERRUPT_ENABLE 0xff
6051 + #define GMUX_INTERRUPT_DISABLE 0x00
6052 +
6053 +@@ -249,23 +226,6 @@ static void gmux_write32(struct apple_gmux_data *gmux_data, int port,
6054 + gmux_pio_write32(gmux_data, port, val);
6055 + }
6056 +
6057 +-static bool gmux_is_indexed(struct apple_gmux_data *gmux_data)
6058 +-{
6059 +- u16 val;
6060 +-
6061 +- outb(0xaa, gmux_data->iostart + 0xcc);
6062 +- outb(0x55, gmux_data->iostart + 0xcd);
6063 +- outb(0x00, gmux_data->iostart + 0xce);
6064 +-
6065 +- val = inb(gmux_data->iostart + 0xcc) |
6066 +- (inb(gmux_data->iostart + 0xcd) << 8);
6067 +-
6068 +- if (val == 0x55aa)
6069 +- return true;
6070 +-
6071 +- return false;
6072 +-}
6073 +-
6074 + /**
6075 + * DOC: Backlight control
6076 + *
6077 +@@ -605,60 +565,43 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
6078 + int ret = -ENXIO;
6079 + acpi_status status;
6080 + unsigned long long gpe;
6081 ++ bool indexed = false;
6082 ++ u32 version;
6083 +
6084 + if (apple_gmux_data)
6085 + return -EBUSY;
6086 +
6087 ++ if (!apple_gmux_detect(pnp, &indexed)) {
6088 ++ pr_info("gmux device not present\n");
6089 ++ return -ENODEV;
6090 ++ }
6091 ++
6092 + gmux_data = kzalloc(sizeof(*gmux_data), GFP_KERNEL);
6093 + if (!gmux_data)
6094 + return -ENOMEM;
6095 + pnp_set_drvdata(pnp, gmux_data);
6096 +
6097 + res = pnp_get_resource(pnp, IORESOURCE_IO, 0);
6098 +- if (!res) {
6099 +- pr_err("Failed to find gmux I/O resource\n");
6100 +- goto err_free;
6101 +- }
6102 +-
6103 + gmux_data->iostart = res->start;
6104 + gmux_data->iolen = resource_size(res);
6105 +
6106 +- if (gmux_data->iolen < GMUX_MIN_IO_LEN) {
6107 +- pr_err("gmux I/O region too small (%lu < %u)\n",
6108 +- gmux_data->iolen, GMUX_MIN_IO_LEN);
6109 +- goto err_free;
6110 +- }
6111 +-
6112 + if (!request_region(gmux_data->iostart, gmux_data->iolen,
6113 + "Apple gmux")) {
6114 + pr_err("gmux I/O already in use\n");
6115 + goto err_free;
6116 + }
6117 +
6118 +- /*
6119 +- * Invalid version information may indicate either that the gmux
6120 +- * device isn't present or that it's a new one that uses indexed
6121 +- * io
6122 +- */
6123 +-
6124 +- ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR);
6125 +- ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR);
6126 +- ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE);
6127 +- if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) {
6128 +- if (gmux_is_indexed(gmux_data)) {
6129 +- u32 version;
6130 +- mutex_init(&gmux_data->index_lock);
6131 +- gmux_data->indexed = true;
6132 +- version = gmux_read32(gmux_data,
6133 +- GMUX_PORT_VERSION_MAJOR);
6134 +- ver_major = (version >> 24) & 0xff;
6135 +- ver_minor = (version >> 16) & 0xff;
6136 +- ver_release = (version >> 8) & 0xff;
6137 +- } else {
6138 +- pr_info("gmux device not present\n");
6139 +- ret = -ENODEV;
6140 +- goto err_release;
6141 +- }
6142 ++ if (indexed) {
6143 ++ mutex_init(&gmux_data->index_lock);
6144 ++ gmux_data->indexed = true;
6145 ++ version = gmux_read32(gmux_data, GMUX_PORT_VERSION_MAJOR);
6146 ++ ver_major = (version >> 24) & 0xff;
6147 ++ ver_minor = (version >> 16) & 0xff;
6148 ++ ver_release = (version >> 8) & 0xff;
6149 ++ } else {
6150 ++ ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR);
6151 ++ ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR);
6152 ++ ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE);
6153 + }
6154 + pr_info("Found gmux version %d.%d.%d [%s]\n", ver_major, ver_minor,
6155 + ver_release, (gmux_data->indexed ? "indexed" : "classic"));
6156 +diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
6157 +index c685a705b73dd..cb15acdf14a30 100644
6158 +--- a/drivers/platform/x86/asus-nb-wmi.c
6159 ++++ b/drivers/platform/x86/asus-nb-wmi.c
6160 +@@ -121,6 +121,10 @@ static struct quirk_entry quirk_asus_tablet_mode = {
6161 + .tablet_switch_mode = asus_wmi_lid_flip_rog_devid,
6162 + };
6163 +
6164 ++static struct quirk_entry quirk_asus_ignore_fan = {
6165 ++ .wmi_ignore_fan = true,
6166 ++};
6167 ++
6168 + static int dmi_matched(const struct dmi_system_id *dmi)
6169 + {
6170 + pr_info("Identified laptop model '%s'\n", dmi->ident);
6171 +@@ -473,6 +477,15 @@ static const struct dmi_system_id asus_quirks[] = {
6172 + },
6173 + .driver_data = &quirk_asus_tablet_mode,
6174 + },
6175 ++ {
6176 ++ .callback = dmi_matched,
6177 ++ .ident = "ASUS VivoBook E410MA",
6178 ++ .matches = {
6179 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
6180 ++ DMI_MATCH(DMI_PRODUCT_NAME, "E410MA"),
6181 ++ },
6182 ++ .driver_data = &quirk_asus_ignore_fan,
6183 ++ },
6184 + {},
6185 + };
6186 +
6187 +@@ -511,6 +524,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
6188 + { KE_KEY, 0x30, { KEY_VOLUMEUP } },
6189 + { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
6190 + { KE_KEY, 0x32, { KEY_MUTE } },
6191 ++ { KE_KEY, 0x33, { KEY_SCREENLOCK } },
6192 + { KE_KEY, 0x35, { KEY_SCREENLOCK } },
6193 + { KE_KEY, 0x38, { KEY_PROG3 } }, /* Armoury Crate */
6194 + { KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
6195 +@@ -544,6 +558,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
6196 + { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
6197 + { KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */
6198 + { KE_KEY, 0x82, { KEY_CAMERA } },
6199 ++ { KE_KEY, 0x85, { KEY_CAMERA } },
6200 + { KE_KEY, 0x86, { KEY_PROG1 } }, /* MyASUS Key */
6201 + { KE_KEY, 0x88, { KEY_RFKILL } }, /* Radio Toggle Key */
6202 + { KE_KEY, 0x8A, { KEY_PROG1 } }, /* Color enhancement mode */
6203 +diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
6204 +index f051b21653d61..02bf286924183 100644
6205 +--- a/drivers/platform/x86/asus-wmi.c
6206 ++++ b/drivers/platform/x86/asus-wmi.c
6207 +@@ -225,6 +225,7 @@ struct asus_wmi {
6208 +
6209 + int tablet_switch_event_code;
6210 + u32 tablet_switch_dev_id;
6211 ++ bool tablet_switch_inverted;
6212 +
6213 + enum fan_type fan_type;
6214 + enum fan_type gpu_fan_type;
6215 +@@ -493,6 +494,13 @@ static bool asus_wmi_dev_is_present(struct asus_wmi *asus, u32 dev_id)
6216 + }
6217 +
6218 + /* Input **********************************************************************/
6219 ++static void asus_wmi_tablet_sw_report(struct asus_wmi *asus, bool value)
6220 ++{
6221 ++ input_report_switch(asus->inputdev, SW_TABLET_MODE,
6222 ++ asus->tablet_switch_inverted ? !value : value);
6223 ++ input_sync(asus->inputdev);
6224 ++}
6225 ++
6226 + static void asus_wmi_tablet_sw_init(struct asus_wmi *asus, u32 dev_id, int event_code)
6227 + {
6228 + struct device *dev = &asus->platform_device->dev;
6229 +@@ -501,7 +509,7 @@ static void asus_wmi_tablet_sw_init(struct asus_wmi *asus, u32 dev_id, int event
6230 + result = asus_wmi_get_devstate_simple(asus, dev_id);
6231 + if (result >= 0) {
6232 + input_set_capability(asus->inputdev, EV_SW, SW_TABLET_MODE);
6233 +- input_report_switch(asus->inputdev, SW_TABLET_MODE, result);
6234 ++ asus_wmi_tablet_sw_report(asus, result);
6235 + asus->tablet_switch_dev_id = dev_id;
6236 + asus->tablet_switch_event_code = event_code;
6237 + } else if (result == -ENODEV) {
6238 +@@ -534,6 +542,7 @@ static int asus_wmi_input_init(struct asus_wmi *asus)
6239 + case asus_wmi_no_tablet_switch:
6240 + break;
6241 + case asus_wmi_kbd_dock_devid:
6242 ++ asus->tablet_switch_inverted = true;
6243 + asus_wmi_tablet_sw_init(asus, ASUS_WMI_DEVID_KBD_DOCK, NOTIFY_KBD_DOCK_CHANGE);
6244 + break;
6245 + case asus_wmi_lid_flip_devid:
6246 +@@ -573,10 +582,8 @@ static void asus_wmi_tablet_mode_get_state(struct asus_wmi *asus)
6247 + return;
6248 +
6249 + result = asus_wmi_get_devstate_simple(asus, asus->tablet_switch_dev_id);
6250 +- if (result >= 0) {
6251 +- input_report_switch(asus->inputdev, SW_TABLET_MODE, result);
6252 +- input_sync(asus->inputdev);
6253 +- }
6254 ++ if (result >= 0)
6255 ++ asus_wmi_tablet_sw_report(asus, result);
6256 + }
6257 +
6258 + /* dGPU ********************************************************************/
6259 +@@ -2243,7 +2250,9 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
6260 + asus->fan_type = FAN_TYPE_NONE;
6261 + asus->agfn_pwm = -1;
6262 +
6263 +- if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CPU_FAN_CTRL))
6264 ++ if (asus->driver->quirks->wmi_ignore_fan)
6265 ++ asus->fan_type = FAN_TYPE_NONE;
6266 ++ else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CPU_FAN_CTRL))
6267 + asus->fan_type = FAN_TYPE_SPEC83;
6268 + else if (asus_wmi_has_agfn_fan(asus))
6269 + asus->fan_type = FAN_TYPE_AGFN;
6270 +diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
6271 +index 65316998b898a..a478ebfd34dfa 100644
6272 +--- a/drivers/platform/x86/asus-wmi.h
6273 ++++ b/drivers/platform/x86/asus-wmi.h
6274 +@@ -38,6 +38,7 @@ struct quirk_entry {
6275 + bool store_backlight_power;
6276 + bool wmi_backlight_set_devstate;
6277 + bool wmi_force_als_set;
6278 ++ bool wmi_ignore_fan;
6279 + enum asus_wmi_tablet_switch_mode tablet_switch_mode;
6280 + int wapf;
6281 + /*
6282 +diff --git a/drivers/platform/x86/simatic-ipc.c b/drivers/platform/x86/simatic-ipc.c
6283 +index ca76076fc706a..b3622419cd1a4 100644
6284 +--- a/drivers/platform/x86/simatic-ipc.c
6285 ++++ b/drivers/platform/x86/simatic-ipc.c
6286 +@@ -46,7 +46,8 @@ static struct {
6287 + {SIMATIC_IPC_IPC427D, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_NONE},
6288 + {SIMATIC_IPC_IPC427E, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_427E},
6289 + {SIMATIC_IPC_IPC477E, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_427E},
6290 +- {SIMATIC_IPC_IPC427G, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G},
6291 ++ {SIMATIC_IPC_IPCBX_39A, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G},
6292 ++ {SIMATIC_IPC_IPCPX_39A, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_227G},
6293 + };
6294 +
6295 + static int register_platform_devices(u32 station_id)
6296 +diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
6297 +index 4e95d2243161a..7fd735c67a8e6 100644
6298 +--- a/drivers/platform/x86/thinkpad_acpi.c
6299 ++++ b/drivers/platform/x86/thinkpad_acpi.c
6300 +@@ -10500,8 +10500,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
6301 + if (err)
6302 + goto unlock;
6303 + }
6304 +- }
6305 +- if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
6306 ++ } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
6307 + err = dytc_command(DYTC_SET_COMMAND(DYTC_FUNCTION_PSC, perfmode, 1), &output);
6308 + if (err)
6309 + goto unlock;
6310 +@@ -10529,14 +10528,16 @@ static void dytc_profile_refresh(void)
6311 + err = dytc_command(DYTC_CMD_MMC_GET, &output);
6312 + else
6313 + err = dytc_cql_command(DYTC_CMD_GET, &output);
6314 +- } else if (dytc_capabilities & BIT(DYTC_FC_PSC))
6315 ++ funcmode = DYTC_FUNCTION_MMC;
6316 ++ } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
6317 + err = dytc_command(DYTC_CMD_GET, &output);
6318 +-
6319 ++ /* Check if we are PSC mode, or have AMT enabled */
6320 ++ funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF;
6321 ++ }
6322 + mutex_unlock(&dytc_mutex);
6323 + if (err)
6324 + return;
6325 +
6326 +- funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF;
6327 + perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
6328 + convert_dytc_to_profile(funcmode, perfmode, &profile);
6329 + if (profile != dytc_current_profile) {
6330 +diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
6331 +index baae3120efd05..f00995390fdfe 100644
6332 +--- a/drivers/platform/x86/touchscreen_dmi.c
6333 ++++ b/drivers/platform/x86/touchscreen_dmi.c
6334 +@@ -264,6 +264,23 @@ static const struct ts_dmi_data connect_tablet9_data = {
6335 + .properties = connect_tablet9_props,
6336 + };
6337 +
6338 ++static const struct property_entry csl_panther_tab_hd_props[] = {
6339 ++ PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
6340 ++ PROPERTY_ENTRY_U32("touchscreen-min-y", 20),
6341 ++ PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
6342 ++ PROPERTY_ENTRY_U32("touchscreen-size-y", 1526),
6343 ++ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
6344 ++ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
6345 ++ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-csl-panther-tab-hd.fw"),
6346 ++ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
6347 ++ { }
6348 ++};
6349 ++
6350 ++static const struct ts_dmi_data csl_panther_tab_hd_data = {
6351 ++ .acpi_name = "MSSL1680:00",
6352 ++ .properties = csl_panther_tab_hd_props,
6353 ++};
6354 ++
6355 + static const struct property_entry cube_iwork8_air_props[] = {
6356 + PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
6357 + PROPERTY_ENTRY_U32("touchscreen-min-y", 3),
6358 +@@ -1124,6 +1141,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
6359 + DMI_MATCH(DMI_PRODUCT_NAME, "Tablet 9"),
6360 + },
6361 + },
6362 ++ {
6363 ++ /* CSL Panther Tab HD */
6364 ++ .driver_data = (void *)&csl_panther_tab_hd_data,
6365 ++ .matches = {
6366 ++ DMI_MATCH(DMI_SYS_VENDOR, "CSL Computer GmbH & Co. KG"),
6367 ++ DMI_MATCH(DMI_PRODUCT_NAME, "CSL Panther Tab HD"),
6368 ++ },
6369 ++ },
6370 + {
6371 + /* CUBE iwork8 Air */
6372 + .driver_data = (void *)&cube_iwork8_air_data,
6373 +diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
6374 +index de176c2fbad96..2a52c990d4fec 100644
6375 +--- a/drivers/reset/Kconfig
6376 ++++ b/drivers/reset/Kconfig
6377 +@@ -257,7 +257,7 @@ config RESET_SUNXI
6378 +
6379 + config RESET_TI_SCI
6380 + tristate "TI System Control Interface (TI-SCI) reset driver"
6381 +- depends on TI_SCI_PROTOCOL || COMPILE_TEST
6382 ++ depends on TI_SCI_PROTOCOL || (COMPILE_TEST && TI_SCI_PROTOCOL=n)
6383 + help
6384 + This enables the reset driver support over TI System Control Interface
6385 + available on some new TI's SoCs. If you wish to use reset resources
6386 +diff --git a/drivers/reset/reset-uniphier-glue.c b/drivers/reset/reset-uniphier-glue.c
6387 +index 146fd5d45e99d..15abac9fc72c0 100644
6388 +--- a/drivers/reset/reset-uniphier-glue.c
6389 ++++ b/drivers/reset/reset-uniphier-glue.c
6390 +@@ -47,7 +47,6 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev)
6391 + struct device *dev = &pdev->dev;
6392 + struct uniphier_glue_reset_priv *priv;
6393 + struct resource *res;
6394 +- resource_size_t size;
6395 + int i, ret;
6396 +
6397 + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
6398 +@@ -60,7 +59,6 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev)
6399 + return -EINVAL;
6400 +
6401 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
6402 +- size = resource_size(res);
6403 + priv->rdata.membase = devm_ioremap_resource(dev, res);
6404 + if (IS_ERR(priv->rdata.membase))
6405 + return PTR_ERR(priv->rdata.membase);
6406 +@@ -96,7 +94,7 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev)
6407 +
6408 + spin_lock_init(&priv->rdata.lock);
6409 + priv->rdata.rcdev.owner = THIS_MODULE;
6410 +- priv->rdata.rcdev.nr_resets = size * BITS_PER_BYTE;
6411 ++ priv->rdata.rcdev.nr_resets = resource_size(res) * BITS_PER_BYTE;
6412 + priv->rdata.rcdev.ops = &reset_simple_ops;
6413 + priv->rdata.rcdev.of_node = dev->of_node;
6414 + priv->rdata.active_low = true;
6415 +diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
6416 +index 699b07abb6b0b..02fa3c00dcccf 100644
6417 +--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
6418 ++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
6419 +@@ -714,7 +714,7 @@ static int hisi_sas_init_device(struct domain_device *device)
6420 + int_to_scsilun(0, &lun);
6421 +
6422 + while (retry-- > 0) {
6423 +- rc = sas_clear_task_set(device, lun.scsi_lun);
6424 ++ rc = sas_abort_task_set(device, lun.scsi_lun);
6425 + if (rc == TMF_RESP_FUNC_COMPLETE) {
6426 + hisi_sas_release_task(hisi_hba, device);
6427 + break;
6428 +@@ -1334,7 +1334,7 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
6429 + device->linkrate = phy->sas_phy.linkrate;
6430 +
6431 + hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
6432 +- } else
6433 ++ } else if (!port->port_attached)
6434 + port->id = 0xff;
6435 + }
6436 + }
6437 +diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
6438 +index 4dbf51e2623ad..f6da34850af9d 100644
6439 +--- a/drivers/scsi/hpsa.c
6440 ++++ b/drivers/scsi/hpsa.c
6441 +@@ -5850,7 +5850,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
6442 + {
6443 + struct Scsi_Host *sh;
6444 +
6445 +- sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
6446 ++ sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info));
6447 + if (sh == NULL) {
6448 + dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
6449 + return -ENOMEM;
6450 +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
6451 +index f473c002fa4d6..bf834e72595a3 100644
6452 +--- a/drivers/scsi/scsi_transport_iscsi.c
6453 ++++ b/drivers/scsi/scsi_transport_iscsi.c
6454 +@@ -1677,6 +1677,13 @@ static const char *iscsi_session_state_name(int state)
6455 + return name;
6456 + }
6457 +
6458 ++static char *iscsi_session_target_state_name[] = {
6459 ++ [ISCSI_SESSION_TARGET_UNBOUND] = "UNBOUND",
6460 ++ [ISCSI_SESSION_TARGET_ALLOCATED] = "ALLOCATED",
6461 ++ [ISCSI_SESSION_TARGET_SCANNED] = "SCANNED",
6462 ++ [ISCSI_SESSION_TARGET_UNBINDING] = "UNBINDING",
6463 ++};
6464 ++
6465 + int iscsi_session_chkready(struct iscsi_cls_session *session)
6466 + {
6467 + int err;
6468 +@@ -1786,9 +1793,13 @@ static int iscsi_user_scan_session(struct device *dev, void *data)
6469 + if ((scan_data->channel == SCAN_WILD_CARD ||
6470 + scan_data->channel == 0) &&
6471 + (scan_data->id == SCAN_WILD_CARD ||
6472 +- scan_data->id == id))
6473 ++ scan_data->id == id)) {
6474 + scsi_scan_target(&session->dev, 0, id,
6475 + scan_data->lun, scan_data->rescan);
6476 ++ spin_lock_irqsave(&session->lock, flags);
6477 ++ session->target_state = ISCSI_SESSION_TARGET_SCANNED;
6478 ++ spin_unlock_irqrestore(&session->lock, flags);
6479 ++ }
6480 + }
6481 +
6482 + user_scan_exit:
6483 +@@ -1961,31 +1972,41 @@ static void __iscsi_unbind_session(struct work_struct *work)
6484 + struct iscsi_cls_host *ihost = shost->shost_data;
6485 + unsigned long flags;
6486 + unsigned int target_id;
6487 ++ bool remove_target = true;
6488 +
6489 + ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n");
6490 +
6491 + /* Prevent new scans and make sure scanning is not in progress */
6492 + mutex_lock(&ihost->mutex);
6493 + spin_lock_irqsave(&session->lock, flags);
6494 +- if (session->target_id == ISCSI_MAX_TARGET) {
6495 ++ if (session->target_state == ISCSI_SESSION_TARGET_ALLOCATED) {
6496 ++ remove_target = false;
6497 ++ } else if (session->target_state != ISCSI_SESSION_TARGET_SCANNED) {
6498 + spin_unlock_irqrestore(&session->lock, flags);
6499 + mutex_unlock(&ihost->mutex);
6500 +- goto unbind_session_exit;
6501 ++ ISCSI_DBG_TRANS_SESSION(session,
6502 ++ "Skipping target unbinding: Session is unbound/unbinding.\n");
6503 ++ return;
6504 + }
6505 +
6506 ++ session->target_state = ISCSI_SESSION_TARGET_UNBINDING;
6507 + target_id = session->target_id;
6508 + session->target_id = ISCSI_MAX_TARGET;
6509 + spin_unlock_irqrestore(&session->lock, flags);
6510 + mutex_unlock(&ihost->mutex);
6511 +
6512 +- scsi_remove_target(&session->dev);
6513 ++ if (remove_target)
6514 ++ scsi_remove_target(&session->dev);
6515 +
6516 + if (session->ida_used)
6517 + ida_free(&iscsi_sess_ida, target_id);
6518 +
6519 +-unbind_session_exit:
6520 + iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
6521 + ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
6522 ++
6523 ++ spin_lock_irqsave(&session->lock, flags);
6524 ++ session->target_state = ISCSI_SESSION_TARGET_UNBOUND;
6525 ++ spin_unlock_irqrestore(&session->lock, flags);
6526 + }
6527 +
6528 + static void __iscsi_destroy_session(struct work_struct *work)
6529 +@@ -2062,6 +2083,9 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
6530 + session->ida_used = true;
6531 + } else
6532 + session->target_id = target_id;
6533 ++ spin_lock_irqsave(&session->lock, flags);
6534 ++ session->target_state = ISCSI_SESSION_TARGET_ALLOCATED;
6535 ++ spin_unlock_irqrestore(&session->lock, flags);
6536 +
6537 + dev_set_name(&session->dev, "session%u", session->sid);
6538 + err = device_add(&session->dev);
6539 +@@ -4369,6 +4393,19 @@ iscsi_session_attr(def_taskmgmt_tmo, ISCSI_PARAM_DEF_TASKMGMT_TMO, 0);
6540 + iscsi_session_attr(discovery_parent_idx, ISCSI_PARAM_DISCOVERY_PARENT_IDX, 0);
6541 + iscsi_session_attr(discovery_parent_type, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 0);
6542 +
6543 ++static ssize_t
6544 ++show_priv_session_target_state(struct device *dev, struct device_attribute *attr,
6545 ++ char *buf)
6546 ++{
6547 ++ struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
6548 ++
6549 ++ return sysfs_emit(buf, "%s\n",
6550 ++ iscsi_session_target_state_name[session->target_state]);
6551 ++}
6552 ++
6553 ++static ISCSI_CLASS_ATTR(priv_sess, target_state, S_IRUGO,
6554 ++ show_priv_session_target_state, NULL);
6555 ++
6556 + static ssize_t
6557 + show_priv_session_state(struct device *dev, struct device_attribute *attr,
6558 + char *buf)
6559 +@@ -4471,6 +4508,7 @@ static struct attribute *iscsi_session_attrs[] = {
6560 + &dev_attr_sess_boot_target.attr,
6561 + &dev_attr_priv_sess_recovery_tmo.attr,
6562 + &dev_attr_priv_sess_state.attr,
6563 ++ &dev_attr_priv_sess_target_state.attr,
6564 + &dev_attr_priv_sess_creator.attr,
6565 + &dev_attr_sess_chap_out_idx.attr,
6566 + &dev_attr_sess_chap_in_idx.attr,
6567 +@@ -4584,6 +4622,8 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
6568 + return S_IRUGO | S_IWUSR;
6569 + else if (attr == &dev_attr_priv_sess_state.attr)
6570 + return S_IRUGO;
6571 ++ else if (attr == &dev_attr_priv_sess_target_state.attr)
6572 ++ return S_IRUGO;
6573 + else if (attr == &dev_attr_priv_sess_creator.attr)
6574 + return S_IRUGO;
6575 + else if (attr == &dev_attr_priv_sess_target_id.attr)
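
The scsi_transport_iscsi hunks above give each session an explicit target state (allocated at add, scanned after a target scan, unbinding/unbound during teardown) and expose its name through a new read-only sysfs attribute. A sketch of the enum-indexed name table and the transitions, with stand-in names for the ISCSI_SESSION_TARGET_* values:

#include <stdio.h>

enum target_state {
	TARGET_UNBOUND,
	TARGET_ALLOCATED,
	TARGET_SCANNED,
	TARGET_UNBINDING,
};

static const char *target_state_name[] = {
	[TARGET_UNBOUND]   = "UNBOUND",
	[TARGET_ALLOCATED] = "ALLOCATED",
	[TARGET_SCANNED]   = "SCANNED",
	[TARGET_UNBINDING] = "UNBINDING",
};

int main(void)
{
	/* add_session -> ALLOCATED, user scan -> SCANNED, unbind decides
	 * from the state whether scsi_remove_target() is needed at all. */
	enum target_state s = TARGET_ALLOCATED;

	printf("after add:  %s\n", target_state_name[s]);
	s = TARGET_SCANNED;
	printf("after scan: %s\n", target_state_name[s]);
	return 0;
}
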
6576 +diff --git a/drivers/soc/imx/imx8mp-blk-ctrl.c b/drivers/soc/imx/imx8mp-blk-ctrl.c
6577 +index 0e3b6ba22f943..0f13853901dfe 100644
6578 +--- a/drivers/soc/imx/imx8mp-blk-ctrl.c
6579 ++++ b/drivers/soc/imx/imx8mp-blk-ctrl.c
6580 +@@ -212,7 +212,7 @@ static void imx8mp_hdmi_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc,
6581 + break;
6582 + case IMX8MP_HDMIBLK_PD_LCDIF:
6583 + regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
6584 +- BIT(7) | BIT(16) | BIT(17) | BIT(18) |
6585 ++ BIT(16) | BIT(17) | BIT(18) |
6586 + BIT(19) | BIT(20));
6587 + regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11));
6588 + regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0,
6589 +@@ -241,6 +241,7 @@ static void imx8mp_hdmi_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc,
6590 + regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(1));
6591 + break;
6592 + case IMX8MP_HDMIBLK_PD_HDMI_TX_PHY:
6593 ++ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(7));
6594 + regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24));
6595 + regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12));
6596 + regmap_clear_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3));
6597 +@@ -270,7 +271,7 @@ static void imx8mp_hdmi_blk_ctrl_power_off(struct imx8mp_blk_ctrl *bc,
6598 + BIT(4) | BIT(5) | BIT(6));
6599 + regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11));
6600 + regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
6601 +- BIT(7) | BIT(16) | BIT(17) | BIT(18) |
6602 ++ BIT(16) | BIT(17) | BIT(18) |
6603 + BIT(19) | BIT(20));
6604 + break;
6605 + case IMX8MP_HDMIBLK_PD_PAI:
6606 +@@ -298,6 +299,7 @@ static void imx8mp_hdmi_blk_ctrl_power_off(struct imx8mp_blk_ctrl *bc,
6607 + case IMX8MP_HDMIBLK_PD_HDMI_TX_PHY:
6608 + regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3));
6609 + regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12));
6610 ++ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(7));
6611 + regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24));
6612 + break;
6613 + case IMX8MP_HDMIBLK_PD_HDCP:
6614 +@@ -590,7 +592,6 @@ static int imx8mp_blk_ctrl_probe(struct platform_device *pdev)
6615 + ret = PTR_ERR(domain->power_dev);
6616 + goto cleanup_pds;
6617 + }
6618 +- dev_set_name(domain->power_dev, "%s", data->name);
6619 +
6620 + domain->genpd.name = data->name;
6621 + domain->genpd.power_on = imx8mp_blk_ctrl_power_on;
6622 +diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
6623 +index 28144c699b0c3..32ed9dc88e455 100644
6624 +--- a/drivers/soc/imx/soc-imx8m.c
6625 ++++ b/drivers/soc/imx/soc-imx8m.c
6626 +@@ -66,8 +66,8 @@ static u32 __init imx8mq_soc_revision(void)
6627 + ocotp_base = of_iomap(np, 0);
6628 + WARN_ON(!ocotp_base);
6629 + clk = of_clk_get_by_name(np, NULL);
6630 +- if (!clk) {
6631 +- WARN_ON(!clk);
6632 ++ if (IS_ERR(clk)) {
6633 ++ WARN_ON(IS_ERR(clk));
6634 + return 0;
6635 + }
6636 +
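
The soc-imx8m hunk above fixes an error check that could never fire: clock getters return ERR_PTR()-encoded errors, not NULL, so "if (!clk)" misses every failure and IS_ERR() is required. A userspace model of the encoding (MAX_ERRNO mirrors the kernel's IS_ERR window):

#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err) { return (void *)err; }

static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *clk = ERR_PTR(-2); /* e.g. a failed lookup */

	printf("!clk   -> %d (misses the error)\n", !clk);
	printf("IS_ERR -> %d (catches it)\n", IS_ERR(clk));
	return 0;
}
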
6637 +diff --git a/drivers/soc/qcom/cpr.c b/drivers/soc/qcom/cpr.c
6638 +index e9b854ed1bdfd..144ea68e0920a 100644
6639 +--- a/drivers/soc/qcom/cpr.c
6640 ++++ b/drivers/soc/qcom/cpr.c
6641 +@@ -1708,12 +1708,16 @@ static int cpr_probe(struct platform_device *pdev)
6642 +
6643 + ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd);
6644 + if (ret)
6645 +- return ret;
6646 ++ goto err_remove_genpd;
6647 +
6648 + platform_set_drvdata(pdev, drv);
6649 + cpr_debugfs_init(drv);
6650 +
6651 + return 0;
6652 ++
6653 ++err_remove_genpd:
6654 ++ pm_genpd_remove(&drv->pd);
6655 ++ return ret;
6656 + }
6657 +
6658 + static int cpr_remove(struct platform_device *pdev)
6659 +diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
6660 +index 9e187f9c6c95e..d28b8bd5b70bc 100644
6661 +--- a/drivers/spi/spi-cadence-xspi.c
6662 ++++ b/drivers/spi/spi-cadence-xspi.c
6663 +@@ -177,7 +177,10 @@
6664 + #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op) ( \
6665 + FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, \
6666 + ((op)->data.nbytes >> 16) & 0xffff) | \
6667 +- FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, (op)->dummy.nbytes * 8))
6668 ++ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, \
6669 ++ (op)->dummy.buswidth != 0 ? \
6670 ++ (((op)->dummy.nbytes * 8) / (op)->dummy.buswidth) : \
6671 ++ 0))
6672 +
6673 + #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, chipsel) ( \
6674 + FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_BANK, chipsel) | \
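
The spi-cadence-xspi hunk above corrects the dummy-cycle count: the controller field wants cycles, the op carries dummy bytes and a bus width, and bytes * 8 is bits, so bits must still be divided by the bus width; the old "nbytes * 8" over-counted on multi-bit buses. A sketch of the arithmetic, with the zero-buswidth guard the hunk adds:

#include <stdio.h>

static unsigned int dummy_cycles(unsigned int nbytes, unsigned int buswidth)
{
	return buswidth ? (nbytes * 8) / buswidth : 0;
}

int main(void)
{
	printf("2 dummy bytes, x1 bus: %u cycles\n", dummy_cycles(2, 1));
	printf("2 dummy bytes, x4 bus: %u cycles\n", dummy_cycles(2, 4));
	return 0;
}
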
6675 +diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
6676 +index 6313e7d0cdf87..71c3db60e9687 100644
6677 +--- a/drivers/spi/spidev.c
6678 ++++ b/drivers/spi/spidev.c
6679 +@@ -601,7 +601,6 @@ static int spidev_open(struct inode *inode, struct file *filp)
6680 + if (!spidev->tx_buffer) {
6681 + spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
6682 + if (!spidev->tx_buffer) {
6683 +- dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
6684 + status = -ENOMEM;
6685 + goto err_find_dev;
6686 + }
6687 +@@ -610,7 +609,6 @@ static int spidev_open(struct inode *inode, struct file *filp)
6688 + if (!spidev->rx_buffer) {
6689 + spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
6690 + if (!spidev->rx_buffer) {
6691 +- dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
6692 + status = -ENOMEM;
6693 + goto err_alloc_rx_buf;
6694 + }
6695 +diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
6696 +index a4ee4661e9cc4..1cfeac16e7ac1 100644
6697 +--- a/drivers/thermal/gov_fair_share.c
6698 ++++ b/drivers/thermal/gov_fair_share.c
6699 +@@ -49,11 +49,7 @@ static int get_trip_level(struct thermal_zone_device *tz)
6700 + static long get_target_state(struct thermal_zone_device *tz,
6701 + struct thermal_cooling_device *cdev, int percentage, int level)
6702 + {
6703 +- unsigned long max_state;
6704 +-
6705 +- cdev->ops->get_max_state(cdev, &max_state);
6706 +-
6707 +- return (long)(percentage * level * max_state) / (100 * tz->num_trips);
6708 ++ return (long)(percentage * level * cdev->max_state) / (100 * tz->num_trips);
6709 + }
6710 +
6711 + /**
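
The gov_fair_share hunk above only changes where max_state comes from (the new cached cdev->max_state instead of a fresh get_max_state() call); the target formula itself is unchanged: percentage * level * max_state / (100 * num_trips). A worked example:

#include <stdio.h>

static long target_state(long percentage, long level, long max_state,
			 long num_trips)
{
	return (percentage * level * max_state) / (100 * num_trips);
}

int main(void)
{
	/* 50% share at trip level 2 of 4 trips, 10 cooling states:
	 * 50 * 2 * 10 / (100 * 4) = 2 (integer division) */
	printf("target = %ld\n", target_state(50, 2, 10, 4));
	return 0;
}
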
6712 +diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
6713 +index 62c0aa5d07837..0a4eaa307156d 100644
6714 +--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
6715 ++++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
6716 +@@ -44,11 +44,13 @@ static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
6717 + int trip, int *temp)
6718 + {
6719 + struct int34x_thermal_zone *d = zone->devdata;
6720 +- int i;
6721 ++ int i, ret = 0;
6722 +
6723 + if (d->override_ops && d->override_ops->get_trip_temp)
6724 + return d->override_ops->get_trip_temp(zone, trip, temp);
6725 +
6726 ++ mutex_lock(&d->trip_mutex);
6727 ++
6728 + if (trip < d->aux_trip_nr)
6729 + *temp = d->aux_trips[trip];
6730 + else if (trip == d->crt_trip_id)
6731 +@@ -66,10 +68,12 @@ static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
6732 + }
6733 + }
6734 + if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
6735 +- return -EINVAL;
6736 ++ ret = -EINVAL;
6737 + }
6738 +
6739 +- return 0;
6740 ++ mutex_unlock(&d->trip_mutex);
6741 ++
6742 ++ return ret;
6743 + }
6744 +
6745 + static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
6746 +@@ -77,11 +81,13 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
6747 + enum thermal_trip_type *type)
6748 + {
6749 + struct int34x_thermal_zone *d = zone->devdata;
6750 +- int i;
6751 ++ int i, ret = 0;
6752 +
6753 + if (d->override_ops && d->override_ops->get_trip_type)
6754 + return d->override_ops->get_trip_type(zone, trip, type);
6755 +
6756 ++ mutex_lock(&d->trip_mutex);
6757 ++
6758 + if (trip < d->aux_trip_nr)
6759 + *type = THERMAL_TRIP_PASSIVE;
6760 + else if (trip == d->crt_trip_id)
6761 +@@ -99,10 +105,12 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
6762 + }
6763 + }
6764 + if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
6765 +- return -EINVAL;
6766 ++ ret = -EINVAL;
6767 + }
6768 +
6769 +- return 0;
6770 ++ mutex_unlock(&d->trip_mutex);
6771 ++
6772 ++ return ret;
6773 + }
6774 +
6775 + static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
6776 +@@ -180,6 +188,8 @@ int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone)
6777 + int trip_cnt = int34x_zone->aux_trip_nr;
6778 + int i;
6779 +
6780 ++ mutex_lock(&int34x_zone->trip_mutex);
6781 ++
6782 + int34x_zone->crt_trip_id = -1;
6783 + if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_CRT",
6784 + &int34x_zone->crt_temp))
6785 +@@ -207,6 +217,8 @@ int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone)
6786 + int34x_zone->act_trips[i].valid = true;
6787 + }
6788 +
6789 ++ mutex_unlock(&int34x_zone->trip_mutex);
6790 ++
6791 + return trip_cnt;
6792 + }
6793 + EXPORT_SYMBOL_GPL(int340x_thermal_read_trips);
6794 +@@ -230,6 +242,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
6795 + if (!int34x_thermal_zone)
6796 + return ERR_PTR(-ENOMEM);
6797 +
6798 ++ mutex_init(&int34x_thermal_zone->trip_mutex);
6799 ++
6800 + int34x_thermal_zone->adev = adev;
6801 + int34x_thermal_zone->override_ops = override_ops;
6802 +
6803 +@@ -281,6 +295,7 @@ err_thermal_zone:
6804 + acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
6805 + kfree(int34x_thermal_zone->aux_trips);
6806 + err_trip_alloc:
6807 ++ mutex_destroy(&int34x_thermal_zone->trip_mutex);
6808 + kfree(int34x_thermal_zone);
6809 + return ERR_PTR(ret);
6810 + }
6811 +@@ -292,6 +307,7 @@ void int340x_thermal_zone_remove(struct int34x_thermal_zone
6812 + thermal_zone_device_unregister(int34x_thermal_zone->zone);
6813 + acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
6814 + kfree(int34x_thermal_zone->aux_trips);
6815 ++ mutex_destroy(&int34x_thermal_zone->trip_mutex);
6816 + kfree(int34x_thermal_zone);
6817 + }
6818 + EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
6819 +diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
6820 +index 3b4971df1b33b..8f9872afd0d3c 100644
6821 +--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
6822 ++++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
6823 +@@ -32,6 +32,7 @@ struct int34x_thermal_zone {
6824 + struct thermal_zone_device_ops *override_ops;
6825 + void *priv_data;
6826 + struct acpi_lpat_conversion_table *lpat_table;
6827 ++ struct mutex trip_mutex;
6828 + };
6829 +
6830 + struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *,
6831 +diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
6832 +index 615fdda3a5de7..1eae4ec719a8f 100644
6833 +--- a/drivers/thermal/thermal_core.c
6834 ++++ b/drivers/thermal/thermal_core.c
6835 +@@ -603,8 +603,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
6836 + struct thermal_instance *pos;
6837 + struct thermal_zone_device *pos1;
6838 + struct thermal_cooling_device *pos2;
6839 +- unsigned long max_state;
6840 +- int result, ret;
6841 ++ int result;
6842 +
6843 + if (trip >= tz->num_trips || trip < 0)
6844 + return -EINVAL;
6845 +@@ -621,15 +620,11 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
6846 + if (tz != pos1 || cdev != pos2)
6847 + return -EINVAL;
6848 +
6849 +- ret = cdev->ops->get_max_state(cdev, &max_state);
6850 +- if (ret)
6851 +- return ret;
6852 +-
6853 + /* lower default 0, upper default max_state */
6854 + lower = lower == THERMAL_NO_LIMIT ? 0 : lower;
6855 +- upper = upper == THERMAL_NO_LIMIT ? max_state : upper;
6856 ++ upper = upper == THERMAL_NO_LIMIT ? cdev->max_state : upper;
6857 +
6858 +- if (lower > upper || upper > max_state)
6859 ++ if (lower > upper || upper > cdev->max_state)
6860 + return -EINVAL;
6861 +
6862 + dev = kzalloc(sizeof(*dev), GFP_KERNEL);
6863 +@@ -896,12 +891,22 @@ __thermal_cooling_device_register(struct device_node *np,
6864 + cdev->updated = false;
6865 + cdev->device.class = &thermal_class;
6866 + cdev->devdata = devdata;
6867 ++
6868 ++ ret = cdev->ops->get_max_state(cdev, &cdev->max_state);
6869 ++ if (ret) {
6870 ++ kfree(cdev->type);
6871 ++ goto out_ida_remove;
6872 ++ }
6873 ++
6874 + thermal_cooling_device_setup_sysfs(cdev);
6875 ++
6876 + ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
6877 + if (ret) {
6878 ++ kfree(cdev->type);
6879 + thermal_cooling_device_destroy_sysfs(cdev);
6880 +- goto out_kfree_type;
6881 ++ goto out_ida_remove;
6882 + }
6883 ++
6884 + ret = device_register(&cdev->device);
6885 + if (ret)
6886 + goto out_kfree_type;
6887 +@@ -927,6 +932,8 @@ out_kfree_type:
6888 + thermal_cooling_device_destroy_sysfs(cdev);
6889 + kfree(cdev->type);
6890 + put_device(&cdev->device);
6891 ++
6892 ++ /* thermal_release() takes care of the rest */
6893 + cdev = NULL;
6894 + out_ida_remove:
6895 + ida_free(&thermal_cdev_ida, id);
6896 +diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
6897 +index ec495c7dff035..bd75961254615 100644
6898 +--- a/drivers/thermal/thermal_sysfs.c
6899 ++++ b/drivers/thermal/thermal_sysfs.c
6900 +@@ -589,13 +589,8 @@ static ssize_t max_state_show(struct device *dev, struct device_attribute *attr,
6901 + char *buf)
6902 + {
6903 + struct thermal_cooling_device *cdev = to_cooling_device(dev);
6904 +- unsigned long state;
6905 +- int ret;
6906 +
6907 +- ret = cdev->ops->get_max_state(cdev, &state);
6908 +- if (ret)
6909 +- return ret;
6910 +- return sprintf(buf, "%ld\n", state);
6911 ++ return sprintf(buf, "%ld\n", cdev->max_state);
6912 + }
6913 +
6914 + static ssize_t cur_state_show(struct device *dev, struct device_attribute *attr,
6915 +@@ -625,6 +620,10 @@ cur_state_store(struct device *dev, struct device_attribute *attr,
6916 + if ((long)state < 0)
6917 + return -EINVAL;
6918 +
6919 ++ /* Requested state should be less than max_state + 1 */
6920 ++ if (state > cdev->max_state)
6921 ++ return -EINVAL;
6922 ++
6923 + mutex_lock(&cdev->lock);
6924 +
6925 + result = cdev->ops->set_cur_state(cdev, state);
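
Taken together, the thermal hunks above read max_state once at cooling-device registration, cache it in the new cdev->max_state field, and let both the sysfs show path and cur_state_store() use the cached value, with stores above the maximum now rejected up front. A small model of the store-side check:

#include <stdio.h>

struct cooling_dev { unsigned long max_state; };

static int set_cur_state(struct cooling_dev *cdev, unsigned long state)
{
	/* requested state must be within 0..max_state */
	if (state > cdev->max_state)
		return -1; /* -EINVAL in the kernel */
	return 0;
}

int main(void)
{
	struct cooling_dev cdev = { .max_state = 4 }; /* cached at register */

	printf("state 4 -> %d\n", set_cur_state(&cdev, 4));
	printf("state 5 -> %d\n", set_cur_state(&cdev, 5));
	return 0;
}
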
6926 +diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
6927 +index b048357d21e36..fb5c9e2fc5348 100644
6928 +--- a/drivers/ufs/core/ufshcd.c
6929 ++++ b/drivers/ufs/core/ufshcd.c
6930 +@@ -1231,12 +1231,14 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
6931 + * clock scaling is in progress
6932 + */
6933 + ufshcd_scsi_block_requests(hba);
6934 ++ mutex_lock(&hba->wb_mutex);
6935 + down_write(&hba->clk_scaling_lock);
6936 +
6937 + if (!hba->clk_scaling.is_allowed ||
6938 + ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
6939 + ret = -EBUSY;
6940 + up_write(&hba->clk_scaling_lock);
6941 ++ mutex_unlock(&hba->wb_mutex);
6942 + ufshcd_scsi_unblock_requests(hba);
6943 + goto out;
6944 + }
6945 +@@ -1248,12 +1250,16 @@ out:
6946 + return ret;
6947 + }
6948 +
6949 +-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
6950 ++static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
6951 + {
6952 +- if (writelock)
6953 +- up_write(&hba->clk_scaling_lock);
6954 +- else
6955 +- up_read(&hba->clk_scaling_lock);
6956 ++ up_write(&hba->clk_scaling_lock);
6957 ++
6958 ++ /* Enable Write Booster if we have scaled up else disable it */
6959 ++ if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
6960 ++ ufshcd_wb_toggle(hba, scale_up);
6961 ++
6962 ++ mutex_unlock(&hba->wb_mutex);
6963 ++
6964 + ufshcd_scsi_unblock_requests(hba);
6965 + ufshcd_release(hba);
6966 + }
6967 +@@ -1270,7 +1276,6 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
6968 + static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
6969 + {
6970 + int ret = 0;
6971 +- bool is_writelock = true;
6972 +
6973 + ret = ufshcd_clock_scaling_prepare(hba);
6974 + if (ret)
6975 +@@ -1299,15 +1304,8 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
6976 + }
6977 + }
6978 +
6979 +- /* Enable Write Booster if we have scaled up else disable it */
6980 +- if (ufshcd_enable_wb_if_scaling_up(hba)) {
6981 +- downgrade_write(&hba->clk_scaling_lock);
6982 +- is_writelock = false;
6983 +- ufshcd_wb_toggle(hba, scale_up);
6984 +- }
6985 +-
6986 + out_unprepare:
6987 +- ufshcd_clock_scaling_unprepare(hba, is_writelock);
6988 ++ ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
6989 + return ret;
6990 + }
6991 +
6992 +@@ -6104,9 +6102,11 @@ static void ufshcd_force_error_recovery(struct ufs_hba *hba)
6993 +
6994 + static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
6995 + {
6996 ++ mutex_lock(&hba->wb_mutex);
6997 + down_write(&hba->clk_scaling_lock);
6998 + hba->clk_scaling.is_allowed = allow;
6999 + up_write(&hba->clk_scaling_lock);
7000 ++ mutex_unlock(&hba->wb_mutex);
7001 + }
7002 +
7003 + static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
7004 +@@ -9773,6 +9773,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
7005 + /* Initialize mutex for exception event control */
7006 + mutex_init(&hba->ee_ctrl_mutex);
7007 +
7008 ++ mutex_init(&hba->wb_mutex);
7009 + init_rwsem(&hba->clk_scaling_lock);
7010 +
7011 + ufshcd_init_clk_gating(hba);
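
The ufshcd hunks above introduce wb_mutex so the Write Booster toggle no longer needs the old downgrade_write() dance on clk_scaling_lock: the mutex is taken before the rwsem and only released after WB has been toggled. A rough pthreads model of that lock ordering (the locking primitives differ, but the nesting is the point):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t wb_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_rwlock_t clk_scaling_lock = PTHREAD_RWLOCK_INITIALIZER;

static void devfreq_scale(int scale_up, int err)
{
	pthread_mutex_lock(&wb_mutex);
	pthread_rwlock_wrlock(&clk_scaling_lock);

	/* ... scale gear and clocks under the write lock ... */

	pthread_rwlock_unlock(&clk_scaling_lock);

	/* WB toggle now happens under wb_mutex alone, after scaling */
	if (!err)
		printf("wb_toggle(%s)\n", scale_up ? "on" : "off");

	pthread_mutex_unlock(&wb_mutex);
}

int main(void)
{
	devfreq_scale(1, 0);
	return 0;
}
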
7012 +diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
7013 +index 03ededa86da1f..864fef540a394 100644
7014 +--- a/drivers/usb/dwc3/Kconfig
7015 ++++ b/drivers/usb/dwc3/Kconfig
7016 +@@ -3,6 +3,7 @@
7017 + config USB_DWC3
7018 + tristate "DesignWare USB3 DRD Core Support"
7019 + depends on (USB || USB_GADGET) && HAS_DMA
7020 ++ depends on (EXTCON || EXTCON=n)
7021 + select USB_XHCI_PLATFORM if USB_XHCI_HCD
7022 + select USB_ROLE_SWITCH if USB_DWC3_DUAL_ROLE
7023 + help
7024 +@@ -44,7 +45,6 @@ config USB_DWC3_GADGET
7025 + config USB_DWC3_DUAL_ROLE
7026 + bool "Dual Role mode"
7027 + depends on ((USB=y || USB=USB_DWC3) && (USB_GADGET=y || USB_GADGET=USB_DWC3))
7028 +- depends on (EXTCON=y || EXTCON=USB_DWC3)
7029 + help
7030 + This is the default mode of working of DWC3 controller where
7031 + both host and gadget features are enabled.
7032 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
7033 +index 73dc10a77cdea..523a961b910bb 100644
7034 +--- a/drivers/usb/gadget/function/f_fs.c
7035 ++++ b/drivers/usb/gadget/function/f_fs.c
7036 +@@ -279,6 +279,9 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
7037 + struct usb_request *req = ffs->ep0req;
7038 + int ret;
7039 +
7040 ++ if (!req)
7041 ++ return -EINVAL;
7042 ++
7043 + req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
7044 +
7045 + spin_unlock_irq(&ffs->ev.waitq.lock);
7046 +@@ -1892,10 +1895,14 @@ static void functionfs_unbind(struct ffs_data *ffs)
7047 + ENTER();
7048 +
7049 + if (!WARN_ON(!ffs->gadget)) {
7050 ++ /* dequeue before freeing ep0req */
7051 ++ usb_ep_dequeue(ffs->gadget->ep0, ffs->ep0req);
7052 ++ mutex_lock(&ffs->mutex);
7053 + usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
7054 + ffs->ep0req = NULL;
7055 + ffs->gadget = NULL;
7056 + clear_bit(FFS_FL_BOUND, &ffs->flags);
7057 ++ mutex_unlock(&ffs->mutex);
7058 + ffs_data_put(ffs);
7059 + }
7060 + }
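[The f_fs.c change pairs a NULL check on ep0req with clearing that pointer under ffs->mutex during unbind, so a late caller gets -EINVAL instead of a use-after-free. A userspace sketch of that "invalidate under lock, check before use" pattern; the names are stand-ins:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *ep0req;                 /* stands in for the USB request */

static int queue_wait(void)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (!ep0req)                    /* unbind already ran */
		ret = -EINVAL;
	else
		*ep0req = 1;                /* "use" the request */
	pthread_mutex_unlock(&lock);
	return ret;
}

static void unbind(void)
{
	pthread_mutex_lock(&lock);      /* free and clear under the lock */
	free(ep0req);
	ep0req = NULL;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	ep0req = calloc(1, sizeof(*ep0req));
	printf("use: %d\n", queue_wait());
	unbind();
	printf("after unbind: %d\n", queue_wait());  /* -EINVAL, not UAF */
	return 0;
}
]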
7061 +diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
7062 +index eabe519013e78..1292241d581a6 100644
7063 +--- a/drivers/usb/typec/ucsi/ucsi.c
7064 ++++ b/drivers/usb/typec/ucsi/ucsi.c
7065 +@@ -187,6 +187,7 @@ EXPORT_SYMBOL_GPL(ucsi_send_command);
7066 +
7067 + struct ucsi_work {
7068 + struct delayed_work work;
7069 ++ struct list_head node;
7070 + unsigned long delay;
7071 + unsigned int count;
7072 + struct ucsi_connector *con;
7073 +@@ -202,6 +203,7 @@ static void ucsi_poll_worker(struct work_struct *work)
7074 + mutex_lock(&con->lock);
7075 +
7076 + if (!con->partner) {
7077 ++ list_del(&uwork->node);
7078 + mutex_unlock(&con->lock);
7079 + kfree(uwork);
7080 + return;
7081 +@@ -209,10 +211,12 @@ static void ucsi_poll_worker(struct work_struct *work)
7082 +
7083 + ret = uwork->cb(con);
7084 +
7085 +- if (uwork->count-- && (ret == -EBUSY || ret == -ETIMEDOUT))
7086 ++ if (uwork->count-- && (ret == -EBUSY || ret == -ETIMEDOUT)) {
7087 + queue_delayed_work(con->wq, &uwork->work, uwork->delay);
7088 +- else
7089 ++ } else {
7090 ++ list_del(&uwork->node);
7091 + kfree(uwork);
7092 ++ }
7093 +
7094 + mutex_unlock(&con->lock);
7095 + }
7096 +@@ -236,6 +240,7 @@ static int ucsi_partner_task(struct ucsi_connector *con,
7097 + uwork->con = con;
7098 + uwork->cb = cb;
7099 +
7100 ++ list_add_tail(&uwork->node, &con->partner_tasks);
7101 + queue_delayed_work(con->wq, &uwork->work, delay);
7102 +
7103 + return 0;
7104 +@@ -1056,6 +1061,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
7105 + INIT_WORK(&con->work, ucsi_handle_connector_change);
7106 + init_completion(&con->complete);
7107 + mutex_init(&con->lock);
7108 ++ INIT_LIST_HEAD(&con->partner_tasks);
7109 + con->num = index + 1;
7110 + con->ucsi = ucsi;
7111 +
7112 +@@ -1420,8 +1426,20 @@ void ucsi_unregister(struct ucsi *ucsi)
7113 + ucsi_unregister_altmodes(&ucsi->connector[i],
7114 + UCSI_RECIPIENT_CON);
7115 + ucsi_unregister_port_psy(&ucsi->connector[i]);
7116 +- if (ucsi->connector[i].wq)
7117 ++
7118 ++ if (ucsi->connector[i].wq) {
7119 ++ struct ucsi_work *uwork;
7120 ++
7121 ++ mutex_lock(&ucsi->connector[i].lock);
7122 ++ /*
7123 ++ * queue delayed items immediately so they can execute
7124 ++ * and free themselves before the wq is destroyed
7125 ++ */
7126 ++ list_for_each_entry(uwork, &ucsi->connector[i].partner_tasks, node)
7127 ++ mod_delayed_work(ucsi->connector[i].wq, &uwork->work, 0);
7128 ++ mutex_unlock(&ucsi->connector[i].lock);
7129 + destroy_workqueue(ucsi->connector[i].wq);
7130 ++ }
7131 + typec_unregister_port(ucsi->connector[i].port);
7132 + }
7133 +
7134 +diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
7135 +index c968474ee5473..60ce9fb6e7450 100644
7136 +--- a/drivers/usb/typec/ucsi/ucsi.h
7137 ++++ b/drivers/usb/typec/ucsi/ucsi.h
7138 +@@ -322,6 +322,7 @@ struct ucsi_connector {
7139 + struct work_struct work;
7140 + struct completion complete;
7141 + struct workqueue_struct *wq;
7142 ++ struct list_head partner_tasks;
7143 +
7144 + struct typec_port *port;
7145 + struct typec_partner *partner;
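[The ucsi changes track every queued partner task on a per-connector list so unregister can force each pending item to run, and free itself, before destroy_workqueue(). A plain C stand-in for that bookkeeping, simplified to a singly linked list flushed at shutdown; names are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct task {
	struct task *next;
	void (*fn)(struct task *);
};

static struct task *pending;        /* stands in for con->partner_tasks */

static void say_hi(struct task *t) { printf("ran %p\n", (void *)t); }

static void queue_task(void (*fn)(struct task *))
{
	struct task *t = malloc(sizeof(*t));

	t->fn = fn;
	t->next = pending;              /* track it so teardown can find it */
	pending = t;
}

static void teardown(void)
{
	while (pending) {               /* run-and-free, like mod_delayed_work(.., 0) */
		struct task *t = pending;

		pending = t->next;
		t->fn(t);
		free(t);
	}
}

int main(void)
{
	queue_task(say_hi);
	queue_task(say_hi);
	teardown();                     /* nothing left to race with destroy */
	return 0;
}
]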
7146 +diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
7147 +index 23c24fe98c00d..2209372f236db 100644
7148 +--- a/drivers/vfio/vfio_iommu_type1.c
7149 ++++ b/drivers/vfio/vfio_iommu_type1.c
7150 +@@ -1856,24 +1856,33 @@ unwind:
7151 + * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
7152 + * hugetlbfs is in use.
7153 + */
7154 +-static void vfio_test_domain_fgsp(struct vfio_domain *domain)
7155 ++static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions)
7156 + {
7157 +- struct page *pages;
7158 + int ret, order = get_order(PAGE_SIZE * 2);
7159 ++ struct vfio_iova *region;
7160 ++ struct page *pages;
7161 ++ dma_addr_t start;
7162 +
7163 + pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
7164 + if (!pages)
7165 + return;
7166 +
7167 +- ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
7168 +- IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
7169 +- if (!ret) {
7170 +- size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
7171 ++ list_for_each_entry(region, regions, list) {
7172 ++ start = ALIGN(region->start, PAGE_SIZE * 2);
7173 ++ if (start >= region->end || (region->end - start < PAGE_SIZE * 2))
7174 ++ continue;
7175 +
7176 +- if (unmapped == PAGE_SIZE)
7177 +- iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
7178 +- else
7179 +- domain->fgsp = true;
7180 ++ ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
7181 ++ IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
7182 ++ if (!ret) {
7183 ++ size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);
7184 ++
7185 ++ if (unmapped == PAGE_SIZE)
7186 ++ iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE);
7187 ++ else
7188 ++ domain->fgsp = true;
7189 ++ }
7190 ++ break;
7191 + }
7192 +
7193 + __free_pages(pages, order);
7194 +@@ -2326,7 +2335,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
7195 + }
7196 + }
7197 +
7198 +- vfio_test_domain_fgsp(domain);
7199 ++ vfio_test_domain_fgsp(domain, &iova_copy);
7200 +
7201 + /* replay mappings on new domains */
7202 + ret = vfio_iommu_replay(iommu, domain);
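[The vfio fix stops probing at IOVA 0, which may be reserved, and instead picks the first 2-page-aligned address inside a valid IOVA region. A tiny demonstration of that alignment check, assuming a 4 KiB page and made-up region bounds:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t region_start = 0x1234, region_end = 0xffff;
	uint64_t start = ALIGN(region_start, PAGE_SIZE * 2);

	if (start < region_end && region_end - start >= PAGE_SIZE * 2)
		printf("probe at 0x%llx\n", (unsigned long long)start);
	else
		printf("region too small, skip\n");
	return 0;
}
]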
7203 +diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
7204 +index f2ae2e563dc54..4a2ddf730a3ac 100644
7205 +--- a/drivers/w1/w1.c
7206 ++++ b/drivers/w1/w1.c
7207 +@@ -1166,6 +1166,8 @@ int w1_process(void *data)
7208 + /* remainder if it woke up early */
7209 + unsigned long jremain = 0;
7210 +
7211 ++ atomic_inc(&dev->refcnt);
7212 ++
7213 + for (;;) {
7214 +
7215 + if (!jremain && dev->search_count) {
7216 +@@ -1193,8 +1195,10 @@ int w1_process(void *data)
7217 + */
7218 + mutex_unlock(&dev->list_mutex);
7219 +
7220 +- if (kthread_should_stop())
7221 ++ if (kthread_should_stop()) {
7222 ++ __set_current_state(TASK_RUNNING);
7223 + break;
7224 ++ }
7225 +
7226 + /* Only sleep when the search is active. */
7227 + if (dev->search_count) {
7228 +diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
7229 +index b3e1792d9c49f..3a71c5eb2f837 100644
7230 +--- a/drivers/w1/w1_int.c
7231 ++++ b/drivers/w1/w1_int.c
7232 +@@ -51,10 +51,9 @@ static struct w1_master *w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
7233 + dev->search_count = w1_search_count;
7234 + dev->enable_pullup = w1_enable_pullup;
7235 +
7236 +- /* 1 for w1_process to decrement
7237 +- * 1 for __w1_remove_master_device to decrement
7238 ++ /* For __w1_remove_master_device to decrement
7239 + */
7240 +- atomic_set(&dev->refcnt, 2);
7241 ++ atomic_set(&dev->refcnt, 1);
7242 +
7243 + INIT_LIST_HEAD(&dev->slist);
7244 + INIT_LIST_HEAD(&dev->async_list);
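[The w1 pair of hunks moves reference ownership: instead of the creator pre-charging a count for a kthread that may never run, w1_process takes its own reference on entry. A simplified userspace model of that shift, using C11 atomics; illustrative only:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcnt = 1;       /* creator's reference only */

static void put(void)
{
	if (atomic_fetch_sub(&refcnt, 1) == 1)
		printf("last put: free the device\n");
}

static void *worker(void *arg)
{
	(void)arg;
	atomic_fetch_add(&refcnt, 1);   /* thread owns its own reference */
	/* ... do work ... */
	put();                          /* drop it on exit */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, NULL);
	put();                          /* creator's reference */
	return 0;
}
]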
7245 +diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
7246 +index 1826e8e671251..9b569278788a4 100644
7247 +--- a/drivers/xen/pvcalls-front.c
7248 ++++ b/drivers/xen/pvcalls-front.c
7249 +@@ -225,6 +225,8 @@ again:
7250 + return IRQ_HANDLED;
7251 + }
7252 +
7253 ++static void free_active_ring(struct sock_mapping *map);
7254 ++
7255 + static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
7256 + struct sock_mapping *map)
7257 + {
7258 +@@ -240,7 +242,7 @@ static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
7259 + for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
7260 + gnttab_end_foreign_access(map->active.ring->ref[i], NULL);
7261 + gnttab_end_foreign_access(map->active.ref, NULL);
7262 +- free_page((unsigned long)map->active.ring);
7263 ++ free_active_ring(map);
7264 +
7265 + kfree(map);
7266 + }
7267 +diff --git a/fs/affs/file.c b/fs/affs/file.c
7268 +index cefa222f7881c..8daeed31e1af9 100644
7269 +--- a/fs/affs/file.c
7270 ++++ b/fs/affs/file.c
7271 +@@ -880,7 +880,7 @@ affs_truncate(struct inode *inode)
7272 + if (inode->i_size > AFFS_I(inode)->mmu_private) {
7273 + struct address_space *mapping = inode->i_mapping;
7274 + struct page *page;
7275 +- void *fsdata;
7276 ++ void *fsdata = NULL;
7277 + loff_t isize = inode->i_size;
7278 + int res;
7279 +
7280 +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
7281 +index 9e6d48ff45972..a3febabacec04 100644
7282 +--- a/fs/btrfs/ctree.h
7283 ++++ b/fs/btrfs/ctree.h
7284 +@@ -590,6 +590,12 @@ enum {
7285 + /* Indicate we have to finish a zone to do next allocation. */
7286 + BTRFS_FS_NEED_ZONE_FINISH,
7287 +
7288 ++ /*
7289 ++ * Indicate metadata over-commit is disabled. This is set when active
7290 ++ * zone tracking is needed.
7291 ++ */
7292 ++ BTRFS_FS_NO_OVERCOMMIT,
7293 ++
7294 + #if BITS_PER_LONG == 32
7295 + /* Indicate if we have error/warn message printed on 32bit systems */
7296 + BTRFS_FS_32BIT_ERROR,
7297 +diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
7298 +index f171bf8756336..65c010159fb5f 100644
7299 +--- a/fs/btrfs/space-info.c
7300 ++++ b/fs/btrfs/space-info.c
7301 +@@ -404,7 +404,8 @@ int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
7302 + return 0;
7303 +
7304 + used = btrfs_space_info_used(space_info, true);
7305 +- if (btrfs_is_zoned(fs_info) && (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
7306 ++ if (test_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags) &&
7307 ++ (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
7308 + avail = 0;
7309 + else
7310 + avail = calc_available_free_space(fs_info, space_info, flush);
7311 +diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
7312 +index c9e2b0c853099..056f002263db5 100644
7313 +--- a/fs/btrfs/zoned.c
7314 ++++ b/fs/btrfs/zoned.c
7315 +@@ -538,6 +538,8 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
7316 + }
7317 + atomic_set(&zone_info->active_zones_left,
7318 + max_active_zones - nactive);
7319 ++ /* Overcommit does not work well with active zone tracking. */
7320 ++ set_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags);
7321 + }
7322 +
7323 + /* Validate superblock log */
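[The btrfs hunks replace an is-zoned test in the hot reservation path with a capability-style flag set once at mount when active-zone tracking is enabled. A minimal model of that flag gating, with C11 atomics standing in for set_bit()/test_bit(); names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { FS_NO_OVERCOMMIT = 1u << 0 };

static atomic_uint fs_flags;

static void mount_zoned(bool active_zone_tracking)
{
	if (active_zone_tracking)
		atomic_fetch_or(&fs_flags, FS_NO_OVERCOMMIT);
}

static unsigned long available(unsigned long calc)
{
	if (atomic_load(&fs_flags) & FS_NO_OVERCOMMIT)
		return 0;                   /* never promise space we can't back */
	return calc;
}

int main(void)
{
	mount_zoned(true);
	printf("avail = %lu\n", available(1024));   /* prints 0 */
	return 0;
}
]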
7324 +diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
7325 +index 5db73c0f792a5..cbc18b4a9cb20 100644
7326 +--- a/fs/cifs/cifsencrypt.c
7327 ++++ b/fs/cifs/cifsencrypt.c
7328 +@@ -278,6 +278,7 @@ build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
7329 + * ( for NTLMSSP_AV_NB_DOMAIN_NAME followed by NTLMSSP_AV_EOL ) +
7330 + * unicode length of a netbios domain name
7331 + */
7332 ++ kfree_sensitive(ses->auth_key.response);
7333 + ses->auth_key.len = size + 2 * dlen;
7334 + ses->auth_key.response = kzalloc(ses->auth_key.len, GFP_KERNEL);
7335 + if (!ses->auth_key.response) {
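[The pattern repeated across these cifs hunks: before installing a new key buffer, free the old one and scrub its contents, so renegotiation neither leaks memory nor leaves key material in freed pages. A userspace sketch in which kfree_sensitive() is modelled with memset()+free(); real code should prefer explicit_bzero() since a plain memset before free can be optimized away:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct key { unsigned char *buf; size_t len; };

static void free_sensitive(struct key *k)
{
	if (k->buf) {
		memset(k->buf, 0, k->len);  /* scrub before freeing */
		free(k->buf);
		k->buf = NULL;
		k->len = 0;
	}
}

static int set_key(struct key *k, const void *src, size_t len)
{
	free_sensitive(k);              /* drop any previous key first */
	k->buf = malloc(len);
	if (!k->buf)
		return -1;
	memcpy(k->buf, src, len);
	k->len = len;
	return 0;
}

int main(void)
{
	struct key k = { 0 };

	set_key(&k, "old-secret", 10);
	set_key(&k, "new-secret", 10);  /* old buffer scrubbed, not leaked */
	free_sensitive(&k);
	return 0;
}
]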
7336 +diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
7337 +index e70915ad75410..4302dc75843cb 100644
7338 +--- a/fs/cifs/dfs_cache.c
7339 ++++ b/fs/cifs/dfs_cache.c
7340 +@@ -792,26 +792,27 @@ static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const
7341 + */
7342 + static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, const char *path)
7343 + {
7344 +- int rc;
7345 +- struct cache_entry *ce;
7346 + struct dfs_info3_param *refs = NULL;
7347 ++ struct cache_entry *ce;
7348 + int numrefs = 0;
7349 +- bool newent = false;
7350 ++ int rc;
7351 +
7352 + cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
7353 +
7354 +- down_write(&htable_rw_lock);
7355 ++ down_read(&htable_rw_lock);
7356 +
7357 + ce = lookup_cache_entry(path);
7358 +- if (!IS_ERR(ce)) {
7359 +- if (!cache_entry_expired(ce)) {
7360 +- dump_ce(ce);
7361 +- up_write(&htable_rw_lock);
7362 +- return 0;
7363 +- }
7364 +- } else {
7365 +- newent = true;
7366 ++ if (!IS_ERR(ce) && !cache_entry_expired(ce)) {
7367 ++ up_read(&htable_rw_lock);
7368 ++ return 0;
7369 + }
7370 ++ /*
7371 ++ * Unlock shared access as we don't want to hold any locks while getting
7372 ++ * a new referral. The @ses used for performing the I/O could be
7373 ++ * reconnecting and it acquires @htable_rw_lock to look up the dfs cache
7374 ++ * in order to failover -- if necessary.
7375 ++ */
7376 ++ up_read(&htable_rw_lock);
7377 +
7378 + /*
7379 + * Either the entry was not found, or it is expired.
7380 +@@ -819,19 +820,22 @@ static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, cons
7381 + */
7382 + rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
7383 + if (rc)
7384 +- goto out_unlock;
7385 ++ goto out;
7386 +
7387 + dump_refs(refs, numrefs);
7388 +
7389 +- if (!newent) {
7390 +- rc = update_cache_entry_locked(ce, refs, numrefs);
7391 +- goto out_unlock;
7392 ++ down_write(&htable_rw_lock);
7393 ++ /* Re-check as another task might have added or refreshed it already */
7394 ++ ce = lookup_cache_entry(path);
7395 ++ if (!IS_ERR(ce)) {
7396 ++ if (cache_entry_expired(ce))
7397 ++ rc = update_cache_entry_locked(ce, refs, numrefs);
7398 ++ } else {
7399 ++ rc = add_cache_entry_locked(refs, numrefs);
7400 + }
7401 +
7402 +- rc = add_cache_entry_locked(refs, numrefs);
7403 +-
7404 +-out_unlock:
7405 + up_write(&htable_rw_lock);
7406 ++out:
7407 + free_dfs_info_array(refs, numrefs);
7408 + return rc;
7409 + }
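[The dfs_cache rework above is classic double-checked locking with an rwsem: check under the read lock, drop every lock for the slow referral fetch, then re-check under the write lock because another thread may have refreshed the entry meanwhile. The same shape in miniature with a pthread rwlock; illustrative names throughout:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t cache_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool cached, expired;

static void slow_fetch(void) { /* network I/O, no locks held */ }

static int refresh(void)
{
	pthread_rwlock_rdlock(&cache_lock);
	if (cached && !expired) {           /* fast path: nothing to do */
		pthread_rwlock_unlock(&cache_lock);
		return 0;
	}
	pthread_rwlock_unlock(&cache_lock); /* drop before slow work */

	slow_fetch();

	pthread_rwlock_wrlock(&cache_lock);
	if (!cached || expired) {           /* re-check: we may have lost a race */
		cached = true;
		expired = false;
	}
	pthread_rwlock_unlock(&cache_lock);
	return 0;
}

int main(void)
{
	refresh();
	printf("cached=%d\n", cached);
	return 0;
}
]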
7410 +diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
7411 +index 0b842a07e1579..c47b254f0d1e2 100644
7412 +--- a/fs/cifs/sess.c
7413 ++++ b/fs/cifs/sess.c
7414 +@@ -815,6 +815,7 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
7415 + return -EINVAL;
7416 + }
7417 + if (tilen) {
7418 ++ kfree_sensitive(ses->auth_key.response);
7419 + ses->auth_key.response = kmemdup(bcc_ptr + tioffset, tilen,
7420 + GFP_KERNEL);
7421 + if (!ses->auth_key.response) {
7422 +@@ -1428,6 +1429,7 @@ sess_auth_kerberos(struct sess_data *sess_data)
7423 + goto out_put_spnego_key;
7424 + }
7425 +
7426 ++ kfree_sensitive(ses->auth_key.response);
7427 + ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
7428 + GFP_KERNEL);
7429 + if (!ses->auth_key.response) {
7430 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
7431 +index 92f39052d3117..2c9ffa921e6f6 100644
7432 +--- a/fs/cifs/smb2pdu.c
7433 ++++ b/fs/cifs/smb2pdu.c
7434 +@@ -1453,6 +1453,7 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
7435 +
7436 + /* keep session key if binding */
7437 + if (!is_binding) {
7438 ++ kfree_sensitive(ses->auth_key.response);
7439 + ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
7440 + GFP_KERNEL);
7441 + if (!ses->auth_key.response) {
7442 +diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
7443 +index 90789aaa6567e..8c816b25ce7c6 100644
7444 +--- a/fs/cifs/smbdirect.c
7445 ++++ b/fs/cifs/smbdirect.c
7446 +@@ -1405,6 +1405,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
7447 + destroy_workqueue(info->workqueue);
7448 + log_rdma_event(INFO, "rdma session destroyed\n");
7449 + kfree(info);
7450 ++ server->smbd_conn = NULL;
7451 + }
7452 +
7453 + /*
7454 +diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
7455 +index cf4871834ebb2..ee7c88c9b5afa 100644
7456 +--- a/fs/erofs/zdata.c
7457 ++++ b/fs/erofs/zdata.c
7458 +@@ -1047,12 +1047,12 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
7459 +
7460 + if (!be->decompressed_pages)
7461 + be->decompressed_pages =
7462 +- kvcalloc(be->nr_pages, sizeof(struct page *),
7463 +- GFP_KERNEL | __GFP_NOFAIL);
7464 ++ kcalloc(be->nr_pages, sizeof(struct page *),
7465 ++ GFP_KERNEL | __GFP_NOFAIL);
7466 + if (!be->compressed_pages)
7467 + be->compressed_pages =
7468 +- kvcalloc(pclusterpages, sizeof(struct page *),
7469 +- GFP_KERNEL | __GFP_NOFAIL);
7470 ++ kcalloc(pclusterpages, sizeof(struct page *),
7471 ++ GFP_KERNEL | __GFP_NOFAIL);
7472 +
7473 + z_erofs_parse_out_bvecs(be);
7474 + err2 = z_erofs_parse_in_bvecs(be, &overlapped);
7475 +@@ -1100,7 +1100,7 @@ out:
7476 + }
7477 + if (be->compressed_pages < be->onstack_pages ||
7478 + be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
7479 +- kvfree(be->compressed_pages);
7480 ++ kfree(be->compressed_pages);
7481 + z_erofs_fill_other_copies(be, err);
7482 +
7483 + for (i = 0; i < be->nr_pages; ++i) {
7484 +@@ -1119,7 +1119,7 @@ out:
7485 + }
7486 +
7487 + if (be->decompressed_pages != be->onstack_pages)
7488 +- kvfree(be->decompressed_pages);
7489 ++ kfree(be->decompressed_pages);
7490 +
7491 + pcl->length = 0;
7492 + pcl->partial = true;
7493 +diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
7494 +index fd0a288af299e..56be077e5d8ac 100644
7495 +--- a/fs/ksmbd/connection.c
7496 ++++ b/fs/ksmbd/connection.c
7497 +@@ -280,7 +280,7 @@ int ksmbd_conn_handler_loop(void *p)
7498 + {
7499 + struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
7500 + struct ksmbd_transport *t = conn->transport;
7501 +- unsigned int pdu_size;
7502 ++ unsigned int pdu_size, max_allowed_pdu_size;
7503 + char hdr_buf[4] = {0,};
7504 + int size;
7505 +
7506 +@@ -305,13 +305,26 @@ int ksmbd_conn_handler_loop(void *p)
7507 + pdu_size = get_rfc1002_len(hdr_buf);
7508 + ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
7509 +
7510 ++ if (conn->status == KSMBD_SESS_GOOD)
7511 ++ max_allowed_pdu_size =
7512 ++ SMB3_MAX_MSGSIZE + conn->vals->max_write_size;
7513 ++ else
7514 ++ max_allowed_pdu_size = SMB3_MAX_MSGSIZE;
7515 ++
7516 ++ if (pdu_size > max_allowed_pdu_size) {
7517 ++ pr_err_ratelimited("PDU length(%u) exceeds maximum allowed PDU size(%u) on connection(%d)\n",
7518 ++ pdu_size, max_allowed_pdu_size,
7519 ++ conn->status);
7520 ++ break;
7521 ++ }
7522 ++
7523 + /*
7524 + * Check if pdu size is valid (min : smb header size,
7525 + * max : 0x00FFFFFF).
7526 + */
7527 + if (pdu_size < __SMB2_HEADER_STRUCTURE_SIZE ||
7528 + pdu_size > MAX_STREAM_PROT_LEN) {
7529 +- continue;
7530 ++ break;
7531 + }
7532 +
7533 + /* 4 for rfc1002 length field */
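[The length check added above bounds incoming PDUs by connection state: a strict cap before authentication, a larger negotiated one afterwards, and an oversized header now drops the connection instead of being skipped. A compact model of that decision; SMB3_MAX_MSGSIZE is the patch's constant, the negotiated write size is an assumed value:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SMB3_MAX_MSGSIZE   (4 * 4096)
#define MAX_WRITE_SIZE     (1024 * 1024)   /* assumed negotiated value */

enum conn_state { SESS_NEW, SESS_GOOD };

static bool pdu_size_ok(enum conn_state st, uint32_t pdu_size)
{
	uint32_t max = (st == SESS_GOOD)
		? SMB3_MAX_MSGSIZE + MAX_WRITE_SIZE  /* authenticated peers */
		: SMB3_MAX_MSGSIZE;                  /* strict before auth */

	return pdu_size <= max;
}

int main(void)
{
	printf("%d\n", pdu_size_ok(SESS_NEW, 64 * 1024));   /* 0: reject */
	printf("%d\n", pdu_size_ok(SESS_GOOD, 64 * 1024));  /* 1: accept */
	return 0;
}
]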
7534 +diff --git a/fs/ksmbd/ksmbd_netlink.h b/fs/ksmbd/ksmbd_netlink.h
7535 +index ff07c67f4565e..ce866ff159bfe 100644
7536 +--- a/fs/ksmbd/ksmbd_netlink.h
7537 ++++ b/fs/ksmbd/ksmbd_netlink.h
7538 +@@ -105,7 +105,8 @@ struct ksmbd_startup_request {
7539 + __u32 sub_auth[3]; /* Subauth value for Security ID */
7540 + __u32 smb2_max_credits; /* MAX credits */
7541 + __u32 smbd_max_io_size; /* smbd read write size */
7542 +- __u32 reserved[127]; /* Reserved room */
7543 ++ __u32 max_connections; /* Number of maximum simultaneous connections */
7544 ++ __u32 reserved[126]; /* Reserved room */
7545 + __u32 ifc_list_sz; /* interfaces list size */
7546 + __s8 ____payload[];
7547 + };
7548 +diff --git a/fs/ksmbd/ndr.c b/fs/ksmbd/ndr.c
7549 +index 0ae8d08d85a87..4d9e0b54e3dbf 100644
7550 +--- a/fs/ksmbd/ndr.c
7551 ++++ b/fs/ksmbd/ndr.c
7552 +@@ -242,7 +242,7 @@ int ndr_decode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da)
7553 + return ret;
7554 +
7555 + if (da->version != 3 && da->version != 4) {
7556 +- pr_err("v%d version is not supported\n", da->version);
7557 ++ ksmbd_debug(VFS, "v%d version is not supported\n", da->version);
7558 + return -EINVAL;
7559 + }
7560 +
7561 +@@ -251,7 +251,7 @@ int ndr_decode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da)
7562 + return ret;
7563 +
7564 + if (da->version != version2) {
7565 +- pr_err("ndr version mismatched(version: %d, version2: %d)\n",
7566 ++ ksmbd_debug(VFS, "ndr version mismatched(version: %d, version2: %d)\n",
7567 + da->version, version2);
7568 + return -EINVAL;
7569 + }
7570 +@@ -457,7 +457,7 @@ int ndr_decode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl)
7571 + if (ret)
7572 + return ret;
7573 + if (acl->version != 4) {
7574 +- pr_err("v%d version is not supported\n", acl->version);
7575 ++ ksmbd_debug(VFS, "v%d version is not supported\n", acl->version);
7576 + return -EINVAL;
7577 + }
7578 +
7579 +@@ -465,7 +465,7 @@ int ndr_decode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl)
7580 + if (ret)
7581 + return ret;
7582 + if (acl->version != version2) {
7583 +- pr_err("ndr version mismatched(version: %d, version2: %d)\n",
7584 ++ ksmbd_debug(VFS, "ndr version mismatched(version: %d, version2: %d)\n",
7585 + acl->version, version2);
7586 + return -EINVAL;
7587 + }
7588 +diff --git a/fs/ksmbd/server.h b/fs/ksmbd/server.h
7589 +index ac9d932f8c8aa..db72781817603 100644
7590 +--- a/fs/ksmbd/server.h
7591 ++++ b/fs/ksmbd/server.h
7592 +@@ -41,6 +41,7 @@ struct ksmbd_server_config {
7593 + unsigned int share_fake_fscaps;
7594 + struct smb_sid domain_sid;
7595 + unsigned int auth_mechs;
7596 ++ unsigned int max_connections;
7597 +
7598 + char *conf[SERVER_CONF_WORK_GROUP + 1];
7599 + };
7600 +diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
7601 +index 533742ebcb379..9b16ee657b51a 100644
7602 +--- a/fs/ksmbd/smb2pdu.c
7603 ++++ b/fs/ksmbd/smb2pdu.c
7604 +@@ -8657,6 +8657,7 @@ int smb3_decrypt_req(struct ksmbd_work *work)
7605 + bool smb3_11_final_sess_setup_resp(struct ksmbd_work *work)
7606 + {
7607 + struct ksmbd_conn *conn = work->conn;
7608 ++ struct ksmbd_session *sess = work->sess;
7609 + struct smb2_hdr *rsp = smb2_get_msg(work->response_buf);
7610 +
7611 + if (conn->dialect < SMB30_PROT_ID)
7612 +@@ -8666,6 +8667,7 @@ bool smb3_11_final_sess_setup_resp(struct ksmbd_work *work)
7613 + rsp = ksmbd_resp_buf_next(work);
7614 +
7615 + if (le16_to_cpu(rsp->Command) == SMB2_SESSION_SETUP_HE &&
7616 ++ sess->user && !user_guest(sess->user) &&
7617 + rsp->Status == STATUS_SUCCESS)
7618 + return true;
7619 + return false;
7620 +diff --git a/fs/ksmbd/smb2pdu.h b/fs/ksmbd/smb2pdu.h
7621 +index 092fdd3f87505..f4baa9800f6ee 100644
7622 +--- a/fs/ksmbd/smb2pdu.h
7623 ++++ b/fs/ksmbd/smb2pdu.h
7624 +@@ -24,8 +24,9 @@
7625 +
7626 + #define SMB21_DEFAULT_IOSIZE (1024 * 1024)
7627 + #define SMB3_DEFAULT_TRANS_SIZE (1024 * 1024)
7628 +-#define SMB3_MIN_IOSIZE (64 * 1024)
7629 +-#define SMB3_MAX_IOSIZE (8 * 1024 * 1024)
7630 ++#define SMB3_MIN_IOSIZE (64 * 1024)
7631 ++#define SMB3_MAX_IOSIZE (8 * 1024 * 1024)
7632 ++#define SMB3_MAX_MSGSIZE (4 * 4096)
7633 +
7634 + /*
7635 + * Definitions for SMB2 Protocol Data Units (network frames)
7636 +diff --git a/fs/ksmbd/transport_ipc.c b/fs/ksmbd/transport_ipc.c
7637 +index c9aca21637d5b..40c721f9227e4 100644
7638 +--- a/fs/ksmbd/transport_ipc.c
7639 ++++ b/fs/ksmbd/transport_ipc.c
7640 +@@ -308,6 +308,9 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
7641 + if (req->smbd_max_io_size)
7642 + init_smbd_max_io_size(req->smbd_max_io_size);
7643 +
7644 ++ if (req->max_connections)
7645 ++ server_conf.max_connections = req->max_connections;
7646 ++
7647 + ret = ksmbd_set_netbios_name(req->netbios_name);
7648 + ret |= ksmbd_set_server_string(req->server_string);
7649 + ret |= ksmbd_set_work_group(req->work_group);
7650 +diff --git a/fs/ksmbd/transport_tcp.c b/fs/ksmbd/transport_tcp.c
7651 +index 4c6bd0b699791..603893fd87f57 100644
7652 +--- a/fs/ksmbd/transport_tcp.c
7653 ++++ b/fs/ksmbd/transport_tcp.c
7654 +@@ -15,6 +15,8 @@
7655 + #define IFACE_STATE_DOWN BIT(0)
7656 + #define IFACE_STATE_CONFIGURED BIT(1)
7657 +
7658 ++static atomic_t active_num_conn;
7659 ++
7660 + struct interface {
7661 + struct task_struct *ksmbd_kthread;
7662 + struct socket *ksmbd_socket;
7663 +@@ -185,8 +187,10 @@ static int ksmbd_tcp_new_connection(struct socket *client_sk)
7664 + struct tcp_transport *t;
7665 +
7666 + t = alloc_transport(client_sk);
7667 +- if (!t)
7668 ++ if (!t) {
7669 ++ sock_release(client_sk);
7670 + return -ENOMEM;
7671 ++ }
7672 +
7673 + csin = KSMBD_TCP_PEER_SOCKADDR(KSMBD_TRANS(t)->conn);
7674 + if (kernel_getpeername(client_sk, csin) < 0) {
7675 +@@ -239,6 +243,15 @@ static int ksmbd_kthread_fn(void *p)
7676 + continue;
7677 + }
7678 +
7679 ++ if (server_conf.max_connections &&
7680 ++ atomic_inc_return(&active_num_conn) >= server_conf.max_connections) {
7681 ++ pr_info_ratelimited("Limit the maximum number of connections(%u)\n",
7682 ++ atomic_read(&active_num_conn));
7683 ++ atomic_dec(&active_num_conn);
7684 ++ sock_release(client_sk);
7685 ++ continue;
7686 ++ }
7687 ++
7688 + ksmbd_debug(CONN, "connect success: accepted new connection\n");
7689 + client_sk->sk->sk_rcvtimeo = KSMBD_TCP_RECV_TIMEOUT;
7690 + client_sk->sk->sk_sndtimeo = KSMBD_TCP_SEND_TIMEOUT;
7691 +@@ -368,6 +381,8 @@ static int ksmbd_tcp_writev(struct ksmbd_transport *t, struct kvec *iov,
7692 + static void ksmbd_tcp_disconnect(struct ksmbd_transport *t)
7693 + {
7694 + free_transport(TCP_TRANS(t));
7695 ++ if (server_conf.max_connections)
7696 ++ atomic_dec(&active_num_conn);
7697 + }
7698 +
7699 + static void tcp_destroy_socket(struct socket *ksmbd_socket)
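[The connection cap above uses increment-then-check: bump the counter first, and if that pushed it past the limit, undo the increment and refuse the socket; the decrement on disconnect keeps the counter balanced. A userspace model of that intent with C11 atomics (the comparison here is simplified to "cap at max"; the limit value is assumed):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int active_num_conn;
static int max_connections = 2;     /* assumed configured limit */

static bool accept_conn(void)
{
	if (max_connections &&
	    atomic_fetch_add(&active_num_conn, 1) + 1 > max_connections) {
		atomic_fetch_sub(&active_num_conn, 1);  /* roll back */
		return false;
	}
	return true;
}

static void disconnect(void)
{
	if (max_connections)
		atomic_fetch_sub(&active_num_conn, 1);
}

int main(void)
{
	printf("%d %d %d\n", accept_conn(), accept_conn(), accept_conn());
	disconnect();
	printf("after close: %d\n", accept_conn());
	return 0;
}
]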
7700 +diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
7701 +index ea6fb0e6b1655..142b3c928f76e 100644
7702 +--- a/fs/nfsd/filecache.c
7703 ++++ b/fs/nfsd/filecache.c
7704 +@@ -638,6 +638,39 @@ static struct shrinker nfsd_file_shrinker = {
7705 + .seeks = 1,
7706 + };
7707 +
7708 ++/**
7709 ++ * nfsd_file_cond_queue - conditionally unhash and queue a nfsd_file
7710 ++ * @nf: nfsd_file to attempt to queue
7711 ++ * @dispose: private list to queue successfully-put objects
7712 ++ *
7713 ++ * Unhash an nfsd_file, try to get a reference to it, and then put that
7714 ++ * reference. If it's the last reference, queue it to the dispose list.
7715 ++ */
7716 ++static void
7717 ++nfsd_file_cond_queue(struct nfsd_file *nf, struct list_head *dispose)
7718 ++ __must_hold(RCU)
7719 ++{
7720 ++ int decrement = 1;
7721 ++
7722 ++ /* If we raced with someone else unhashing, ignore it */
7723 ++ if (!nfsd_file_unhash(nf))
7724 ++ return;
7725 ++
7726 ++ /* If we can't get a reference, ignore it */
7727 ++ if (!nfsd_file_get(nf))
7728 ++ return;
7729 ++
7730 ++ /* Extra decrement if we remove from the LRU */
7731 ++ if (nfsd_file_lru_remove(nf))
7732 ++ ++decrement;
7733 ++
7734 ++ /* If refcount goes to 0, then put on the dispose list */
7735 ++ if (refcount_sub_and_test(decrement, &nf->nf_ref)) {
7736 ++ list_add(&nf->nf_lru, dispose);
7737 ++ trace_nfsd_file_closing(nf);
7738 ++ }
7739 ++}
7740 ++
7741 + /**
7742 + * nfsd_file_queue_for_close: try to close out any open nfsd_files for an inode
7743 + * @inode: inode on which to close out nfsd_files
7744 +@@ -665,30 +698,11 @@ nfsd_file_queue_for_close(struct inode *inode, struct list_head *dispose)
7745 +
7746 + rcu_read_lock();
7747 + do {
7748 +- int decrement = 1;
7749 +-
7750 + nf = rhashtable_lookup(&nfsd_file_rhash_tbl, &key,
7751 + nfsd_file_rhash_params);
7752 + if (!nf)
7753 + break;
7754 +-
7755 +- /* If we raced with someone else unhashing, ignore it */
7756 +- if (!nfsd_file_unhash(nf))
7757 +- continue;
7758 +-
7759 +- /* If we can't get a reference, ignore it */
7760 +- if (!nfsd_file_get(nf))
7761 +- continue;
7762 +-
7763 +- /* Extra decrement if we remove from the LRU */
7764 +- if (nfsd_file_lru_remove(nf))
7765 +- ++decrement;
7766 +-
7767 +- /* If refcount goes to 0, then put on the dispose list */
7768 +- if (refcount_sub_and_test(decrement, &nf->nf_ref)) {
7769 +- list_add(&nf->nf_lru, dispose);
7770 +- trace_nfsd_file_closing(nf);
7771 +- }
7772 ++ nfsd_file_cond_queue(nf, dispose);
7773 + } while (1);
7774 + rcu_read_unlock();
7775 + }
7776 +@@ -905,11 +919,8 @@ __nfsd_file_cache_purge(struct net *net)
7777 +
7778 + nf = rhashtable_walk_next(&iter);
7779 + while (!IS_ERR_OR_NULL(nf)) {
7780 +- if (!net || nf->nf_net == net) {
7781 +- nfsd_file_unhash(nf);
7782 +- nfsd_file_lru_remove(nf);
7783 +- list_add(&nf->nf_lru, &dispose);
7784 +- }
7785 ++ if (!net || nf->nf_net == net)
7786 ++ nfsd_file_cond_queue(nf, &dispose);
7787 + nf = rhashtable_walk_next(&iter);
7788 + }
7789 +
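[The helper factored out above hinges on dropping a variable number of references in one atomic step: one for the reference the caller just took, plus one more if the object was also removed from the LRU. A userspace sketch with a plain atomic counter standing in for refcount_sub_and_test(); the struct and field names are stand-ins:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct nf { atomic_int ref; bool on_lru; };

static bool cond_queue(struct nf *nf)
{
	int decrement = 1;              /* the reference this caller holds */

	if (nf->on_lru) {               /* LRU held its own reference */
		nf->on_lru = false;
		++decrement;
	}
	/* refcount_sub_and_test(): true when the count hits zero */
	return atomic_fetch_sub(&nf->ref, decrement) == decrement;
}

int main(void)
{
	struct nf nf = { .ref = 2, .on_lru = true };

	if (cond_queue(&nf))
		printf("last reference: queue for dispose\n");
	return 0;
}
]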
7790 +diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
7791 +index 30a08ec31a703..ba04ce9b9fa51 100644
7792 +--- a/fs/nfsd/nfs4proc.c
7793 ++++ b/fs/nfsd/nfs4proc.c
7794 +@@ -1331,6 +1331,7 @@ try_again:
7795 + /* allow 20secs for mount/unmount for now - revisit */
7796 + if (signal_pending(current) ||
7797 + (schedule_timeout(20*HZ) == 0)) {
7798 ++ finish_wait(&nn->nfsd_ssc_waitq, &wait);
7799 + kfree(work);
7800 + return nfserr_eagain;
7801 + }
7802 +diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
7803 +index f436d8847f085..91a95bfad0d1c 100644
7804 +--- a/fs/overlayfs/copy_up.c
7805 ++++ b/fs/overlayfs/copy_up.c
7806 +@@ -754,7 +754,7 @@ static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
7807 + if (!c->metacopy && c->stat.size) {
7808 + err = ovl_copy_up_file(ofs, c->dentry, tmpfile, c->stat.size);
7809 + if (err)
7810 +- return err;
7811 ++ goto out_fput;
7812 + }
7813 +
7814 + err = ovl_copy_up_metadata(c, temp);
7815 +@@ -973,6 +973,10 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
7816 + if (err)
7817 + return err;
7818 +
7819 ++ if (!kuid_has_mapping(current_user_ns(), ctx.stat.uid) ||
7820 ++ !kgid_has_mapping(current_user_ns(), ctx.stat.gid))
7821 ++ return -EOVERFLOW;
7822 ++
7823 + ctx.metacopy = ovl_need_meta_copy_up(dentry, ctx.stat.mode, flags);
7824 +
7825 + if (parent) {
7826 +diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
7827 +index 4f8c35206f7cd..6c2a2f21dbf00 100644
7828 +--- a/include/drm/drm_vma_manager.h
7829 ++++ b/include/drm/drm_vma_manager.h
7830 +@@ -74,6 +74,7 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
7831 + struct drm_vma_offset_node *node);
7832 +
7833 + int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag);
7834 ++int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag);
7835 + void drm_vma_node_revoke(struct drm_vma_offset_node *node,
7836 + struct drm_file *tag);
7837 + bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
7838 +diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h
7839 +index ddb10aa67b143..1f68b49bcd688 100644
7840 +--- a/include/linux/apple-gmux.h
7841 ++++ b/include/linux/apple-gmux.h
7842 +@@ -8,18 +8,118 @@
7843 + #define LINUX_APPLE_GMUX_H
7844 +
7845 + #include <linux/acpi.h>
7846 ++#include <linux/io.h>
7847 ++#include <linux/pnp.h>
7848 +
7849 + #define GMUX_ACPI_HID "APP000B"
7850 +
7851 ++/*
7852 ++ * gmux port offsets. Many of these are not yet used, but may be in the
7853 ++ * future, and it's useful to have them documented here anyhow.
7854 ++ */
7855 ++#define GMUX_PORT_VERSION_MAJOR 0x04
7856 ++#define GMUX_PORT_VERSION_MINOR 0x05
7857 ++#define GMUX_PORT_VERSION_RELEASE 0x06
7858 ++#define GMUX_PORT_SWITCH_DISPLAY 0x10
7859 ++#define GMUX_PORT_SWITCH_GET_DISPLAY 0x11
7860 ++#define GMUX_PORT_INTERRUPT_ENABLE 0x14
7861 ++#define GMUX_PORT_INTERRUPT_STATUS 0x16
7862 ++#define GMUX_PORT_SWITCH_DDC 0x28
7863 ++#define GMUX_PORT_SWITCH_EXTERNAL 0x40
7864 ++#define GMUX_PORT_SWITCH_GET_EXTERNAL 0x41
7865 ++#define GMUX_PORT_DISCRETE_POWER 0x50
7866 ++#define GMUX_PORT_MAX_BRIGHTNESS 0x70
7867 ++#define GMUX_PORT_BRIGHTNESS 0x74
7868 ++#define GMUX_PORT_VALUE 0xc2
7869 ++#define GMUX_PORT_READ 0xd0
7870 ++#define GMUX_PORT_WRITE 0xd4
7871 ++
7872 ++#define GMUX_MIN_IO_LEN (GMUX_PORT_BRIGHTNESS + 4)
7873 ++
7874 + #if IS_ENABLED(CONFIG_APPLE_GMUX)
7875 ++static inline bool apple_gmux_is_indexed(unsigned long iostart)
7876 ++{
7877 ++ u16 val;
7878 ++
7879 ++ outb(0xaa, iostart + 0xcc);
7880 ++ outb(0x55, iostart + 0xcd);
7881 ++ outb(0x00, iostart + 0xce);
7882 ++
7883 ++ val = inb(iostart + 0xcc) | (inb(iostart + 0xcd) << 8);
7884 ++ if (val == 0x55aa)
7885 ++ return true;
7886 ++
7887 ++ return false;
7888 ++}
7889 +
7890 + /**
7891 +- * apple_gmux_present() - detect if gmux is built into the machine
7892 ++ * apple_gmux_detect() - detect if gmux is built into the machine
7893 ++ *
7894 ++ * @pnp_dev: Device to probe or NULL to use the first matching device
7895 ++ * @indexed_ret: Returns (by reference) if the gmux is indexed or not
7896 ++ *
7897 ++ * Detect if a supported gmux device is present by actually probing it.
7898 ++ * This avoids the false positives returned on some models by
7899 ++ * apple_gmux_present().
7900 ++ *
7901 ++ * Return: %true if a supported gmux ACPI device is detected and the kernel
7902 ++ * was configured with CONFIG_APPLE_GMUX, %false otherwise.
7903 ++ */
7904 ++static inline bool apple_gmux_detect(struct pnp_dev *pnp_dev, bool *indexed_ret)
7905 ++{
7906 ++ u8 ver_major, ver_minor, ver_release;
7907 ++ struct device *dev = NULL;
7908 ++ struct acpi_device *adev;
7909 ++ struct resource *res;
7910 ++ bool indexed = false;
7911 ++ bool ret = false;
7912 ++
7913 ++ if (!pnp_dev) {
7914 ++ adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1);
7915 ++ if (!adev)
7916 ++ return false;
7917 ++
7918 ++ dev = get_device(acpi_get_first_physical_node(adev));
7919 ++ acpi_dev_put(adev);
7920 ++ if (!dev)
7921 ++ return false;
7922 ++
7923 ++ pnp_dev = to_pnp_dev(dev);
7924 ++ }
7925 ++
7926 ++ res = pnp_get_resource(pnp_dev, IORESOURCE_IO, 0);
7927 ++ if (!res || resource_size(res) < GMUX_MIN_IO_LEN)
7928 ++ goto out;
7929 ++
7930 ++ /*
7931 ++ * Invalid version information may indicate either that the gmux
7932 ++ * device isn't present or that it's a new one that uses indexed io.
7933 ++ */
7934 ++ ver_major = inb(res->start + GMUX_PORT_VERSION_MAJOR);
7935 ++ ver_minor = inb(res->start + GMUX_PORT_VERSION_MINOR);
7936 ++ ver_release = inb(res->start + GMUX_PORT_VERSION_RELEASE);
7937 ++ if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) {
7938 ++ indexed = apple_gmux_is_indexed(res->start);
7939 ++ if (!indexed)
7940 ++ goto out;
7941 ++ }
7942 ++
7943 ++ if (indexed_ret)
7944 ++ *indexed_ret = indexed;
7945 ++
7946 ++ ret = true;
7947 ++out:
7948 ++ put_device(dev);
7949 ++ return ret;
7950 ++}
7951 ++
7952 ++/**
7953 ++ * apple_gmux_present() - check if gmux ACPI device is present
7954 + *
7955 + * Drivers may use this to activate quirks specific to dual GPU MacBook Pros
7956 + * and Mac Pros, e.g. for deferred probing, runtime pm and backlight.
7957 + *
7958 +- * Return: %true if gmux is present and the kernel was configured
7959 ++ * Return: %true if gmux ACPI device is present and the kernel was configured
7960 + * with CONFIG_APPLE_GMUX, %false otherwise.
7961 + */
7962 + static inline bool apple_gmux_present(void)
7963 +@@ -34,6 +134,11 @@ static inline bool apple_gmux_present(void)
7964 + return false;
7965 + }
7966 +
7967 ++static inline bool apple_gmux_detect(struct pnp_dev *pnp_dev, bool *indexed_ret)
7968 ++{
7969 ++ return false;
7970 ++}
7971 ++
7972 + #endif /* !CONFIG_APPLE_GMUX */
7973 +
7974 + #endif /* LINUX_APPLE_GMUX_H */
7975 +diff --git a/include/linux/platform_data/x86/simatic-ipc.h b/include/linux/platform_data/x86/simatic-ipc.h
7976 +index 632320ec8f082..a48bb52409777 100644
7977 +--- a/include/linux/platform_data/x86/simatic-ipc.h
7978 ++++ b/include/linux/platform_data/x86/simatic-ipc.h
7979 +@@ -32,7 +32,8 @@ enum simatic_ipc_station_ids {
7980 + SIMATIC_IPC_IPC477E = 0x00000A02,
7981 + SIMATIC_IPC_IPC127E = 0x00000D01,
7982 + SIMATIC_IPC_IPC227G = 0x00000F01,
7983 +- SIMATIC_IPC_IPC427G = 0x00001001,
7984 ++ SIMATIC_IPC_IPCBX_39A = 0x00001001,
7985 ++ SIMATIC_IPC_IPCPX_39A = 0x00001002,
7986 + };
7987 +
7988 + static inline u32 simatic_ipc_get_station_id(u8 *data, int max_len)
7989 +diff --git a/include/linux/thermal.h b/include/linux/thermal.h
7990 +index 9ecc128944a19..5e093602e8fcd 100644
7991 +--- a/include/linux/thermal.h
7992 ++++ b/include/linux/thermal.h
7993 +@@ -100,6 +100,7 @@ struct thermal_cooling_device_ops {
7994 + struct thermal_cooling_device {
7995 + int id;
7996 + char *type;
7997 ++ unsigned long max_state;
7998 + struct device device;
7999 + struct device_node *np;
8000 + void *devdata;
8001 +diff --git a/include/net/mac80211.h b/include/net/mac80211.h
8002 +index ac2bad57933f8..72b739dc6d530 100644
8003 +--- a/include/net/mac80211.h
8004 ++++ b/include/net/mac80211.h
8005 +@@ -1827,8 +1827,6 @@ struct ieee80211_vif_cfg {
8006 + * @drv_priv: data area for driver use, will always be aligned to
8007 + * sizeof(void \*).
8008 + * @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
8009 +- * @txqs_stopped: per AC flag to indicate that intermediate TXQs are stopped,
8010 +- * protected by fq->lock.
8011 + * @offload_flags: 802.3 -> 802.11 encapsulation offload flags, see
8012 + * &enum ieee80211_offload_flags.
8013 + * @mbssid_tx_vif: Pointer to the transmitting interface if MBSSID is enabled.
8014 +@@ -1857,8 +1855,6 @@ struct ieee80211_vif {
8015 + bool probe_req_reg;
8016 + bool rx_mcast_action_reg;
8017 +
8018 +- bool txqs_stopped[IEEE80211_NUM_ACS];
8019 +-
8020 + struct ieee80211_vif *mbssid_tx_vif;
8021 +
8022 + /* must be last */
8023 +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
8024 +index d5517719af4ef..af4aa66aaa4eb 100644
8025 +--- a/include/net/sch_generic.h
8026 ++++ b/include/net/sch_generic.h
8027 +@@ -1288,4 +1288,11 @@ void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);
8028 +
8029 + int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));
8030 +
8031 ++/* Make sure qdisc is no longer in SCHED state. */
8032 ++static inline void qdisc_synchronize(const struct Qdisc *q)
8033 ++{
8034 ++ while (test_bit(__QDISC_STATE_SCHED, &q->state))
8035 ++ msleep(1);
8036 ++}
8037 ++
8038 + #endif
8039 +diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
8040 +index cab52b0f11d0c..34c03707fb6ef 100644
8041 +--- a/include/scsi/scsi_transport_iscsi.h
8042 ++++ b/include/scsi/scsi_transport_iscsi.h
8043 +@@ -236,6 +236,14 @@ enum {
8044 + ISCSI_SESSION_FREE,
8045 + };
8046 +
8047 ++enum {
8048 ++ ISCSI_SESSION_TARGET_UNBOUND,
8049 ++ ISCSI_SESSION_TARGET_ALLOCATED,
8050 ++ ISCSI_SESSION_TARGET_SCANNED,
8051 ++ ISCSI_SESSION_TARGET_UNBINDING,
8052 ++ ISCSI_SESSION_TARGET_MAX,
8053 ++};
8054 ++
8055 + #define ISCSI_MAX_TARGET -1
8056 +
8057 + struct iscsi_cls_session {
8058 +@@ -264,6 +272,7 @@ struct iscsi_cls_session {
8059 + */
8060 + pid_t creator;
8061 + int state;
8062 ++ int target_state; /* session target bind state */
8063 + int sid; /* session id */
8064 + void *dd_data; /* LLD private data */
8065 + struct device dev; /* sysfs transport/container device */
8066 +diff --git a/include/uapi/linux/netfilter/nf_conntrack_sctp.h b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
8067 +index edc6ddab0de6a..2d6f80d75ae74 100644
8068 +--- a/include/uapi/linux/netfilter/nf_conntrack_sctp.h
8069 ++++ b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
8070 +@@ -15,7 +15,7 @@ enum sctp_conntrack {
8071 + SCTP_CONNTRACK_SHUTDOWN_RECD,
8072 + SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
8073 + SCTP_CONNTRACK_HEARTBEAT_SENT,
8074 +- SCTP_CONNTRACK_HEARTBEAT_ACKED,
8075 ++ SCTP_CONNTRACK_HEARTBEAT_ACKED, /* no longer used */
8076 + SCTP_CONNTRACK_MAX
8077 + };
8078 +
8079 +diff --git a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
8080 +index 6b20fb22717b2..aa805e6d4e284 100644
8081 +--- a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
8082 ++++ b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
8083 +@@ -94,7 +94,7 @@ enum ctattr_timeout_sctp {
8084 + CTA_TIMEOUT_SCTP_SHUTDOWN_RECD,
8085 + CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
8086 + CTA_TIMEOUT_SCTP_HEARTBEAT_SENT,
8087 +- CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED,
8088 ++ CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED, /* no longer used */
8089 + __CTA_TIMEOUT_SCTP_MAX
8090 + };
8091 + #define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1)
8092 +diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
8093 +index 9f28349ebcff5..2bb89290da63c 100644
8094 +--- a/include/ufs/ufshcd.h
8095 ++++ b/include/ufs/ufshcd.h
8096 +@@ -806,6 +806,7 @@ struct ufs_hba_monitor {
8097 + * @urgent_bkops_lvl: keeps track of urgent bkops level for device
8098 + * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
8099 + * device is known or not.
8100 ++ * @wb_mutex: used to serialize devfreq and sysfs write booster toggling
8101 + * @clk_scaling_lock: used to serialize device commands and clock scaling
8102 + * @desc_size: descriptor sizes reported by device
8103 + * @scsi_block_reqs_cnt: reference counting for scsi block requests
8104 +@@ -948,6 +949,7 @@ struct ufs_hba {
8105 + enum bkops_status urgent_bkops_lvl;
8106 + bool is_urgent_bkops_lvl_checked;
8107 +
8108 ++ struct mutex wb_mutex;
8109 + struct rw_semaphore clk_scaling_lock;
8110 + unsigned char desc_size[QUERY_DESC_IDN_MAX];
8111 + atomic_t scsi_block_reqs_cnt;
8112 +diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
8113 +index cea5de98c4232..862e05e6691de 100644
8114 +--- a/io_uring/io_uring.c
8115 ++++ b/io_uring/io_uring.c
8116 +@@ -823,15 +823,19 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx,
8117 + return filled;
8118 + }
8119 +
8120 +-static void __io_req_complete_put(struct io_kiocb *req)
8121 ++void io_req_complete_post(struct io_kiocb *req)
8122 + {
8123 ++ struct io_ring_ctx *ctx = req->ctx;
8124 ++
8125 ++ io_cq_lock(ctx);
8126 ++ if (!(req->flags & REQ_F_CQE_SKIP))
8127 ++ __io_fill_cqe_req(ctx, req);
8128 ++
8129 + /*
8130 + * If we're the last reference to this request, add to our locked
8131 + * free_list cache.
8132 + */
8133 + if (req_ref_put_and_test(req)) {
8134 +- struct io_ring_ctx *ctx = req->ctx;
8135 +-
8136 + if (req->flags & IO_REQ_LINK_FLAGS) {
8137 + if (req->flags & IO_DISARM_MASK)
8138 + io_disarm_next(req);
8139 +@@ -852,21 +856,6 @@ static void __io_req_complete_put(struct io_kiocb *req)
8140 + wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
8141 + ctx->locked_free_nr++;
8142 + }
8143 +-}
8144 +-
8145 +-void __io_req_complete_post(struct io_kiocb *req)
8146 +-{
8147 +- if (!(req->flags & REQ_F_CQE_SKIP))
8148 +- __io_fill_cqe_req(req->ctx, req);
8149 +- __io_req_complete_put(req);
8150 +-}
8151 +-
8152 +-void io_req_complete_post(struct io_kiocb *req)
8153 +-{
8154 +- struct io_ring_ctx *ctx = req->ctx;
8155 +-
8156 +- io_cq_lock(ctx);
8157 +- __io_req_complete_post(req);
8158 + io_cq_unlock_post(ctx);
8159 + }
8160 +
8161 +@@ -876,9 +865,12 @@ inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags)
8162 + }
8163 +
8164 + void io_req_complete_failed(struct io_kiocb *req, s32 res)
8165 ++ __must_hold(&ctx->uring_lock)
8166 + {
8167 + const struct io_op_def *def = &io_op_defs[req->opcode];
8168 +
8169 ++ lockdep_assert_held(&req->ctx->uring_lock);
8170 ++
8171 + req_set_fail(req);
8172 + io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
8173 + if (def->fail)
8174 +@@ -1133,7 +1125,7 @@ static void io_req_local_work_add(struct io_kiocb *req)
8175 + percpu_ref_put(&ctx->refs);
8176 + }
8177 +
8178 +-static inline void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
8179 ++void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
8180 + {
8181 + struct io_uring_task *tctx = req->task->io_uring;
8182 + struct io_ring_ctx *ctx = req->ctx;
8183 +@@ -1165,11 +1157,6 @@ static inline void __io_req_task_work_add(struct io_kiocb *req, bool allow_local
8184 + }
8185 + }
8186 +
8187 +-void io_req_task_work_add(struct io_kiocb *req)
8188 +-{
8189 +- __io_req_task_work_add(req, true);
8190 +-}
8191 +-
8192 + static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
8193 + {
8194 + struct llist_node *node;
8195 +@@ -1243,18 +1230,6 @@ int io_run_local_work(struct io_ring_ctx *ctx)
8196 + return ret;
8197 + }
8198 +
8199 +-static void io_req_tw_post(struct io_kiocb *req, bool *locked)
8200 +-{
8201 +- io_req_complete_post(req);
8202 +-}
8203 +-
8204 +-void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags)
8205 +-{
8206 +- io_req_set_res(req, res, cflags);
8207 +- req->io_task_work.func = io_req_tw_post;
8208 +- io_req_task_work_add(req);
8209 +-}
8210 +-
8211 + static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
8212 + {
8213 + /* not needed for normal modes, but SQPOLL depends on it */
8214 +@@ -1641,6 +1616,7 @@ static u32 io_get_sequence(struct io_kiocb *req)
8215 + }
8216 +
8217 + static __cold void io_drain_req(struct io_kiocb *req)
8218 ++ __must_hold(&ctx->uring_lock)
8219 + {
8220 + struct io_ring_ctx *ctx = req->ctx;
8221 + struct io_defer_entry *de;
8222 +@@ -1658,17 +1634,12 @@ queue:
8223 + }
8224 + spin_unlock(&ctx->completion_lock);
8225 +
8226 +- ret = io_req_prep_async(req);
8227 +- if (ret) {
8228 +-fail:
8229 +- io_req_complete_failed(req, ret);
8230 +- return;
8231 +- }
8232 + io_prep_async_link(req);
8233 + de = kmalloc(sizeof(*de), GFP_KERNEL);
8234 + if (!de) {
8235 + ret = -ENOMEM;
8236 +- goto fail;
8237 ++ io_req_complete_failed(req, ret);
8238 ++ return;
8239 + }
8240 +
8241 + spin_lock(&ctx->completion_lock);
8242 +@@ -1942,13 +1913,16 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
8243 + req->flags &= ~REQ_F_HARDLINK;
8244 + req->flags |= REQ_F_LINK;
8245 + io_req_complete_failed(req, req->cqe.res);
8246 +- } else if (unlikely(req->ctx->drain_active)) {
8247 +- io_drain_req(req);
8248 + } else {
8249 + int ret = io_req_prep_async(req);
8250 +
8251 +- if (unlikely(ret))
8252 ++ if (unlikely(ret)) {
8253 + io_req_complete_failed(req, ret);
8254 ++ return;
8255 ++ }
8256 ++
8257 ++ if (unlikely(req->ctx->drain_active))
8258 ++ io_drain_req(req);
8259 + else
8260 + io_queue_iowq(req, NULL);
8261 + }
8262 +@@ -2877,7 +2851,7 @@ static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
8263 + while (!list_empty(&list)) {
8264 + de = list_first_entry(&list, struct io_defer_entry, list);
8265 + list_del_init(&de->list);
8266 +- io_req_complete_failed(de->req, -ECANCELED);
8267 ++ io_req_task_queue_fail(de->req, -ECANCELED);
8268 + kfree(de);
8269 + }
8270 + return true;
8271 +diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
8272 +index 4334cd30c423d..90b675c65b840 100644
8273 +--- a/io_uring/io_uring.h
8274 ++++ b/io_uring/io_uring.h
8275 +@@ -33,7 +33,6 @@ int io_run_local_work(struct io_ring_ctx *ctx);
8276 + void io_req_complete_failed(struct io_kiocb *req, s32 res);
8277 + void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
8278 + void io_req_complete_post(struct io_kiocb *req);
8279 +-void __io_req_complete_post(struct io_kiocb *req);
8280 + bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
8281 + bool allow_overflow);
8282 + bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
8283 +@@ -51,10 +50,9 @@ static inline bool io_req_ffs_set(struct io_kiocb *req)
8284 + return req->flags & REQ_F_FIXED_FILE;
8285 + }
8286 +
8287 ++void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
8288 + bool io_is_uring_fops(struct file *file);
8289 + bool io_alloc_async_data(struct io_kiocb *req);
8290 +-void io_req_task_work_add(struct io_kiocb *req);
8291 +-void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
8292 + void io_req_task_queue(struct io_kiocb *req);
8293 + void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
8294 + void io_req_task_complete(struct io_kiocb *req, bool *locked);
8295 +@@ -83,6 +81,11 @@ bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
8296 + bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
8297 + bool cancel_all);
8298 +
8299 ++static inline void io_req_task_work_add(struct io_kiocb *req)
8300 ++{
8301 ++ __io_req_task_work_add(req, true);
8302 ++}
8303 ++
8304 + #define io_for_each_link(pos, head) \
8305 + for (pos = (head); pos; pos = pos->link)
8306 +
8307 +@@ -376,4 +379,11 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
8308 + ctx->submitter_task == current);
8309 + }
8310 +
8311 ++static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
8312 ++{
8313 ++ io_req_set_res(req, res, 0);
8314 ++ req->io_task_work.func = io_req_task_complete;
8315 ++ io_req_task_work_add(req);
8316 ++}
8317 ++
8318 + #endif
8319 +diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
8320 +index a49ccab262d53..7d5b544cfc305 100644
8321 +--- a/io_uring/msg_ring.c
8322 ++++ b/io_uring/msg_ring.c
8323 +@@ -30,6 +30,8 @@ static int io_msg_ring_data(struct io_kiocb *req)
8324 +
8325 + if (msg->src_fd || msg->dst_fd || msg->flags)
8326 + return -EINVAL;
8327 ++ if (target_ctx->flags & IORING_SETUP_R_DISABLED)
8328 ++ return -EBADFD;
8329 +
8330 + if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))
8331 + return 0;
8332 +@@ -84,6 +86,8 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
8333 +
8334 + if (target_ctx == ctx)
8335 + return -EINVAL;
8336 ++ if (target_ctx->flags & IORING_SETUP_R_DISABLED)
8337 ++ return -EBADFD;
8338 +
8339 + ret = io_double_lock_ctx(ctx, target_ctx, issue_flags);
8340 + if (unlikely(ret))
8341 +diff --git a/io_uring/net.c b/io_uring/net.c
8342 +index bdd2b4e370b35..9046e269e5a58 100644
8343 +--- a/io_uring/net.c
8344 ++++ b/io_uring/net.c
8345 +@@ -62,6 +62,7 @@ struct io_sr_msg {
8346 + u16 flags;
8347 + /* initialised and used only by !msg send variants */
8348 + u16 addr_len;
8349 ++ u16 buf_group;
8350 + void __user *addr;
8351 + /* used only for send zerocopy */
8352 + struct io_kiocb *notif;
8353 +@@ -565,6 +566,15 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
8354 + if (req->opcode == IORING_OP_RECV && sr->len)
8355 + return -EINVAL;
8356 + req->flags |= REQ_F_APOLL_MULTISHOT;
8357 ++ /*
8358 ++ * Store the buffer group for this multishot receive separately,
8359 ++ * as if we end up doing an io-wq based issue that selects a
8360 ++ * buffer, it has to be committed immediately and that will
8361 ++ * clear ->buf_list. This means we lose the link to the buffer
8362 ++ * list, and the eventual buffer put on completion then cannot
8363 ++ * restore it.
8364 ++ */
8365 ++ sr->buf_group = req->buf_index;
8366 + }
8367 +
8368 + #ifdef CONFIG_COMPAT
8369 +@@ -581,6 +591,7 @@ static inline void io_recv_prep_retry(struct io_kiocb *req)
8370 +
8371 + sr->done_io = 0;
8372 + sr->len = 0; /* get from the provided buffer */
8373 ++ req->buf_index = sr->buf_group;
8374 + }
8375 +
8376 + /*
8377 +diff --git a/io_uring/timeout.c b/io_uring/timeout.c
8378 +index 06200fe73a044..4c6a5666541cf 100644
8379 +--- a/io_uring/timeout.c
8380 ++++ b/io_uring/timeout.c
8381 +@@ -63,7 +63,7 @@ static bool io_kill_timeout(struct io_kiocb *req, int status)
8382 + atomic_set(&req->ctx->cq_timeouts,
8383 + atomic_read(&req->ctx->cq_timeouts) + 1);
8384 + list_del_init(&timeout->list);
8385 +- io_req_tw_post_queue(req, status, 0);
8386 ++ io_req_queue_tw_complete(req, status);
8387 + return true;
8388 + }
8389 + return false;
8390 +@@ -161,7 +161,7 @@ void io_disarm_next(struct io_kiocb *req)
8391 + req->flags &= ~REQ_F_ARM_LTIMEOUT;
8392 + if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
8393 + io_remove_next_linked(req);
8394 +- io_req_tw_post_queue(link, -ECANCELED, 0);
8395 ++ io_req_queue_tw_complete(link, -ECANCELED);
8396 + }
8397 + } else if (req->flags & REQ_F_LINK_TIMEOUT) {
8398 + struct io_ring_ctx *ctx = req->ctx;
8399 +@@ -170,7 +170,7 @@ void io_disarm_next(struct io_kiocb *req)
8400 + link = io_disarm_linked_timeout(req);
8401 + spin_unlock_irq(&ctx->timeout_lock);
8402 + if (link)
8403 +- io_req_tw_post_queue(link, -ECANCELED, 0);
8404 ++ io_req_queue_tw_complete(link, -ECANCELED);
8405 + }
8406 + if (unlikely((req->flags & REQ_F_FAIL) &&
8407 + !(req->flags & REQ_F_HARDLINK)))
8408 +@@ -284,11 +284,11 @@ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
8409 + ret = io_try_cancel(req->task->io_uring, &cd, issue_flags);
8410 + }
8411 + io_req_set_res(req, ret ?: -ETIME, 0);
8412 +- io_req_complete_post(req);
8413 ++ io_req_task_complete(req, locked);
8414 + io_put_req(prev);
8415 + } else {
8416 + io_req_set_res(req, -ETIME, 0);
8417 +- io_req_complete_post(req);
8418 ++ io_req_task_complete(req, locked);
8419 + }
8420 + }
8421 +
8422 +diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
8423 +index f39ee3e055897..c4811984fafa4 100644
8424 +--- a/kernel/bpf/hashtab.c
8425 ++++ b/kernel/bpf/hashtab.c
8426 +@@ -152,7 +152,7 @@ static inline int htab_lock_bucket(const struct bpf_htab *htab,
8427 + {
8428 + unsigned long flags;
8429 +
8430 +- hash = hash & HASHTAB_MAP_LOCK_MASK;
8431 ++ hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
8432 +
8433 + preempt_disable();
8434 + if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
8435 +@@ -171,7 +171,7 @@ static inline void htab_unlock_bucket(const struct bpf_htab *htab,
8436 + struct bucket *b, u32 hash,
8437 + unsigned long flags)
8438 + {
8439 +- hash = hash & HASHTAB_MAP_LOCK_MASK;
8440 ++ hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
8441 + raw_spin_unlock_irqrestore(&b->raw_lock, flags);
8442 + __this_cpu_dec(*(htab->map_locked[hash]));
8443 + preempt_enable();
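[The clamp added above matters when a map has fewer buckets than per-cpu lock slots: the lock index must be bounded by both masks, otherwise two hashes that share a bucket could land on different reentrancy counters and defeat the deadlock check. A small demonstration with power-of-two sizes; the lock count mirrors the kernel's, the rest is illustrative:

#include <stdint.h>
#include <stdio.h>

#define HASHTAB_MAP_LOCK_COUNT 8u
#define HASHTAB_MAP_LOCK_MASK  (HASHTAB_MAP_LOCK_COUNT - 1)

static uint32_t lock_index(uint32_t hash, uint32_t n_buckets)
{
	uint32_t mask = HASHTAB_MAP_LOCK_MASK < n_buckets - 1
		? HASHTAB_MAP_LOCK_MASK : n_buckets - 1;   /* min_t(u32, ...) */

	return hash & mask;
}

int main(void)
{
	/* With only 2 buckets, hashes 1 and 3 hit bucket 1; the clamp
	 * guarantees they also agree on lock slot 1. */
	printf("%u %u\n", lock_index(1, 2), lock_index(3, 2));
	return 0;
}
]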
8444 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
8445 +index b4d5b343c1912..398a0008aff72 100644
8446 +--- a/kernel/bpf/verifier.c
8447 ++++ b/kernel/bpf/verifier.c
8448 +@@ -3063,7 +3063,9 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
8449 + bool sanitize = reg && is_spillable_regtype(reg->type);
8450 +
8451 + for (i = 0; i < size; i++) {
8452 +- if (state->stack[spi].slot_type[i] == STACK_INVALID) {
8453 ++ u8 type = state->stack[spi].slot_type[i];
8454 ++
8455 ++ if (type != STACK_MISC && type != STACK_ZERO) {
8456 + sanitize = true;
8457 + break;
8458 + }
8459 +diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c
8460 +index dcec1b743c694..a60c561724be9 100644
8461 +--- a/kernel/kcsan/kcsan_test.c
8462 ++++ b/kernel/kcsan/kcsan_test.c
8463 +@@ -159,7 +159,7 @@ static bool __report_matches(const struct expect_report *r)
8464 + const bool is_assert = (r->access[0].type | r->access[1].type) & KCSAN_ACCESS_ASSERT;
8465 + bool ret = false;
8466 + unsigned long flags;
8467 +- typeof(observed.lines) expect;
8468 ++ typeof(*observed.lines) *expect;
8469 + const char *end;
8470 + char *cur;
8471 + int i;
8472 +@@ -168,6 +168,10 @@ static bool __report_matches(const struct expect_report *r)
8473 + if (!report_available())
8474 + return false;
8475 +
8476 ++ expect = kmalloc(sizeof(observed.lines), GFP_KERNEL);
8477 ++ if (WARN_ON(!expect))
8478 ++ return false;
8479 ++
8480 + /* Generate expected report contents. */
8481 +
8482 + /* Title */
8483 +@@ -253,6 +257,7 @@ static bool __report_matches(const struct expect_report *r)
8484 + strstr(observed.lines[2], expect[1])));
8485 + out:
8486 + spin_unlock_irqrestore(&observed.lock, flags);
8487 ++ kfree(expect);
8488 + return ret;
8489 + }
8490 +
8491 +diff --git a/kernel/module/main.c b/kernel/module/main.c
8492 +index d02d39c7174e1..7a627345d4fd9 100644
8493 +--- a/kernel/module/main.c
8494 ++++ b/kernel/module/main.c
8495 +@@ -2386,7 +2386,8 @@ static bool finished_loading(const char *name)
8496 + sched_annotate_sleep();
8497 + mutex_lock(&module_mutex);
8498 + mod = find_module_all(name, strlen(name), true);
8499 +- ret = !mod || mod->state == MODULE_STATE_LIVE;
8500 ++ ret = !mod || mod->state == MODULE_STATE_LIVE
8501 ++ || mod->state == MODULE_STATE_GOING;
8502 + mutex_unlock(&module_mutex);
8503 +
8504 + return ret;
8505 +@@ -2562,20 +2563,35 @@ static int add_unformed_module(struct module *mod)
8506 +
8507 + mod->state = MODULE_STATE_UNFORMED;
8508 +
8509 +-again:
8510 + mutex_lock(&module_mutex);
8511 + old = find_module_all(mod->name, strlen(mod->name), true);
8512 + if (old != NULL) {
8513 +- if (old->state != MODULE_STATE_LIVE) {
8514 ++ if (old->state == MODULE_STATE_COMING
8515 ++ || old->state == MODULE_STATE_UNFORMED) {
8516 + /* Wait in case it fails to load. */
8517 + mutex_unlock(&module_mutex);
8518 + err = wait_event_interruptible(module_wq,
8519 + finished_loading(mod->name));
8520 + if (err)
8521 + goto out_unlocked;
8522 +- goto again;
8523 ++
8524 ++ /* The module might have gone in the meantime. */
8525 ++ mutex_lock(&module_mutex);
8526 ++ old = find_module_all(mod->name, strlen(mod->name),
8527 ++ true);
8528 + }
8529 +- err = -EEXIST;
8530 ++
8531 ++ /*
8532 ++ * We are here only when the same module was being loaded. Do
8533 ++ * not try to load it again right now. It prevents long delays
8534 ++ * caused by serialized module load failures. It might happen
8535 ++ * when more devices of the same type trigger load of
8536 ++ * a particular module.
8537 ++ */
8538 ++ if (old && old->state == MODULE_STATE_LIVE)
8539 ++ err = -EEXIST;
8540 ++ else
8541 ++ err = -EBUSY;
8542 + goto out;
8543 + }
8544 + mod_update_bounds(mod);
8545 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
8546 +index 0f32acb05055f..2c3d0d49c80ea 100644
8547 +--- a/kernel/sched/fair.c
8548 ++++ b/kernel/sched/fair.c
8549 +@@ -7213,11 +7213,11 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
8550 + eenv_task_busy_time(&eenv, p, prev_cpu);
8551 +
8552 + for (; pd; pd = pd->next) {
8553 ++ unsigned long util_min = p_util_min, util_max = p_util_max;
8554 + unsigned long cpu_cap, cpu_thermal_cap, util;
8555 + unsigned long cur_delta, max_spare_cap = 0;
8556 + unsigned long rq_util_min, rq_util_max;
8557 +- unsigned long util_min, util_max;
8558 +- bool compute_prev_delta = false;
8559 ++ unsigned long prev_spare_cap = 0;
8560 + int max_spare_cap_cpu = -1;
8561 + unsigned long base_energy;
8562 +
8563 +@@ -7235,6 +7235,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
8564 + eenv.pd_cap = 0;
8565 +
8566 + for_each_cpu(cpu, cpus) {
8567 ++ struct rq *rq = cpu_rq(cpu);
8568 ++
8569 + eenv.pd_cap += cpu_thermal_cap;
8570 +
8571 + if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
8572 +@@ -7253,24 +7255,19 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
8573 + * much capacity we can get out of the CPU; this is
8574 + * aligned with sched_cpu_util().
8575 + */
8576 +- if (uclamp_is_used()) {
8577 +- if (uclamp_rq_is_idle(cpu_rq(cpu))) {
8578 +- util_min = p_util_min;
8579 +- util_max = p_util_max;
8580 +- } else {
8581 +- /*
8582 +- * Open code uclamp_rq_util_with() except for
8583 +- * the clamp() part. Ie: apply max aggregation
8584 +- * only. util_fits_cpu() logic requires to
8585 +- * operate on non clamped util but must use the
8586 +- * max-aggregated uclamp_{min, max}.
8587 +- */
8588 +- rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
8589 +- rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
8590 +-
8591 +- util_min = max(rq_util_min, p_util_min);
8592 +- util_max = max(rq_util_max, p_util_max);
8593 +- }
8594 ++ if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) {
8595 ++ /*
8596 ++ * Open code uclamp_rq_util_with() except for
8597 ++ * the clamp() part. Ie: apply max aggregation
8598 ++ * only. util_fits_cpu() logic requires to
8599 ++ * operate on non clamped util but must use the
8600 ++ * max-aggregated uclamp_{min, max}.
8601 ++ */
8602 ++ rq_util_min = uclamp_rq_get(rq, UCLAMP_MIN);
8603 ++ rq_util_max = uclamp_rq_get(rq, UCLAMP_MAX);
8604 ++
8605 ++ util_min = max(rq_util_min, p_util_min);
8606 ++ util_max = max(rq_util_max, p_util_max);
8607 + }
8608 + if (!util_fits_cpu(util, util_min, util_max, cpu))
8609 + continue;
8610 +@@ -7279,18 +7276,19 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
8611 +
8612 + if (cpu == prev_cpu) {
8613 + /* Always use prev_cpu as a candidate. */
8614 +- compute_prev_delta = true;
8615 ++ prev_spare_cap = cpu_cap;
8616 + } else if (cpu_cap > max_spare_cap) {
8617 + /*
8618 + * Find the CPU with the maximum spare capacity
8619 +- * in the performance domain.
8620 ++ * among the remaining CPUs in the performance
8621 ++ * domain.
8622 + */
8623 + max_spare_cap = cpu_cap;
8624 + max_spare_cap_cpu = cpu;
8625 + }
8626 + }
8627 +
8628 +- if (max_spare_cap_cpu < 0 && !compute_prev_delta)
8629 ++ if (max_spare_cap_cpu < 0 && prev_spare_cap == 0)
8630 + continue;
8631 +
8632 + eenv_pd_busy_time(&eenv, cpus, p);
8633 +@@ -7298,7 +7296,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
8634 + base_energy = compute_energy(&eenv, pd, cpus, p, -1);
8635 +
8636 + /* Evaluate the energy impact of using prev_cpu. */
8637 +- if (compute_prev_delta) {
8638 ++ if (prev_spare_cap > 0) {
8639 + prev_delta = compute_energy(&eenv, pd, cpus, p,
8640 + prev_cpu);
8641 + /* CPU utilization has changed */
8642 +@@ -7309,7 +7307,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
8643 + }
8644 +
8645 + /* Evaluate the energy impact of using max_spare_cap_cpu. */
8646 +- if (max_spare_cap_cpu >= 0) {
8647 ++ if (max_spare_cap_cpu >= 0 && max_spare_cap > prev_spare_cap) {
8648 + cur_delta = compute_energy(&eenv, pd, cpus, p,
8649 + max_spare_cap_cpu);
8650 + /* CPU utilization has changed */
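The scheduler hunk folds the uclamp handling into one branch: util_min/util_max now start from the task's own clamps on every performance-domain iteration (which also covers the idle-rq case) and are max-aggregated with the runqueue clamps otherwise. A sketch of just that aggregation step, as a hypothetical helper:

static void eas_aggregate_clamps(struct rq *rq,
				 unsigned long p_util_min,
				 unsigned long p_util_max,
				 unsigned long *util_min,
				 unsigned long *util_max)
{
	*util_min = p_util_min;
	*util_max = p_util_max;

	if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) {
		/* max aggregation only, no clamp() step:
		 * util_fits_cpu() needs the raw utilization next to
		 * the aggregated uclamp_{min,max}
		 */
		*util_min = max(uclamp_rq_get(rq, UCLAMP_MIN), p_util_min);
		*util_max = max(uclamp_rq_get(rq, UCLAMP_MAX), p_util_max);
	}
}

The prev_spare_cap variable also plays a second role: compute_energy() for max_spare_cap_cpu is skipped unless that CPU beats prev_cpu's spare capacity, saving one energy computation per domain in the common case.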
8651 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
8652 +index 33236241f2364..6f726ea0fde01 100644
8653 +--- a/kernel/trace/ftrace.c
8654 ++++ b/kernel/trace/ftrace.c
8655 +@@ -1248,12 +1248,17 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
8656 + call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
8657 + }
8658 +
8659 ++/**
8660 ++ * ftrace_free_filter - remove all filters for an ftrace_ops
8661 ++ * @ops - the ops to remove the filters from
8662 ++ */
8663 + void ftrace_free_filter(struct ftrace_ops *ops)
8664 + {
8665 + ftrace_ops_init(ops);
8666 + free_ftrace_hash(ops->func_hash->filter_hash);
8667 + free_ftrace_hash(ops->func_hash->notrace_hash);
8668 + }
8669 ++EXPORT_SYMBOL_GPL(ftrace_free_filter);
8670 +
8671 + static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
8672 + {
8673 +@@ -5828,6 +5833,10 @@ EXPORT_SYMBOL_GPL(modify_ftrace_direct_multi);
8674 + *
8675 + * Filters denote which functions should be enabled when tracing is enabled
8676 + * If @ip is NULL, it fails to update filter.
8677 ++ *
8678 ++ * This can allocate memory which must be freed before @ops can be freed,
8679 ++ * either by removing each filtered addr or by using
8680 ++ * ftrace_free_filter(@ops).
8681 + */
8682 + int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
8683 + int remove, int reset)
8684 +@@ -5847,7 +5856,11 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
8685 + *
8686 + * Filters denote which functions should be enabled when tracing is enabled
8687 + * If @ips array or any ip specified within is NULL , it fails to update filter.
8688 +- */
8689 ++ *
8690 ++ * This can allocate memory which must be freed before @ops can be freed,
8691 ++ * either by removing each filtered addr or by using
8692 ++ * ftrace_free_filter(@ops).
8693 ++*/
8694 + int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
8695 + unsigned int cnt, int remove, int reset)
8696 + {
8697 +@@ -5889,6 +5902,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
8698 + *
8699 + * Filters denote which functions should be enabled when tracing is enabled.
8700 + * If @buf is NULL and reset is set, all functions will be enabled for tracing.
8701 ++ *
8702 ++ * This can allocate memory which must be freed before @ops can be freed,
8703 ++ * either by removing each filtered addr or by using
8704 ++ * ftrace_free_filter(@ops).
8705 + */
8706 + int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
8707 + int len, int reset)
8708 +@@ -5908,6 +5925,10 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter);
8709 + * Notrace Filters denote which functions should not be enabled when tracing
8710 + * is enabled. If @buf is NULL and reset is set, all functions will be enabled
8711 + * for tracing.
8712 ++ *
8713 ++ * This can allocate memory which must be freed before @ops can be freed,
8714 ++ * either by removing each filtered addr or by using
8715 ++ * ftrace_free_filter(@ops).
8716 + */
8717 + int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
8718 + int len, int reset)
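With ftrace_free_filter() now exported, a module using the filtered-tracing API owns the hashes its filter calls allocate and must release them before its ftrace_ops goes away. A minimal sketch of the documented lifecycle (my_callback and the traced symbol are illustrative):

static struct ftrace_ops my_ops = {
	.func = my_callback,
};

static int my_init(void)
{
	int err;

	err = ftrace_set_filter_ip(&my_ops, (unsigned long)vfs_read, 0, 0);
	if (err)
		return err;

	err = register_ftrace_function(&my_ops);
	if (err)
		ftrace_free_filter(&my_ops);	/* the filter hash already exists */
	return err;
}

static void my_exit(void)
{
	unregister_ftrace_function(&my_ops);
	ftrace_free_filter(&my_ops);	/* frees filter_hash and notrace_hash */
}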
8719 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
8720 +index 3076af8dbf32e..546e84ae9993b 100644
8721 +--- a/kernel/trace/trace.c
8722 ++++ b/kernel/trace/trace.c
8723 +@@ -10291,6 +10291,8 @@ void __init early_trace_init(void)
8724 + static_key_enable(&tracepoint_printk_key.key);
8725 + }
8726 + tracer_alloc_buffers();
8727 ++
8728 ++ init_events();
8729 + }
8730 +
8731 + void __init trace_init(void)
8732 +diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
8733 +index 5581754d97628..9e931f51328a2 100644
8734 +--- a/kernel/trace/trace.h
8735 ++++ b/kernel/trace/trace.h
8736 +@@ -1490,6 +1490,7 @@ extern void trace_event_enable_cmd_record(bool enable);
8737 + extern void trace_event_enable_tgid_record(bool enable);
8738 +
8739 + extern int event_trace_init(void);
8740 ++extern int init_events(void);
8741 + extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
8742 + extern int event_trace_del_tracer(struct trace_array *tr);
8743 + extern void __trace_early_add_events(struct trace_array *tr);
8744 +diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
8745 +index c6e406995c112..da3bfe8625d96 100644
8746 +--- a/kernel/trace/trace_events_hist.c
8747 ++++ b/kernel/trace/trace_events_hist.c
8748 +@@ -1975,6 +1975,8 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
8749 + hist_field->fn_num = flags & HIST_FIELD_FL_LOG2 ? HIST_FIELD_FN_LOG2 :
8750 + HIST_FIELD_FN_BUCKET;
8751 + hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
8752 ++ if (!hist_field->operands[0])
8753 ++ goto free;
8754 + hist_field->size = hist_field->operands[0]->size;
8755 + hist_field->type = kstrdup_const(hist_field->operands[0]->type, GFP_KERNEL);
8756 + if (!hist_field->type)
8757 +diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
8758 +index 4300c5dc4e5db..1c07efcb3d466 100644
8759 +--- a/kernel/trace/trace_osnoise.c
8760 ++++ b/kernel/trace/trace_osnoise.c
8761 +@@ -125,9 +125,8 @@ static void osnoise_unregister_instance(struct trace_array *tr)
8762 + * register/unregister serialization is provided by trace's
8763 + * trace_types_lock.
8764 + */
8765 +- lockdep_assert_held(&trace_types_lock);
8766 +-
8767 +- list_for_each_entry_rcu(inst, &osnoise_instances, list) {
8768 ++ list_for_each_entry_rcu(inst, &osnoise_instances, list,
8769 ++ lockdep_is_held(&trace_types_lock)) {
8770 + if (inst->tr == tr) {
8771 + list_del_rcu(&inst->list);
8772 + found = 1;
8773 +diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
8774 +index 67f47ea27921d..5cd4fb6563068 100644
8775 +--- a/kernel/trace/trace_output.c
8776 ++++ b/kernel/trace/trace_output.c
8777 +@@ -1568,7 +1568,7 @@ static struct trace_event *events[] __initdata = {
8778 + NULL
8779 + };
8780 +
8781 +-__init static int init_events(void)
8782 ++__init int init_events(void)
8783 + {
8784 + struct trace_event *event;
8785 + int i, ret;
8786 +@@ -1581,4 +1581,3 @@ __init static int init_events(void)
8787 +
8788 + return 0;
8789 + }
8790 +-early_initcall(init_events);
8791 +diff --git a/lib/lockref.c b/lib/lockref.c
8792 +index 45e93ece8ba0d..2afe4c5d89191 100644
8793 +--- a/lib/lockref.c
8794 ++++ b/lib/lockref.c
8795 +@@ -23,7 +23,6 @@
8796 + } \
8797 + if (!--retry) \
8798 + break; \
8799 +- cpu_relax(); \
8800 + } \
8801 + } while (0)
8802 +
8803 +diff --git a/lib/nlattr.c b/lib/nlattr.c
8804 +index b67a53e29b8fe..dffd60e4065fd 100644
8805 +--- a/lib/nlattr.c
8806 ++++ b/lib/nlattr.c
8807 +@@ -10,6 +10,7 @@
8808 + #include <linux/kernel.h>
8809 + #include <linux/errno.h>
8810 + #include <linux/jiffies.h>
8811 ++#include <linux/nospec.h>
8812 + #include <linux/skbuff.h>
8813 + #include <linux/string.h>
8814 + #include <linux/types.h>
8815 +@@ -381,6 +382,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
8816 + if (type <= 0 || type > maxtype)
8817 + return 0;
8818 +
8819 ++ type = array_index_nospec(type, maxtype + 1);
8820 + pt = &policy[type];
8821 +
8822 + BUG_ON(pt->type > NLA_TYPE_MAX);
8823 +@@ -596,6 +598,7 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
8824 + }
8825 + continue;
8826 + }
8827 ++ type = array_index_nospec(type, maxtype + 1);
8828 + if (policy) {
8829 + int err = validate_nla(nla, maxtype, policy,
8830 + validate, extack, depth);
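Both nlattr hunks apply the standard Spectre-v1 hardening: the bounds check alone does not stop the CPU from speculatively reading policy[type] with an out-of-range type, so the index is clamped after the check. Generic shape of the pattern:

#include <linux/nospec.h>

int lookup_policy(const struct nla_policy *policy, int maxtype, int type)
{
	if (type <= 0 || type > maxtype)
		return 0;
	/* On the architecturally taken path this is a no-op; on a
	 * misspeculated path it forces the index to 0, so an
	 * attacker-chosen out-of-bounds load cannot be observed.
	 */
	type = array_index_nospec(type, maxtype + 1);
	return policy[type].type;
}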
8831 +diff --git a/mm/compaction.c b/mm/compaction.c
8832 +index ca1603524bbe0..8238e83385a79 100644
8833 +--- a/mm/compaction.c
8834 ++++ b/mm/compaction.c
8835 +@@ -1839,6 +1839,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
8836 + pfn = cc->zone->zone_start_pfn;
8837 + cc->fast_search_fail = 0;
8838 + found_block = true;
8839 ++ set_pageblock_skip(freepage);
8840 + break;
8841 + }
8842 + }
8843 +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
8844 +index 8aab2e882958c..3c3b79f2e4c03 100644
8845 +--- a/net/bluetooth/hci_conn.c
8846 ++++ b/net/bluetooth/hci_conn.c
8847 +@@ -821,6 +821,7 @@ static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
8848 + static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis)
8849 + {
8850 + struct iso_list_data *d;
8851 ++ int ret;
8852 +
8853 + bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", big, bis);
8854 +
8855 +@@ -832,8 +833,12 @@ static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis)
8856 + d->big = big;
8857 + d->bis = bis;
8858 +
8859 +- return hci_cmd_sync_queue(hdev, terminate_big_sync, d,
8860 +- terminate_big_destroy);
8861 ++ ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
8862 ++ terminate_big_destroy);
8863 ++ if (ret)
8864 ++ kfree(d);
8865 ++
8866 ++ return ret;
8867 + }
8868 +
8869 + static int big_terminate_sync(struct hci_dev *hdev, void *data)
8870 +@@ -858,6 +863,7 @@ static int big_terminate_sync(struct hci_dev *hdev, void *data)
8871 + static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
8872 + {
8873 + struct iso_list_data *d;
8874 ++ int ret;
8875 +
8876 + bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, sync_handle);
8877 +
8878 +@@ -869,8 +875,12 @@ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
8879 + d->big = big;
8880 + d->sync_handle = sync_handle;
8881 +
8882 +- return hci_cmd_sync_queue(hdev, big_terminate_sync, d,
8883 +- terminate_big_destroy);
8884 ++ ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
8885 ++ terminate_big_destroy);
8886 ++ if (ret)
8887 ++ kfree(d);
8888 ++
8889 ++ return ret;
8890 + }
8891 +
8892 + /* Cleanup BIS connection
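Both hci_conn fixes above follow one ownership rule: hci_cmd_sync_queue()'s destroy callback only runs for work that was actually queued, so on a queueing error the caller still owns the allocation and must free it. Condensed (allocation details as in the functions above):

d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d)
	return -ENOMEM;
/* ... fill in d ... */
ret = hci_cmd_sync_queue(hdev, work_sync_fn, d, terminate_big_destroy);
if (ret)
	kfree(d);	/* the destroy callback will never see d */
return ret;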
8893 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
8894 +index faca701bce2a3..0e2425eb6aa79 100644
8895 +--- a/net/bluetooth/hci_event.c
8896 ++++ b/net/bluetooth/hci_event.c
8897 +@@ -3838,8 +3838,11 @@ static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
8898 + conn->handle, conn->link);
8899 +
8900 + /* Create CIS if LE is already connected */
8901 +- if (conn->link && conn->link->state == BT_CONNECTED)
8902 ++ if (conn->link && conn->link->state == BT_CONNECTED) {
8903 ++ rcu_read_unlock();
8904 + hci_le_create_cis(conn->link);
8905 ++ rcu_read_lock();
8906 ++ }
8907 +
8908 + if (i == rp->num_handles)
8909 + break;
8910 +diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
8911 +index 8d6c8cbfe1de4..3eec688a88a92 100644
8912 +--- a/net/bluetooth/hci_sync.c
8913 ++++ b/net/bluetooth/hci_sync.c
8914 +@@ -4703,6 +4703,7 @@ int hci_dev_open_sync(struct hci_dev *hdev)
8915 + hdev->flush(hdev);
8916 +
8917 + if (hdev->sent_cmd) {
8918 ++ cancel_delayed_work_sync(&hdev->cmd_timer);
8919 + kfree_skb(hdev->sent_cmd);
8920 + hdev->sent_cmd = NULL;
8921 + }
8922 +@@ -6168,20 +6169,13 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
8923 +
8924 + static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
8925 + {
8926 +- u8 instance = *(u8 *)data;
8927 +-
8928 +- kfree(data);
8929 ++ u8 instance = PTR_ERR(data);
8930 +
8931 + return hci_update_adv_data_sync(hdev, instance);
8932 + }
8933 +
8934 + int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
8935 + {
8936 +- u8 *inst_ptr = kmalloc(1, GFP_KERNEL);
8937 +-
8938 +- if (!inst_ptr)
8939 +- return -ENOMEM;
8940 +-
8941 +- *inst_ptr = instance;
8942 +- return hci_cmd_sync_queue(hdev, _update_adv_data_sync, inst_ptr, NULL);
8943 ++ return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
8944 ++ ERR_PTR(instance), NULL);
8945 + }
8946 +diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
8947 +index 26db929b97c43..2dabef488eaae 100644
8948 +--- a/net/bluetooth/iso.c
8949 ++++ b/net/bluetooth/iso.c
8950 +@@ -261,13 +261,13 @@ static int iso_connect_bis(struct sock *sk)
8951 +
8952 + if (!bis_capable(hdev)) {
8953 + err = -EOPNOTSUPP;
8954 +- goto done;
8955 ++ goto unlock;
8956 + }
8957 +
8958 + /* Fail if out PHYs are marked as disabled */
8959 + if (!iso_pi(sk)->qos.out.phy) {
8960 + err = -EINVAL;
8961 +- goto done;
8962 ++ goto unlock;
8963 + }
8964 +
8965 + hcon = hci_connect_bis(hdev, &iso_pi(sk)->dst, iso_pi(sk)->dst_type,
8966 +@@ -275,22 +275,27 @@ static int iso_connect_bis(struct sock *sk)
8967 + iso_pi(sk)->base);
8968 + if (IS_ERR(hcon)) {
8969 + err = PTR_ERR(hcon);
8970 +- goto done;
8971 ++ goto unlock;
8972 + }
8973 +
8974 + conn = iso_conn_add(hcon);
8975 + if (!conn) {
8976 + hci_conn_drop(hcon);
8977 + err = -ENOMEM;
8978 +- goto done;
8979 ++ goto unlock;
8980 + }
8981 +
8982 +- /* Update source addr of the socket */
8983 +- bacpy(&iso_pi(sk)->src, &hcon->src);
8984 ++ hci_dev_unlock(hdev);
8985 ++ hci_dev_put(hdev);
8986 +
8987 + err = iso_chan_add(conn, sk, NULL);
8988 + if (err)
8989 +- goto done;
8990 ++ return err;
8991 ++
8992 ++ lock_sock(sk);
8993 ++
8994 ++ /* Update source addr of the socket */
8995 ++ bacpy(&iso_pi(sk)->src, &hcon->src);
8996 +
8997 + if (hcon->state == BT_CONNECTED) {
8998 + iso_sock_clear_timer(sk);
8999 +@@ -300,7 +305,10 @@ static int iso_connect_bis(struct sock *sk)
9000 + iso_sock_set_timer(sk, sk->sk_sndtimeo);
9001 + }
9002 +
9003 +-done:
9004 ++ release_sock(sk);
9005 ++ return err;
9006 ++
9007 ++unlock:
9008 + hci_dev_unlock(hdev);
9009 + hci_dev_put(hdev);
9010 + return err;
9011 +@@ -324,13 +332,13 @@ static int iso_connect_cis(struct sock *sk)
9012 +
9013 + if (!cis_central_capable(hdev)) {
9014 + err = -EOPNOTSUPP;
9015 +- goto done;
9016 ++ goto unlock;
9017 + }
9018 +
9019 + /* Fail if either PHYs are marked as disabled */
9020 + if (!iso_pi(sk)->qos.in.phy && !iso_pi(sk)->qos.out.phy) {
9021 + err = -EINVAL;
9022 +- goto done;
9023 ++ goto unlock;
9024 + }
9025 +
9026 + /* Just bind if DEFER_SETUP has been set */
9027 +@@ -340,7 +348,7 @@ static int iso_connect_cis(struct sock *sk)
9028 + &iso_pi(sk)->qos);
9029 + if (IS_ERR(hcon)) {
9030 + err = PTR_ERR(hcon);
9031 +- goto done;
9032 ++ goto unlock;
9033 + }
9034 + } else {
9035 + hcon = hci_connect_cis(hdev, &iso_pi(sk)->dst,
9036 +@@ -348,7 +356,7 @@ static int iso_connect_cis(struct sock *sk)
9037 + &iso_pi(sk)->qos);
9038 + if (IS_ERR(hcon)) {
9039 + err = PTR_ERR(hcon);
9040 +- goto done;
9041 ++ goto unlock;
9042 + }
9043 + }
9044 +
9045 +@@ -356,15 +364,20 @@ static int iso_connect_cis(struct sock *sk)
9046 + if (!conn) {
9047 + hci_conn_drop(hcon);
9048 + err = -ENOMEM;
9049 +- goto done;
9050 ++ goto unlock;
9051 + }
9052 +
9053 +- /* Update source addr of the socket */
9054 +- bacpy(&iso_pi(sk)->src, &hcon->src);
9055 ++ hci_dev_unlock(hdev);
9056 ++ hci_dev_put(hdev);
9057 +
9058 + err = iso_chan_add(conn, sk, NULL);
9059 + if (err)
9060 +- goto done;
9061 ++ return err;
9062 ++
9063 ++ lock_sock(sk);
9064 ++
9065 ++ /* Update source addr of the socket */
9066 ++ bacpy(&iso_pi(sk)->src, &hcon->src);
9067 +
9068 + if (hcon->state == BT_CONNECTED) {
9069 + iso_sock_clear_timer(sk);
9070 +@@ -377,7 +390,10 @@ static int iso_connect_cis(struct sock *sk)
9071 + iso_sock_set_timer(sk, sk->sk_sndtimeo);
9072 + }
9073 +
9074 +-done:
9075 ++ release_sock(sk);
9076 ++ return err;
9077 ++
9078 ++unlock:
9079 + hci_dev_unlock(hdev);
9080 + hci_dev_put(hdev);
9081 + return err;
9082 +@@ -831,20 +847,23 @@ static int iso_sock_connect(struct socket *sock, struct sockaddr *addr,
9083 + bacpy(&iso_pi(sk)->dst, &sa->iso_bdaddr);
9084 + iso_pi(sk)->dst_type = sa->iso_bdaddr_type;
9085 +
9086 ++ release_sock(sk);
9087 ++
9088 + if (bacmp(&iso_pi(sk)->dst, BDADDR_ANY))
9089 + err = iso_connect_cis(sk);
9090 + else
9091 + err = iso_connect_bis(sk);
9092 +
9093 + if (err)
9094 +- goto done;
9095 ++ return err;
9096 ++
9097 ++ lock_sock(sk);
9098 +
9099 + if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
9100 + err = bt_sock_wait_state(sk, BT_CONNECTED,
9101 + sock_sndtimeo(sk, flags & O_NONBLOCK));
9102 + }
9103 +
9104 +-done:
9105 + release_sock(sk);
9106 + return err;
9107 + }
9108 +@@ -1099,28 +1118,22 @@ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
9109 + {
9110 + struct sock *sk = sock->sk;
9111 + struct iso_pinfo *pi = iso_pi(sk);
9112 +- int err;
9113 +
9114 + BT_DBG("sk %p", sk);
9115 +
9116 +- lock_sock(sk);
9117 +-
9118 + if (test_and_clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
9119 + switch (sk->sk_state) {
9120 + case BT_CONNECT2:
9121 ++ lock_sock(sk);
9122 + iso_conn_defer_accept(pi->conn->hcon);
9123 + sk->sk_state = BT_CONFIG;
9124 + release_sock(sk);
9125 + return 0;
9126 + case BT_CONNECT:
9127 +- err = iso_connect_cis(sk);
9128 +- release_sock(sk);
9129 +- return err;
9130 ++ return iso_connect_cis(sk);
9131 + }
9132 + }
9133 +
9134 +- release_sock(sk);
9135 +-
9136 + return bt_sock_recvmsg(sock, msg, len, flags);
9137 + }
9138 +
9139 +@@ -1415,33 +1428,29 @@ static void iso_conn_ready(struct iso_conn *conn)
9140 + struct sock *parent;
9141 + struct sock *sk = conn->sk;
9142 + struct hci_ev_le_big_sync_estabilished *ev;
9143 ++ struct hci_conn *hcon;
9144 +
9145 + BT_DBG("conn %p", conn);
9146 +
9147 + if (sk) {
9148 + iso_sock_ready(conn->sk);
9149 + } else {
9150 +- iso_conn_lock(conn);
9151 +-
9152 +- if (!conn->hcon) {
9153 +- iso_conn_unlock(conn);
9154 ++ hcon = conn->hcon;
9155 ++ if (!hcon)
9156 + return;
9157 +- }
9158 +
9159 +- ev = hci_recv_event_data(conn->hcon->hdev,
9160 ++ ev = hci_recv_event_data(hcon->hdev,
9161 + HCI_EVT_LE_BIG_SYNC_ESTABILISHED);
9162 + if (ev)
9163 +- parent = iso_get_sock_listen(&conn->hcon->src,
9164 +- &conn->hcon->dst,
9165 ++ parent = iso_get_sock_listen(&hcon->src,
9166 ++ &hcon->dst,
9167 + iso_match_big, ev);
9168 + else
9169 +- parent = iso_get_sock_listen(&conn->hcon->src,
9170 ++ parent = iso_get_sock_listen(&hcon->src,
9171 + BDADDR_ANY, NULL, NULL);
9172 +
9173 +- if (!parent) {
9174 +- iso_conn_unlock(conn);
9175 ++ if (!parent)
9176 + return;
9177 +- }
9178 +
9179 + lock_sock(parent);
9180 +
9181 +@@ -1449,30 +1458,29 @@ static void iso_conn_ready(struct iso_conn *conn)
9182 + BTPROTO_ISO, GFP_ATOMIC, 0);
9183 + if (!sk) {
9184 + release_sock(parent);
9185 +- iso_conn_unlock(conn);
9186 + return;
9187 + }
9188 +
9189 + iso_sock_init(sk, parent);
9190 +
9191 +- bacpy(&iso_pi(sk)->src, &conn->hcon->src);
9192 +- iso_pi(sk)->src_type = conn->hcon->src_type;
9193 ++ bacpy(&iso_pi(sk)->src, &hcon->src);
9194 ++ iso_pi(sk)->src_type = hcon->src_type;
9195 +
9196 + /* If hcon has no destination address (BDADDR_ANY) it means it
9197 + * was created by HCI_EV_LE_BIG_SYNC_ESTABILISHED so we need to
9198 + * initialize using the parent socket destination address.
9199 + */
9200 +- if (!bacmp(&conn->hcon->dst, BDADDR_ANY)) {
9201 +- bacpy(&conn->hcon->dst, &iso_pi(parent)->dst);
9202 +- conn->hcon->dst_type = iso_pi(parent)->dst_type;
9203 +- conn->hcon->sync_handle = iso_pi(parent)->sync_handle;
9204 ++ if (!bacmp(&hcon->dst, BDADDR_ANY)) {
9205 ++ bacpy(&hcon->dst, &iso_pi(parent)->dst);
9206 ++ hcon->dst_type = iso_pi(parent)->dst_type;
9207 ++ hcon->sync_handle = iso_pi(parent)->sync_handle;
9208 + }
9209 +
9210 +- bacpy(&iso_pi(sk)->dst, &conn->hcon->dst);
9211 +- iso_pi(sk)->dst_type = conn->hcon->dst_type;
9212 ++ bacpy(&iso_pi(sk)->dst, &hcon->dst);
9213 ++ iso_pi(sk)->dst_type = hcon->dst_type;
9214 +
9215 +- hci_conn_hold(conn->hcon);
9216 +- __iso_chan_add(conn, sk, parent);
9217 ++ hci_conn_hold(hcon);
9218 ++ iso_chan_add(conn, sk, parent);
9219 +
9220 + if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
9221 + sk->sk_state = BT_CONNECT2;
9222 +@@ -1483,8 +1491,6 @@ static void iso_conn_ready(struct iso_conn *conn)
9223 + parent->sk_data_ready(parent);
9224 +
9225 + release_sock(parent);
9226 +-
9227 +- iso_conn_unlock(conn);
9228 + }
9229 + }
9230 +
9231 +diff --git a/net/bluetooth/mgmt_util.h b/net/bluetooth/mgmt_util.h
9232 +index 6a8b7e84293df..bdf978605d5a8 100644
9233 +--- a/net/bluetooth/mgmt_util.h
9234 ++++ b/net/bluetooth/mgmt_util.h
9235 +@@ -27,7 +27,7 @@ struct mgmt_mesh_tx {
9236 + struct sock *sk;
9237 + u8 handle;
9238 + u8 instance;
9239 +- u8 param[sizeof(struct mgmt_cp_mesh_send) + 29];
9240 ++ u8 param[sizeof(struct mgmt_cp_mesh_send) + 31];
9241 + };
9242 +
9243 + struct mgmt_pending_cmd {
9244 +diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
9245 +index 21e24da4847f0..4397e14ff560f 100644
9246 +--- a/net/bluetooth/rfcomm/sock.c
9247 ++++ b/net/bluetooth/rfcomm/sock.c
9248 +@@ -391,6 +391,7 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a
9249 + addr->sa_family != AF_BLUETOOTH)
9250 + return -EINVAL;
9251 +
9252 ++ sock_hold(sk);
9253 + lock_sock(sk);
9254 +
9255 + if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
9256 +@@ -410,14 +411,18 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a
9257 + d->sec_level = rfcomm_pi(sk)->sec_level;
9258 + d->role_switch = rfcomm_pi(sk)->role_switch;
9259 +
9260 ++ /* Drop sock lock to avoid potential deadlock with the RFCOMM lock */
9261 ++ release_sock(sk);
9262 + err = rfcomm_dlc_open(d, &rfcomm_pi(sk)->src, &sa->rc_bdaddr,
9263 + sa->rc_channel);
9264 +- if (!err)
9265 ++ lock_sock(sk);
9266 ++ if (!err && !sock_flag(sk, SOCK_ZAPPED))
9267 + err = bt_sock_wait_state(sk, BT_CONNECTED,
9268 + sock_sndtimeo(sk, flags & O_NONBLOCK));
9269 +
9270 + done:
9271 + release_sock(sk);
9272 ++ sock_put(sk);
9273 + return err;
9274 + }
9275 +
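The rfcomm fix is a classic unlock-call-relock pattern: rfcomm_dlc_open() takes the RFCOMM lock, so calling it under the sock lock risks an AB-BA deadlock. The socket is pinned with sock_hold() first, and because it may be zapped while unlocked, its state is re-validated after re-locking. Sketch (src, dst, channel and timeo are illustrative):

sock_hold(sk);			/* sk must survive the unlocked window */
lock_sock(sk);
/* ... validate state, set up the dlc ... */
release_sock(sk);		/* drop sock lock before taking rfcomm lock */
err = rfcomm_dlc_open(d, src, dst, channel);
lock_sock(sk);
if (!err && !sock_flag(sk, SOCK_ZAPPED))	/* re-check: sk may be dead */
	err = bt_sock_wait_state(sk, BT_CONNECTED, timeo);
release_sock(sk);
sock_put(sk);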
9276 +diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
9277 +index f64654df71a29..4c1707d0eb9b0 100644
9278 +--- a/net/core/net_namespace.c
9279 ++++ b/net/core/net_namespace.c
9280 +@@ -137,12 +137,12 @@ static int ops_init(const struct pernet_operations *ops, struct net *net)
9281 + return 0;
9282 +
9283 + if (ops->id && ops->size) {
9284 +-cleanup:
9285 + ng = rcu_dereference_protected(net->gen,
9286 + lockdep_is_held(&pernet_ops_rwsem));
9287 + ng->ptr[*ops->id] = NULL;
9288 + }
9289 +
9290 ++cleanup:
9291 + kfree(data);
9292 +
9293 + out:
9294 +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
9295 +index ce9ff3c62e840..3bb890a40ed73 100644
9296 +--- a/net/ipv4/fib_semantics.c
9297 ++++ b/net/ipv4/fib_semantics.c
9298 +@@ -30,6 +30,7 @@
9299 + #include <linux/slab.h>
9300 + #include <linux/netlink.h>
9301 + #include <linux/hash.h>
9302 ++#include <linux/nospec.h>
9303 +
9304 + #include <net/arp.h>
9305 + #include <net/inet_dscp.h>
9306 +@@ -1022,6 +1023,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
9307 + if (type > RTAX_MAX)
9308 + return false;
9309 +
9310 ++ type = array_index_nospec(type, RTAX_MAX + 1);
9311 + if (type == RTAX_CC_ALGO) {
9312 + char tmp[TCP_CA_NAME_MAX];
9313 + bool ecn_ca = false;
9314 +diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
9315 +index 67f5e54408020..a5711b8f4cb19 100644
9316 +--- a/net/ipv4/inet_hashtables.c
9317 ++++ b/net/ipv4/inet_hashtables.c
9318 +@@ -650,8 +650,20 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
9319 + spin_lock(lock);
9320 + if (osk) {
9321 + WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
9322 +- ret = sk_nulls_del_node_init_rcu(osk);
9323 +- } else if (found_dup_sk) {
9324 ++ ret = sk_hashed(osk);
9325 ++ if (ret) {
9326 ++ /* Before deleting the node, we insert a new one to make
9327 ++ * sure that the look-up-sk process would not miss either
9328 ++ * of them and that at least one node would exist in ehash
9329 ++ * table all the time. Otherwise there's a tiny chance
9330 ++ * that lookup process could find nothing in ehash table.
9331 ++ */
9332 ++ __sk_nulls_add_node_tail_rcu(sk, list);
9333 ++ sk_nulls_del_node_init_rcu(osk);
9334 ++ }
9335 ++ goto unlock;
9336 ++ }
9337 ++ if (found_dup_sk) {
9338 + *found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
9339 + if (*found_dup_sk)
9340 + ret = false;
9341 +@@ -660,6 +672,7 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
9342 + if (ret)
9343 + __sk_nulls_add_node_rcu(sk, list);
9344 +
9345 ++unlock:
9346 + spin_unlock(lock);
9347 +
9348 + return ret;
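The ehash change closes a lockless-lookup window: readers traverse the nulls chain under RCU only, so del-then-add would leave a moment where neither the old nor the new socket is on the chain and a lookup wrongly misses. Inserting the replacement at the tail before deleting keeps at least one node visible at all times; in generic hlist_nulls terms, writer-side under the chain lock:

hlist_nulls_add_tail_rcu(&new_sk->node, list);	/* 1: new is visible */
hlist_nulls_del_init_rcu(&old_sk->node);	/* 2: old disappears  */

The inet_timewait_sock.c hunk that follows switches timewait insertion to the tail for the same reason.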
9349 +diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
9350 +index 1d77d992e6e77..beed32fff4841 100644
9351 +--- a/net/ipv4/inet_timewait_sock.c
9352 ++++ b/net/ipv4/inet_timewait_sock.c
9353 +@@ -91,10 +91,10 @@ void inet_twsk_put(struct inet_timewait_sock *tw)
9354 + }
9355 + EXPORT_SYMBOL_GPL(inet_twsk_put);
9356 +
9357 +-static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
9358 +- struct hlist_nulls_head *list)
9359 ++static void inet_twsk_add_node_tail_rcu(struct inet_timewait_sock *tw,
9360 ++ struct hlist_nulls_head *list)
9361 + {
9362 +- hlist_nulls_add_head_rcu(&tw->tw_node, list);
9363 ++ hlist_nulls_add_tail_rcu(&tw->tw_node, list);
9364 + }
9365 +
9366 + static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
9367 +@@ -147,7 +147,7 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
9368 +
9369 + spin_lock(lock);
9370 +
9371 +- inet_twsk_add_node_rcu(tw, &ehead->chain);
9372 ++ inet_twsk_add_node_tail_rcu(tw, &ehead->chain);
9373 +
9374 + /* Step 3: Remove SK from hash chain */
9375 + if (__sk_nulls_del_node_init_rcu(sk))
9376 +diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c
9377 +index 25ea6ac44db95..6a1427916c7dc 100644
9378 +--- a/net/ipv4/metrics.c
9379 ++++ b/net/ipv4/metrics.c
9380 +@@ -1,5 +1,6 @@
9381 + // SPDX-License-Identifier: GPL-2.0-only
9382 + #include <linux/netlink.h>
9383 ++#include <linux/nospec.h>
9384 + #include <linux/rtnetlink.h>
9385 + #include <linux/types.h>
9386 + #include <net/ip.h>
9387 +@@ -28,6 +29,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
9388 + return -EINVAL;
9389 + }
9390 +
9391 ++ type = array_index_nospec(type, RTAX_MAX + 1);
9392 + if (type == RTAX_CC_ALGO) {
9393 + char tmp[TCP_CA_NAME_MAX];
9394 +
9395 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
9396 +index 4f2205756cfee..ec19ed7224536 100644
9397 +--- a/net/ipv4/tcp.c
9398 ++++ b/net/ipv4/tcp.c
9399 +@@ -435,6 +435,7 @@ void tcp_init_sock(struct sock *sk)
9400 +
9401 + /* There's a bubble in the pipe until at least the first ACK. */
9402 + tp->app_limited = ~0U;
9403 ++ tp->rate_app_limited = 1;
9404 +
9405 + /* See draft-stevens-tcpca-spec-01 for discussion of the
9406 + * initialization of these values.
9407 +@@ -3177,6 +3178,7 @@ int tcp_disconnect(struct sock *sk, int flags)
9408 + tp->last_oow_ack_time = 0;
9409 + /* There's a bubble in the pipe until at least the first ACK. */
9410 + tp->app_limited = ~0U;
9411 ++ tp->rate_app_limited = 1;
9412 + tp->rack.mstamp = 0;
9413 + tp->rack.advanced = 0;
9414 + tp->rack.reo_wnd_steps = 1;
9415 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
9416 +index 60fd91bb5171c..c314fdde0097c 100644
9417 +--- a/net/ipv6/ip6_output.c
9418 ++++ b/net/ipv6/ip6_output.c
9419 +@@ -547,7 +547,20 @@ int ip6_forward(struct sk_buff *skb)
9420 + pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
9421 + int proxied = ip6_forward_proxy_check(skb);
9422 + if (proxied > 0) {
9423 +- hdr->hop_limit--;
9424 ++ /* It's tempting to decrease the hop limit
9425 ++ * here by 1, as we do at the end of the
9426 ++ * function too.
9427 ++ *
9428 ++ * But that would be incorrect, as proxying is
9429 ++ * not forwarding. The ip6_input function
9430 ++ * will handle this packet locally, and it
9431 ++ * depends on the hop limit being unchanged.
9432 ++ *
9433 ++ * One example is the NDP hop limit, that
9434 ++ * always has to stay 255, but other would be
9435 ++ * similar checks around RA packets, where the
9436 ++ * user can even change the desired limit.
9437 ++ */
9438 + return ip6_input(skb);
9439 + } else if (proxied < 0) {
9440 + __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
9441 +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
9442 +index 9a1415fe3fa78..03608d3ded4b8 100644
9443 +--- a/net/l2tp/l2tp_core.c
9444 ++++ b/net/l2tp/l2tp_core.c
9445 +@@ -104,9 +104,9 @@ static struct workqueue_struct *l2tp_wq;
9446 + /* per-net private data for this module */
9447 + static unsigned int l2tp_net_id;
9448 + struct l2tp_net {
9449 +- struct list_head l2tp_tunnel_list;
9450 +- /* Lock for write access to l2tp_tunnel_list */
9451 +- spinlock_t l2tp_tunnel_list_lock;
9452 ++ /* Lock for write access to l2tp_tunnel_idr */
9453 ++ spinlock_t l2tp_tunnel_idr_lock;
9454 ++ struct idr l2tp_tunnel_idr;
9455 + struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
9456 + /* Lock for write access to l2tp_session_hlist */
9457 + spinlock_t l2tp_session_hlist_lock;
9458 +@@ -208,13 +208,10 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
9459 + struct l2tp_tunnel *tunnel;
9460 +
9461 + rcu_read_lock_bh();
9462 +- list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
9463 +- if (tunnel->tunnel_id == tunnel_id &&
9464 +- refcount_inc_not_zero(&tunnel->ref_count)) {
9465 +- rcu_read_unlock_bh();
9466 +-
9467 +- return tunnel;
9468 +- }
9469 ++ tunnel = idr_find(&pn->l2tp_tunnel_idr, tunnel_id);
9470 ++ if (tunnel && refcount_inc_not_zero(&tunnel->ref_count)) {
9471 ++ rcu_read_unlock_bh();
9472 ++ return tunnel;
9473 + }
9474 + rcu_read_unlock_bh();
9475 +
9476 +@@ -224,13 +221,14 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
9477 +
9478 + struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
9479 + {
9480 +- const struct l2tp_net *pn = l2tp_pernet(net);
9481 ++ struct l2tp_net *pn = l2tp_pernet(net);
9482 ++ unsigned long tunnel_id, tmp;
9483 + struct l2tp_tunnel *tunnel;
9484 + int count = 0;
9485 +
9486 + rcu_read_lock_bh();
9487 +- list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
9488 +- if (++count > nth &&
9489 ++ idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
9490 ++ if (tunnel && ++count > nth &&
9491 + refcount_inc_not_zero(&tunnel->ref_count)) {
9492 + rcu_read_unlock_bh();
9493 + return tunnel;
9494 +@@ -1043,7 +1041,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, uns
9495 + IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
9496 + nf_reset_ct(skb);
9497 +
9498 +- bh_lock_sock(sk);
9499 ++ bh_lock_sock_nested(sk);
9500 + if (sock_owned_by_user(sk)) {
9501 + kfree_skb(skb);
9502 + ret = NET_XMIT_DROP;
9503 +@@ -1227,6 +1225,15 @@ static void l2tp_udp_encap_destroy(struct sock *sk)
9504 + l2tp_tunnel_delete(tunnel);
9505 + }
9506 +
9507 ++static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel)
9508 ++{
9509 ++ struct l2tp_net *pn = l2tp_pernet(net);
9510 ++
9511 ++ spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
9512 ++ idr_remove(&pn->l2tp_tunnel_idr, tunnel->tunnel_id);
9513 ++ spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
9514 ++}
9515 ++
9516 + /* Workqueue tunnel deletion function */
9517 + static void l2tp_tunnel_del_work(struct work_struct *work)
9518 + {
9519 +@@ -1234,7 +1241,6 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
9520 + del_work);
9521 + struct sock *sk = tunnel->sock;
9522 + struct socket *sock = sk->sk_socket;
9523 +- struct l2tp_net *pn;
9524 +
9525 + l2tp_tunnel_closeall(tunnel);
9526 +
9527 +@@ -1248,12 +1254,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
9528 + }
9529 + }
9530 +
9531 +- /* Remove the tunnel struct from the tunnel list */
9532 +- pn = l2tp_pernet(tunnel->l2tp_net);
9533 +- spin_lock_bh(&pn->l2tp_tunnel_list_lock);
9534 +- list_del_rcu(&tunnel->list);
9535 +- spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
9536 +-
9537 ++ l2tp_tunnel_remove(tunnel->l2tp_net, tunnel);
9538 + /* drop initial ref */
9539 + l2tp_tunnel_dec_refcount(tunnel);
9540 +
9541 +@@ -1384,8 +1385,6 @@ out:
9542 + return err;
9543 + }
9544 +
9545 +-static struct lock_class_key l2tp_socket_class;
9546 +-
9547 + int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
9548 + struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
9549 + {
9550 +@@ -1455,12 +1454,19 @@ static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
9551 + int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
9552 + struct l2tp_tunnel_cfg *cfg)
9553 + {
9554 +- struct l2tp_tunnel *tunnel_walk;
9555 +- struct l2tp_net *pn;
9556 ++ struct l2tp_net *pn = l2tp_pernet(net);
9557 ++ u32 tunnel_id = tunnel->tunnel_id;
9558 + struct socket *sock;
9559 + struct sock *sk;
9560 + int ret;
9561 +
9562 ++ spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
9563 ++ ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id,
9564 ++ GFP_ATOMIC);
9565 ++ spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
9566 ++ if (ret)
9567 ++ return ret == -ENOSPC ? -EEXIST : ret;
9568 ++
9569 + if (tunnel->fd < 0) {
9570 + ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
9571 + tunnel->peer_tunnel_id, cfg,
9572 +@@ -1474,6 +1480,7 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
9573 + }
9574 +
9575 + sk = sock->sk;
9576 ++ lock_sock(sk);
9577 + write_lock_bh(&sk->sk_callback_lock);
9578 + ret = l2tp_validate_socket(sk, net, tunnel->encap);
9579 + if (ret < 0)
9580 +@@ -1481,24 +1488,6 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
9581 + rcu_assign_sk_user_data(sk, tunnel);
9582 + write_unlock_bh(&sk->sk_callback_lock);
9583 +
9584 +- tunnel->l2tp_net = net;
9585 +- pn = l2tp_pernet(net);
9586 +-
9587 +- sock_hold(sk);
9588 +- tunnel->sock = sk;
9589 +-
9590 +- spin_lock_bh(&pn->l2tp_tunnel_list_lock);
9591 +- list_for_each_entry(tunnel_walk, &pn->l2tp_tunnel_list, list) {
9592 +- if (tunnel_walk->tunnel_id == tunnel->tunnel_id) {
9593 +- spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
9594 +- sock_put(sk);
9595 +- ret = -EEXIST;
9596 +- goto err_sock;
9597 +- }
9598 +- }
9599 +- list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
9600 +- spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
9601 +-
9602 + if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
9603 + struct udp_tunnel_sock_cfg udp_cfg = {
9604 + .sk_user_data = tunnel,
9605 +@@ -1512,9 +1501,16 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
9606 +
9607 + tunnel->old_sk_destruct = sk->sk_destruct;
9608 + sk->sk_destruct = &l2tp_tunnel_destruct;
9609 +- lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class,
9610 +- "l2tp_sock");
9611 + sk->sk_allocation = GFP_ATOMIC;
9612 ++ release_sock(sk);
9613 ++
9614 ++ sock_hold(sk);
9615 ++ tunnel->sock = sk;
9616 ++ tunnel->l2tp_net = net;
9617 ++
9618 ++ spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
9619 ++ idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id);
9620 ++ spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
9621 +
9622 + trace_register_tunnel(tunnel);
9623 +
9624 +@@ -1523,17 +1519,16 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
9625 +
9626 + return 0;
9627 +
9628 +-err_sock:
9629 +- write_lock_bh(&sk->sk_callback_lock);
9630 +- rcu_assign_sk_user_data(sk, NULL);
9631 + err_inval_sock:
9632 + write_unlock_bh(&sk->sk_callback_lock);
9633 ++ release_sock(sk);
9634 +
9635 + if (tunnel->fd < 0)
9636 + sock_release(sock);
9637 + else
9638 + sockfd_put(sock);
9639 + err:
9640 ++ l2tp_tunnel_remove(net, tunnel);
9641 + return ret;
9642 + }
9643 + EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
9644 +@@ -1647,8 +1642,8 @@ static __net_init int l2tp_init_net(struct net *net)
9645 + struct l2tp_net *pn = net_generic(net, l2tp_net_id);
9646 + int hash;
9647 +
9648 +- INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
9649 +- spin_lock_init(&pn->l2tp_tunnel_list_lock);
9650 ++ idr_init(&pn->l2tp_tunnel_idr);
9651 ++ spin_lock_init(&pn->l2tp_tunnel_idr_lock);
9652 +
9653 + for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
9654 + INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
9655 +@@ -1662,11 +1657,13 @@ static __net_exit void l2tp_exit_net(struct net *net)
9656 + {
9657 + struct l2tp_net *pn = l2tp_pernet(net);
9658 + struct l2tp_tunnel *tunnel = NULL;
9659 ++ unsigned long tunnel_id, tmp;
9660 + int hash;
9661 +
9662 + rcu_read_lock_bh();
9663 +- list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
9664 +- l2tp_tunnel_delete(tunnel);
9665 ++ idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
9666 ++ if (tunnel)
9667 ++ l2tp_tunnel_delete(tunnel);
9668 + }
9669 + rcu_read_unlock_bh();
9670 +
9671 +@@ -1676,6 +1673,7 @@ static __net_exit void l2tp_exit_net(struct net *net)
9672 +
9673 + for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
9674 + WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash]));
9675 ++ idr_destroy(&pn->l2tp_tunnel_idr);
9676 + }
9677 +
9678 + static struct pernet_operations l2tp_net_ops = {
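The l2tp conversion replaces the tunnel list walk with an IDR keyed by tunnel_id, and registration becomes a two-phase reserve/publish so lookups can never return a half-built tunnel. Condensed from the hunks above:

/* phase 1: reserve the ID with a NULL pointer; a duplicate ID makes
 * idr_alloc_u32() fail with -ENOSPC, which is mapped to -EEXIST
 */
spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id,
		    GFP_ATOMIC);
spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
if (ret)
	return ret == -ENOSPC ? -EEXIST : ret;

/* ... set up the socket and tunnel fields ... */

/* phase 2: publish; until here idr_find() returns NULL for this ID,
 * so l2tp_tunnel_get() cannot hand out the unfinished tunnel
 */
spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id);
spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);

On any registration failure the reservation is dropped again via idr_remove() in l2tp_tunnel_remove().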
9679 +diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
9680 +index b2e40465289d6..85d2b9e4b51ce 100644
9681 +--- a/net/mac80211/agg-tx.c
9682 ++++ b/net/mac80211/agg-tx.c
9683 +@@ -511,8 +511,6 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
9684 + */
9685 + clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
9686 +
9687 +- ieee80211_agg_stop_txq(sta, tid);
9688 +-
9689 + /*
9690 + * Make sure no packets are being processed. This ensures that
9691 + * we have a valid starting sequence number and that in-flight
9692 +diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
9693 +index d3397c1248d36..b057253db28d5 100644
9694 +--- a/net/mac80211/debugfs_sta.c
9695 ++++ b/net/mac80211/debugfs_sta.c
9696 +@@ -167,7 +167,7 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
9697 + continue;
9698 + txqi = to_txq_info(sta->sta.txq[i]);
9699 + p += scnprintf(p, bufsz + buf - p,
9700 +- "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s)\n",
9701 ++ "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s%s)\n",
9702 + txqi->txq.tid,
9703 + txqi->txq.ac,
9704 + txqi->tin.backlog_bytes,
9705 +@@ -182,7 +182,8 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
9706 + txqi->flags,
9707 + test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ? "STOP" : "RUN",
9708 + test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags) ? " AMPDU" : "",
9709 +- test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags) ? " NO-AMSDU" : "");
9710 ++ test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags) ? " NO-AMSDU" : "",
9711 ++ test_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) ? " DIRTY" : "");
9712 + }
9713 +
9714 + rcu_read_unlock();
9715 +diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
9716 +index 81e40b0a3b167..e685c12757f4b 100644
9717 +--- a/net/mac80211/driver-ops.h
9718 ++++ b/net/mac80211/driver-ops.h
9719 +@@ -1183,7 +1183,7 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
9720 +
9721 + /* In reconfig don't transmit now, but mark for waking later */
9722 + if (local->in_reconfig) {
9723 +- set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txq->flags);
9724 ++ set_bit(IEEE80211_TXQ_DIRTY, &txq->flags);
9725 + return;
9726 + }
9727 +
9728 +diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
9729 +index 83bc41346ae7f..ae42e956eff5a 100644
9730 +--- a/net/mac80211/ht.c
9731 ++++ b/net/mac80211/ht.c
9732 +@@ -391,6 +391,43 @@ void ieee80211_ba_session_work(struct work_struct *work)
9733 +
9734 + tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
9735 + if (!blocked && tid_tx) {
9736 ++ struct ieee80211_sub_if_data *sdata = sta->sdata;
9737 ++ struct ieee80211_local *local = sdata->local;
9738 ++
9739 ++ if (local->ops->wake_tx_queue) {
9740 ++ struct txq_info *txqi =
9741 ++ to_txq_info(sta->sta.txq[tid]);
9742 ++ struct fq *fq = &local->fq;
9743 ++
9744 ++ spin_lock_bh(&fq->lock);
9745 ++
9746 ++ /* Allow only frags to be dequeued */
9747 ++ set_bit(IEEE80211_TXQ_STOP, &txqi->flags);
9748 ++
9749 ++ if (!skb_queue_empty(&txqi->frags)) {
9750 ++ /* Fragmented Tx is ongoing, wait for it
9751 ++ * to finish. Reschedule worker to retry
9752 ++ * later.
9753 ++ */
9754 ++
9755 ++ spin_unlock_bh(&fq->lock);
9756 ++ spin_unlock_bh(&sta->lock);
9757 ++
9758 ++ /* Give the task working on the txq a
9759 ++ * chance to send out the queued frags
9760 ++ */
9761 ++ synchronize_net();
9762 ++
9763 ++ mutex_unlock(&sta->ampdu_mlme.mtx);
9764 ++
9765 ++ ieee80211_queue_work(&sdata->local->hw,
9766 ++ work);
9767 ++ return;
9768 ++ }
9769 ++
9770 ++ spin_unlock_bh(&fq->lock);
9771 ++ }
9772 ++
9773 + /*
9774 + * Assign it over to the normal tid_tx array
9775 + * where it "goes live".
9776 +diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
9777 +index de7b8a4d4bbbb..a8862f2c64ec0 100644
9778 +--- a/net/mac80211/ieee80211_i.h
9779 ++++ b/net/mac80211/ieee80211_i.h
9780 +@@ -836,7 +836,7 @@ enum txq_info_flags {
9781 + IEEE80211_TXQ_STOP,
9782 + IEEE80211_TXQ_AMPDU,
9783 + IEEE80211_TXQ_NO_AMSDU,
9784 +- IEEE80211_TXQ_STOP_NETIF_TX,
9785 ++ IEEE80211_TXQ_DIRTY,
9786 + };
9787 +
9788 + /**
9789 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
9790 +index cc10ee1ff8e93..6409097a56c7a 100644
9791 +--- a/net/mac80211/tx.c
9792 ++++ b/net/mac80211/tx.c
9793 +@@ -1295,7 +1295,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
9794 + if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
9795 + if (!(tx->flags & IEEE80211_TX_UNICAST) ||
9796 + skb->len + FCS_LEN <= local->hw.wiphy->frag_threshold ||
9797 +- info->flags & IEEE80211_TX_CTL_AMPDU)
9798 ++ (info->flags & IEEE80211_TX_CTL_AMPDU &&
9799 ++ !local->ops->wake_tx_queue))
9800 + info->flags |= IEEE80211_TX_CTL_DONTFRAG;
9801 + }
9802 +
9803 +@@ -3709,13 +3710,15 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
9804 + struct ieee80211_local *local = hw_to_local(hw);
9805 + struct txq_info *txqi = container_of(txq, struct txq_info, txq);
9806 + struct ieee80211_hdr *hdr;
9807 +- struct sk_buff *skb = NULL;
9808 + struct fq *fq = &local->fq;
9809 + struct fq_tin *tin = &txqi->tin;
9810 + struct ieee80211_tx_info *info;
9811 + struct ieee80211_tx_data tx;
9812 ++ struct sk_buff *skb;
9813 + ieee80211_tx_result r;
9814 + struct ieee80211_vif *vif = txq->vif;
9815 ++ int q = vif->hw_queue[txq->ac];
9816 ++ bool q_stopped;
9817 +
9818 + WARN_ON_ONCE(softirq_count() == 0);
9819 +
9820 +@@ -3723,17 +3726,18 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
9821 + return NULL;
9822 +
9823 + begin:
9824 +- spin_lock_bh(&fq->lock);
9825 ++ spin_lock(&local->queue_stop_reason_lock);
9826 ++ q_stopped = local->queue_stop_reasons[q];
9827 ++ spin_unlock(&local->queue_stop_reason_lock);
9828 +
9829 +- if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ||
9830 +- test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags))
9831 +- goto out;
9832 +-
9833 +- if (vif->txqs_stopped[txq->ac]) {
9834 +- set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags);
9835 +- goto out;
9836 ++ if (unlikely(q_stopped)) {
9837 ++ /* mark for waking later */
9838 ++ set_bit(IEEE80211_TXQ_DIRTY, &txqi->flags);
9839 ++ return NULL;
9840 + }
9841 +
9842 ++ spin_lock_bh(&fq->lock);
9843 ++
9844 + /* Make sure fragments stay together. */
9845 + skb = __skb_dequeue(&txqi->frags);
9846 + if (unlikely(skb)) {
9847 +@@ -3743,6 +3747,9 @@ begin:
9848 + IEEE80211_SKB_CB(skb)->control.flags &=
9849 + ~IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
9850 + } else {
9851 ++ if (unlikely(test_bit(IEEE80211_TXQ_STOP, &txqi->flags)))
9852 ++ goto out;
9853 ++
9854 + skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
9855 + }
9856 +
9857 +@@ -3793,7 +3800,8 @@ begin:
9858 + }
9859 +
9860 + if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
9861 +- info->flags |= IEEE80211_TX_CTL_AMPDU;
9862 ++ info->flags |= (IEEE80211_TX_CTL_AMPDU |
9863 ++ IEEE80211_TX_CTL_DONTFRAG);
9864 + else
9865 + info->flags &= ~IEEE80211_TX_CTL_AMPDU;
9866 +
9867 +diff --git a/net/mac80211/util.c b/net/mac80211/util.c
9868 +index b512cb37aafb7..ed53c51bbc321 100644
9869 +--- a/net/mac80211/util.c
9870 ++++ b/net/mac80211/util.c
9871 +@@ -301,8 +301,6 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
9872 + local_bh_disable();
9873 + spin_lock(&fq->lock);
9874 +
9875 +- sdata->vif.txqs_stopped[ac] = false;
9876 +-
9877 + if (!test_bit(SDATA_STATE_RUNNING, &sdata->state))
9878 + goto out;
9879 +
9880 +@@ -324,7 +322,7 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
9881 + if (ac != txq->ac)
9882 + continue;
9883 +
9884 +- if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX,
9885 ++ if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY,
9886 + &txqi->flags))
9887 + continue;
9888 +
9889 +@@ -339,7 +337,7 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
9890 +
9891 + txqi = to_txq_info(vif->txq);
9892 +
9893 +- if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags) ||
9894 ++ if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) ||
9895 + (ps && atomic_read(&ps->num_sta_ps)) || ac != vif->txq->ac)
9896 + goto out;
9897 +
9898 +@@ -537,16 +535,10 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
9899 + continue;
9900 +
9901 + for (ac = 0; ac < n_acs; ac++) {
9902 +- if (sdata->vif.hw_queue[ac] == queue ||
9903 +- sdata->vif.cab_queue == queue) {
9904 +- if (!local->ops->wake_tx_queue) {
9905 +- netif_stop_subqueue(sdata->dev, ac);
9906 +- continue;
9907 +- }
9908 +- spin_lock(&local->fq.lock);
9909 +- sdata->vif.txqs_stopped[ac] = true;
9910 +- spin_unlock(&local->fq.lock);
9911 +- }
9912 ++ if (!local->ops->wake_tx_queue &&
9913 ++ (sdata->vif.hw_queue[ac] == queue ||
9914 ++ sdata->vif.cab_queue == queue))
9915 ++ netif_stop_subqueue(sdata->dev, ac);
9916 + }
9917 + }
9918 + rcu_read_unlock();
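Across the mac80211 hunks, the per-vif txqs_stopped[] bookkeeping is replaced by a per-txq IEEE80211_TXQ_DIRTY flag plus a direct check of the device queue's stop reasons, removing the fq->lock round trip from the stop path. The resulting stop/wake pairing, reduced to its core:

/* dequeue path: a stopped hardware queue just marks the txq dirty */
spin_lock(&local->queue_stop_reason_lock);
q_stopped = local->queue_stop_reasons[q];
spin_unlock(&local->queue_stop_reason_lock);
if (unlikely(q_stopped)) {
	set_bit(IEEE80211_TXQ_DIRTY, &txqi->flags);
	return NULL;
}

/* wake path: only txqs that actually went dirty are rescheduled */
if (test_and_clear_bit(IEEE80211_TXQ_DIRTY, &txqi->flags))
	drv_wake_tx_queue(local, txqi);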
9919 +diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
9920 +index fc9e728b6333a..45bbe3e54cc28 100644
9921 +--- a/net/mctp/af_mctp.c
9922 ++++ b/net/mctp/af_mctp.c
9923 +@@ -544,9 +544,6 @@ static int mctp_sk_init(struct sock *sk)
9924 +
9925 + static void mctp_sk_close(struct sock *sk, long timeout)
9926 + {
9927 +- struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
9928 +-
9929 +- del_timer_sync(&msk->key_expiry);
9930 + sk_common_release(sk);
9931 + }
9932 +
9933 +@@ -580,7 +577,14 @@ static void mctp_sk_unhash(struct sock *sk)
9934 + spin_lock_irqsave(&key->lock, fl2);
9935 + __mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_CLOSED);
9936 + }
9937 ++ sock_set_flag(sk, SOCK_DEAD);
9938 + spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
9939 ++
9940 ++ /* Since there are no more tag allocations (we have removed all of the
9941 ++ * keys), stop any pending expiry events. the timer cannot be re-queued
9942 ++ * as the sk is no longer observable
9943 ++ */
9944 ++ del_timer_sync(&msk->key_expiry);
9945 + }
9946 +
9947 + static struct proto mctp_proto = {
9948 +diff --git a/net/mctp/route.c b/net/mctp/route.c
9949 +index f9a80b82dc511..f51a05ec71624 100644
9950 +--- a/net/mctp/route.c
9951 ++++ b/net/mctp/route.c
9952 +@@ -147,6 +147,7 @@ static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk,
9953 + key->valid = true;
9954 + spin_lock_init(&key->lock);
9955 + refcount_set(&key->refs, 1);
9956 ++ sock_hold(key->sk);
9957 +
9958 + return key;
9959 + }
9960 +@@ -165,6 +166,7 @@ void mctp_key_unref(struct mctp_sk_key *key)
9961 + mctp_dev_release_key(key->dev, key);
9962 + spin_unlock_irqrestore(&key->lock, flags);
9963 +
9964 ++ sock_put(key->sk);
9965 + kfree(key);
9966 + }
9967 +
9968 +@@ -177,6 +179,11 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
9969 +
9970 + spin_lock_irqsave(&net->mctp.keys_lock, flags);
9971 +
9972 ++ if (sock_flag(&msk->sk, SOCK_DEAD)) {
9973 ++ rc = -EINVAL;
9974 ++ goto out_unlock;
9975 ++ }
9976 ++
9977 + hlist_for_each_entry(tmp, &net->mctp.keys, hlist) {
9978 + if (mctp_key_match(tmp, key->local_addr, key->peer_addr,
9979 + key->tag)) {
9980 +@@ -198,6 +205,7 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
9981 + hlist_add_head(&key->sklist, &msk->keys);
9982 + }
9983 +
9984 ++out_unlock:
9985 + spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
9986 +
9987 + return rc;
9988 +@@ -315,8 +323,8 @@ static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
9989 +
9990 + static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
9991 + {
9992 ++ struct mctp_sk_key *key, *any_key = NULL;
9993 + struct net *net = dev_net(skb->dev);
9994 +- struct mctp_sk_key *key;
9995 + struct mctp_sock *msk;
9996 + struct mctp_hdr *mh;
9997 + unsigned long f;
9998 +@@ -361,13 +369,11 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
9999 + * key for reassembly - we'll create a more specific
10000 + * one for future packets if required (ie, !EOM).
10001 + */
10002 +- key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
10003 +- if (key) {
10004 +- msk = container_of(key->sk,
10005 ++ any_key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
10006 ++ if (any_key) {
10007 ++ msk = container_of(any_key->sk,
10008 + struct mctp_sock, sk);
10009 +- spin_unlock_irqrestore(&key->lock, f);
10010 +- mctp_key_unref(key);
10011 +- key = NULL;
10012 ++ spin_unlock_irqrestore(&any_key->lock, f);
10013 + }
10014 + }
10015 +
10016 +@@ -419,14 +425,14 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
10017 + * this function.
10018 + */
10019 + rc = mctp_key_add(key, msk);
10020 +- if (rc) {
10021 +- kfree(key);
10022 +- } else {
10023 ++ if (!rc)
10024 + trace_mctp_key_acquire(key);
10025 +
10026 +- /* we don't need to release key->lock on exit */
10027 +- mctp_key_unref(key);
10028 +- }
10029 ++ /* we don't need to release key->lock on exit, so
10030 ++ * clean up here and suppress the unlock via
10031 ++ * setting to NULL
10032 ++ */
10033 ++ mctp_key_unref(key);
10034 + key = NULL;
10035 +
10036 + } else {
10037 +@@ -473,6 +479,8 @@ out_unlock:
10038 + spin_unlock_irqrestore(&key->lock, f);
10039 + mctp_key_unref(key);
10040 + }
10041 ++ if (any_key)
10042 ++ mctp_key_unref(any_key);
10043 + out:
10044 + if (rc)
10045 + kfree_skb(skb);
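The mctp fix ties key lifetime to the socket with a real reference (sock_hold() at key creation, sock_put() at the final unref) and uses SOCK_DEAD, set under net->mctp.keys_lock, as the gate: once unhash has flagged the socket dead, mctp_key_add() refuses new keys, so no new timer users can appear and the expiry timer can be stopped for good. Core of the shutdown ordering:

/* unhash side */
spin_lock_irqsave(&net->mctp.keys_lock, flags);
/* ... remove all existing keys ... */
sock_set_flag(sk, SOCK_DEAD);
spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
del_timer_sync(&msk->key_expiry);	/* cannot be re-armed any more */

/* add side, serialized by the same lock */
spin_lock_irqsave(&net->mctp.keys_lock, flags);
if (sock_flag(&msk->sk, SOCK_DEAD)) {
	rc = -EINVAL;
	goto out_unlock;
}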
10046 +diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
10047 +index 5a936334b517a..895e0ca542994 100644
10048 +--- a/net/netfilter/nf_conntrack_proto_sctp.c
10049 ++++ b/net/netfilter/nf_conntrack_proto_sctp.c
10050 +@@ -27,22 +27,16 @@
10051 + #include <net/netfilter/nf_conntrack_ecache.h>
10052 + #include <net/netfilter/nf_conntrack_timeout.h>
10053 +
10054 +-/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
10055 +- closely. They're more complex. --RR
10056 +-
10057 +- And so for me for SCTP :D -Kiran */
10058 +-
10059 + static const char *const sctp_conntrack_names[] = {
10060 +- "NONE",
10061 +- "CLOSED",
10062 +- "COOKIE_WAIT",
10063 +- "COOKIE_ECHOED",
10064 +- "ESTABLISHED",
10065 +- "SHUTDOWN_SENT",
10066 +- "SHUTDOWN_RECD",
10067 +- "SHUTDOWN_ACK_SENT",
10068 +- "HEARTBEAT_SENT",
10069 +- "HEARTBEAT_ACKED",
10070 ++ [SCTP_CONNTRACK_NONE] = "NONE",
10071 ++ [SCTP_CONNTRACK_CLOSED] = "CLOSED",
10072 ++ [SCTP_CONNTRACK_COOKIE_WAIT] = "COOKIE_WAIT",
10073 ++ [SCTP_CONNTRACK_COOKIE_ECHOED] = "COOKIE_ECHOED",
10074 ++ [SCTP_CONNTRACK_ESTABLISHED] = "ESTABLISHED",
10075 ++ [SCTP_CONNTRACK_SHUTDOWN_SENT] = "SHUTDOWN_SENT",
10076 ++ [SCTP_CONNTRACK_SHUTDOWN_RECD] = "SHUTDOWN_RECD",
10077 ++ [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = "SHUTDOWN_ACK_SENT",
10078 ++ [SCTP_CONNTRACK_HEARTBEAT_SENT] = "HEARTBEAT_SENT",
10079 + };
10080 +
10081 + #define SECS * HZ
10082 +@@ -54,12 +48,11 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
10083 + [SCTP_CONNTRACK_CLOSED] = 10 SECS,
10084 + [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS,
10085 + [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS,
10086 +- [SCTP_CONNTRACK_ESTABLISHED] = 5 DAYS,
10087 ++ [SCTP_CONNTRACK_ESTABLISHED] = 210 SECS,
10088 + [SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000,
10089 + [SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000,
10090 + [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS,
10091 + [SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS,
10092 +- [SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS,
10093 + };
10094 +
10095 + #define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1
10096 +@@ -73,7 +66,6 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
10097 + #define sSR SCTP_CONNTRACK_SHUTDOWN_RECD
10098 + #define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT
10099 + #define sHS SCTP_CONNTRACK_HEARTBEAT_SENT
10100 +-#define sHA SCTP_CONNTRACK_HEARTBEAT_ACKED
10101 + #define sIV SCTP_CONNTRACK_MAX
10102 +
10103 + /*
10104 +@@ -96,9 +88,6 @@ SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite
10105 + CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
10106 + the SHUTDOWN chunk. Connection is closed.
10107 + HEARTBEAT_SENT - We have seen a HEARTBEAT in a new flow.
10108 +-HEARTBEAT_ACKED - We have seen a HEARTBEAT-ACK in the direction opposite to
10109 +- that of the HEARTBEAT chunk. Secondary connection is
10110 +- established.
10111 + */
10112 +
10113 + /* TODO
10114 +@@ -115,33 +104,33 @@ cookie echoed to closed.
10115 + static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
10116 + {
10117 + /* ORIGINAL */
10118 +-/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
10119 +-/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA},
10120 +-/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},
10121 +-/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
10122 +-/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS},
10123 +-/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA, sHA},
10124 +-/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't have Stale cookie*/
10125 +-/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* 5.2.4 - Big TODO */
10126 +-/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't come in orig dir */
10127 +-/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL, sHA},
10128 +-/* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA},
10129 +-/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA}
10130 ++/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
10131 ++/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW},
10132 ++/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},
10133 ++/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
10134 ++/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL},
10135 ++/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA},
10136 ++/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't have Stale cookie*/
10137 ++/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL},/* 5.2.4 - Big TODO */
10138 ++/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */
10139 ++/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL},
10140 ++/* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
10141 ++/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
10142 + },
10143 + {
10144 + /* REPLY */
10145 +-/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
10146 +-/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* INIT in sCL Big TODO */
10147 +-/* init_ack */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},
10148 +-/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL},
10149 +-/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR},
10150 +-/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA},
10151 +-/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV, sHA},
10152 +-/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* Can't come in reply dir */
10153 +-/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV, sHA},
10154 +-/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV, sHA},
10155 +-/* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA},
10156 +-/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA}
10157 ++/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
10158 ++/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* INIT in sCL Big TODO */
10159 ++/* init_ack */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV},
10160 ++/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV},
10161 ++/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV},
10162 ++/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV},
10163 ++/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV},
10164 ++/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */
10165 ++/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV},
10166 ++/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV},
10167 ++/* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
10168 ++/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sES},
10169 + }
10170 + };
10171 +
10172 +@@ -412,22 +401,29 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
10173 + for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
10174 + /* Special cases of Verification tag check (Sec 8.5.1) */
10175 + if (sch->type == SCTP_CID_INIT) {
10176 +- /* Sec 8.5.1 (A) */
10177 ++ /* (A) vtag MUST be zero */
10178 + if (sh->vtag != 0)
10179 + goto out_unlock;
10180 + } else if (sch->type == SCTP_CID_ABORT) {
10181 +- /* Sec 8.5.1 (B) */
10182 +- if (sh->vtag != ct->proto.sctp.vtag[dir] &&
10183 +- sh->vtag != ct->proto.sctp.vtag[!dir])
10184 ++ /* (B) vtag MUST match own vtag if T flag is unset OR
10185 ++ * MUST match peer's vtag if T flag is set
10186 ++ */
10187 ++ if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
10188 ++ sh->vtag != ct->proto.sctp.vtag[dir]) ||
10189 ++ ((sch->flags & SCTP_CHUNK_FLAG_T) &&
10190 ++ sh->vtag != ct->proto.sctp.vtag[!dir]))
10191 + goto out_unlock;
10192 + } else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
10193 +- /* Sec 8.5.1 (C) */
10194 +- if (sh->vtag != ct->proto.sctp.vtag[dir] &&
10195 +- sh->vtag != ct->proto.sctp.vtag[!dir] &&
10196 +- sch->flags & SCTP_CHUNK_FLAG_T)
10197 ++ /* (C) vtag MUST match own vtag if T flag is unset OR
10198 ++ * MUST match peer's vtag if T flag is set
10199 ++ */
10200 ++ if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
10201 ++ sh->vtag != ct->proto.sctp.vtag[dir]) ||
10202 ++ ((sch->flags & SCTP_CHUNK_FLAG_T) &&
10203 ++ sh->vtag != ct->proto.sctp.vtag[!dir]))
10204 + goto out_unlock;
10205 + } else if (sch->type == SCTP_CID_COOKIE_ECHO) {
10206 +- /* Sec 8.5.1 (D) */
10207 ++ /* (D) vtag must be same as init_vtag as found in INIT_ACK */
10208 + if (sh->vtag != ct->proto.sctp.vtag[dir])
10209 + goto out_unlock;
10210 + } else if (sch->type == SCTP_CID_HEARTBEAT) {
10211 +@@ -501,8 +497,12 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
10212 + }
10213 +
10214 + ct->proto.sctp.state = new_state;
10215 +- if (old_state != new_state)
10216 ++ if (old_state != new_state) {
10217 + nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
10218 ++ if (new_state == SCTP_CONNTRACK_ESTABLISHED &&
10219 ++ !test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
10220 ++ nf_conntrack_event_cache(IPCT_ASSURED, ct);
10221 ++ }
10222 + }
10223 + spin_unlock_bh(&ct->lock);
10224 +
10225 +@@ -516,14 +516,6 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
10226 +
10227 + nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
10228 +
10229 +- if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED &&
10230 +- dir == IP_CT_DIR_REPLY &&
10231 +- new_state == SCTP_CONNTRACK_ESTABLISHED) {
10232 +- pr_debug("Setting assured bit\n");
10233 +- set_bit(IPS_ASSURED_BIT, &ct->status);
10234 +- nf_conntrack_event_cache(IPCT_ASSURED, ct);
10235 +- }
10236 +-
10237 + return NF_ACCEPT;
10238 +
10239 + out_unlock:
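
The rewritten verification-tag checks for ABORT and SHUTDOWN COMPLETE above share a single rule from Sec 8.5.1 of the SCTP spec: a chunk without the T flag must carry the receiver's own vtag, and a chunk with the T flag must carry the peer's vtag. The old code accepted either tag in both cases. A minimal standalone sketch of the new predicate; vtag_ok() and its arguments are illustrative, not kernel code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SCTP_CHUNK_FLAG_T 0x01   /* the T bit tested in the hunks above */

/* Without the T flag the chunk must carry our own vtag; with it,
 * the peer's vtag. */
static bool vtag_ok(uint8_t flags, uint32_t vtag,
                    uint32_t own_vtag, uint32_t peer_vtag)
{
        if (!(flags & SCTP_CHUNK_FLAG_T))
                return vtag == own_vtag;
        return vtag == peer_vtag;
}

int main(void)
{
        printf("%d\n", vtag_ok(0, 0x1111, 0x1111, 0x2222));                 /* 1: accepted */
        printf("%d\n", vtag_ok(SCTP_CHUNK_FLAG_T, 0x1111, 0x1111, 0x2222)); /* 0: dropped */
        return 0;
}

Tying each accepted tag to the flag state halves the values a blind sender could use, compared to the old "either vtag" test.
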
10240 +diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
10241 +index 6566310831779..3ac1af6f59fcc 100644
10242 +--- a/net/netfilter/nf_conntrack_proto_tcp.c
10243 ++++ b/net/netfilter/nf_conntrack_proto_tcp.c
10244 +@@ -1068,6 +1068,13 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
10245 + ct->proto.tcp.last_flags |=
10246 + IP_CT_EXP_CHALLENGE_ACK;
10247 + }
10248 ++
10249 ++ /* possible challenge ack reply to syn */
10250 ++ if (old_state == TCP_CONNTRACK_SYN_SENT &&
10251 ++ index == TCP_ACK_SET &&
10252 ++ dir == IP_CT_DIR_REPLY)
10253 ++ ct->proto.tcp.last_ack = ntohl(th->ack_seq);
10254 ++
10255 + spin_unlock_bh(&ct->lock);
10256 + nf_ct_l4proto_log_invalid(skb, ct, state,
10257 + "packet (index %d) in dir %d ignored, state %s",
10258 +@@ -1193,6 +1200,14 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
10259 + * segments we ignored. */
10260 + goto in_window;
10261 + }
10262 ++
10263 ++ /* Reset in response to a challenge-ack we let through earlier */
10264 ++ if (old_state == TCP_CONNTRACK_SYN_SENT &&
10265 ++ ct->proto.tcp.last_index == TCP_ACK_SET &&
10266 ++ ct->proto.tcp.last_dir == IP_CT_DIR_REPLY &&
10267 ++ ntohl(th->seq) == ct->proto.tcp.last_ack)
10268 ++ goto in_window;
10269 ++
10270 + break;
10271 + default:
10272 + /* Keep compilers happy. */
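
The two TCP hunks above cooperate: the first remembers the acknowledgment number of a reply-direction ACK seen while the connection is still in SYN_SENT (a likely challenge ACK), and the second then lets a RST through only when its sequence number equals that remembered value. A toy model of the pairing, with made-up type and function names:

#include <stdbool.h>
#include <stdint.h>

struct toy_ct {                       /* stand-in for nf_conn state */
        uint32_t last_ack;
        bool     saw_challenge_ack;
};

/* Reply-direction ACK observed while still in SYN_SENT: record it. */
static void note_possible_challenge_ack(struct toy_ct *ct, uint32_t ack_seq)
{
        ct->last_ack = ack_seq;
        ct->saw_challenge_ack = true;
}

/* A later RST is acceptable only if its seq answers that challenge. */
static bool rst_answers_challenge(const struct toy_ct *ct, uint32_t seq)
{
        return ct->saw_challenge_ack && seq == ct->last_ack;
}
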
10273 +diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
10274 +index bca839ab1ae8d..460294bd4b606 100644
10275 +--- a/net/netfilter/nf_conntrack_standalone.c
10276 ++++ b/net/netfilter/nf_conntrack_standalone.c
10277 +@@ -601,7 +601,6 @@ enum nf_ct_sysctl_index {
10278 + NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD,
10279 + NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
10280 + NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT,
10281 +- NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED,
10282 + #endif
10283 + #ifdef CONFIG_NF_CT_PROTO_DCCP
10284 + NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST,
10285 +@@ -886,12 +885,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
10286 + .mode = 0644,
10287 + .proc_handler = proc_dointvec_jiffies,
10288 + },
10289 +- [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED] = {
10290 +- .procname = "nf_conntrack_sctp_timeout_heartbeat_acked",
10291 +- .maxlen = sizeof(unsigned int),
10292 +- .mode = 0644,
10293 +- .proc_handler = proc_dointvec_jiffies,
10294 +- },
10295 + #endif
10296 + #ifdef CONFIG_NF_CT_PROTO_DCCP
10297 + [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST] = {
10298 +@@ -1035,7 +1028,6 @@ static void nf_conntrack_standalone_init_sctp_sysctl(struct net *net,
10299 + XASSIGN(SHUTDOWN_RECD, sn);
10300 + XASSIGN(SHUTDOWN_ACK_SENT, sn);
10301 + XASSIGN(HEARTBEAT_SENT, sn);
10302 +- XASSIGN(HEARTBEAT_ACKED, sn);
10303 + #undef XASSIGN
10304 + #endif
10305 + }
10306 +diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
10307 +index 7325bee7d1442..19ea4d3c35535 100644
10308 +--- a/net/netfilter/nft_set_rbtree.c
10309 ++++ b/net/netfilter/nft_set_rbtree.c
10310 +@@ -38,10 +38,12 @@ static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
10311 + return !nft_rbtree_interval_end(rbe);
10312 + }
10313 +
10314 +-static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
10315 +- const struct nft_rbtree_elem *interval)
10316 ++static int nft_rbtree_cmp(const struct nft_set *set,
10317 ++ const struct nft_rbtree_elem *e1,
10318 ++ const struct nft_rbtree_elem *e2)
10319 + {
10320 +- return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
10321 ++ return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext),
10322 ++ set->klen);
10323 + }
10324 +
10325 + static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
10326 +@@ -52,7 +54,6 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
10327 + const struct nft_rbtree_elem *rbe, *interval = NULL;
10328 + u8 genmask = nft_genmask_cur(net);
10329 + const struct rb_node *parent;
10330 +- const void *this;
10331 + int d;
10332 +
10333 + parent = rcu_dereference_raw(priv->root.rb_node);
10334 +@@ -62,12 +63,11 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
10335 +
10336 + rbe = rb_entry(parent, struct nft_rbtree_elem, node);
10337 +
10338 +- this = nft_set_ext_key(&rbe->ext);
10339 +- d = memcmp(this, key, set->klen);
10340 ++ d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
10341 + if (d < 0) {
10342 + parent = rcu_dereference_raw(parent->rb_left);
10343 + if (interval &&
10344 +- nft_rbtree_equal(set, this, interval) &&
10345 ++ !nft_rbtree_cmp(set, rbe, interval) &&
10346 + nft_rbtree_interval_end(rbe) &&
10347 + nft_rbtree_interval_start(interval))
10348 + continue;
10349 +@@ -215,154 +215,216 @@ static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
10350 + return rbe;
10351 + }
10352 +
10353 ++static int nft_rbtree_gc_elem(const struct nft_set *__set,
10354 ++ struct nft_rbtree *priv,
10355 ++ struct nft_rbtree_elem *rbe)
10356 ++{
10357 ++ struct nft_set *set = (struct nft_set *)__set;
10358 ++ struct rb_node *prev = rb_prev(&rbe->node);
10359 ++ struct nft_rbtree_elem *rbe_prev;
10360 ++ struct nft_set_gc_batch *gcb;
10361 ++
10362 ++ gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
10363 ++ if (!gcb)
10364 ++ return -ENOMEM;
10365 ++
10366 ++ /* search for expired end interval coming before this element. */
10367 ++ do {
10368 ++ rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
10369 ++ if (nft_rbtree_interval_end(rbe_prev))
10370 ++ break;
10371 ++
10372 ++ prev = rb_prev(prev);
10373 ++ } while (prev != NULL);
10374 ++
10375 ++ rb_erase(&rbe_prev->node, &priv->root);
10376 ++ rb_erase(&rbe->node, &priv->root);
10377 ++ atomic_sub(2, &set->nelems);
10378 ++
10379 ++ nft_set_gc_batch_add(gcb, rbe);
10380 ++ nft_set_gc_batch_complete(gcb);
10381 ++
10382 ++ return 0;
10383 ++}
10384 ++
10385 ++static bool nft_rbtree_update_first(const struct nft_set *set,
10386 ++ struct nft_rbtree_elem *rbe,
10387 ++ struct rb_node *first)
10388 ++{
10389 ++ struct nft_rbtree_elem *first_elem;
10390 ++
10391 ++ first_elem = rb_entry(first, struct nft_rbtree_elem, node);
10392 ++ /* this element is closest to where the new element is to be inserted:
10393 ++ * make it the first element for the walk over the node list.
10394 ++ */
10395 ++ if (nft_rbtree_cmp(set, rbe, first_elem) < 0)
10396 ++ return true;
10397 ++
10398 ++ return false;
10399 ++}
10400 ++
10401 + static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
10402 + struct nft_rbtree_elem *new,
10403 + struct nft_set_ext **ext)
10404 + {
10405 +- bool overlap = false, dup_end_left = false, dup_end_right = false;
10406 ++ struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
10407 ++ struct rb_node *node, *parent, **p, *first = NULL;
10408 + struct nft_rbtree *priv = nft_set_priv(set);
10409 + u8 genmask = nft_genmask_next(net);
10410 +- struct nft_rbtree_elem *rbe;
10411 +- struct rb_node *parent, **p;
10412 +- int d;
10413 ++ int d, err;
10414 +
10415 +- /* Detect overlaps as we descend the tree. Set the flag in these cases:
10416 +- *
10417 +- * a1. _ _ __>| ?_ _ __| (insert end before existing end)
10418 +- * a2. _ _ ___| ?_ _ _>| (insert end after existing end)
10419 +- * a3. _ _ ___? >|_ _ __| (insert start before existing end)
10420 +- *
10421 +- * and clear it later on, as we eventually reach the points indicated by
10422 +- * '?' above, in the cases described below. We'll always meet these
10423 +- * later, locally, due to tree ordering, and overlaps for the intervals
10424 +- * that are the closest together are always evaluated last.
10425 +- *
10426 +- * b1. _ _ __>| !_ _ __| (insert end before existing start)
10427 +- * b2. _ _ ___| !_ _ _>| (insert end after existing start)
10428 +- * b3. _ _ ___! >|_ _ __| (insert start after existing end, as a leaf)
10429 +- * '--' no nodes falling in this range
10430 +- * b4. >|_ _ ! (insert start before existing start)
10431 +- *
10432 +- * Case a3. resolves to b3.:
10433 +- * - if the inserted start element is the leftmost, because the '0'
10434 +- * element in the tree serves as end element
10435 +- * - otherwise, if an existing end is found immediately to the left. If
10436 +- * there are existing nodes in between, we need to further descend the
10437 +- * tree before we can conclude the new start isn't causing an overlap
10438 +- *
10439 +- * or to b4., which, preceded by a3., means we already traversed one or
10440 +- * more existing intervals entirely, from the right.
10441 +- *
10442 +- * For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
10443 +- * in that order.
10444 +- *
10445 +- * The flag is also cleared in two special cases:
10446 +- *
10447 +- * b5. |__ _ _!|<_ _ _ (insert start right before existing end)
10448 +- * b6. |__ _ >|!__ _ _ (insert end right after existing start)
10449 +- *
10450 +- * which always happen as last step and imply that no further
10451 +- * overlapping is possible.
10452 +- *
10453 +- * Another special case comes from the fact that start elements matching
10454 +- * an already existing start element are allowed: insertion is not
10455 +- * performed but we return -EEXIST in that case, and the error will be
10456 +- * cleared by the caller if NLM_F_EXCL is not present in the request.
10457 +- * This way, request for insertion of an exact overlap isn't reported as
10458 +- * error to userspace if not desired.
10459 +- *
10460 +- * However, if the existing start matches a pre-existing start, but the
10461 +- * end element doesn't match the corresponding pre-existing end element,
10462 +- * we need to report a partial overlap. This is a local condition that
10463 +- * can be noticed without need for a tracking flag, by checking for a
10464 +- * local duplicated end for a corresponding start, from left and right,
10465 +- * separately.
10466 ++ /* Descend the tree to search for an existing element greater than the
10467 ++ * key value to insert. This is the first element from which to walk
10468 ++ * the ordered list of elements and check for possible overlap.
10469 + */
10470 +-
10471 + parent = NULL;
10472 + p = &priv->root.rb_node;
10473 + while (*p != NULL) {
10474 + parent = *p;
10475 + rbe = rb_entry(parent, struct nft_rbtree_elem, node);
10476 +- d = memcmp(nft_set_ext_key(&rbe->ext),
10477 +- nft_set_ext_key(&new->ext),
10478 +- set->klen);
10479 ++ d = nft_rbtree_cmp(set, rbe, new);
10480 ++
10481 + if (d < 0) {
10482 + p = &parent->rb_left;
10483 +-
10484 +- if (nft_rbtree_interval_start(new)) {
10485 +- if (nft_rbtree_interval_end(rbe) &&
10486 +- nft_set_elem_active(&rbe->ext, genmask) &&
10487 +- !nft_set_elem_expired(&rbe->ext) && !*p)
10488 +- overlap = false;
10489 +- } else {
10490 +- if (dup_end_left && !*p)
10491 +- return -ENOTEMPTY;
10492 +-
10493 +- overlap = nft_rbtree_interval_end(rbe) &&
10494 +- nft_set_elem_active(&rbe->ext,
10495 +- genmask) &&
10496 +- !nft_set_elem_expired(&rbe->ext);
10497 +-
10498 +- if (overlap) {
10499 +- dup_end_right = true;
10500 +- continue;
10501 +- }
10502 +- }
10503 + } else if (d > 0) {
10504 +- p = &parent->rb_right;
10505 ++ if (!first ||
10506 ++ nft_rbtree_update_first(set, rbe, first))
10507 ++ first = &rbe->node;
10508 +
10509 +- if (nft_rbtree_interval_end(new)) {
10510 +- if (dup_end_right && !*p)
10511 +- return -ENOTEMPTY;
10512 +-
10513 +- overlap = nft_rbtree_interval_end(rbe) &&
10514 +- nft_set_elem_active(&rbe->ext,
10515 +- genmask) &&
10516 +- !nft_set_elem_expired(&rbe->ext);
10517 +-
10518 +- if (overlap) {
10519 +- dup_end_left = true;
10520 +- continue;
10521 +- }
10522 +- } else if (nft_set_elem_active(&rbe->ext, genmask) &&
10523 +- !nft_set_elem_expired(&rbe->ext)) {
10524 +- overlap = nft_rbtree_interval_end(rbe);
10525 +- }
10526 ++ p = &parent->rb_right;
10527 + } else {
10528 +- if (nft_rbtree_interval_end(rbe) &&
10529 +- nft_rbtree_interval_start(new)) {
10530 ++ if (nft_rbtree_interval_end(rbe))
10531 + p = &parent->rb_left;
10532 +-
10533 +- if (nft_set_elem_active(&rbe->ext, genmask) &&
10534 +- !nft_set_elem_expired(&rbe->ext))
10535 +- overlap = false;
10536 +- } else if (nft_rbtree_interval_start(rbe) &&
10537 +- nft_rbtree_interval_end(new)) {
10538 ++ else
10539 + p = &parent->rb_right;
10540 ++ }
10541 ++ }
10542 ++
10543 ++ if (!first)
10544 ++ first = rb_first(&priv->root);
10545 ++
10546 ++ /* Detect overlap by going through the list of valid tree nodes.
10547 ++ * Values stored in the tree are in reversed order, starting from
10548 ++ * highest to lowest value.
10549 ++ */
10550 ++ for (node = first; node != NULL; node = rb_next(node)) {
10551 ++ rbe = rb_entry(node, struct nft_rbtree_elem, node);
10552 +
10553 +- if (nft_set_elem_active(&rbe->ext, genmask) &&
10554 +- !nft_set_elem_expired(&rbe->ext))
10555 +- overlap = false;
10556 +- } else if (nft_set_elem_active(&rbe->ext, genmask) &&
10557 +- !nft_set_elem_expired(&rbe->ext)) {
10558 +- *ext = &rbe->ext;
10559 +- return -EEXIST;
10560 +- } else {
10561 +- overlap = false;
10562 +- if (nft_rbtree_interval_end(rbe))
10563 +- p = &parent->rb_left;
10564 +- else
10565 +- p = &parent->rb_right;
10566 ++ if (!nft_set_elem_active(&rbe->ext, genmask))
10567 ++ continue;
10568 ++
10569 ++ /* perform garbage collection to avoid bogus overlap reports. */
10570 ++ if (nft_set_elem_expired(&rbe->ext)) {
10571 ++ err = nft_rbtree_gc_elem(set, priv, rbe);
10572 ++ if (err < 0)
10573 ++ return err;
10574 ++
10575 ++ continue;
10576 ++ }
10577 ++
10578 ++ d = nft_rbtree_cmp(set, rbe, new);
10579 ++ if (d == 0) {
10580 ++ /* Matching end element: no need to look for an
10581 ++ * overlapping greater or equal element.
10582 ++ */
10583 ++ if (nft_rbtree_interval_end(rbe)) {
10584 ++ rbe_le = rbe;
10585 ++ break;
10586 ++ }
10587 ++
10588 ++ /* first element that is greater or equal to key value. */
10589 ++ if (!rbe_ge) {
10590 ++ rbe_ge = rbe;
10591 ++ continue;
10592 ++ }
10593 ++
10594 ++ /* this is a closer, greater or equal element; update it. */
10595 ++ if (nft_rbtree_cmp(set, rbe_ge, new) != 0) {
10596 ++ rbe_ge = rbe;
10597 ++ continue;
10598 ++ }
10599 ++
10600 ++ /* element is equal to the key value; make sure the flags
10601 ++ * are the same: an existing greater or equal start element
10602 ++ * must not be replaced by a greater or equal end element.
10603 ++ */
10604 ++ if ((nft_rbtree_interval_start(new) &&
10605 ++ nft_rbtree_interval_start(rbe_ge)) ||
10606 ++ (nft_rbtree_interval_end(new) &&
10607 ++ nft_rbtree_interval_end(rbe_ge))) {
10608 ++ rbe_ge = rbe;
10609 ++ continue;
10610 + }
10611 ++ } else if (d > 0) {
10612 ++ /* annotate element greater than the new element. */
10613 ++ rbe_ge = rbe;
10614 ++ continue;
10615 ++ } else if (d < 0) {
10616 ++ /* annotate element less than the new element. */
10617 ++ rbe_le = rbe;
10618 ++ break;
10619 + }
10620 ++ }
10621 +
10622 +- dup_end_left = dup_end_right = false;
10623 ++ /* - new start element matching existing start element: full overlap
10624 ++ * reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
10625 ++ */
10626 ++ if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
10627 ++ nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
10628 ++ *ext = &rbe_ge->ext;
10629 ++ return -EEXIST;
10630 + }
10631 +
10632 +- if (overlap)
10633 ++ /* - new end element matching existing end element: full overlap
10634 ++ * reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
10635 ++ */
10636 ++ if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) &&
10637 ++ nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) {
10638 ++ *ext = &rbe_le->ext;
10639 ++ return -EEXIST;
10640 ++ }
10641 ++
10642 ++ /* - new start element with existing closest, less or equal key value
10643 ++ * being a start element: partial overlap, reported as -ENOTEMPTY.
10644 ++ * Anonymous sets allow for two consecutive start elements since they
10645 ++ * are constant; skip them to avoid bogus overlap reports.
10646 ++ */
10647 ++ if (!nft_set_is_anonymous(set) && rbe_le &&
10648 ++ nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
10649 ++ return -ENOTEMPTY;
10650 ++
10651 ++ /* - new end element with existing closest, less or equal key value
10652 ++ * being an end element: partial overlap, reported as -ENOTEMPTY.
10653 ++ */
10654 ++ if (rbe_le &&
10655 ++ nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new))
10656 + return -ENOTEMPTY;
10657 +
10658 ++ /* - new end element with existing closest, greater or equal key value
10659 ++ * being an end element: partial overlap, reported as -ENOTEMPTY
10660 ++ */
10661 ++ if (rbe_ge &&
10662 ++ nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new))
10663 ++ return -ENOTEMPTY;
10664 ++
10665 ++ /* Accepted element: pick insertion point depending on key value */
10666 ++ parent = NULL;
10667 ++ p = &priv->root.rb_node;
10668 ++ while (*p != NULL) {
10669 ++ parent = *p;
10670 ++ rbe = rb_entry(parent, struct nft_rbtree_elem, node);
10671 ++ d = nft_rbtree_cmp(set, rbe, new);
10672 ++
10673 ++ if (d < 0)
10674 ++ p = &parent->rb_left;
10675 ++ else if (d > 0)
10676 ++ p = &parent->rb_right;
10677 ++ else if (nft_rbtree_interval_end(rbe))
10678 ++ p = &parent->rb_left;
10679 ++ else
10680 ++ p = &parent->rb_right;
10681 ++ }
10682 ++
10683 + rb_link_node_rcu(&new->node, parent, p);
10684 + rb_insert_color(&new->node, &priv->root);
10685 + return 0;
10686 +@@ -501,23 +563,37 @@ static void nft_rbtree_gc(struct work_struct *work)
10687 + struct nft_rbtree *priv;
10688 + struct rb_node *node;
10689 + struct nft_set *set;
10690 ++ struct net *net;
10691 ++ u8 genmask;
10692 +
10693 + priv = container_of(work, struct nft_rbtree, gc_work.work);
10694 + set = nft_set_container_of(priv);
10695 ++ net = read_pnet(&set->net);
10696 ++ genmask = nft_genmask_cur(net);
10697 +
10698 + write_lock_bh(&priv->lock);
10699 + write_seqcount_begin(&priv->count);
10700 + for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
10701 + rbe = rb_entry(node, struct nft_rbtree_elem, node);
10702 +
10703 ++ if (!nft_set_elem_active(&rbe->ext, genmask))
10704 ++ continue;
10705 ++
10706 ++ /* elements are reversed in the rbtree for historical reasons,
10707 ++ * from highest to lowest value, which is why the end element is
10708 ++ * always visited before the start element.
10709 ++ */
10710 + if (nft_rbtree_interval_end(rbe)) {
10711 + rbe_end = rbe;
10712 + continue;
10713 + }
10714 + if (!nft_set_elem_expired(&rbe->ext))
10715 + continue;
10716 +- if (nft_set_elem_mark_busy(&rbe->ext))
10717 ++
10718 ++ if (nft_set_elem_mark_busy(&rbe->ext)) {
10719 ++ rbe_end = NULL;
10720 + continue;
10721 ++ }
10722 +
10723 + if (rbe_prev) {
10724 + rb_erase(&rbe_prev->node, &priv->root);
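
The rewrite above replaces the boolean nft_rbtree_equal() with a three-way comparator, so one helper drives both the tree descent (d < 0, d > 0) and the equality tests (d == 0) that the overlap rules rely on. A simplified sketch of the comparator's shape, with a toy element type standing in for the nftables structures:

#include <string.h>

struct toy_elem {
        unsigned char key[16];        /* fixed-width set key */
};

/* Three-way memcmp over the key bytes, mirroring nft_rbtree_cmp(). */
static int toy_cmp(const struct toy_elem *e1, const struct toy_elem *e2,
                   unsigned int klen)
{
        return memcmp(e1->key, e2->key, klen);
}

With that single comparator, the insert path only needs two witnesses, the closest greater-or-equal element (rbe_ge) and the closest less-or-equal element (rbe_le), and each full or partial overlap verdict above reduces to interval-flag checks on those two.
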
10725 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
10726 +index a662e8a5ff84a..e506712967918 100644
10727 +--- a/net/netlink/af_netlink.c
10728 ++++ b/net/netlink/af_netlink.c
10729 +@@ -580,7 +580,9 @@ static int netlink_insert(struct sock *sk, u32 portid)
10730 + if (nlk_sk(sk)->bound)
10731 + goto err;
10732 +
10733 +- nlk_sk(sk)->portid = portid;
10734 ++ /* portid can be read locklessly from netlink_getname(). */
10735 ++ WRITE_ONCE(nlk_sk(sk)->portid, portid);
10736 ++
10737 + sock_hold(sk);
10738 +
10739 + err = __netlink_insert(table, sk);
10740 +@@ -1085,9 +1087,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
10741 + return -EINVAL;
10742 +
10743 + if (addr->sa_family == AF_UNSPEC) {
10744 +- sk->sk_state = NETLINK_UNCONNECTED;
10745 +- nlk->dst_portid = 0;
10746 +- nlk->dst_group = 0;
10747 ++ /* paired with READ_ONCE() in netlink_getsockbyportid() */
10748 ++ WRITE_ONCE(sk->sk_state, NETLINK_UNCONNECTED);
10749 ++ /* dst_portid and dst_group can be read locklessly */
10750 ++ WRITE_ONCE(nlk->dst_portid, 0);
10751 ++ WRITE_ONCE(nlk->dst_group, 0);
10752 + return 0;
10753 + }
10754 + if (addr->sa_family != AF_NETLINK)
10755 +@@ -1108,9 +1112,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
10756 + err = netlink_autobind(sock);
10757 +
10758 + if (err == 0) {
10759 +- sk->sk_state = NETLINK_CONNECTED;
10760 +- nlk->dst_portid = nladdr->nl_pid;
10761 +- nlk->dst_group = ffs(nladdr->nl_groups);
10762 ++ /* paired with READ_ONCE() in netlink_getsockbyportid() */
10763 ++ WRITE_ONCE(sk->sk_state, NETLINK_CONNECTED);
10764 ++ /* dst_portid and dst_group can be read locklessly */
10765 ++ WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid);
10766 ++ WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups));
10767 + }
10768 +
10769 + return err;
10770 +@@ -1127,10 +1133,12 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
10771 + nladdr->nl_pad = 0;
10772 +
10773 + if (peer) {
10774 +- nladdr->nl_pid = nlk->dst_portid;
10775 +- nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
10776 ++ /* Paired with WRITE_ONCE() in netlink_connect() */
10777 ++ nladdr->nl_pid = READ_ONCE(nlk->dst_portid);
10778 ++ nladdr->nl_groups = netlink_group_mask(READ_ONCE(nlk->dst_group));
10779 + } else {
10780 +- nladdr->nl_pid = nlk->portid;
10781 ++ /* Paired with WRITE_ONCE() in netlink_insert() */
10782 ++ nladdr->nl_pid = READ_ONCE(nlk->portid);
10783 + netlink_lock_table();
10784 + nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
10785 + netlink_unlock_table();
10786 +@@ -1157,8 +1165,9 @@ static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
10787 +
10788 + /* Don't bother queuing skb if kernel socket has no input function */
10789 + nlk = nlk_sk(sock);
10790 +- if (sock->sk_state == NETLINK_CONNECTED &&
10791 +- nlk->dst_portid != nlk_sk(ssk)->portid) {
10792 ++ /* dst_portid and sk_state can be changed in netlink_connect() */
10793 ++ if (READ_ONCE(sock->sk_state) == NETLINK_CONNECTED &&
10794 ++ READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) {
10795 + sock_put(sock);
10796 + return ERR_PTR(-ECONNREFUSED);
10797 + }
10798 +@@ -1875,8 +1884,9 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
10799 + goto out;
10800 + netlink_skb_flags |= NETLINK_SKB_DST;
10801 + } else {
10802 +- dst_portid = nlk->dst_portid;
10803 +- dst_group = nlk->dst_group;
10804 ++ /* Paired with WRITE_ONCE() in netlink_connect() */
10805 ++ dst_portid = READ_ONCE(nlk->dst_portid);
10806 ++ dst_group = READ_ONCE(nlk->dst_group);
10807 + }
10808 +
10809 + /* Paired with WRITE_ONCE() in netlink_insert() */
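
All of the netlink hunks above follow one pattern: a field written under the socket lock but read locklessly elsewhere gets a WRITE_ONCE()/READ_ONCE() pair, so the compiler cannot tear, fuse, or re-read the access. A rough userspace analogue using relaxed C11 atomics (toy types, not the kernel API):

#include <stdatomic.h>
#include <stdint.h>

struct toy_sock {
        _Atomic uint32_t dst_portid;  /* written by connect, read anywhere */
};

/* Writer side, the analogue of WRITE_ONCE(nlk->dst_portid, ...). */
static void toy_connect(struct toy_sock *s, uint32_t portid)
{
        atomic_store_explicit(&s->dst_portid, portid, memory_order_relaxed);
}

/* Lockless reader, the analogue of READ_ONCE(nlk->dst_portid). */
static uint32_t toy_getname(struct toy_sock *s)
{
        return atomic_load_explicit(&s->dst_portid, memory_order_relaxed);
}
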
10810 +diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
10811 +index a8da88db7893f..4e7c968cde2dc 100644
10812 +--- a/net/netrom/nr_timer.c
10813 ++++ b/net/netrom/nr_timer.c
10814 +@@ -121,6 +121,7 @@ static void nr_heartbeat_expiry(struct timer_list *t)
10815 + is accepted() it isn't 'dead' so doesn't get removed. */
10816 + if (sock_flag(sk, SOCK_DESTROY) ||
10817 + (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
10818 ++ sock_hold(sk);
10819 + bh_unlock_sock(sk);
10820 + nr_destroy_socket(sk);
10821 + goto out;
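
The one-line netrom fix above takes a reference on the socket before the heartbeat timer unlocks it and calls nr_destroy_socket(), so teardown cannot drop the last reference out from under the timer path. The general shape of the pattern, with a toy refcount; this sketch assumes, as the fix implies, that the destroy step consumes one reference:

#include <stdlib.h>

struct toy_obj {
        int refcnt;                   /* object assumed heap-allocated */
};

static void toy_hold(struct toy_obj *o) { o->refcnt++; }

static void toy_put(struct toy_obj *o)
{
        if (--o->refcnt == 0)
                free(o);
}

/* Teardown from an async context: pin the object first, as the added
 * sock_hold(sk) does, then let the destroy path drop that reference. */
static void toy_teardown(struct toy_obj *o)
{
        toy_hold(o);
        /* ...unlock, then destroy; destroy ends in toy_put(o)... */
        toy_put(o);
}
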
10822 +diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
10823 +index 3364caabef8b1..a27e1842b2a09 100644
10824 +--- a/net/nfc/llcp_core.c
10825 ++++ b/net/nfc/llcp_core.c
10826 +@@ -157,6 +157,7 @@ static void local_cleanup(struct nfc_llcp_local *local)
10827 + cancel_work_sync(&local->rx_work);
10828 + cancel_work_sync(&local->timeout_work);
10829 + kfree_skb(local->rx_pending);
10830 ++ local->rx_pending = NULL;
10831 + del_timer_sync(&local->sdreq_timer);
10832 + cancel_work_sync(&local->sdreq_timeout_work);
10833 + nfc_llcp_free_sdp_tlv_list(&local->pending_sdreqs);
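
The NFC change above clears rx_pending immediately after freeing it, so a second pass through local_cleanup() frees NULL (a no-op for kfree_skb(), just as for free()) instead of a stale pointer. The generic idiom in plain C:

#include <stdlib.h>

/* Free through a pointer-to-pointer and reset it, making the cleanup
 * safely re-runnable; free(NULL) is defined to do nothing. */
static void drop_pending(void **pending)
{
        free(*pending);
        *pending = NULL;
}
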
10834 +diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
10835 +index a661b062cca85..872d127c9db42 100644
10836 +--- a/net/sched/sch_gred.c
10837 ++++ b/net/sched/sch_gred.c
10838 +@@ -377,6 +377,7 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
10839 + /* Even if driver returns failure adjust the stats - in case offload
10840 + * ended but driver still wants to adjust the values.
10841 + */
10842 ++ sch_tree_lock(sch);
10843 + for (i = 0; i < MAX_DPs; i++) {
10844 + if (!table->tab[i])
10845 + continue;
10846 +@@ -393,6 +394,7 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
10847 + sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
10848 + }
10849 + _bstats_update(&sch->bstats, bytes, packets);
10850 ++ sch_tree_unlock(sch);
10851 +
10852 + kfree(hw_stats);
10853 + return ret;
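
The gred hunks above put the whole stats merge inside sch_tree_lock()/sch_tree_unlock(), so the read-modify-write over the qdisc counters cannot interleave with concurrent updates. A userspace sketch of the same discipline with a mutex; names here are illustrative:

#include <pthread.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long total_bytes, total_packets;

/* Merge device-reported stats as one atomic step w.r.t. the lock. */
static void merge_hw_stats(unsigned long bytes, unsigned long packets)
{
        pthread_mutex_lock(&stats_lock);
        total_bytes   += bytes;
        total_packets += packets;
        pthread_mutex_unlock(&stats_lock);
}
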
10854 +diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
10855 +index e5b4bbf3ce3d5..3afac9c21a763 100644
10856 +--- a/net/sched/sch_htb.c
10857 ++++ b/net/sched/sch_htb.c
10858 +@@ -1545,7 +1545,7 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
10859 + struct tc_htb_qopt_offload offload_opt;
10860 + struct netdev_queue *dev_queue;
10861 + struct Qdisc *q = cl->leaf.q;
10862 +- struct Qdisc *old = NULL;
10863 ++ struct Qdisc *old;
10864 + int err;
10865 +
10866 + if (cl->level)
10867 +@@ -1553,14 +1553,17 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
10868 +
10869 + WARN_ON(!q);
10870 + dev_queue = htb_offload_get_queue(cl);
10871 +- old = htb_graft_helper(dev_queue, NULL);
10872 +- if (destroying)
10873 +- /* Before HTB is destroyed, the kernel grafts noop_qdisc to
10874 +- * all queues.
10875 ++ /* When destroying, caller qdisc_graft grafts the new qdisc and invokes
10876 ++ * qdisc_put for the qdisc being destroyed. htb_destroy_class_offload
10877 ++ * does not need to graft or qdisc_put the qdisc being destroyed.
10878 ++ */
10879 ++ if (!destroying) {
10880 ++ old = htb_graft_helper(dev_queue, NULL);
10881 ++ /* Last qdisc grafted should be the same as cl->leaf.q when
10882 ++ * calling htb_delete.
10883 + */
10884 +- WARN_ON(!(old->flags & TCQ_F_BUILTIN));
10885 +- else
10886 + WARN_ON(old != q);
10887 ++ }
10888 +
10889 + if (cl->parent) {
10890 + _bstats_update(&cl->parent->bstats_bias,
10891 +@@ -1577,10 +1580,12 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
10892 + };
10893 + err = htb_offload(qdisc_dev(sch), &offload_opt);
10894 +
10895 +- if (!err || destroying)
10896 +- qdisc_put(old);
10897 +- else
10898 +- htb_graft_helper(dev_queue, old);
10899 ++ if (!destroying) {
10900 ++ if (!err)
10901 ++ qdisc_put(old);
10902 ++ else
10903 ++ htb_graft_helper(dev_queue, old);
10904 ++ }
10905 +
10906 + if (last_child)
10907 + return err;
10908 +diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
10909 +index 570389f6cdd7d..c322a61eaeeac 100644
10910 +--- a/net/sched/sch_taprio.c
10911 ++++ b/net/sched/sch_taprio.c
10912 +@@ -1700,6 +1700,7 @@ static void taprio_reset(struct Qdisc *sch)
10913 + int i;
10914 +
10915 + hrtimer_cancel(&q->advance_timer);
10916 ++
10917 + if (q->qdiscs) {
10918 + for (i = 0; i < dev->num_tx_queues; i++)
10919 + if (q->qdiscs[i])
10920 +@@ -1720,6 +1721,7 @@ static void taprio_destroy(struct Qdisc *sch)
10921 + * happens in qdisc_create(), after taprio_init() has been called.
10922 + */
10923 + hrtimer_cancel(&q->advance_timer);
10924 ++ qdisc_synchronize(sch);
10925 +
10926 + taprio_disable_offload(dev, q, NULL);
10927 +
10928 +diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
10929 +index 59e653b528b1f..6b95d3ba8fe1c 100644
10930 +--- a/net/sctp/bind_addr.c
10931 ++++ b/net/sctp/bind_addr.c
10932 +@@ -73,6 +73,12 @@ int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
10933 + }
10934 + }
10935 +
10936 ++ /* If somehow no addresses were found that can be used with this
10937 ++ * scope, it's an error.
10938 ++ */
10939 ++ if (list_empty(&dest->address_list))
10940 ++ error = -ENETUNREACH;
10941 ++
10942 + out:
10943 + if (error)
10944 + sctp_bind_addr_clean(dest);
10945 +diff --git a/samples/ftrace/ftrace-direct-multi-modify.c b/samples/ftrace/ftrace-direct-multi-modify.c
10946 +index 65aa94d96f4e3..6f23b1413745c 100644
10947 +--- a/samples/ftrace/ftrace-direct-multi-modify.c
10948 ++++ b/samples/ftrace/ftrace-direct-multi-modify.c
10949 +@@ -149,6 +149,7 @@ static void __exit ftrace_direct_multi_exit(void)
10950 + {
10951 + kthread_stop(simple_tsk);
10952 + unregister_ftrace_direct_multi(&direct, my_tramp);
10953 ++ ftrace_free_filter(&direct);
10954 + }
10955 +
10956 + module_init(ftrace_direct_multi_init);
10957 +diff --git a/samples/ftrace/ftrace-direct-multi.c b/samples/ftrace/ftrace-direct-multi.c
10958 +index 41ded7c615c7f..a9a5c90fb2044 100644
10959 +--- a/samples/ftrace/ftrace-direct-multi.c
10960 ++++ b/samples/ftrace/ftrace-direct-multi.c
10961 +@@ -77,6 +77,7 @@ static int __init ftrace_direct_multi_init(void)
10962 + static void __exit ftrace_direct_multi_exit(void)
10963 + {
10964 + unregister_ftrace_direct_multi(&direct, (unsigned long) my_tramp);
10965 ++ ftrace_free_filter(&direct);
10966 + }
10967 +
10968 + module_init(ftrace_direct_multi_init);
10969 +diff --git a/scripts/atomic/atomics.tbl b/scripts/atomic/atomics.tbl
10970 +old mode 100755
10971 +new mode 100644
10972 +diff --git a/scripts/tracing/ftrace-bisect.sh b/scripts/tracing/ftrace-bisect.sh
10973 +index 926701162bc83..bb4f59262bbe9 100755
10974 +--- a/scripts/tracing/ftrace-bisect.sh
10975 ++++ b/scripts/tracing/ftrace-bisect.sh
10976 +@@ -12,7 +12,7 @@
10977 + # (note, if this is a problem with function_graph tracing, then simply
10978 + # replace "function" with "function_graph" in the following steps).
10979 + #
10980 +-# # cd /sys/kernel/debug/tracing
10981 ++# # cd /sys/kernel/tracing
10982 + # # echo schedule > set_ftrace_filter
10983 + # # echo function > current_tracer
10984 + #
10985 +@@ -20,22 +20,40 @@
10986 + #
10987 + # # echo nop > current_tracer
10988 + #
10989 +-# # cat available_filter_functions > ~/full-file
10990 ++# Starting with v5.1 this can be done with numbers, making it much faster:
10991 ++#
10992 ++# The old (slow) way, for kernels before v5.1.
10993 ++#
10994 ++# [old-way] # cat available_filter_functions > ~/full-file
10995 ++#
10996 ++# [old-way] *** Note *** this process will take several minutes to update the
10997 ++# [old-way] filters. Setting multiple functions is an O(n^2) operation, and we
10998 ++# [old-way] are dealing with thousands of functions. So go have coffee, talk
10999 ++# [old-way] with your coworkers, read facebook. And eventually, this operation
11000 ++# [old-way] will end.
11001 ++#
11002 ++# The new way (using numbers) is an O(n) operation, and usually takes less than a second.
11003 ++#
11004 ++# seq `wc -l available_filter_functions | cut -d' ' -f1` > ~/full-file
11005 ++#
11006 ++# This will create a sequence of numbers that match the functions in
11007 ++# available_filter_functions, and when echoing in a number into the
11008 ++ # set_ftrace_filter file, it will enable the corresponding function in
11009 ++ # O(1) time, making enabling all functions O(n), where n is the number
11010 ++ # of functions to enable.
11011 ++#
11012 ++# For either the new or old way, the rest of the operations remain the same.
11013 ++#
11014 + # # ftrace-bisect ~/full-file ~/test-file ~/non-test-file
11015 + # # cat ~/test-file > set_ftrace_filter
11016 + #
11017 +-# *** Note *** this will take several minutes. Setting multiple functions is
11018 +-# an O(n^2) operation, and we are dealing with thousands of functions. So go
11019 +-# have coffee, talk with your coworkers, read facebook. And eventually, this
11020 +-# operation will end.
11021 +-#
11022 + # # echo function > current_tracer
11023 + #
11024 + # If it crashes, we know that ~/test-file has a bad function.
11025 + #
11026 + # Reboot back to test kernel.
11027 + #
11028 +-# # cd /sys/kernel/debug/tracing
11029 ++# # cd /sys/kernel/tracing
11030 + # # mv ~/test-file ~/full-file
11031 + #
11032 + # If it didn't crash.
11033 +diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile
11034 +index cca5a3012fee2..221eaadffb09c 100644
11035 +--- a/security/tomoyo/Makefile
11036 ++++ b/security/tomoyo/Makefile
11037 +@@ -10,7 +10,7 @@ endef
11038 + quiet_cmd_policy = POLICY $@
11039 + cmd_policy = ($(call do_policy,profile); $(call do_policy,exception_policy); $(call do_policy,domain_policy); $(call do_policy,manager); $(call do_policy,stat)) >$@
11040 +
11041 +-$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(src)/policy/*.conf.default) FORCE
11042 ++$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(srctree)/$(src)/policy/*.conf.default) FORCE
11043 + $(call if_changed,policy)
11044 +
11045 + $(obj)/common.o: $(obj)/builtin-policy.h
11046 +diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
11047 +index 1f0b5527c5949..0d283e41f66dc 100644
11048 +--- a/sound/soc/amd/yc/acp6x-mach.c
11049 ++++ b/sound/soc/amd/yc/acp6x-mach.c
11050 +@@ -206,6 +206,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
11051 + DMI_MATCH(DMI_PRODUCT_NAME, "UM5302TA"),
11052 + }
11053 + },
11054 ++ {
11055 ++ .driver_data = &acp6x_card,
11056 ++ .matches = {
11057 ++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
11058 ++ DMI_MATCH(DMI_PRODUCT_NAME, "M5402RA"),
11059 ++ }
11060 ++ },
11061 + {
11062 + .driver_data = &acp6x_card,
11063 + .matches = {
11064 +@@ -220,6 +227,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
11065 + DMI_MATCH(DMI_PRODUCT_NAME, "Redmi Book Pro 14 2022"),
11066 + }
11067 + },
11068 ++ {
11069 ++ .driver_data = &acp6x_card,
11070 ++ .matches = {
11071 ++ DMI_MATCH(DMI_BOARD_VENDOR, "Razer"),
11072 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Blade 14 (2022) - RZ09-0427"),
11073 ++ }
11074 ++ },
11075 + {}
11076 + };
11077 +
11078 +diff --git a/sound/soc/codecs/es8326.c b/sound/soc/codecs/es8326.c
11079 +old mode 100755
11080 +new mode 100644
11081 +diff --git a/sound/soc/codecs/es8326.h b/sound/soc/codecs/es8326.h
11082 +old mode 100755
11083 +new mode 100644
11084 +diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
11085 +index c836848ef0a65..8d14b5593658d 100644
11086 +--- a/sound/soc/fsl/fsl-asoc-card.c
11087 ++++ b/sound/soc/fsl/fsl-asoc-card.c
11088 +@@ -121,11 +121,11 @@ static const struct snd_soc_dapm_route audio_map[] = {
11089 +
11090 + static const struct snd_soc_dapm_route audio_map_ac97[] = {
11091 + /* 1st half -- Normal DAPM routes */
11092 +- {"Playback", NULL, "AC97 Playback"},
11093 +- {"AC97 Capture", NULL, "Capture"},
11094 ++ {"AC97 Playback", NULL, "CPU AC97 Playback"},
11095 ++ {"CPU AC97 Capture", NULL, "AC97 Capture"},
11096 + /* 2nd half -- ASRC DAPM routes */
11097 +- {"AC97 Playback", NULL, "ASRC-Playback"},
11098 +- {"ASRC-Capture", NULL, "AC97 Capture"},
11099 ++ {"CPU AC97 Playback", NULL, "ASRC-Playback"},
11100 ++ {"ASRC-Capture", NULL, "CPU AC97 Capture"},
11101 + };
11102 +
11103 + static const struct snd_soc_dapm_route audio_map_tx[] = {
11104 +diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
11105 +index 4b86ef82fd930..4b8fe9b8be407 100644
11106 +--- a/sound/soc/fsl/fsl_micfil.c
11107 ++++ b/sound/soc/fsl/fsl_micfil.c
11108 +@@ -154,21 +154,21 @@ static int micfil_quality_set(struct snd_kcontrol *kcontrol,
11109 +
11110 + static const struct snd_kcontrol_new fsl_micfil_snd_controls[] = {
11111 + SOC_SINGLE_SX_TLV("CH0 Volume", REG_MICFIL_OUT_CTRL,
11112 +- MICFIL_OUTGAIN_CHX_SHIFT(0), 0xF, 0x7, gain_tlv),
11113 ++ MICFIL_OUTGAIN_CHX_SHIFT(0), 0x8, 0xF, gain_tlv),
11114 + SOC_SINGLE_SX_TLV("CH1 Volume", REG_MICFIL_OUT_CTRL,
11115 +- MICFIL_OUTGAIN_CHX_SHIFT(1), 0xF, 0x7, gain_tlv),
11116 ++ MICFIL_OUTGAIN_CHX_SHIFT(1), 0x8, 0xF, gain_tlv),
11117 + SOC_SINGLE_SX_TLV("CH2 Volume", REG_MICFIL_OUT_CTRL,
11118 +- MICFIL_OUTGAIN_CHX_SHIFT(2), 0xF, 0x7, gain_tlv),
11119 ++ MICFIL_OUTGAIN_CHX_SHIFT(2), 0x8, 0xF, gain_tlv),
11120 + SOC_SINGLE_SX_TLV("CH3 Volume", REG_MICFIL_OUT_CTRL,
11121 +- MICFIL_OUTGAIN_CHX_SHIFT(3), 0xF, 0x7, gain_tlv),
11122 ++ MICFIL_OUTGAIN_CHX_SHIFT(3), 0x8, 0xF, gain_tlv),
11123 + SOC_SINGLE_SX_TLV("CH4 Volume", REG_MICFIL_OUT_CTRL,
11124 +- MICFIL_OUTGAIN_CHX_SHIFT(4), 0xF, 0x7, gain_tlv),
11125 ++ MICFIL_OUTGAIN_CHX_SHIFT(4), 0x8, 0xF, gain_tlv),
11126 + SOC_SINGLE_SX_TLV("CH5 Volume", REG_MICFIL_OUT_CTRL,
11127 +- MICFIL_OUTGAIN_CHX_SHIFT(5), 0xF, 0x7, gain_tlv),
11128 ++ MICFIL_OUTGAIN_CHX_SHIFT(5), 0x8, 0xF, gain_tlv),
11129 + SOC_SINGLE_SX_TLV("CH6 Volume", REG_MICFIL_OUT_CTRL,
11130 +- MICFIL_OUTGAIN_CHX_SHIFT(6), 0xF, 0x7, gain_tlv),
11131 ++ MICFIL_OUTGAIN_CHX_SHIFT(6), 0x8, 0xF, gain_tlv),
11132 + SOC_SINGLE_SX_TLV("CH7 Volume", REG_MICFIL_OUT_CTRL,
11133 +- MICFIL_OUTGAIN_CHX_SHIFT(7), 0xF, 0x7, gain_tlv),
11134 ++ MICFIL_OUTGAIN_CHX_SHIFT(7), 0x8, 0xF, gain_tlv),
11135 + SOC_ENUM_EXT("MICFIL Quality Select",
11136 + fsl_micfil_quality_enum,
11137 + micfil_quality_get, micfil_quality_set),
11138 +diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
11139 +index c9e0e31d5b34d..46a53551b955c 100644
11140 +--- a/sound/soc/fsl/fsl_ssi.c
11141 ++++ b/sound/soc/fsl/fsl_ssi.c
11142 +@@ -1189,14 +1189,14 @@ static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
11143 + .symmetric_channels = 1,
11144 + .probe = fsl_ssi_dai_probe,
11145 + .playback = {
11146 +- .stream_name = "AC97 Playback",
11147 ++ .stream_name = "CPU AC97 Playback",
11148 + .channels_min = 2,
11149 + .channels_max = 2,
11150 + .rates = SNDRV_PCM_RATE_8000_48000,
11151 + .formats = SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_S20,
11152 + },
11153 + .capture = {
11154 +- .stream_name = "AC97 Capture",
11155 ++ .stream_name = "CPU AC97 Capture",
11156 + .channels_min = 2,
11157 + .channels_max = 2,
11158 + .rates = SNDRV_PCM_RATE_48000,
11159 +diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
11160 +index 363fa4d476800..b027fba8233df 100644
11161 +--- a/sound/soc/mediatek/Kconfig
11162 ++++ b/sound/soc/mediatek/Kconfig
11163 +@@ -182,10 +182,12 @@ config SND_SOC_MT8186_MT6366_DA7219_MAX98357
11164 + If unsure select "N".
11165 +
11166 + config SND_SOC_MT8186_MT6366_RT1019_RT5682S
11167 +- tristate "ASoC Audio driver for MT8186 with RT1019 RT5682S codec"
11168 ++ tristate "ASoC Audio driver for MT8186 with RT1019 RT5682S MAX98357A/MAX98360 codec"
11169 + depends on I2C && GPIOLIB
11170 + depends on SND_SOC_MT8186 && MTK_PMIC_WRAP
11171 ++ select SND_SOC_MAX98357A
11172 + select SND_SOC_MT6358
11174 + select SND_SOC_RT1015P
11175 + select SND_SOC_RT5682S
11176 + select SND_SOC_BT_SCO
11177 +diff --git a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
11178 +index 60fa55d0c91f0..6babadb2e6fe2 100644
11179 +--- a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
11180 ++++ b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
11181 +@@ -991,6 +991,21 @@ static struct snd_soc_card mt8186_mt6366_rt1019_rt5682s_soc_card = {
11182 + .num_configs = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_codec_conf),
11183 + };
11184 +
11185 ++static struct snd_soc_card mt8186_mt6366_rt5682s_max98360_soc_card = {
11186 ++ .name = "mt8186_rt5682s_max98360",
11187 ++ .owner = THIS_MODULE,
11188 ++ .dai_link = mt8186_mt6366_rt1019_rt5682s_dai_links,
11189 ++ .num_links = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_dai_links),
11190 ++ .controls = mt8186_mt6366_rt1019_rt5682s_controls,
11191 ++ .num_controls = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_controls),
11192 ++ .dapm_widgets = mt8186_mt6366_rt1019_rt5682s_widgets,
11193 ++ .num_dapm_widgets = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_widgets),
11194 ++ .dapm_routes = mt8186_mt6366_rt1019_rt5682s_routes,
11195 ++ .num_dapm_routes = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_routes),
11196 ++ .codec_conf = mt8186_mt6366_rt1019_rt5682s_codec_conf,
11197 ++ .num_configs = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_codec_conf),
11198 ++};
11199 ++
11200 + static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev)
11201 + {
11202 + struct snd_soc_card *card;
11203 +@@ -1132,9 +1147,14 @@ err_adsp_node:
11204 +
11205 + #if IS_ENABLED(CONFIG_OF)
11206 + static const struct of_device_id mt8186_mt6366_rt1019_rt5682s_dt_match[] = {
11207 +- { .compatible = "mediatek,mt8186-mt6366-rt1019-rt5682s-sound",
11208 ++ {
11209 ++ .compatible = "mediatek,mt8186-mt6366-rt1019-rt5682s-sound",
11210 + .data = &mt8186_mt6366_rt1019_rt5682s_soc_card,
11211 + },
11212 ++ {
11213 ++ .compatible = "mediatek,mt8186-mt6366-rt5682s-max98360-sound",
11214 ++ .data = &mt8186_mt6366_rt5682s_max98360_soc_card,
11215 ++ },
11216 + {}
11217 + };
11218 + #endif
11219 +diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c
11220 +index d9a3ce7b69e16..ade0507328af4 100644
11221 +--- a/sound/soc/sof/debug.c
11222 ++++ b/sound/soc/sof/debug.c
11223 +@@ -353,7 +353,9 @@ int snd_sof_dbg_init(struct snd_sof_dev *sdev)
11224 + return err;
11225 + }
11226 +
11227 +- return 0;
11228 ++ return snd_sof_debugfs_buf_item(sdev, &sdev->fw_state,
11229 ++ sizeof(sdev->fw_state),
11230 ++ "fw_state", 0444);
11231 + }
11232 + EXPORT_SYMBOL_GPL(snd_sof_dbg_init);
11233 +
11234 +diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
11235 +index df740be645e84..8722bbd7fd3d7 100644
11236 +--- a/sound/soc/sof/pm.c
11237 ++++ b/sound/soc/sof/pm.c
11238 +@@ -182,7 +182,7 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
11239 + const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
11240 + const struct sof_ipc_tplg_ops *tplg_ops = sdev->ipc->ops->tplg;
11241 + pm_message_t pm_state;
11242 +- u32 target_state = 0;
11243 ++ u32 target_state = snd_sof_dsp_power_target(sdev);
11244 + int ret;
11245 +
11246 + /* do nothing if dsp suspend callback is not set */
11247 +@@ -192,6 +192,9 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
11248 + if (runtime_suspend && !sof_ops(sdev)->runtime_suspend)
11249 + return 0;
11250 +
11251 ++ if (tplg_ops && tplg_ops->tear_down_all_pipelines)
11252 ++ tplg_ops->tear_down_all_pipelines(sdev, false);
11253 ++
11254 + if (sdev->fw_state != SOF_FW_BOOT_COMPLETE)
11255 + goto suspend;
11256 +
11257 +@@ -206,7 +209,6 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
11258 + }
11259 + }
11260 +
11261 +- target_state = snd_sof_dsp_power_target(sdev);
11262 + pm_state.event = target_state;
11263 +
11264 + /* Skip to platform-specific suspend if DSP is entering D0 */
11265 +@@ -217,9 +219,6 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
11266 + goto suspend;
11267 + }
11268 +
11269 +- if (tplg_ops->tear_down_all_pipelines)
11270 +- tplg_ops->tear_down_all_pipelines(sdev, false);
11271 +-
11272 + /* suspend DMA trace */
11273 + sof_fw_trace_suspend(sdev, pm_state);
11274 +
11275 +diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c
11276 +index 6c122952c5892..5dee2b98ab604 100644
11277 +--- a/tools/gpio/gpio-event-mon.c
11278 ++++ b/tools/gpio/gpio-event-mon.c
11279 +@@ -86,6 +86,7 @@ int monitor_device(const char *device_name,
11280 + gpiotools_test_bit(values.bits, i));
11281 + }
11282 +
11283 ++ i = 0;
11284 + while (1) {
11285 + struct gpio_v2_line_event event;
11286 +
11287 +diff --git a/tools/include/nolibc/ctype.h b/tools/include/nolibc/ctype.h
11288 +index e3000b2992d7b..6f90706d06442 100644
11289 +--- a/tools/include/nolibc/ctype.h
11290 ++++ b/tools/include/nolibc/ctype.h
11291 +@@ -96,4 +96,7 @@ int ispunct(int c)
11292 + return isgraph(c) && !isalnum(c);
11293 + }
11294 +
11295 ++/* make sure to include all global symbols */
11296 ++#include "nolibc.h"
11297 ++
11298 + #endif /* _NOLIBC_CTYPE_H */
11299 +diff --git a/tools/include/nolibc/errno.h b/tools/include/nolibc/errno.h
11300 +index 06893d6dfb7a6..9dc4919c769b7 100644
11301 +--- a/tools/include/nolibc/errno.h
11302 ++++ b/tools/include/nolibc/errno.h
11303 +@@ -24,4 +24,7 @@ static int errno;
11304 + */
11305 + #define MAX_ERRNO 4095
11306 +
11307 ++/* make sure to include all global symbols */
11308 ++#include "nolibc.h"
11309 ++
11310 + #endif /* _NOLIBC_ERRNO_H */
11311 +diff --git a/tools/include/nolibc/signal.h b/tools/include/nolibc/signal.h
11312 +index ef47e71e2be37..137552216e469 100644
11313 +--- a/tools/include/nolibc/signal.h
11314 ++++ b/tools/include/nolibc/signal.h
11315 +@@ -19,4 +19,7 @@ int raise(int signal)
11316 + return sys_kill(sys_getpid(), signal);
11317 + }
11318 +
11319 ++/* make sure to include all global symbols */
11320 ++#include "nolibc.h"
11321 ++
11322 + #endif /* _NOLIBC_SIGNAL_H */
11323 +diff --git a/tools/include/nolibc/stdio.h b/tools/include/nolibc/stdio.h
11324 +index a3cebc4bc3ac4..96ac8afc5aeed 100644
11325 +--- a/tools/include/nolibc/stdio.h
11326 ++++ b/tools/include/nolibc/stdio.h
11327 +@@ -303,4 +303,7 @@ void perror(const char *msg)
11328 + fprintf(stderr, "%s%serrno=%d\n", (msg && *msg) ? msg : "", (msg && *msg) ? ": " : "", errno);
11329 + }
11330 +
11331 ++/* make sure to include all global symbols */
11332 ++#include "nolibc.h"
11333 ++
11334 + #endif /* _NOLIBC_STDIO_H */
11335 +diff --git a/tools/include/nolibc/stdlib.h b/tools/include/nolibc/stdlib.h
11336 +index 92378c4b96605..a24000d1e8222 100644
11337 +--- a/tools/include/nolibc/stdlib.h
11338 ++++ b/tools/include/nolibc/stdlib.h
11339 +@@ -419,4 +419,7 @@ char *u64toa(uint64_t in)
11340 + return itoa_buffer;
11341 + }
11342 +
11343 ++/* make sure to include all global symbols */
11344 ++#include "nolibc.h"
11345 ++
11346 + #endif /* _NOLIBC_STDLIB_H */
11347 +diff --git a/tools/include/nolibc/string.h b/tools/include/nolibc/string.h
11348 +index ad97c0d522b8e..fffdaf6ff4673 100644
11349 +--- a/tools/include/nolibc/string.h
11350 ++++ b/tools/include/nolibc/string.h
11351 +@@ -88,8 +88,11 @@ void *memset(void *dst, int b, size_t len)
11352 + {
11353 + char *p = dst;
11354 +
11355 +- while (len--)
11356 ++ while (len--) {
11357 ++ /* prevent gcc from recognizing memset() here */
11358 ++ asm volatile("");
11359 + *(p++) = b;
11360 ++ }
11361 + return dst;
11362 + }
11363 +
11364 +@@ -285,4 +288,7 @@ char *strrchr(const char *s, int c)
11365 + return (char *)ret;
11366 + }
11367 +
11368 ++/* make sure to include all global symbols */
11369 ++#include "nolibc.h"
11370 ++
11371 + #endif /* _NOLIBC_STRING_H */
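
The empty asm statement added to nolibc's memset() above stops the compiler from recognizing the byte-store loop as a memset idiom and replacing it with a call to memset(), which here would call itself. The trick in isolation:

#include <stddef.h>

/* The empty volatile asm emits nothing but acts as an optimization
 * barrier: the loop no longer matches the pattern the compiler would
 * turn into a memset() library call. */
void *tiny_memset(void *dst, int b, size_t len)
{
        char *p = dst;

        while (len--) {
                __asm__ volatile("");
                *(p++) = (char)b;
        }
        return dst;
}
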
11372 +diff --git a/tools/include/nolibc/sys.h b/tools/include/nolibc/sys.h
11373 +index ce3ee03aa6794..78473d34e27cd 100644
11374 +--- a/tools/include/nolibc/sys.h
11375 ++++ b/tools/include/nolibc/sys.h
11376 +@@ -1243,5 +1243,7 @@ ssize_t write(int fd, const void *buf, size_t count)
11377 + return ret;
11378 + }
11379 +
11380 ++/* make sure to include all global symbols */
11381 ++#include "nolibc.h"
11382 +
11383 + #endif /* _NOLIBC_SYS_H */
11384 +diff --git a/tools/include/nolibc/time.h b/tools/include/nolibc/time.h
11385 +index d18b7661fdd71..84655361b9ad2 100644
11386 +--- a/tools/include/nolibc/time.h
11387 ++++ b/tools/include/nolibc/time.h
11388 +@@ -25,4 +25,7 @@ time_t time(time_t *tptr)
11389 + return tv.tv_sec;
11390 + }
11391 +
11392 ++/* make sure to include all global symbols */
11393 ++#include "nolibc.h"
11394 ++
11395 + #endif /* _NOLIBC_TIME_H */
11396 +diff --git a/tools/include/nolibc/types.h b/tools/include/nolibc/types.h
11397 +index 959997034e553..fbbc0e68c001b 100644
11398 +--- a/tools/include/nolibc/types.h
11399 ++++ b/tools/include/nolibc/types.h
11400 +@@ -26,13 +26,13 @@
11401 + #define S_IFSOCK 0140000
11402 + #define S_IFMT 0170000
11403 +
11404 +-#define S_ISDIR(mode) (((mode) & S_IFDIR) == S_IFDIR)
11405 +-#define S_ISCHR(mode) (((mode) & S_IFCHR) == S_IFCHR)
11406 +-#define S_ISBLK(mode) (((mode) & S_IFBLK) == S_IFBLK)
11407 +-#define S_ISREG(mode) (((mode) & S_IFREG) == S_IFREG)
11408 +-#define S_ISFIFO(mode) (((mode) & S_IFIFO) == S_IFIFO)
11409 +-#define S_ISLNK(mode) (((mode) & S_IFLNK) == S_IFLNK)
11410 +-#define S_ISSOCK(mode) (((mode) & S_IFSOCK) == S_IFSOCK)
11411 ++#define S_ISDIR(mode) (((mode) & S_IFMT) == S_IFDIR)
11412 ++#define S_ISCHR(mode) (((mode) & S_IFMT) == S_IFCHR)
11413 ++#define S_ISBLK(mode) (((mode) & S_IFMT) == S_IFBLK)
11414 ++#define S_ISREG(mode) (((mode) & S_IFMT) == S_IFREG)
11415 ++#define S_ISFIFO(mode) (((mode) & S_IFMT) == S_IFIFO)
11416 ++#define S_ISLNK(mode) (((mode) & S_IFMT) == S_IFLNK)
11417 ++#define S_ISSOCK(mode) (((mode) & S_IFMT) == S_IFSOCK)
11418 +
11419 + /* dirent types */
11420 + #define DT_UNKNOWN 0x0
11421 +@@ -89,39 +89,46 @@
11422 + #define EXIT_SUCCESS 0
11423 + #define EXIT_FAILURE 1
11424 +
11425 ++#define FD_SETIDXMASK (8 * sizeof(unsigned long))
11426 ++#define FD_SETBITMASK (8 * sizeof(unsigned long)-1)
11427 ++
11428 + /* for select() */
11429 + typedef struct {
11430 +- uint32_t fd32[(FD_SETSIZE + 31) / 32];
11431 ++ unsigned long fds[(FD_SETSIZE + FD_SETBITMASK) / FD_SETIDXMASK];
11432 + } fd_set;
11433 +
11434 +-#define FD_CLR(fd, set) do { \
11435 +- fd_set *__set = (set); \
11436 +- int __fd = (fd); \
11437 +- if (__fd >= 0) \
11438 +- __set->fd32[__fd / 32] &= ~(1U << (__fd & 31)); \
11439 ++#define FD_CLR(fd, set) do { \
11440 ++ fd_set *__set = (set); \
11441 ++ int __fd = (fd); \
11442 ++ if (__fd >= 0) \
11443 ++ __set->fds[__fd / FD_SETIDXMASK] &= \
11444 ++ ~(1U << (__fd & FD_SETBITMASK)); \
11445 + } while (0)
11446 +
11447 +-#define FD_SET(fd, set) do { \
11448 +- fd_set *__set = (set); \
11449 +- int __fd = (fd); \
11450 +- if (__fd >= 0) \
11451 +- __set->fd32[__fd / 32] |= 1U << (__fd & 31); \
11452 ++#define FD_SET(fd, set) do { \
11453 ++ fd_set *__set = (set); \
11454 ++ int __fd = (fd); \
11455 ++ if (__fd >= 0) \
11456 ++ __set->fds[__fd / FD_SETIDXMASK] |= \
11457 ++ 1U << (__fd & FD_SETBITMASK); \
11458 + } while (0)
11459 +
11460 +-#define FD_ISSET(fd, set) ({ \
11461 +- fd_set *__set = (set); \
11462 +- int __fd = (fd); \
11463 +- int __r = 0; \
11464 +- if (__fd >= 0) \
11465 +- __r = !!(__set->fd32[__fd / 32] & 1U << (__fd & 31)); \
11466 +- __r; \
11467 ++#define FD_ISSET(fd, set) ({ \
11468 ++ fd_set *__set = (set); \
11469 ++ int __fd = (fd); \
11470 ++ int __r = 0; \
11471 ++ if (__fd >= 0) \
11472 ++ __r = !!(__set->fds[__fd / FD_SETIDXMASK] & \
11473 ++1U << (__fd & FD_SETBITMASK)); \
11474 ++ __r; \
11475 + })
11476 +
11477 +-#define FD_ZERO(set) do { \
11478 +- fd_set *__set = (set); \
11479 +- int __idx; \
11480 +- for (__idx = 0; __idx < (FD_SETSIZE+31) / 32; __idx ++) \
11481 +- __set->fd32[__idx] = 0; \
11482 ++#define FD_ZERO(set) do { \
11483 ++ fd_set *__set = (set); \
11484 ++ int __idx; \
11485 ++ int __size = (FD_SETSIZE+FD_SETBITMASK) / FD_SETIDXMASK;\
11486 ++ for (__idx = 0; __idx < __size; __idx++) \
11487 ++ __set->fds[__idx] = 0; \
11488 + } while (0)
11489 +
11490 + /* for poll() */
11491 +@@ -202,4 +209,7 @@ struct stat {
11492 + })
11493 + #endif
11494 +
11495 ++/* make sure to include all global symbols */
11496 ++#include "nolibc.h"
11497 ++
11498 + #endif /* _NOLIBC_TYPES_H */
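
The S_ISxxx rewrite above matters because the file-type field in st_mode is an enumerated value, not a set of independent bits: S_IFBLK (0060000) contains all the bits of S_IFDIR (0040000), so the old subset test called block devices directories. A small demonstration:

#include <stdio.h>

#define S_IFDIR 0040000
#define S_IFBLK 0060000
#define S_IFMT  0170000

#define OLD_ISDIR(m) (((m) & S_IFDIR) == S_IFDIR)   /* subset test: buggy */
#define NEW_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)    /* mask then compare  */

int main(void)
{
        printf("old on S_IFBLK: %d\n", OLD_ISDIR(S_IFBLK)); /* 1, wrong */
        printf("new on S_IFBLK: %d\n", NEW_ISDIR(S_IFBLK)); /* 0, right */
        return 0;
}
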
11499 +diff --git a/tools/include/nolibc/unistd.h b/tools/include/nolibc/unistd.h
11500 +index 1c25e20ee3606..1cfcd52106a42 100644
11501 +--- a/tools/include/nolibc/unistd.h
11502 ++++ b/tools/include/nolibc/unistd.h
11503 +@@ -51,4 +51,7 @@ int tcsetpgrp(int fd, pid_t pid)
11504 + return ioctl(fd, TIOCSPGRP, &pid);
11505 + }
11506 +
11507 ++/* make sure to include all global symbols */
11508 ++#include "nolibc.h"
11509 ++
11510 + #endif /* _NOLIBC_UNISTD_H */
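[Editor's note: the tcsetpgrp() shown in this hunk is a thin wrapper over the TIOCSPGRP ioctl. A hedged usage sketch of the classic job-control dance, handing the terminal to a child's process group and reclaiming it (host libc names, error handling elided for brevity):

	#include <unistd.h>
	#include <sys/wait.h>

	int run_in_foreground(char *const argv[])
	{
		pid_t pid = fork();

		if (pid == 0) {			/* child */
			setpgid(0, 0);		/* own process group, pgid == pid */
			execvp(argv[0], argv);
			_exit(127);
		}
		setpgid(pid, pid);		/* parent sets it too, avoiding a race */
		tcsetpgrp(STDIN_FILENO, pid);	/* TIOCSPGRP under the hood */
		waitpid(pid, NULL, 0);
		tcsetpgrp(STDIN_FILENO, getpgrp());	/* take the terminal back */
		return 0;
	}
]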
11511 +diff --git a/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c b/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c
11512 +deleted file mode 100644
11513 +index 3add34df57678..0000000000000
11514 +--- a/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c
11515 ++++ /dev/null
11516 +@@ -1,9 +0,0 @@
11517 +-// SPDX-License-Identifier: GPL-2.0
11518 +-
11519 +-#include <test_progs.h>
11520 +-#include "jeq_infer_not_null_fail.skel.h"
11521 +-
11522 +-void test_jeq_infer_not_null(void)
11523 +-{
11524 +- RUN_TESTS(jeq_infer_not_null_fail);
11525 +-}
11526 +diff --git a/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c b/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c
11527 +deleted file mode 100644
11528 +index f46965053acb2..0000000000000
11529 +--- a/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c
11530 ++++ /dev/null
11531 +@@ -1,42 +0,0 @@
11532 +-// SPDX-License-Identifier: GPL-2.0
11533 +-
11534 +-#include "vmlinux.h"
11535 +-#include <bpf/bpf_helpers.h>
11536 +-#include "bpf_misc.h"
11537 +-
11538 +-char _license[] SEC("license") = "GPL";
11539 +-
11540 +-struct {
11541 +- __uint(type, BPF_MAP_TYPE_HASH);
11542 +- __uint(max_entries, 1);
11543 +- __type(key, u64);
11544 +- __type(value, u64);
11545 +-} m_hash SEC(".maps");
11546 +-
11547 +-SEC("?raw_tp")
11548 +-__failure __msg("R8 invalid mem access 'map_value_or_null")
11549 +-int jeq_infer_not_null_ptr_to_btfid(void *ctx)
11550 +-{
11551 +- struct bpf_map *map = (struct bpf_map *)&m_hash;
11552 +- struct bpf_map *inner_map = map->inner_map_meta;
11553 +- u64 key = 0, ret = 0, *val;
11554 +-
11555 +- val = bpf_map_lookup_elem(map, &key);
11556 +- /* Do not mark ptr as non-null if one of them is
11557 +- * PTR_TO_BTF_ID (R9), reject because of invalid
11558 +- * access to map value (R8).
11559 +- *
11560 +- * Here, we need to inline those insns to access
11561 +- * R8 directly, since compiler may use other reg
11562 +- * once it figures out val==inner_map.
11563 +- */
11564 +- asm volatile("r8 = %[val];\n"
11565 +- "r9 = %[inner_map];\n"
11566 +- "if r8 != r9 goto +1;\n"
11567 +- "%[ret] = *(u64 *)(r8 +0);\n"
11568 +- : [ret] "+r"(ret)
11569 +- : [inner_map] "r"(inner_map), [val] "r"(val)
11570 +- : "r8", "r9");
11571 +-
11572 +- return ret;
11573 +-}
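[Editor's note: the deleted probe deliberately uses inline asm to keep the unchecked lookup result in a fixed register; in ordinary BPF C the verifier-accepted pattern is an explicit NULL test on the result before any dereference. A minimal sketch mirroring the removed map layout, illustrative only:

	// SPDX-License-Identifier: GPL-2.0
	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>

	char _license[] SEC("license") = "GPL";

	struct {
		__uint(type, BPF_MAP_TYPE_HASH);
		__uint(max_entries, 1);
		__type(key, u64);
		__type(value, u64);
	} m_hash SEC(".maps");

	SEC("raw_tp")
	int null_check_before_deref(void *ctx)
	{
		u64 key = 0;
		u64 *val = bpf_map_lookup_elem(&m_hash, &key);

		if (!val)		/* map_value_or_null until checked */
			return 0;
		return *val != 0;	/* now PTR_TO_MAP_VALUE: load allowed */
	}
]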
11574 +diff --git a/tools/testing/selftests/net/toeplitz.c b/tools/testing/selftests/net/toeplitz.c
11575 +index 90026a27eac0c..9ba03164d73a6 100644
11576 +--- a/tools/testing/selftests/net/toeplitz.c
11577 ++++ b/tools/testing/selftests/net/toeplitz.c
11578 +@@ -215,7 +215,7 @@ static char *recv_frame(const struct ring_state *ring, char *frame)
11579 + }
11580 +
11581 + /* A single TPACKET_V3 block can hold multiple frames */
11582 +-static void recv_block(struct ring_state *ring)
11583 ++static bool recv_block(struct ring_state *ring)
11584 + {
11585 + struct tpacket_block_desc *block;
11586 + char *frame;
11587 +@@ -223,7 +223,7 @@ static void recv_block(struct ring_state *ring)
11588 +
11589 + block = (void *)(ring->mmap + ring->idx * ring_block_sz);
11590 + if (!(block->hdr.bh1.block_status & TP_STATUS_USER))
11591 +- return;
11592 ++ return false;
11593 +
11594 + frame = (char *)block;
11595 + frame += block->hdr.bh1.offset_to_first_pkt;
11596 +@@ -235,6 +235,8 @@ static void recv_block(struct ring_state *ring)
11597 +
11598 + block->hdr.bh1.block_status = TP_STATUS_KERNEL;
11599 + ring->idx = (ring->idx + 1) % ring_block_nr;
11600 ++
11601 ++ return true;
11602 + }
11603 +
11604 + /* simple test: sleep once unconditionally and then process all rings */
11605 +@@ -245,7 +247,7 @@ static void process_rings(void)
11606 + usleep(1000 * cfg_timeout_msec);
11607 +
11608 + for (i = 0; i < num_cpus; i++)
11609 +- recv_block(&rings[i]);
11610 ++ do {} while (recv_block(&rings[i]));
11611 +
11612 + fprintf(stderr, "count: pass=%u nohash=%u fail=%u\n",
11613 + frames_received - frames_nohash - frames_error,
11614 +@@ -257,12 +259,12 @@ static char *setup_ring(int fd)
11615 + struct tpacket_req3 req3 = {0};
11616 + void *ring;
11617 +
11618 +- req3.tp_retire_blk_tov = cfg_timeout_msec;
11619 ++ req3.tp_retire_blk_tov = cfg_timeout_msec / 8;
11620 + req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH;
11621 +
11622 + req3.tp_frame_size = 2048;
11623 + req3.tp_frame_nr = 1 << 10;
11624 +- req3.tp_block_nr = 2;
11625 ++ req3.tp_block_nr = 16;
11626 +
11627 + req3.tp_block_size = req3.tp_frame_size * req3.tp_frame_nr;
11628 + req3.tp_block_size /= req3.tp_block_nr;
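[Editor's note: the fix above has recv_block() report whether it consumed a block, so process_rings() can drain every block the kernel has already retired instead of at most one per ring per pass; shrinking tp_retire_blk_tov to an eighth of the sleep window and raising tp_block_nr to 16 presumably ensures a burst of frames retires into blocks that are all still available when the single pass runs. A toy model of the drain loop, all names illustrative and not part of the selftest:

	#include <stdbool.h>
	#include <stdio.h>

	static int ready_blocks = 3;	/* pretend three blocks retired at once */

	static bool consume_one(void)
	{
		if (!ready_blocks)
			return false;	/* next block still owned by the kernel */
		ready_blocks--;		/* process frames, hand block back */
		return true;
	}

	int main(void)
	{
		int consumed = 0;

		while (consume_one())	/* the old code made a single call here */
			consumed++;
		printf("consumed %d blocks in one pass\n", consumed);
		return 0;
	}
]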
11629 +diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
11630 +index 495ceabffe88b..9584eb57e0eda 100644
11631 +--- a/virt/kvm/vfio.c
11632 ++++ b/virt/kvm/vfio.c
11633 +@@ -336,7 +336,7 @@ static int kvm_vfio_has_attr(struct kvm_device *dev,
11634 + return -ENXIO;
11635 + }
11636 +
11637 +-static void kvm_vfio_destroy(struct kvm_device *dev)
11638 ++static void kvm_vfio_release(struct kvm_device *dev)
11639 + {
11640 + struct kvm_vfio *kv = dev->private;
11641 + struct kvm_vfio_group *kvg, *tmp;
11642 +@@ -355,7 +355,7 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
11643 + kvm_vfio_update_coherency(dev);
11644 +
11645 + kfree(kv);
11646 +- kfree(dev); /* alloc by kvm_ioctl_create_device, free by .destroy */
11647 ++ kfree(dev); /* alloc by kvm_ioctl_create_device, free by .release */
11648 + }
11649 +
11650 + static int kvm_vfio_create(struct kvm_device *dev, u32 type);
11651 +@@ -363,7 +363,7 @@ static int kvm_vfio_create(struct kvm_device *dev, u32 type);
11652 + static struct kvm_device_ops kvm_vfio_ops = {
11653 + .name = "kvm-vfio",
11654 + .create = kvm_vfio_create,
11655 +- .destroy = kvm_vfio_destroy,
11656 ++ .release = kvm_vfio_release,
11657 + .set_attr = kvm_vfio_set_attr,
11658 + .has_attr = kvm_vfio_has_attr,
11659 + };
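[Editor's note: the functional change in this last hunk is the teardown hook: .release runs when the device file descriptor is closed, with kvm->lock held, and once it has run .destroy is no longer called for that device; .destroy would otherwise run during VM destruction. A simplified sketch of the contract, paraphrased from the kvm_device_ops comments in include/linux/kvm_host.h (stand-in type names, not the real declaration):

	struct kvm_device;	/* opaque stand-in */

	struct kvm_device_ops_sketch {
		const char *name;
		/* Frees dev while the VM itself is being destroyed. Skipped
		 * entirely once .release has run for this device. */
		void (*destroy)(struct kvm_device *dev);
		/* Alternative teardown: invoked when the device fd is closed,
		 * with kvm->lock held and the device already removed from the
		 * VM's device list. */
		void (*release)(struct kvm_device *dev);
	};
]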