Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.19 commit in: /
Date: Wed, 28 Sep 2022 09:55:58
Message-Id: 1664358939.27a162cbb4e6bf6258462f89e5da2c02364e125e.mpagano@gentoo
1 commit: 27a162cbb4e6bf6258462f89e5da2c02364e125e
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Sep 28 09:55:39 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Sep 28 09:55:39 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=27a162cb
7
8 Linux patch 5.19.12
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1011_linux-5.19.12.patch | 9776 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 9780 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 591733a1..05763bb8 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -87,6 +87,10 @@ Patch: 1010_linux-5.19.11.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.19.11
23
24 +Patch: 1011_linux-5.19.12.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.19.12
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1011_linux-5.19.12.patch b/1011_linux-5.19.12.patch
33 new file mode 100644
34 index 00000000..8c6e32f4
35 --- /dev/null
36 +++ b/1011_linux-5.19.12.patch
37 @@ -0,0 +1,9776 @@
38 +diff --git a/Makefile b/Makefile
39 +index 01463a22926d5..7df4c195c8ab2 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 19
46 +-SUBLEVEL = 11
47 ++SUBLEVEL = 12
48 + EXTRAVERSION =
49 + NAME = Superb Owl
50 +
51 +diff --git a/arch/arm/boot/dts/lan966x.dtsi b/arch/arm/boot/dts/lan966x.dtsi
52 +index 38e90a31d2dd1..25c19f9d0a12f 100644
53 +--- a/arch/arm/boot/dts/lan966x.dtsi
54 ++++ b/arch/arm/boot/dts/lan966x.dtsi
55 +@@ -515,13 +515,13 @@
56 +
57 + phy0: ethernet-phy@1 {
58 + reg = <1>;
59 +- interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
60 ++ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
61 + status = "disabled";
62 + };
63 +
64 + phy1: ethernet-phy@2 {
65 + reg = <2>;
66 +- interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
67 ++ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
68 + status = "disabled";
69 + };
70 + };
71 +diff --git a/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts b/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts
72 +index 92eaf4ef45638..57ecdfa0dfc09 100644
73 +--- a/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts
74 ++++ b/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts
75 +@@ -152,11 +152,11 @@
76 + * CPLD_reset is RESET_SOFT in schematic
77 + */
78 + gpio-line-names =
79 +- "CPLD_D[1]", "CPLD_int", "CPLD_reset", "",
80 +- "", "CPLD_D[0]", "", "",
81 +- "", "", "", "CPLD_D[2]",
82 +- "CPLD_D[3]", "CPLD_D[4]", "CPLD_D[5]", "CPLD_D[6]",
83 +- "CPLD_D[7]", "", "", "",
84 ++ "CPLD_D[6]", "CPLD_int", "CPLD_reset", "",
85 ++ "", "CPLD_D[7]", "", "",
86 ++ "", "", "", "CPLD_D[5]",
87 ++ "CPLD_D[4]", "CPLD_D[3]", "CPLD_D[2]", "CPLD_D[1]",
88 ++ "CPLD_D[0]", "", "", "",
89 + "", "", "", "",
90 + "", "", "", "KBD_intK",
91 + "", "", "", "";
92 +diff --git a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts
93 +index 286d2df01cfa7..7e0aeb2db3054 100644
94 +--- a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts
95 ++++ b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts
96 +@@ -5,7 +5,6 @@
97 +
98 + /dts-v1/;
99 +
100 +-#include <dt-bindings/phy/phy-imx8-pcie.h>
101 + #include "imx8mm-tqma8mqml.dtsi"
102 + #include "mba8mx.dtsi"
103 +
104 +diff --git a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
105 +index 16ee9b5179e6e..f649dfacb4b69 100644
106 +--- a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
107 ++++ b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
108 +@@ -3,6 +3,7 @@
109 + * Copyright 2020-2021 TQ-Systems GmbH
110 + */
111 +
112 ++#include <dt-bindings/phy/phy-imx8-pcie.h>
113 + #include "imx8mm.dtsi"
114 +
115 + / {
116 +diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
117 +index c2d4da25482ff..44b473494d0f5 100644
118 +--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
119 ++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
120 +@@ -359,8 +359,8 @@
121 + nxp,dvs-standby-voltage = <850000>;
122 + regulator-always-on;
123 + regulator-boot-on;
124 +- regulator-max-microvolt = <950000>;
125 +- regulator-min-microvolt = <850000>;
126 ++ regulator-max-microvolt = <1050000>;
127 ++ regulator-min-microvolt = <805000>;
128 + regulator-name = "On-module +VDD_ARM (BUCK2)";
129 + regulator-ramp-delay = <3125>;
130 + };
131 +@@ -368,8 +368,8 @@
132 + reg_vdd_dram: BUCK3 {
133 + regulator-always-on;
134 + regulator-boot-on;
135 +- regulator-max-microvolt = <950000>;
136 +- regulator-min-microvolt = <850000>;
137 ++ regulator-max-microvolt = <1000000>;
138 ++ regulator-min-microvolt = <805000>;
139 + regulator-name = "On-module +VDD_GPU_VPU_DDR (BUCK3)";
140 + };
141 +
142 +@@ -408,7 +408,7 @@
143 + reg_vdd_snvs: LDO2 {
144 + regulator-always-on;
145 + regulator-boot-on;
146 +- regulator-max-microvolt = <900000>;
147 ++ regulator-max-microvolt = <800000>;
148 + regulator-min-microvolt = <800000>;
149 + regulator-name = "On-module +V0.8_SNVS (LDO2)";
150 + };
151 +diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
152 +index e41e1d56f980d..7bd4eecd592ef 100644
153 +--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
154 ++++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
155 +@@ -672,7 +672,6 @@
156 + <&clk IMX8MN_CLK_GPU_SHADER>,
157 + <&clk IMX8MN_CLK_GPU_BUS_ROOT>,
158 + <&clk IMX8MN_CLK_GPU_AHB>;
159 +- resets = <&src IMX8MQ_RESET_GPU_RESET>;
160 + };
161 +
162 + pgc_dispmix: power-domain@3 {
163 +diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
164 +index 6630ec561dc25..211e6a1b296e1 100644
165 +--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
166 ++++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
167 +@@ -123,8 +123,7 @@
168 + pinctrl-names = "default";
169 + pinctrl-0 = <&pinctrl_reg_can>;
170 + regulator-name = "can2_stby";
171 +- gpio = <&gpio3 19 GPIO_ACTIVE_HIGH>;
172 +- enable-active-high;
173 ++ gpio = <&gpio3 19 GPIO_ACTIVE_LOW>;
174 + regulator-min-microvolt = <3300000>;
175 + regulator-max-microvolt = <3300000>;
176 + };
177 +@@ -484,35 +483,40 @@
178 + lan1: port@0 {
179 + reg = <0>;
180 + label = "lan1";
181 ++ phy-mode = "internal";
182 + local-mac-address = [00 00 00 00 00 00];
183 + };
184 +
185 + lan2: port@1 {
186 + reg = <1>;
187 + label = "lan2";
188 ++ phy-mode = "internal";
189 + local-mac-address = [00 00 00 00 00 00];
190 + };
191 +
192 + lan3: port@2 {
193 + reg = <2>;
194 + label = "lan3";
195 ++ phy-mode = "internal";
196 + local-mac-address = [00 00 00 00 00 00];
197 + };
198 +
199 + lan4: port@3 {
200 + reg = <3>;
201 + label = "lan4";
202 ++ phy-mode = "internal";
203 + local-mac-address = [00 00 00 00 00 00];
204 + };
205 +
206 + lan5: port@4 {
207 + reg = <4>;
208 + label = "lan5";
209 ++ phy-mode = "internal";
210 + local-mac-address = [00 00 00 00 00 00];
211 + };
212 +
213 +- port@6 {
214 +- reg = <6>;
215 ++ port@5 {
216 ++ reg = <5>;
217 + label = "cpu";
218 + ethernet = <&fec>;
219 + phy-mode = "rgmii-id";
220 +diff --git a/arch/arm64/boot/dts/freescale/imx8ulp.dtsi b/arch/arm64/boot/dts/freescale/imx8ulp.dtsi
221 +index 09f7364dd1d05..1cd389b1b95d6 100644
222 +--- a/arch/arm64/boot/dts/freescale/imx8ulp.dtsi
223 ++++ b/arch/arm64/boot/dts/freescale/imx8ulp.dtsi
224 +@@ -172,6 +172,7 @@
225 + compatible = "fsl,imx8ulp-pcc3";
226 + reg = <0x292d0000 0x10000>;
227 + #clock-cells = <1>;
228 ++ #reset-cells = <1>;
229 + };
230 +
231 + tpm5: tpm@29340000 {
232 +@@ -270,6 +271,7 @@
233 + compatible = "fsl,imx8ulp-pcc4";
234 + reg = <0x29800000 0x10000>;
235 + #clock-cells = <1>;
236 ++ #reset-cells = <1>;
237 + };
238 +
239 + lpi2c6: i2c@29840000 {
240 +@@ -414,6 +416,7 @@
241 + compatible = "fsl,imx8ulp-pcc5";
242 + reg = <0x2da70000 0x10000>;
243 + #clock-cells = <1>;
244 ++ #reset-cells = <1>;
245 + };
246 + };
247 +
248 +diff --git a/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi b/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi
249 +index 7249871530ab9..5eecbefa8a336 100644
250 +--- a/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi
251 ++++ b/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi
252 +@@ -2,8 +2,8 @@
253 + /*
254 + * Copyright (c) 2020 Fuzhou Rockchip Electronics Co., Ltd
255 + * Copyright (c) 2020 Engicam srl
256 +- * Copyright (c) 2020 Amarula Solutons
257 +- * Copyright (c) 2020 Amarula Solutons(India)
258 ++ * Copyright (c) 2020 Amarula Solutions
259 ++ * Copyright (c) 2020 Amarula Solutions(India)
260 + */
261 +
262 + #include <dt-bindings/gpio/gpio.h>
263 +diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
264 +index 31ebb4e5fd330..0f9cc042d9bf0 100644
265 +--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
266 ++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
267 +@@ -88,3 +88,8 @@
268 + };
269 + };
270 + };
271 ++
272 ++&wlan_host_wake_l {
273 ++ /* Kevin has an external pull up, but Bob does not. */
274 ++ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
275 ++};
276 +diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
277 +index 50d459ee4831c..af5810e5f5b79 100644
278 +--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
279 ++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
280 +@@ -244,6 +244,14 @@
281 + &edp {
282 + status = "okay";
283 +
284 ++ /*
285 ++ * eDP PHY/clk don't sync reliably at anything other than 24 MHz. Only
286 ++ * set this here, because rk3399-gru.dtsi ensures we can generate this
287 ++ * off GPLL=600MHz, whereas some other RK3399 boards may not.
288 ++ */
289 ++ assigned-clocks = <&cru PCLK_EDP>;
290 ++ assigned-clock-rates = <24000000>;
291 ++
292 + ports {
293 + edp_out: port@1 {
294 + reg = <1>;
295 +@@ -578,6 +586,7 @@ ap_i2c_tp: &i2c5 {
296 + };
297 +
298 + wlan_host_wake_l: wlan-host-wake-l {
299 ++ /* Kevin has an external pull up, but Bob does not */
300 + rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
301 + };
302 + };
303 +diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
304 +index b1ac3a89f259c..aa3e21bd6c8f4 100644
305 +--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
306 ++++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
307 +@@ -62,7 +62,6 @@
308 + vcc5v0_host: vcc5v0-host-regulator {
309 + compatible = "regulator-fixed";
310 + gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>;
311 +- enable-active-low;
312 + pinctrl-names = "default";
313 + pinctrl-0 = <&vcc5v0_host_en>;
314 + regulator-name = "vcc5v0_host";
315 +diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
316 +index fa953b7366421..fdbfdf3634e43 100644
317 +--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
318 ++++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
319 +@@ -163,7 +163,6 @@
320 +
321 + vcc3v3_sd: vcc3v3_sd {
322 + compatible = "regulator-fixed";
323 +- enable-active-low;
324 + gpio = <&gpio0 RK_PA5 GPIO_ACTIVE_LOW>;
325 + pinctrl-names = "default";
326 + pinctrl-0 = <&vcc_sd_h>;
327 +diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
328 +index 02d5f5a8ca036..528bb4e8ac776 100644
329 +--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
330 ++++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
331 +@@ -506,7 +506,7 @@
332 + disable-wp;
333 + pinctrl-names = "default";
334 + pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>;
335 +- sd-uhs-sdr104;
336 ++ sd-uhs-sdr50;
337 + vmmc-supply = <&vcc3v3_sd>;
338 + vqmmc-supply = <&vccio_sd>;
339 + status = "okay";
340 +diff --git a/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
341 +index 622be8be9813d..282f5c74d5cda 100644
342 +--- a/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
343 ++++ b/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
344 +@@ -618,7 +618,7 @@
345 + };
346 +
347 + &usb2phy0_otg {
348 +- vbus-supply = <&vcc5v0_usb_otg>;
349 ++ phy-supply = <&vcc5v0_usb_otg>;
350 + status = "okay";
351 + };
352 +
353 +diff --git a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
354 +index 0813c0c5abded..26912f02684ce 100644
355 +--- a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
356 ++++ b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
357 +@@ -543,7 +543,7 @@
358 + };
359 +
360 + &usb2phy0_otg {
361 +- vbus-supply = <&vcc5v0_usb_otg>;
362 ++ phy-supply = <&vcc5v0_usb_otg>;
363 + status = "okay";
364 + };
365 +
366 +diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
367 +index 707b5451929d4..d4abb948eb14e 100644
368 +--- a/arch/arm64/kernel/topology.c
369 ++++ b/arch/arm64/kernel/topology.c
370 +@@ -251,7 +251,7 @@ static void amu_fie_setup(const struct cpumask *cpus)
371 + for_each_cpu(cpu, cpus) {
372 + if (!freq_counters_valid(cpu) ||
373 + freq_inv_set_max_ratio(cpu,
374 +- cpufreq_get_hw_max_freq(cpu) * 1000,
375 ++ cpufreq_get_hw_max_freq(cpu) * 1000ULL,
376 + arch_timer_get_rate()))
377 + return;
378 + }
379 +diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
380 +index 7a623684d9b5e..2d5a0bcb0cec1 100644
381 +--- a/arch/mips/lantiq/clk.c
382 ++++ b/arch/mips/lantiq/clk.c
383 +@@ -50,6 +50,7 @@ struct clk *clk_get_io(void)
384 + {
385 + return &cpu_clk_generic[2];
386 + }
387 ++EXPORT_SYMBOL_GPL(clk_get_io);
388 +
389 + struct clk *clk_get_ppe(void)
390 + {
391 +diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
392 +index 794c96c2a4cdd..311dc1580bbde 100644
393 +--- a/arch/mips/loongson32/common/platform.c
394 ++++ b/arch/mips/loongson32/common/platform.c
395 +@@ -98,7 +98,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
396 + if (plat_dat->bus_id) {
397 + __raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 |
398 + GMAC1_USE_UART0, LS1X_MUX_CTRL0);
399 +- switch (plat_dat->interface) {
400 ++ switch (plat_dat->phy_interface) {
401 + case PHY_INTERFACE_MODE_RGMII:
402 + val &= ~(GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
403 + break;
404 +@@ -107,12 +107,12 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
405 + break;
406 + default:
407 + pr_err("unsupported mii mode %d\n",
408 +- plat_dat->interface);
409 ++ plat_dat->phy_interface);
410 + return -ENOTSUPP;
411 + }
412 + val &= ~GMAC1_SHUT;
413 + } else {
414 +- switch (plat_dat->interface) {
415 ++ switch (plat_dat->phy_interface) {
416 + case PHY_INTERFACE_MODE_RGMII:
417 + val &= ~(GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
418 + break;
419 +@@ -121,7 +121,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
420 + break;
421 + default:
422 + pr_err("unsupported mii mode %d\n",
423 +- plat_dat->interface);
424 ++ plat_dat->phy_interface);
425 + return -ENOTSUPP;
426 + }
427 + val &= ~GMAC0_SHUT;
428 +@@ -131,7 +131,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
429 + plat_dat = dev_get_platdata(&pdev->dev);
430 +
431 + val &= ~PHY_INTF_SELI;
432 +- if (plat_dat->interface == PHY_INTERFACE_MODE_RMII)
433 ++ if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII)
434 + val |= 0x4 << PHY_INTF_SELI_SHIFT;
435 + __raw_writel(val, LS1X_MUX_CTRL1);
436 +
437 +@@ -146,9 +146,9 @@ static struct plat_stmmacenet_data ls1x_eth0_pdata = {
438 + .bus_id = 0,
439 + .phy_addr = -1,
440 + #if defined(CONFIG_LOONGSON1_LS1B)
441 +- .interface = PHY_INTERFACE_MODE_MII,
442 ++ .phy_interface = PHY_INTERFACE_MODE_MII,
443 + #elif defined(CONFIG_LOONGSON1_LS1C)
444 +- .interface = PHY_INTERFACE_MODE_RMII,
445 ++ .phy_interface = PHY_INTERFACE_MODE_RMII,
446 + #endif
447 + .mdio_bus_data = &ls1x_mdio_bus_data,
448 + .dma_cfg = &ls1x_eth_dma_cfg,
449 +@@ -186,7 +186,7 @@ struct platform_device ls1x_eth0_pdev = {
450 + static struct plat_stmmacenet_data ls1x_eth1_pdata = {
451 + .bus_id = 1,
452 + .phy_addr = -1,
453 +- .interface = PHY_INTERFACE_MODE_MII,
454 ++ .phy_interface = PHY_INTERFACE_MODE_MII,
455 + .mdio_bus_data = &ls1x_mdio_bus_data,
456 + .dma_cfg = &ls1x_eth_dma_cfg,
457 + .has_gmac = 1,
458 +diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
459 +index fcbb81feb7ad8..1f02f15569749 100644
460 +--- a/arch/riscv/Kconfig
461 ++++ b/arch/riscv/Kconfig
462 +@@ -361,6 +361,7 @@ config RISCV_ISA_C
463 + config RISCV_ISA_SVPBMT
464 + bool "SVPBMT extension support"
465 + depends on 64BIT && MMU
466 ++ depends on !XIP_KERNEL
467 + select RISCV_ALTERNATIVE
468 + default y
469 + help
470 +diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
471 +index 5a2de6b6f8822..5c591123c4409 100644
472 +--- a/arch/riscv/kernel/signal.c
473 ++++ b/arch/riscv/kernel/signal.c
474 +@@ -124,6 +124,8 @@ SYSCALL_DEFINE0(rt_sigreturn)
475 + if (restore_altstack(&frame->uc.uc_stack))
476 + goto badframe;
477 +
478 ++ regs->cause = -1UL;
479 ++
480 + return regs->a0;
481 +
482 + badframe:
483 +diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
484 +index e0de60e503b98..d9e023c78f568 100644
485 +--- a/arch/um/kernel/um_arch.c
486 ++++ b/arch/um/kernel/um_arch.c
487 +@@ -33,7 +33,7 @@
488 + #include "um_arch.h"
489 +
490 + #define DEFAULT_COMMAND_LINE_ROOT "root=98:0"
491 +-#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty"
492 ++#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty0"
493 +
494 + /* Changed in add_arg and setup_arch, which run before SMP is started */
495 + static char __initdata command_line[COMMAND_LINE_SIZE] = { 0 };
496 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
497 +index 4c0e812f2f044..19c04412f6e16 100644
498 +--- a/arch/x86/include/asm/kvm_host.h
499 ++++ b/arch/x86/include/asm/kvm_host.h
500 +@@ -713,6 +713,7 @@ struct kvm_vcpu_arch {
501 + struct fpu_guest guest_fpu;
502 +
503 + u64 xcr0;
504 ++ u64 guest_supported_xcr0;
505 +
506 + struct kvm_pio_request pio;
507 + void *pio_data;
508 +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
509 +index de6d44e07e348..3ab498165639f 100644
510 +--- a/arch/x86/kvm/cpuid.c
511 ++++ b/arch/x86/kvm/cpuid.c
512 +@@ -283,7 +283,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
513 + {
514 + struct kvm_lapic *apic = vcpu->arch.apic;
515 + struct kvm_cpuid_entry2 *best;
516 +- u64 guest_supported_xcr0;
517 +
518 + best = kvm_find_cpuid_entry(vcpu, 1, 0);
519 + if (best && apic) {
520 +@@ -295,10 +294,16 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
521 + kvm_apic_set_version(vcpu);
522 + }
523 +
524 +- guest_supported_xcr0 =
525 ++ vcpu->arch.guest_supported_xcr0 =
526 + cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
527 +
528 +- vcpu->arch.guest_fpu.fpstate->user_xfeatures = guest_supported_xcr0;
529 ++ /*
530 ++ * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
531 ++ * XSAVE/XCRO are not exposed to the guest, and even if XSAVE isn't
532 ++ * supported by the host.
533 ++ */
534 ++ vcpu->arch.guest_fpu.fpstate->user_xfeatures = vcpu->arch.guest_supported_xcr0 |
535 ++ XFEATURE_MASK_FPSSE;
536 +
537 + kvm_update_pv_runtime(vcpu);
538 +
539 +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
540 +index 09fa8a94807bf..0c4a866813b31 100644
541 +--- a/arch/x86/kvm/emulate.c
542 ++++ b/arch/x86/kvm/emulate.c
543 +@@ -4134,6 +4134,9 @@ static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
544 + {
545 + u32 eax, ecx, edx;
546 +
547 ++ if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE))
548 ++ return emulate_ud(ctxt);
549 ++
550 + eax = reg_read(ctxt, VCPU_REGS_RAX);
551 + edx = reg_read(ctxt, VCPU_REGS_RDX);
552 + ecx = reg_read(ctxt, VCPU_REGS_RCX);
553 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
554 +index 5b36866528568..8c2815151864b 100644
555 +--- a/arch/x86/kvm/x86.c
556 ++++ b/arch/x86/kvm/x86.c
557 +@@ -1025,15 +1025,10 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
558 + }
559 + EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);
560 +
561 +-static inline u64 kvm_guest_supported_xcr0(struct kvm_vcpu *vcpu)
562 +-{
563 +- return vcpu->arch.guest_fpu.fpstate->user_xfeatures;
564 +-}
565 +-
566 + #ifdef CONFIG_X86_64
567 + static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
568 + {
569 +- return kvm_guest_supported_xcr0(vcpu) & XFEATURE_MASK_USER_DYNAMIC;
570 ++ return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC;
571 + }
572 + #endif
573 +
574 +@@ -1056,7 +1051,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
575 + * saving. However, xcr0 bit 0 is always set, even if the
576 + * emulated CPU does not support XSAVE (see kvm_vcpu_reset()).
577 + */
578 +- valid_bits = kvm_guest_supported_xcr0(vcpu) | XFEATURE_MASK_FP;
579 ++ valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
580 + if (xcr0 & ~valid_bits)
581 + return 1;
582 +
583 +@@ -1084,6 +1079,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
584 +
585 + int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
586 + {
587 ++ /* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */
588 + if (static_call(kvm_x86_get_cpl)(vcpu) != 0 ||
589 + __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
590 + kvm_inject_gp(vcpu, 0);
591 +diff --git a/block/blk-core.c b/block/blk-core.c
592 +index cc6fbcb6d2521..7743c68177e89 100644
593 +--- a/block/blk-core.c
594 ++++ b/block/blk-core.c
595 +@@ -284,49 +284,6 @@ void blk_queue_start_drain(struct request_queue *q)
596 + wake_up_all(&q->mq_freeze_wq);
597 + }
598 +
599 +-/**
600 +- * blk_cleanup_queue - shutdown a request queue
601 +- * @q: request queue to shutdown
602 +- *
603 +- * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
604 +- * put it. All future requests will be failed immediately with -ENODEV.
605 +- *
606 +- * Context: can sleep
607 +- */
608 +-void blk_cleanup_queue(struct request_queue *q)
609 +-{
610 +- /* cannot be called from atomic context */
611 +- might_sleep();
612 +-
613 +- WARN_ON_ONCE(blk_queue_registered(q));
614 +-
615 +- /* mark @q DYING, no new request or merges will be allowed afterwards */
616 +- blk_queue_flag_set(QUEUE_FLAG_DYING, q);
617 +- blk_queue_start_drain(q);
618 +-
619 +- blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
620 +- blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
621 +-
622 +- /*
623 +- * Drain all requests queued before DYING marking. Set DEAD flag to
624 +- * prevent that blk_mq_run_hw_queues() accesses the hardware queues
625 +- * after draining finished.
626 +- */
627 +- blk_freeze_queue(q);
628 +-
629 +- blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
630 +-
631 +- blk_sync_queue(q);
632 +- if (queue_is_mq(q)) {
633 +- blk_mq_cancel_work_sync(q);
634 +- blk_mq_exit_queue(q);
635 +- }
636 +-
637 +- /* @q is and will stay empty, shutdown and put */
638 +- blk_put_queue(q);
639 +-}
640 +-EXPORT_SYMBOL(blk_cleanup_queue);
641 +-
642 + /**
643 + * blk_queue_enter() - try to increase q->q_usage_counter
644 + * @q: request queue pointer
645 +diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
646 +index 61f179e5f151a..28adb01f64419 100644
647 +--- a/block/blk-mq-debugfs.c
648 ++++ b/block/blk-mq-debugfs.c
649 +@@ -116,7 +116,6 @@ static const char *const blk_queue_flag_name[] = {
650 + QUEUE_FLAG_NAME(NOXMERGES),
651 + QUEUE_FLAG_NAME(ADD_RANDOM),
652 + QUEUE_FLAG_NAME(SAME_FORCE),
653 +- QUEUE_FLAG_NAME(DEAD),
654 + QUEUE_FLAG_NAME(INIT_DONE),
655 + QUEUE_FLAG_NAME(STABLE_WRITES),
656 + QUEUE_FLAG_NAME(POLL),
657 +@@ -151,11 +150,10 @@ static ssize_t queue_state_write(void *data, const char __user *buf,
658 + char opbuf[16] = { }, *op;
659 +
660 + /*
661 +- * The "state" attribute is removed after blk_cleanup_queue() has called
662 +- * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
663 +- * triggering a use-after-free.
664 ++ * The "state" attribute is removed when the queue is removed. Don't
665 ++ * allow setting the state on a dying queue to avoid a use-after-free.
666 + */
667 +- if (blk_queue_dead(q))
668 ++ if (blk_queue_dying(q))
669 + return -ENOENT;
670 +
671 + if (count >= sizeof(opbuf)) {
672 +diff --git a/block/blk-mq.c b/block/blk-mq.c
673 +index 0a299941c622e..69d0a58f9e2f1 100644
674 +--- a/block/blk-mq.c
675 ++++ b/block/blk-mq.c
676 +@@ -3896,7 +3896,7 @@ static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
677 + q->queuedata = queuedata;
678 + ret = blk_mq_init_allocated_queue(set, q);
679 + if (ret) {
680 +- blk_cleanup_queue(q);
681 ++ blk_put_queue(q);
682 + return ERR_PTR(ret);
683 + }
684 + return q;
685 +@@ -3908,6 +3908,35 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
686 + }
687 + EXPORT_SYMBOL(blk_mq_init_queue);
688 +
689 ++/**
690 ++ * blk_mq_destroy_queue - shutdown a request queue
691 ++ * @q: request queue to shutdown
692 ++ *
693 ++ * This shuts down a request queue allocated by blk_mq_init_queue() and drops
694 ++ * the initial reference. All future requests will failed with -ENODEV.
695 ++ *
696 ++ * Context: can sleep
697 ++ */
698 ++void blk_mq_destroy_queue(struct request_queue *q)
699 ++{
700 ++ WARN_ON_ONCE(!queue_is_mq(q));
701 ++ WARN_ON_ONCE(blk_queue_registered(q));
702 ++
703 ++ might_sleep();
704 ++
705 ++ blk_queue_flag_set(QUEUE_FLAG_DYING, q);
706 ++ blk_queue_start_drain(q);
707 ++ blk_freeze_queue(q);
708 ++
709 ++ blk_sync_queue(q);
710 ++ blk_mq_cancel_work_sync(q);
711 ++ blk_mq_exit_queue(q);
712 ++
713 ++ /* @q is and will stay empty, shutdown and put */
714 ++ blk_put_queue(q);
715 ++}
716 ++EXPORT_SYMBOL(blk_mq_destroy_queue);
717 ++
718 + struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
719 + struct lock_class_key *lkclass)
720 + {
721 +@@ -3920,13 +3949,23 @@ struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
722 +
723 + disk = __alloc_disk_node(q, set->numa_node, lkclass);
724 + if (!disk) {
725 +- blk_cleanup_queue(q);
726 ++ blk_mq_destroy_queue(q);
727 + return ERR_PTR(-ENOMEM);
728 + }
729 ++ set_bit(GD_OWNS_QUEUE, &disk->state);
730 + return disk;
731 + }
732 + EXPORT_SYMBOL(__blk_mq_alloc_disk);
733 +
734 ++struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
735 ++ struct lock_class_key *lkclass)
736 ++{
737 ++ if (!blk_get_queue(q))
738 ++ return NULL;
739 ++ return __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
740 ++}
741 ++EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
742 ++
743 + static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
744 + struct blk_mq_tag_set *set, struct request_queue *q,
745 + int hctx_idx, int node)
746 +diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
747 +index 9b905e9443e49..84d7f87015673 100644
748 +--- a/block/blk-sysfs.c
749 ++++ b/block/blk-sysfs.c
750 +@@ -748,11 +748,6 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
751 + * decremented with blk_put_queue(). Once the refcount reaches 0 this function
752 + * is called.
753 + *
754 +- * For drivers that have a request_queue on a gendisk and added with
755 +- * __device_add_disk() the refcount to request_queue will reach 0 with
756 +- * the last put_disk() called by the driver. For drivers which don't use
757 +- * __device_add_disk() this happens with blk_cleanup_queue().
758 +- *
759 + * Drivers exist which depend on the release of the request_queue to be
760 + * synchronous, it should not be deferred.
761 + *
762 +diff --git a/block/blk.h b/block/blk.h
763 +index 434017701403f..0d6668663ab5d 100644
764 +--- a/block/blk.h
765 ++++ b/block/blk.h
766 +@@ -411,6 +411,9 @@ int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
767 + sector_t length);
768 + void blk_drop_partitions(struct gendisk *disk);
769 +
770 ++struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
771 ++ struct lock_class_key *lkclass);
772 ++
773 + int bio_add_hw_page(struct request_queue *q, struct bio *bio,
774 + struct page *page, unsigned int len, unsigned int offset,
775 + unsigned int max_sectors, bool *same_page);
776 +diff --git a/block/bsg-lib.c b/block/bsg-lib.c
777 +index acfe1357bf6c4..fd4cd5e682826 100644
778 +--- a/block/bsg-lib.c
779 ++++ b/block/bsg-lib.c
780 +@@ -324,7 +324,7 @@ void bsg_remove_queue(struct request_queue *q)
781 + container_of(q->tag_set, struct bsg_set, tag_set);
782 +
783 + bsg_unregister_queue(bset->bd);
784 +- blk_cleanup_queue(q);
785 ++ blk_mq_destroy_queue(q);
786 + blk_mq_free_tag_set(&bset->tag_set);
787 + kfree(bset);
788 + }
789 +@@ -399,7 +399,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
790 +
791 + return q;
792 + out_cleanup_queue:
793 +- blk_cleanup_queue(q);
794 ++ blk_mq_destroy_queue(q);
795 + out_queue:
796 + blk_mq_free_tag_set(set);
797 + out_tag_set:
798 +diff --git a/block/genhd.c b/block/genhd.c
799 +index 278227ba1d531..a39c416d658fd 100644
800 +--- a/block/genhd.c
801 ++++ b/block/genhd.c
802 +@@ -617,13 +617,14 @@ void del_gendisk(struct gendisk *disk)
803 + * Fail any new I/O.
804 + */
805 + set_bit(GD_DEAD, &disk->state);
806 ++ if (test_bit(GD_OWNS_QUEUE, &disk->state))
807 ++ blk_queue_flag_set(QUEUE_FLAG_DYING, q);
808 + set_capacity(disk, 0);
809 +
810 + /*
811 + * Prevent new I/O from crossing bio_queue_enter().
812 + */
813 + blk_queue_start_drain(q);
814 +- blk_mq_freeze_queue_wait(q);
815 +
816 + if (!(disk->flags & GENHD_FL_HIDDEN)) {
817 + sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
818 +@@ -647,6 +648,8 @@ void del_gendisk(struct gendisk *disk)
819 + pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
820 + device_del(disk_to_dev(disk));
821 +
822 ++ blk_mq_freeze_queue_wait(q);
823 ++
824 + blk_throtl_cancel_bios(disk->queue);
825 +
826 + blk_sync_queue(q);
827 +@@ -663,11 +666,16 @@ void del_gendisk(struct gendisk *disk)
828 + blk_mq_unquiesce_queue(q);
829 +
830 + /*
831 +- * Allow using passthrough request again after the queue is torn down.
832 ++ * If the disk does not own the queue, allow using passthrough requests
833 ++ * again. Else leave the queue frozen to fail all I/O.
834 + */
835 +- blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
836 +- __blk_mq_unfreeze_queue(q, true);
837 +-
838 ++ if (!test_bit(GD_OWNS_QUEUE, &disk->state)) {
839 ++ blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
840 ++ __blk_mq_unfreeze_queue(q, true);
841 ++ } else {
842 ++ if (queue_is_mq(q))
843 ++ blk_mq_exit_queue(q);
844 ++ }
845 + }
846 + EXPORT_SYMBOL(del_gendisk);
847 +
848 +@@ -1151,6 +1159,18 @@ static void disk_release(struct device *dev)
849 + might_sleep();
850 + WARN_ON_ONCE(disk_live(disk));
851 +
852 ++ /*
853 ++ * To undo the all initialization from blk_mq_init_allocated_queue in
854 ++ * case of a probe failure where add_disk is never called we have to
855 ++ * call blk_mq_exit_queue here. We can't do this for the more common
856 ++ * teardown case (yet) as the tagset can be gone by the time the disk
857 ++ * is released once it was added.
858 ++ */
859 ++ if (queue_is_mq(disk->queue) &&
860 ++ test_bit(GD_OWNS_QUEUE, &disk->state) &&
861 ++ !test_bit(GD_ADDED, &disk->state))
862 ++ blk_mq_exit_queue(disk->queue);
863 ++
864 + blkcg_exit_queue(disk->queue);
865 +
866 + disk_release_events(disk);
867 +@@ -1338,12 +1358,9 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
868 + {
869 + struct gendisk *disk;
870 +
871 +- if (!blk_get_queue(q))
872 +- return NULL;
873 +-
874 + disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
875 + if (!disk)
876 +- goto out_put_queue;
877 ++ return NULL;
878 +
879 + disk->bdi = bdi_alloc(node_id);
880 + if (!disk->bdi)
881 +@@ -1387,11 +1404,8 @@ out_free_bdi:
882 + bdi_put(disk->bdi);
883 + out_free_disk:
884 + kfree(disk);
885 +-out_put_queue:
886 +- blk_put_queue(q);
887 + return NULL;
888 + }
889 +-EXPORT_SYMBOL(__alloc_disk_node);
890 +
891 + struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)
892 + {
893 +@@ -1404,9 +1418,10 @@ struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)
894 +
895 + disk = __alloc_disk_node(q, node, lkclass);
896 + if (!disk) {
897 +- blk_cleanup_queue(q);
898 ++ blk_put_queue(q);
899 + return NULL;
900 + }
901 ++ set_bit(GD_OWNS_QUEUE, &disk->state);
902 + return disk;
903 + }
904 + EXPORT_SYMBOL(__blk_alloc_disk);
905 +@@ -1418,6 +1433,9 @@ EXPORT_SYMBOL(__blk_alloc_disk);
906 + * This decrements the refcount for the struct gendisk. When this reaches 0
907 + * we'll have disk_release() called.
908 + *
909 ++ * Note: for blk-mq disk put_disk must be called before freeing the tag_set
910 ++ * when handling probe errors (that is before add_disk() is called).
911 ++ *
912 + * Context: Any context, but the last reference must not be dropped from
913 + * atomic context.
914 + */
915 +@@ -1439,7 +1457,6 @@ EXPORT_SYMBOL(put_disk);
916 + */
917 + void blk_cleanup_disk(struct gendisk *disk)
918 + {
919 +- blk_cleanup_queue(disk->queue);
920 + put_disk(disk);
921 + }
922 + EXPORT_SYMBOL(blk_cleanup_disk);
923 +diff --git a/certs/Kconfig b/certs/Kconfig
924 +index bf9b511573d75..1f109b0708778 100644
925 +--- a/certs/Kconfig
926 ++++ b/certs/Kconfig
927 +@@ -43,7 +43,7 @@ config SYSTEM_TRUSTED_KEYRING
928 + bool "Provide system-wide ring of trusted keys"
929 + depends on KEYS
930 + depends on ASYMMETRIC_KEY_TYPE
931 +- depends on X509_CERTIFICATE_PARSER
932 ++ depends on X509_CERTIFICATE_PARSER = y
933 + help
934 + Provide a system keyring to which trusted keys can be added. Keys in
935 + the keyring are considered to be trusted. Keys may be added at will
936 +diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
937 +index e232cc4fd444b..c6e41ee18aaa2 100644
938 +--- a/drivers/block/ataflop.c
939 ++++ b/drivers/block/ataflop.c
940 +@@ -2045,7 +2045,6 @@ static void atari_floppy_cleanup(void)
941 + if (!unit[i].disk[type])
942 + continue;
943 + del_gendisk(unit[i].disk[type]);
944 +- blk_cleanup_queue(unit[i].disk[type]->queue);
945 + put_disk(unit[i].disk[type]);
946 + }
947 + blk_mq_free_tag_set(&unit[i].tag_set);
948 +diff --git a/drivers/block/loop.c b/drivers/block/loop.c
949 +index a59910ef948e9..1c036ef686fbb 100644
950 +--- a/drivers/block/loop.c
951 ++++ b/drivers/block/loop.c
952 +@@ -2062,7 +2062,6 @@ static void loop_remove(struct loop_device *lo)
953 + {
954 + /* Make this loop device unreachable from pathname. */
955 + del_gendisk(lo->lo_disk);
956 +- blk_cleanup_queue(lo->lo_disk->queue);
957 + blk_mq_free_tag_set(&lo->tag_set);
958 +
959 + mutex_lock(&loop_ctl_mutex);
960 +diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
961 +index 6699e4b2f7f43..06994a35acc7a 100644
962 +--- a/drivers/block/mtip32xx/mtip32xx.c
963 ++++ b/drivers/block/mtip32xx/mtip32xx.c
964 +@@ -3677,7 +3677,6 @@ static int mtip_block_shutdown(struct driver_data *dd)
965 + if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
966 + del_gendisk(dd->disk);
967 +
968 +- blk_cleanup_queue(dd->queue);
969 + blk_mq_free_tag_set(&dd->tags);
970 + put_disk(dd->disk);
971 + return 0;
972 +@@ -4040,7 +4039,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
973 + dev_info(&dd->pdev->dev, "device %s surprise removal\n",
974 + dd->disk->disk_name);
975 +
976 +- blk_cleanup_queue(dd->queue);
977 + blk_mq_free_tag_set(&dd->tags);
978 +
979 + /* De-initialize the protocol layer. */
980 +diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
981 +index 409c76b81aed4..a4470374f54fc 100644
982 +--- a/drivers/block/rnbd/rnbd-clt.c
983 ++++ b/drivers/block/rnbd/rnbd-clt.c
984 +@@ -1755,7 +1755,7 @@ static void rnbd_destroy_sessions(void)
985 + list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
986 + /*
987 + * Here unmap happens in parallel for only one reason:
988 +- * blk_cleanup_queue() takes around half a second, so
989 ++ * del_gendisk() takes around half a second, so
990 + * on huge amount of devices the whole module unload
991 + * procedure takes minutes.
992 + */
993 +diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
994 +index 63b4f6431d2e6..75057dbbcfbea 100644
995 +--- a/drivers/block/sx8.c
996 ++++ b/drivers/block/sx8.c
997 +@@ -1536,7 +1536,7 @@ err_out_free_majors:
998 + clear_bit(0, &carm_major_alloc);
999 + else if (host->major == 161)
1000 + clear_bit(1, &carm_major_alloc);
1001 +- blk_cleanup_queue(host->oob_q);
1002 ++ blk_mq_destroy_queue(host->oob_q);
1003 + blk_mq_free_tag_set(&host->tag_set);
1004 + err_out_dma_free:
1005 + dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
1006 +@@ -1570,7 +1570,7 @@ static void carm_remove_one (struct pci_dev *pdev)
1007 + clear_bit(0, &carm_major_alloc);
1008 + else if (host->major == 161)
1009 + clear_bit(1, &carm_major_alloc);
1010 +- blk_cleanup_queue(host->oob_q);
1011 ++ blk_mq_destroy_queue(host->oob_q);
1012 + blk_mq_free_tag_set(&host->tag_set);
1013 + dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
1014 + iounmap(host->mmio);
1015 +diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
1016 +index d756423e0059a..59d6d5faf7396 100644
1017 +--- a/drivers/block/virtio_blk.c
1018 ++++ b/drivers/block/virtio_blk.c
1019 +@@ -1107,7 +1107,6 @@ static void virtblk_remove(struct virtio_device *vdev)
1020 + flush_work(&vblk->config_work);
1021 +
1022 + del_gendisk(vblk->disk);
1023 +- blk_cleanup_queue(vblk->disk->queue);
1024 + blk_mq_free_tag_set(&vblk->tag_set);
1025 +
1026 + mutex_lock(&vblk->vdev_mutex);
1027 +diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
1028 +index 7a6ed83481b8d..18ad43d9933ec 100644
1029 +--- a/drivers/block/z2ram.c
1030 ++++ b/drivers/block/z2ram.c
1031 +@@ -384,7 +384,6 @@ static void __exit z2_exit(void)
1032 +
1033 + for (i = 0; i < Z2MINOR_COUNT; i++) {
1034 + del_gendisk(z2ram_gendisk[i]);
1035 +- blk_cleanup_queue(z2ram_gendisk[i]->queue);
1036 + put_disk(z2ram_gendisk[i]);
1037 + }
1038 + blk_mq_free_tag_set(&tag_set);
1039 +diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
1040 +index 8e78b37d0f6a4..f4cc90ea6198e 100644
1041 +--- a/drivers/cdrom/gdrom.c
1042 ++++ b/drivers/cdrom/gdrom.c
1043 +@@ -831,7 +831,6 @@ probe_fail_no_mem:
1044 +
1045 + static int remove_gdrom(struct platform_device *devptr)
1046 + {
1047 +- blk_cleanup_queue(gd.gdrom_rq);
1048 + blk_mq_free_tag_set(&gd.tag_set);
1049 + free_irq(HW_EVENT_GDROM_CMD, &gd);
1050 + free_irq(HW_EVENT_GDROM_DMA, &gd);
1051 +diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c
1052 +index cb6401c9e9a4f..acf31cc1dbcca 100644
1053 +--- a/drivers/dax/hmem/device.c
1054 ++++ b/drivers/dax/hmem/device.c
1055 +@@ -15,6 +15,7 @@ void hmem_register_device(int target_nid, struct resource *r)
1056 + .start = r->start,
1057 + .end = r->end,
1058 + .flags = IORESOURCE_MEM,
1059 ++ .desc = IORES_DESC_SOFT_RESERVED,
1060 + };
1061 + struct platform_device *pdev;
1062 + struct memregion_info info;
1063 +diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
1064 +index d4f1e4e9603a4..85e00701473cb 100644
1065 +--- a/drivers/dma/ti/k3-udma-private.c
1066 ++++ b/drivers/dma/ti/k3-udma-private.c
1067 +@@ -31,14 +31,14 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
1068 + }
1069 +
1070 + pdev = of_find_device_by_node(udma_node);
1071 ++ if (np != udma_node)
1072 ++ of_node_put(udma_node);
1073 ++
1074 + if (!pdev) {
1075 + pr_debug("UDMA device not found\n");
1076 + return ERR_PTR(-EPROBE_DEFER);
1077 + }
1078 +
1079 +- if (np != udma_node)
1080 +- of_node_put(udma_node);
1081 +-
1082 + ud = platform_get_drvdata(pdev);
1083 + if (!ud) {
1084 + pr_debug("UDMA has not been probed\n");
1085 +diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
1086 +index 673f3eb498f43..e9afa8cab7309 100644
1087 +--- a/drivers/firmware/arm_scmi/reset.c
1088 ++++ b/drivers/firmware/arm_scmi/reset.c
1089 +@@ -166,9 +166,13 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
1090 + struct scmi_xfer *t;
1091 + struct scmi_msg_reset_domain_reset *dom;
1092 + struct scmi_reset_info *pi = ph->get_priv(ph);
1093 +- struct reset_dom_info *rdom = pi->dom_info + domain;
1094 ++ struct reset_dom_info *rdom;
1095 +
1096 +- if (rdom->async_reset)
1097 ++ if (domain >= pi->num_domains)
1098 ++ return -EINVAL;
1099 ++
1100 ++ rdom = pi->dom_info + domain;
1101 ++ if (rdom->async_reset && flags & AUTONOMOUS_RESET)
1102 + flags |= ASYNCHRONOUS_RESET;
1103 +
1104 + ret = ph->xops->xfer_get_init(ph, RESET, sizeof(*dom), 0, &t);
1105 +@@ -180,7 +184,7 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
1106 + dom->flags = cpu_to_le32(flags);
1107 + dom->reset_state = cpu_to_le32(state);
1108 +
1109 +- if (rdom->async_reset)
1110 ++ if (flags & ASYNCHRONOUS_RESET)
1111 + ret = ph->xops->do_xfer_with_response(ph, t);
1112 + else
1113 + ret = ph->xops->do_xfer(ph, t);
1114 +diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
1115 +index 8a18930f3eb69..516f4f0069bd2 100644
1116 +--- a/drivers/firmware/efi/libstub/secureboot.c
1117 ++++ b/drivers/firmware/efi/libstub/secureboot.c
1118 +@@ -14,7 +14,7 @@
1119 +
1120 + /* SHIM variables */
1121 + static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
1122 +-static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
1123 ++static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT";
1124 +
1125 + static efi_status_t get_var(efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
1126 + unsigned long *data_size, void *data)
1127 +@@ -43,8 +43,8 @@ enum efi_secureboot_mode efi_get_secureboot(void)
1128 +
1129 + /*
1130 + * See if a user has put the shim into insecure mode. If so, and if the
1131 +- * variable doesn't have the runtime attribute set, we might as well
1132 +- * honor that.
1133 ++ * variable doesn't have the non-volatile attribute set, we might as
1134 ++ * well honor that.
1135 + */
1136 + size = sizeof(moksbstate);
1137 + status = get_efi_var(shim_MokSBState_name, &shim_guid,
1138 +@@ -53,7 +53,7 @@ enum efi_secureboot_mode efi_get_secureboot(void)
1139 + /* If it fails, we don't care why. Default to secure */
1140 + if (status != EFI_SUCCESS)
1141 + goto secure_boot_enabled;
1142 +- if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1)
1143 ++ if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1)
1144 + return efi_secureboot_mode_disabled;
1145 +
1146 + secure_boot_enabled:
1147 +diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
1148 +index 05ae8bcc9d671..9780f32a9f243 100644
1149 +--- a/drivers/firmware/efi/libstub/x86-stub.c
1150 ++++ b/drivers/firmware/efi/libstub/x86-stub.c
1151 +@@ -517,6 +517,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
1152 + hdr->ramdisk_image = 0;
1153 + hdr->ramdisk_size = 0;
1154 +
1155 ++ /*
1156 ++ * Disregard any setup data that was provided by the bootloader:
1157 ++ * setup_data could be pointing anywhere, and we have no way of
1158 ++ * authenticating or validating the payload.
1159 ++ */
1160 ++ hdr->setup_data = 0;
1161 ++
1162 + efi_stub_entry(handle, sys_table_arg, boot_params);
1163 + /* not reached */
1164 +
1165 +diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c
1166 +index 312309be0287d..56656fb519f85 100644
1167 +--- a/drivers/gpio/gpio-ixp4xx.c
1168 ++++ b/drivers/gpio/gpio-ixp4xx.c
1169 +@@ -63,6 +63,14 @@ static void ixp4xx_gpio_irq_ack(struct irq_data *d)
1170 + __raw_writel(BIT(d->hwirq), g->base + IXP4XX_REG_GPIS);
1171 + }
1172 +
1173 ++static void ixp4xx_gpio_mask_irq(struct irq_data *d)
1174 ++{
1175 ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
1176 ++
1177 ++ irq_chip_mask_parent(d);
1178 ++ gpiochip_disable_irq(gc, d->hwirq);
1179 ++}
1180 ++
1181 + static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
1182 + {
1183 + struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
1184 +@@ -72,6 +80,7 @@ static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
1185 + if (!(g->irq_edge & BIT(d->hwirq)))
1186 + ixp4xx_gpio_irq_ack(d);
1187 +
1188 ++ gpiochip_enable_irq(gc, d->hwirq);
1189 + irq_chip_unmask_parent(d);
1190 + }
1191 +
1192 +@@ -149,12 +158,14 @@ static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
1193 + return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
1194 + }
1195 +
1196 +-static struct irq_chip ixp4xx_gpio_irqchip = {
1197 ++static const struct irq_chip ixp4xx_gpio_irqchip = {
1198 + .name = "IXP4GPIO",
1199 + .irq_ack = ixp4xx_gpio_irq_ack,
1200 +- .irq_mask = irq_chip_mask_parent,
1201 ++ .irq_mask = ixp4xx_gpio_mask_irq,
1202 + .irq_unmask = ixp4xx_gpio_irq_unmask,
1203 + .irq_set_type = ixp4xx_gpio_irq_set_type,
1204 ++ .flags = IRQCHIP_IMMUTABLE,
1205 ++ GPIOCHIP_IRQ_RESOURCE_HELPERS,
1206 + };
1207 +
1208 + static int ixp4xx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
1209 +@@ -263,7 +274,7 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
1210 + g->gc.owner = THIS_MODULE;
1211 +
1212 + girq = &g->gc.irq;
1213 +- girq->chip = &ixp4xx_gpio_irqchip;
1214 ++ gpio_irq_chip_set_chip(girq, &ixp4xx_gpio_irqchip);
1215 + girq->fwnode = g->fwnode;
1216 + girq->parent_domain = parent;
1217 + girq->child_to_parent_hwirq = ixp4xx_gpio_child_to_parent_hwirq;
1218 +diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
1219 +index a2e505a7545cd..523dfd17dd922 100644
1220 +--- a/drivers/gpio/gpio-mockup.c
1221 ++++ b/drivers/gpio/gpio-mockup.c
1222 +@@ -533,8 +533,10 @@ static int __init gpio_mockup_register_chip(int idx)
1223 + }
1224 +
1225 + fwnode = fwnode_create_software_node(properties, NULL);
1226 +- if (IS_ERR(fwnode))
1227 ++ if (IS_ERR(fwnode)) {
1228 ++ kfree_strarray(line_names, ngpio);
1229 + return PTR_ERR(fwnode);
1230 ++ }
1231 +
1232 + pdevinfo.name = "gpio-mockup";
1233 + pdevinfo.id = idx;
1234 +@@ -597,9 +599,9 @@ static int __init gpio_mockup_init(void)
1235 +
1236 + static void __exit gpio_mockup_exit(void)
1237 + {
1238 ++ gpio_mockup_unregister_pdevs();
1239 + debugfs_remove_recursive(gpio_mockup_dbg_dir);
1240 + platform_driver_unregister(&gpio_mockup_driver);
1241 +- gpio_mockup_unregister_pdevs();
1242 + }
1243 +
1244 + module_init(gpio_mockup_init);
1245 +diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
1246 +index d8a26e503ca5d..f163f5ca857be 100644
1247 +--- a/drivers/gpio/gpio-mt7621.c
1248 ++++ b/drivers/gpio/gpio-mt7621.c
1249 +@@ -112,6 +112,8 @@ mediatek_gpio_irq_unmask(struct irq_data *d)
1250 + unsigned long flags;
1251 + u32 rise, fall, high, low;
1252 +
1253 ++ gpiochip_enable_irq(gc, d->hwirq);
1254 ++
1255 + spin_lock_irqsave(&rg->lock, flags);
1256 + rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
1257 + fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
1258 +@@ -143,6 +145,8 @@ mediatek_gpio_irq_mask(struct irq_data *d)
1259 + mtk_gpio_w32(rg, GPIO_REG_HLVL, high & ~BIT(pin));
1260 + mtk_gpio_w32(rg, GPIO_REG_LLVL, low & ~BIT(pin));
1261 + spin_unlock_irqrestore(&rg->lock, flags);
1262 ++
1263 ++ gpiochip_disable_irq(gc, d->hwirq);
1264 + }
1265 +
1266 + static int
1267 +@@ -204,6 +208,16 @@ mediatek_gpio_xlate(struct gpio_chip *chip,
1268 + return gpio % MTK_BANK_WIDTH;
1269 + }
1270 +
1271 ++static const struct irq_chip mt7621_irq_chip = {
1272 ++ .name = "mt7621-gpio",
1273 ++ .irq_mask_ack = mediatek_gpio_irq_mask,
1274 ++ .irq_mask = mediatek_gpio_irq_mask,
1275 ++ .irq_unmask = mediatek_gpio_irq_unmask,
1276 ++ .irq_set_type = mediatek_gpio_irq_type,
1277 ++ .flags = IRQCHIP_IMMUTABLE,
1278 ++ GPIOCHIP_IRQ_RESOURCE_HELPERS,
1279 ++};
1280 ++
1281 + static int
1282 + mediatek_gpio_bank_probe(struct device *dev, int bank)
1283 + {
1284 +@@ -238,11 +252,6 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
1285 + return -ENOMEM;
1286 +
1287 + rg->chip.offset = bank * MTK_BANK_WIDTH;
1288 +- rg->irq_chip.name = dev_name(dev);
1289 +- rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
1290 +- rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
1291 +- rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
1292 +- rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
1293 +
1294 + if (mtk->gpio_irq) {
1295 + struct gpio_irq_chip *girq;
1296 +@@ -262,7 +271,7 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
1297 + }
1298 +
1299 + girq = &rg->chip.irq;
1300 +- girq->chip = &rg->irq_chip;
1301 ++ gpio_irq_chip_set_chip(girq, &mt7621_irq_chip);
1302 + /* This will let us handle the parent IRQ in the driver */
1303 + girq->parent_handler = NULL;
1304 + girq->num_parents = 0;
1305 +diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
1306 +index fa4bc7481f9a6..e739dcea61b23 100644
1307 +--- a/drivers/gpio/gpio-tqmx86.c
1308 ++++ b/drivers/gpio/gpio-tqmx86.c
1309 +@@ -307,6 +307,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
1310 + girq->default_type = IRQ_TYPE_NONE;
1311 + girq->handler = handle_simple_irq;
1312 + girq->init_valid_mask = tqmx86_init_irq_valid_mask;
1313 ++
1314 ++ irq_domain_set_pm_device(girq->domain, dev);
1315 + }
1316 +
1317 + ret = devm_gpiochip_add_data(dev, chip, gpio);
1318 +@@ -315,8 +317,6 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
1319 + goto out_pm_dis;
1320 + }
1321 +
1322 +- irq_domain_set_pm_device(girq->domain, dev);
1323 +-
1324 + dev_info(dev, "GPIO functionality initialized with %d pins\n",
1325 + chip->ngpio);
1326 +
1327 +diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
1328 +index b26e643383762..21fee9ed7f0d2 100644
1329 +--- a/drivers/gpio/gpiolib-cdev.c
1330 ++++ b/drivers/gpio/gpiolib-cdev.c
1331 +@@ -1975,7 +1975,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
1332 + ret = -ENODEV;
1333 + goto out_free_le;
1334 + }
1335 +- le->irq = irq;
1336 +
1337 + if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
1338 + irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
1339 +@@ -1989,7 +1988,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
1340 + init_waitqueue_head(&le->wait);
1341 +
1342 + /* Request a thread to read the events */
1343 +- ret = request_threaded_irq(le->irq,
1344 ++ ret = request_threaded_irq(irq,
1345 + lineevent_irq_handler,
1346 + lineevent_irq_thread,
1347 + irqflags,
1348 +@@ -1998,6 +1997,8 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
1349 + if (ret)
1350 + goto out_free_le;
1351 +
1352 ++ le->irq = irq;
1353 ++
1354 + fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
1355 + if (fd < 0) {
1356 + ret = fd;
1357 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
1358 +index 4dfd6724b3caa..0a8c15c3a04c3 100644
1359 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
1360 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
1361 +@@ -35,6 +35,8 @@
1362 + #include <linux/pci.h>
1363 + #include <linux/pm_runtime.h>
1364 + #include <drm/drm_crtc_helper.h>
1365 ++#include <drm/drm_damage_helper.h>
1366 ++#include <drm/drm_drv.h>
1367 + #include <drm/drm_edid.h>
1368 + #include <drm/drm_gem_framebuffer_helper.h>
1369 + #include <drm/drm_fb_helper.h>
1370 +@@ -495,6 +497,12 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
1371 + .create_handle = drm_gem_fb_create_handle,
1372 + };
1373 +
1374 ++static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
1375 ++ .destroy = drm_gem_fb_destroy,
1376 ++ .create_handle = drm_gem_fb_create_handle,
1377 ++ .dirty = drm_atomic_helper_dirtyfb,
1378 ++};
1379 ++
1380 + uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
1381 + uint64_t bo_flags)
1382 + {
1383 +@@ -1069,7 +1077,10 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
1384 + if (ret)
1385 + goto err;
1386 +
1387 +- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
1388 ++ if (drm_drv_uses_atomic_modeset(dev))
1389 ++ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs_atomic);
1390 ++ else
1391 ++ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
1392 + if (ret)
1393 + goto err;
1394 +
1395 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
1396 +index b19bf0c3f3737..79ce654bd3dad 100644
1397 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
1398 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
1399 +@@ -748,7 +748,7 @@ static int psp_tmr_init(struct psp_context *psp)
1400 + }
1401 +
1402 + pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
1403 +- ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE(psp->adev),
1404 ++ ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT,
1405 + AMDGPU_GEM_DOMAIN_VRAM,
1406 + &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
1407 +
1408 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
1409 +index e431f49949319..cd366c7f311fd 100644
1410 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
1411 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
1412 +@@ -36,6 +36,7 @@
1413 + #define PSP_CMD_BUFFER_SIZE 0x1000
1414 + #define PSP_1_MEG 0x100000
1415 + #define PSP_TMR_SIZE(adev) ((adev)->asic_type == CHIP_ALDEBARAN ? 0x800000 : 0x400000)
1416 ++#define PSP_TMR_ALIGNMENT 0x100000
1417 + #define PSP_FW_NAME_LEN 0x24
1418 +
1419 + enum psp_shared_mem_size {
1420 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
1421 +index dac202ae864dd..9193ca5d6fe7a 100644
1422 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
1423 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
1424 +@@ -1805,7 +1805,8 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
1425 + amdgpu_ras_query_error_status(adev, &info);
1426 +
1427 + if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
1428 +- adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
1429 ++ adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) &&
1430 ++ adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) {
1431 + if (amdgpu_ras_reset_error_status(adev, info.head.block))
1432 + dev_warn(adev->dev, "Failed to reset error counter and error status");
1433 + }
1434 +diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
1435 +index cdc0c97798483..6c1fd471a4c7d 100644
1436 +--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
1437 ++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
1438 +@@ -28,6 +28,14 @@
1439 + #include "nbio/nbio_7_7_0_sh_mask.h"
1440 + #include <uapi/linux/kfd_ioctl.h>
1441 +
1442 ++static void nbio_v7_7_remap_hdp_registers(struct amdgpu_device *adev)
1443 ++{
1444 ++ WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
1445 ++ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
1446 ++ WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL,
1447 ++ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
1448 ++}
1449 ++
1450 + static u32 nbio_v7_7_get_rev_id(struct amdgpu_device *adev)
1451 + {
1452 + u32 tmp;
1453 +@@ -237,4 +245,5 @@ const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
1454 + .ih_doorbell_range = nbio_v7_7_ih_doorbell_range,
1455 + .ih_control = nbio_v7_7_ih_control,
1456 + .init_registers = nbio_v7_7_init_registers,
1457 ++ .remap_hdp_registers = nbio_v7_7_remap_hdp_registers,
1458 + };
1459 +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
1460 +index f47d82da115c9..42a567e71439b 100644
1461 +--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
1462 ++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
1463 +@@ -6651,8 +6651,7 @@ static double CalculateUrgentLatency(
1464 + return ret;
1465 + }
1466 +
1467 +-
1468 +-static void UseMinimumDCFCLK(
1469 ++static noinline_for_stack void UseMinimumDCFCLK(
1470 + struct display_mode_lib *mode_lib,
1471 + int MaxInterDCNTileRepeaters,
1472 + int MaxPrefetchMode,
1473 +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
1474 +index e4b9fd31223c9..40a672236198e 100644
1475 +--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
1476 ++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
1477 +@@ -261,33 +261,13 @@ static void CalculateRowBandwidth(
1478 +
1479 + static void CalculateFlipSchedule(
1480 + struct display_mode_lib *mode_lib,
1481 ++ unsigned int k,
1482 + double HostVMInefficiencyFactor,
1483 + double UrgentExtraLatency,
1484 + double UrgentLatency,
1485 +- unsigned int GPUVMMaxPageTableLevels,
1486 +- bool HostVMEnable,
1487 +- unsigned int HostVMMaxNonCachedPageTableLevels,
1488 +- bool GPUVMEnable,
1489 +- double HostVMMinPageSize,
1490 + double PDEAndMetaPTEBytesPerFrame,
1491 + double MetaRowBytes,
1492 +- double DPTEBytesPerRow,
1493 +- double BandwidthAvailableForImmediateFlip,
1494 +- unsigned int TotImmediateFlipBytes,
1495 +- enum source_format_class SourcePixelFormat,
1496 +- double LineTime,
1497 +- double VRatio,
1498 +- double VRatioChroma,
1499 +- double Tno_bw,
1500 +- bool DCCEnable,
1501 +- unsigned int dpte_row_height,
1502 +- unsigned int meta_row_height,
1503 +- unsigned int dpte_row_height_chroma,
1504 +- unsigned int meta_row_height_chroma,
1505 +- double *DestinationLinesToRequestVMInImmediateFlip,
1506 +- double *DestinationLinesToRequestRowInImmediateFlip,
1507 +- double *final_flip_bw,
1508 +- bool *ImmediateFlipSupportedForPipe);
1509 ++ double DPTEBytesPerRow);
1510 + static double CalculateWriteBackDelay(
1511 + enum source_format_class WritebackPixelFormat,
1512 + double WritebackHRatio,
1513 +@@ -321,64 +301,28 @@ static void CalculateVupdateAndDynamicMetadataParameters(
1514 + static void CalculateWatermarksAndDRAMSpeedChangeSupport(
1515 + struct display_mode_lib *mode_lib,
1516 + unsigned int PrefetchMode,
1517 +- unsigned int NumberOfActivePlanes,
1518 +- unsigned int MaxLineBufferLines,
1519 +- unsigned int LineBufferSize,
1520 +- unsigned int WritebackInterfaceBufferSize,
1521 + double DCFCLK,
1522 + double ReturnBW,
1523 +- bool SynchronizedVBlank,
1524 +- unsigned int dpte_group_bytes[],
1525 +- unsigned int MetaChunkSize,
1526 + double UrgentLatency,
1527 + double ExtraLatency,
1528 +- double WritebackLatency,
1529 +- double WritebackChunkSize,
1530 + double SOCCLK,
1531 +- double DRAMClockChangeLatency,
1532 +- double SRExitTime,
1533 +- double SREnterPlusExitTime,
1534 +- double SRExitZ8Time,
1535 +- double SREnterPlusExitZ8Time,
1536 + double DCFCLKDeepSleep,
1537 + unsigned int DETBufferSizeY[],
1538 + unsigned int DETBufferSizeC[],
1539 + unsigned int SwathHeightY[],
1540 + unsigned int SwathHeightC[],
1541 +- unsigned int LBBitPerPixel[],
1542 + double SwathWidthY[],
1543 + double SwathWidthC[],
1544 +- double HRatio[],
1545 +- double HRatioChroma[],
1546 +- unsigned int vtaps[],
1547 +- unsigned int VTAPsChroma[],
1548 +- double VRatio[],
1549 +- double VRatioChroma[],
1550 +- unsigned int HTotal[],
1551 +- double PixelClock[],
1552 +- unsigned int BlendingAndTiming[],
1553 + unsigned int DPPPerPlane[],
1554 + double BytePerPixelDETY[],
1555 + double BytePerPixelDETC[],
1556 +- double DSTXAfterScaler[],
1557 +- double DSTYAfterScaler[],
1558 +- bool WritebackEnable[],
1559 +- enum source_format_class WritebackPixelFormat[],
1560 +- double WritebackDestinationWidth[],
1561 +- double WritebackDestinationHeight[],
1562 +- double WritebackSourceHeight[],
1563 + bool UnboundedRequestEnabled,
1564 + int unsigned CompressedBufferSizeInkByte,
1565 + enum clock_change_support *DRAMClockChangeSupport,
1566 +- double *UrgentWatermark,
1567 +- double *WritebackUrgentWatermark,
1568 +- double *DRAMClockChangeWatermark,
1569 +- double *WritebackDRAMClockChangeWatermark,
1570 + double *StutterExitWatermark,
1571 + double *StutterEnterPlusExitWatermark,
1572 + double *Z8StutterExitWatermark,
1573 +- double *Z8StutterEnterPlusExitWatermark,
1574 +- double *MinActiveDRAMClockChangeLatencySupported);
1575 ++ double *Z8StutterEnterPlusExitWatermark);
1576 +
1577 + static void CalculateDCFCLKDeepSleep(
1578 + struct display_mode_lib *mode_lib,
1579 +@@ -2914,33 +2858,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
1580 + for (k = 0; k < v->NumberOfActivePlanes; ++k) {
1581 + CalculateFlipSchedule(
1582 + mode_lib,
1583 ++ k,
1584 + HostVMInefficiencyFactor,
1585 + v->UrgentExtraLatency,
1586 + v->UrgentLatency,
1587 +- v->GPUVMMaxPageTableLevels,
1588 +- v->HostVMEnable,
1589 +- v->HostVMMaxNonCachedPageTableLevels,
1590 +- v->GPUVMEnable,
1591 +- v->HostVMMinPageSize,
1592 + v->PDEAndMetaPTEBytesFrame[k],
1593 + v->MetaRowByte[k],
1594 +- v->PixelPTEBytesPerRow[k],
1595 +- v->BandwidthAvailableForImmediateFlip,
1596 +- v->TotImmediateFlipBytes,
1597 +- v->SourcePixelFormat[k],
1598 +- v->HTotal[k] / v->PixelClock[k],
1599 +- v->VRatio[k],
1600 +- v->VRatioChroma[k],
1601 +- v->Tno_bw[k],
1602 +- v->DCCEnable[k],
1603 +- v->dpte_row_height[k],
1604 +- v->meta_row_height[k],
1605 +- v->dpte_row_height_chroma[k],
1606 +- v->meta_row_height_chroma[k],
1607 +- &v->DestinationLinesToRequestVMInImmediateFlip[k],
1608 +- &v->DestinationLinesToRequestRowInImmediateFlip[k],
1609 +- &v->final_flip_bw[k],
1610 +- &v->ImmediateFlipSupportedForPipe[k]);
1611 ++ v->PixelPTEBytesPerRow[k]);
1612 + }
1613 +
1614 + v->total_dcn_read_bw_with_flip = 0.0;
1615 +@@ -3027,64 +2951,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
1616 + CalculateWatermarksAndDRAMSpeedChangeSupport(
1617 + mode_lib,
1618 + PrefetchMode,
1619 +- v->NumberOfActivePlanes,
1620 +- v->MaxLineBufferLines,
1621 +- v->LineBufferSize,
1622 +- v->WritebackInterfaceBufferSize,
1623 + v->DCFCLK,
1624 + v->ReturnBW,
1625 +- v->SynchronizedVBlank,
1626 +- v->dpte_group_bytes,
1627 +- v->MetaChunkSize,
1628 + v->UrgentLatency,
1629 + v->UrgentExtraLatency,
1630 +- v->WritebackLatency,
1631 +- v->WritebackChunkSize,
1632 + v->SOCCLK,
1633 +- v->DRAMClockChangeLatency,
1634 +- v->SRExitTime,
1635 +- v->SREnterPlusExitTime,
1636 +- v->SRExitZ8Time,
1637 +- v->SREnterPlusExitZ8Time,
1638 + v->DCFCLKDeepSleep,
1639 + v->DETBufferSizeY,
1640 + v->DETBufferSizeC,
1641 + v->SwathHeightY,
1642 + v->SwathHeightC,
1643 +- v->LBBitPerPixel,
1644 + v->SwathWidthY,
1645 + v->SwathWidthC,
1646 +- v->HRatio,
1647 +- v->HRatioChroma,
1648 +- v->vtaps,
1649 +- v->VTAPsChroma,
1650 +- v->VRatio,
1651 +- v->VRatioChroma,
1652 +- v->HTotal,
1653 +- v->PixelClock,
1654 +- v->BlendingAndTiming,
1655 + v->DPPPerPlane,
1656 + v->BytePerPixelDETY,
1657 + v->BytePerPixelDETC,
1658 +- v->DSTXAfterScaler,
1659 +- v->DSTYAfterScaler,
1660 +- v->WritebackEnable,
1661 +- v->WritebackPixelFormat,
1662 +- v->WritebackDestinationWidth,
1663 +- v->WritebackDestinationHeight,
1664 +- v->WritebackSourceHeight,
1665 + v->UnboundedRequestEnabled,
1666 + v->CompressedBufferSizeInkByte,
1667 + &DRAMClockChangeSupport,
1668 +- &v->UrgentWatermark,
1669 +- &v->WritebackUrgentWatermark,
1670 +- &v->DRAMClockChangeWatermark,
1671 +- &v->WritebackDRAMClockChangeWatermark,
1672 + &v->StutterExitWatermark,
1673 + &v->StutterEnterPlusExitWatermark,
1674 + &v->Z8StutterExitWatermark,
1675 +- &v->Z8StutterEnterPlusExitWatermark,
1676 +- &v->MinActiveDRAMClockChangeLatencySupported);
1677 ++ &v->Z8StutterEnterPlusExitWatermark);
1678 +
1679 + for (k = 0; k < v->NumberOfActivePlanes; ++k) {
1680 + if (v->WritebackEnable[k] == true) {
1681 +@@ -3696,61 +3584,43 @@ static void CalculateRowBandwidth(
1682 +
1683 + static void CalculateFlipSchedule(
1684 + struct display_mode_lib *mode_lib,
1685 ++ unsigned int k,
1686 + double HostVMInefficiencyFactor,
1687 + double UrgentExtraLatency,
1688 + double UrgentLatency,
1689 +- unsigned int GPUVMMaxPageTableLevels,
1690 +- bool HostVMEnable,
1691 +- unsigned int HostVMMaxNonCachedPageTableLevels,
1692 +- bool GPUVMEnable,
1693 +- double HostVMMinPageSize,
1694 + double PDEAndMetaPTEBytesPerFrame,
1695 + double MetaRowBytes,
1696 +- double DPTEBytesPerRow,
1697 +- double BandwidthAvailableForImmediateFlip,
1698 +- unsigned int TotImmediateFlipBytes,
1699 +- enum source_format_class SourcePixelFormat,
1700 +- double LineTime,
1701 +- double VRatio,
1702 +- double VRatioChroma,
1703 +- double Tno_bw,
1704 +- bool DCCEnable,
1705 +- unsigned int dpte_row_height,
1706 +- unsigned int meta_row_height,
1707 +- unsigned int dpte_row_height_chroma,
1708 +- unsigned int meta_row_height_chroma,
1709 +- double *DestinationLinesToRequestVMInImmediateFlip,
1710 +- double *DestinationLinesToRequestRowInImmediateFlip,
1711 +- double *final_flip_bw,
1712 +- bool *ImmediateFlipSupportedForPipe)
1713 ++ double DPTEBytesPerRow)
1714 + {
1715 ++ struct vba_vars_st *v = &mode_lib->vba;
1716 + double min_row_time = 0.0;
1717 + unsigned int HostVMDynamicLevelsTrips;
1718 + double TimeForFetchingMetaPTEImmediateFlip;
1719 + double TimeForFetchingRowInVBlankImmediateFlip;
1720 + double ImmediateFlipBW;
1721 ++ double LineTime = v->HTotal[k] / v->PixelClock[k];
1722 +
1723 +- if (GPUVMEnable == true && HostVMEnable == true) {
1724 +- HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
1725 ++ if (v->GPUVMEnable == true && v->HostVMEnable == true) {
1726 ++ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
1727 + } else {
1728 + HostVMDynamicLevelsTrips = 0;
1729 + }
1730 +
1731 +- if (GPUVMEnable == true || DCCEnable == true) {
1732 +- ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
1733 ++ if (v->GPUVMEnable == true || v->DCCEnable[k] == true) {
1734 ++ ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes;
1735 + }
1736 +
1737 +- if (GPUVMEnable == true) {
1738 ++ if (v->GPUVMEnable == true) {
1739 + TimeForFetchingMetaPTEImmediateFlip = dml_max3(
1740 +- Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
1741 +- UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
1742 ++ v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
1743 ++ UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
1744 + LineTime / 4.0);
1745 + } else {
1746 + TimeForFetchingMetaPTEImmediateFlip = 0;
1747 + }
1748 +
1749 +- *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
1750 +- if ((GPUVMEnable == true || DCCEnable == true)) {
1751 ++ v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
1752 ++ if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
1753 + TimeForFetchingRowInVBlankImmediateFlip = dml_max3(
1754 + (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
1755 + UrgentLatency * (HostVMDynamicLevelsTrips + 1),
1756 +@@ -3759,54 +3629,54 @@ static void CalculateFlipSchedule(
1757 + TimeForFetchingRowInVBlankImmediateFlip = 0;
1758 + }
1759 +
1760 +- *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
1761 ++ v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
1762 +
1763 +- if (GPUVMEnable == true) {
1764 +- *final_flip_bw = dml_max(
1765 +- PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime),
1766 +- (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
1767 +- } else if ((GPUVMEnable == true || DCCEnable == true)) {
1768 +- *final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime);
1769 ++ if (v->GPUVMEnable == true) {
1770 ++ v->final_flip_bw[k] = dml_max(
1771 ++ PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime),
1772 ++ (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime));
1773 ++ } else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
1774 ++ v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime);
1775 + } else {
1776 +- *final_flip_bw = 0;
1777 ++ v->final_flip_bw[k] = 0;
1778 + }
1779 +
1780 +- if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) {
1781 +- if (GPUVMEnable == true && DCCEnable != true) {
1782 +- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma);
1783 +- } else if (GPUVMEnable != true && DCCEnable == true) {
1784 +- min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma);
1785 ++ if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
1786 ++ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
1787 ++ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
1788 ++ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
1789 ++ min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
1790 + } else {
1791 + min_row_time = dml_min4(
1792 +- dpte_row_height * LineTime / VRatio,
1793 +- meta_row_height * LineTime / VRatio,
1794 +- dpte_row_height_chroma * LineTime / VRatioChroma,
1795 +- meta_row_height_chroma * LineTime / VRatioChroma);
1796 ++ v->dpte_row_height[k] * LineTime / v->VRatio[k],
1797 ++ v->meta_row_height[k] * LineTime / v->VRatio[k],
1798 ++ v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k],
1799 ++ v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
1800 + }
1801 + } else {
1802 +- if (GPUVMEnable == true && DCCEnable != true) {
1803 +- min_row_time = dpte_row_height * LineTime / VRatio;
1804 +- } else if (GPUVMEnable != true && DCCEnable == true) {
1805 +- min_row_time = meta_row_height * LineTime / VRatio;
1806 ++ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
1807 ++ min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k];
1808 ++ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
1809 ++ min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k];
1810 + } else {
1811 +- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio);
1812 ++ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]);
1813 + }
1814 + }
1815 +
1816 +- if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16
1817 ++ if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16
1818 + || TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
1819 +- *ImmediateFlipSupportedForPipe = false;
1820 ++ v->ImmediateFlipSupportedForPipe[k] = false;
1821 + } else {
1822 +- *ImmediateFlipSupportedForPipe = true;
1823 ++ v->ImmediateFlipSupportedForPipe[k] = true;
1824 + }
1825 +
1826 + #ifdef __DML_VBA_DEBUG__
1827 +- dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip);
1828 +- dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip);
1829 ++ dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]);
1830 ++ dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]);
1831 + dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip);
1832 + dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip);
1833 + dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time);
1834 +- dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe);
1835 ++ dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]);
1836 + #endif
1837 +
1838 + }
1839 +@@ -5397,33 +5267,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
1840 + for (k = 0; k < v->NumberOfActivePlanes; k++) {
1841 + CalculateFlipSchedule(
1842 + mode_lib,
1843 ++ k,
1844 + HostVMInefficiencyFactor,
1845 + v->ExtraLatency,
1846 + v->UrgLatency[i],
1847 +- v->GPUVMMaxPageTableLevels,
1848 +- v->HostVMEnable,
1849 +- v->HostVMMaxNonCachedPageTableLevels,
1850 +- v->GPUVMEnable,
1851 +- v->HostVMMinPageSize,
1852 + v->PDEAndMetaPTEBytesPerFrame[i][j][k],
1853 + v->MetaRowBytes[i][j][k],
1854 +- v->DPTEBytesPerRow[i][j][k],
1855 +- v->BandwidthAvailableForImmediateFlip,
1856 +- v->TotImmediateFlipBytes,
1857 +- v->SourcePixelFormat[k],
1858 +- v->HTotal[k] / v->PixelClock[k],
1859 +- v->VRatio[k],
1860 +- v->VRatioChroma[k],
1861 +- v->Tno_bw[k],
1862 +- v->DCCEnable[k],
1863 +- v->dpte_row_height[k],
1864 +- v->meta_row_height[k],
1865 +- v->dpte_row_height_chroma[k],
1866 +- v->meta_row_height_chroma[k],
1867 +- &v->DestinationLinesToRequestVMInImmediateFlip[k],
1868 +- &v->DestinationLinesToRequestRowInImmediateFlip[k],
1869 +- &v->final_flip_bw[k],
1870 +- &v->ImmediateFlipSupportedForPipe[k]);
1871 ++ v->DPTEBytesPerRow[i][j][k]);
1872 + }
1873 + v->total_dcn_read_bw_with_flip = 0.0;
1874 + for (k = 0; k < v->NumberOfActivePlanes; k++) {
1875 +@@ -5481,64 +5331,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
1876 + CalculateWatermarksAndDRAMSpeedChangeSupport(
1877 + mode_lib,
1878 + v->PrefetchModePerState[i][j],
1879 +- v->NumberOfActivePlanes,
1880 +- v->MaxLineBufferLines,
1881 +- v->LineBufferSize,
1882 +- v->WritebackInterfaceBufferSize,
1883 + v->DCFCLKState[i][j],
1884 + v->ReturnBWPerState[i][j],
1885 +- v->SynchronizedVBlank,
1886 +- v->dpte_group_bytes,
1887 +- v->MetaChunkSize,
1888 + v->UrgLatency[i],
1889 + v->ExtraLatency,
1890 +- v->WritebackLatency,
1891 +- v->WritebackChunkSize,
1892 + v->SOCCLKPerState[i],
1893 +- v->DRAMClockChangeLatency,
1894 +- v->SRExitTime,
1895 +- v->SREnterPlusExitTime,
1896 +- v->SRExitZ8Time,
1897 +- v->SREnterPlusExitZ8Time,
1898 + v->ProjectedDCFCLKDeepSleep[i][j],
1899 + v->DETBufferSizeYThisState,
1900 + v->DETBufferSizeCThisState,
1901 + v->SwathHeightYThisState,
1902 + v->SwathHeightCThisState,
1903 +- v->LBBitPerPixel,
1904 + v->SwathWidthYThisState,
1905 + v->SwathWidthCThisState,
1906 +- v->HRatio,
1907 +- v->HRatioChroma,
1908 +- v->vtaps,
1909 +- v->VTAPsChroma,
1910 +- v->VRatio,
1911 +- v->VRatioChroma,
1912 +- v->HTotal,
1913 +- v->PixelClock,
1914 +- v->BlendingAndTiming,
1915 + v->NoOfDPPThisState,
1916 + v->BytePerPixelInDETY,
1917 + v->BytePerPixelInDETC,
1918 +- v->DSTXAfterScaler,
1919 +- v->DSTYAfterScaler,
1920 +- v->WritebackEnable,
1921 +- v->WritebackPixelFormat,
1922 +- v->WritebackDestinationWidth,
1923 +- v->WritebackDestinationHeight,
1924 +- v->WritebackSourceHeight,
1925 + UnboundedRequestEnabledThisState,
1926 + CompressedBufferSizeInkByteThisState,
1927 + &v->DRAMClockChangeSupport[i][j],
1928 +- &v->UrgentWatermark,
1929 +- &v->WritebackUrgentWatermark,
1930 +- &v->DRAMClockChangeWatermark,
1931 +- &v->WritebackDRAMClockChangeWatermark,
1932 +- &dummy,
1933 + &dummy,
1934 + &dummy,
1935 + &dummy,
1936 +- &v->MinActiveDRAMClockChangeLatencySupported);
1937 ++ &dummy);
1938 + }
1939 + }
1940 +
1941 +@@ -5663,64 +5477,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
1942 + static void CalculateWatermarksAndDRAMSpeedChangeSupport(
1943 + struct display_mode_lib *mode_lib,
1944 + unsigned int PrefetchMode,
1945 +- unsigned int NumberOfActivePlanes,
1946 +- unsigned int MaxLineBufferLines,
1947 +- unsigned int LineBufferSize,
1948 +- unsigned int WritebackInterfaceBufferSize,
1949 + double DCFCLK,
1950 + double ReturnBW,
1951 +- bool SynchronizedVBlank,
1952 +- unsigned int dpte_group_bytes[],
1953 +- unsigned int MetaChunkSize,
1954 + double UrgentLatency,
1955 + double ExtraLatency,
1956 +- double WritebackLatency,
1957 +- double WritebackChunkSize,
1958 + double SOCCLK,
1959 +- double DRAMClockChangeLatency,
1960 +- double SRExitTime,
1961 +- double SREnterPlusExitTime,
1962 +- double SRExitZ8Time,
1963 +- double SREnterPlusExitZ8Time,
1964 + double DCFCLKDeepSleep,
1965 + unsigned int DETBufferSizeY[],
1966 + unsigned int DETBufferSizeC[],
1967 + unsigned int SwathHeightY[],
1968 + unsigned int SwathHeightC[],
1969 +- unsigned int LBBitPerPixel[],
1970 + double SwathWidthY[],
1971 + double SwathWidthC[],
1972 +- double HRatio[],
1973 +- double HRatioChroma[],
1974 +- unsigned int vtaps[],
1975 +- unsigned int VTAPsChroma[],
1976 +- double VRatio[],
1977 +- double VRatioChroma[],
1978 +- unsigned int HTotal[],
1979 +- double PixelClock[],
1980 +- unsigned int BlendingAndTiming[],
1981 + unsigned int DPPPerPlane[],
1982 + double BytePerPixelDETY[],
1983 + double BytePerPixelDETC[],
1984 +- double DSTXAfterScaler[],
1985 +- double DSTYAfterScaler[],
1986 +- bool WritebackEnable[],
1987 +- enum source_format_class WritebackPixelFormat[],
1988 +- double WritebackDestinationWidth[],
1989 +- double WritebackDestinationHeight[],
1990 +- double WritebackSourceHeight[],
1991 + bool UnboundedRequestEnabled,
1992 + int unsigned CompressedBufferSizeInkByte,
1993 + enum clock_change_support *DRAMClockChangeSupport,
1994 +- double *UrgentWatermark,
1995 +- double *WritebackUrgentWatermark,
1996 +- double *DRAMClockChangeWatermark,
1997 +- double *WritebackDRAMClockChangeWatermark,
1998 + double *StutterExitWatermark,
1999 + double *StutterEnterPlusExitWatermark,
2000 + double *Z8StutterExitWatermark,
2001 +- double *Z8StutterEnterPlusExitWatermark,
2002 +- double *MinActiveDRAMClockChangeLatencySupported)
2003 ++ double *Z8StutterEnterPlusExitWatermark)
2004 + {
2005 + struct vba_vars_st *v = &mode_lib->vba;
2006 + double EffectiveLBLatencyHidingY;
2007 +@@ -5740,103 +5518,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
2008 + double TotalPixelBW = 0.0;
2009 + int k, j;
2010 +
2011 +- *UrgentWatermark = UrgentLatency + ExtraLatency;
2012 ++ v->UrgentWatermark = UrgentLatency + ExtraLatency;
2013 +
2014 + #ifdef __DML_VBA_DEBUG__
2015 + dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
2016 + dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency);
2017 +- dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark);
2018 ++ dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark);
2019 + #endif
2020 +
2021 +- *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
2022 ++ v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark;
2023 +
2024 + #ifdef __DML_VBA_DEBUG__
2025 +- dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency);
2026 +- dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark);
2027 ++ dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency);
2028 ++ dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark);
2029 + #endif
2030 +
2031 + v->TotalActiveWriteback = 0;
2032 +- for (k = 0; k < NumberOfActivePlanes; ++k) {
2033 +- if (WritebackEnable[k] == true) {
2034 ++ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
2035 ++ if (v->WritebackEnable[k] == true) {
2036 + v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
2037 + }
2038 + }
2039 +
2040 + if (v->TotalActiveWriteback <= 1) {
2041 +- *WritebackUrgentWatermark = WritebackLatency;
2042 ++ v->WritebackUrgentWatermark = v->WritebackLatency;
2043 + } else {
2044 +- *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
2045 ++ v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
2046 + }
2047 +
2048 + if (v->TotalActiveWriteback <= 1) {
2049 +- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
2050 ++ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency;
2051 + } else {
2052 +- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
2053 ++ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
2054 + }
2055 +
2056 +- for (k = 0; k < NumberOfActivePlanes; ++k) {
2057 ++ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
2058 + TotalPixelBW = TotalPixelBW
2059 +- + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k])
2060 +- / (HTotal[k] / PixelClock[k]);
2061 ++ + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k])
2062 ++ / (v->HTotal[k] / v->PixelClock[k]);
2063 + }
2064 +
2065 +- for (k = 0; k < NumberOfActivePlanes; ++k) {
2066 ++ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
2067 + double EffectiveDETBufferSizeY = DETBufferSizeY[k];
2068 +
2069 + v->LBLatencyHidingSourceLinesY = dml_min(
2070 +- (double) MaxLineBufferLines,
2071 +- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
2072 ++ (double) v->MaxLineBufferLines,
2073 ++ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
2074 +
2075 + v->LBLatencyHidingSourceLinesC = dml_min(
2076 +- (double) MaxLineBufferLines,
2077 +- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
2078 ++ (double) v->MaxLineBufferLines,
2079 ++ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
2080 +
2081 +- EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
2082 ++ EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
2083 +
2084 +- EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
2085 ++ EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
2086 +
2087 + if (UnboundedRequestEnabled) {
2088 + EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
2089 +- + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
2090 ++ + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
2091 + }
2092 +
2093 + LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
2094 + LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
2095 +- FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
2096 ++ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
2097 + if (BytePerPixelDETC[k] > 0) {
2098 + LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
2099 + LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
2100 +- FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
2101 ++ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
2102 + } else {
2103 + LinesInDETC = 0;
2104 + FullDETBufferingTimeC = 999999;
2105 + }
2106 +
2107 + ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
2108 +- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
2109 ++ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
2110 +
2111 +- if (NumberOfActivePlanes > 1) {
2112 ++ if (v->NumberOfActivePlanes > 1) {
2113 + ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
2114 +- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
2115 ++ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
2116 + }
2117 +
2118 + if (BytePerPixelDETC[k] > 0) {
2119 + ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
2120 +- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
2121 ++ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
2122 +
2123 +- if (NumberOfActivePlanes > 1) {
2124 ++ if (v->NumberOfActivePlanes > 1) {
2125 + ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
2126 +- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
2127 ++ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
2128 + }
2129 + v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
2130 + } else {
2131 + v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
2132 + }
2133 +
2134 +- if (WritebackEnable[k] == true) {
2135 +- WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024
2136 +- / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
2137 +- if (WritebackPixelFormat[k] == dm_444_64) {
2138 ++ if (v->WritebackEnable[k] == true) {
2139 ++ WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024
2140 ++ / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
2141 ++ if (v->WritebackPixelFormat[k] == dm_444_64) {
2142 + WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
2143 + }
2144 + WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
2145 +@@ -5846,14 +5624,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
2146 +
2147 + v->MinActiveDRAMClockChangeMargin = 999999;
2148 + PlaneWithMinActiveDRAMClockChangeMargin = 0;
2149 +- for (k = 0; k < NumberOfActivePlanes; ++k) {
2150 ++ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
2151 + if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
2152 + v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
2153 +- if (BlendingAndTiming[k] == k) {
2154 ++ if (v->BlendingAndTiming[k] == k) {
2155 + PlaneWithMinActiveDRAMClockChangeMargin = k;
2156 + } else {
2157 +- for (j = 0; j < NumberOfActivePlanes; ++j) {
2158 +- if (BlendingAndTiming[k] == j) {
2159 ++ for (j = 0; j < v->NumberOfActivePlanes; ++j) {
2160 ++ if (v->BlendingAndTiming[k] == j) {
2161 + PlaneWithMinActiveDRAMClockChangeMargin = j;
2162 + }
2163 + }
2164 +@@ -5861,11 +5639,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
2165 + }
2166 + }
2167 +
2168 +- *MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
2169 ++ v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency ;
2170 +
2171 + SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
2172 +- for (k = 0; k < NumberOfActivePlanes; ++k) {
2173 +- if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
2174 ++ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
2175 ++ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
2176 + && v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
2177 + SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
2178 + }
2179 +@@ -5873,25 +5651,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
2180 +
2181 + v->TotalNumberOfActiveOTG = 0;
2182 +
2183 +- for (k = 0; k < NumberOfActivePlanes; ++k) {
2184 +- if (BlendingAndTiming[k] == k) {
2185 ++ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
2186 ++ if (v->BlendingAndTiming[k] == k) {
2187 + v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
2188 + }
2189 + }
2190 +
2191 + if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
2192 + *DRAMClockChangeSupport = dm_dram_clock_change_vactive;
2193 +- } else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
2194 ++ } else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
2195 + || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) {
2196 + *DRAMClockChangeSupport = dm_dram_clock_change_vblank;
2197 + } else {
2198 + *DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
2199 + }
2200 +
2201 +- *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
2202 +- *StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
2203 +- *Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
2204 +- *Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
2205 ++ *StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
2206 ++ *StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
2207 ++ *Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
2208 ++ *Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
2209 +
2210 + #ifdef __DML_VBA_DEBUG__
2211 + dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark);
2212 +diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
2213 +index 64a38f08f4974..5a51be753e87f 100644
2214 +--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
2215 ++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
2216 +@@ -1603,6 +1603,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
2217 + struct fixed31_32 lut2;
2218 + struct fixed31_32 delta_lut;
2219 + struct fixed31_32 delta_index;
2220 ++ const struct fixed31_32 one = dc_fixpt_from_int(1);
2221 +
2222 + i = 0;
2223 + /* fixed_pt library has problems handling too small values */
2224 +@@ -1631,6 +1632,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
2225 + } else
2226 + hw_x = coordinates_x[i].x;
2227 +
2228 ++ if (dc_fixpt_le(one, hw_x))
2229 ++ hw_x = one;
2230 ++
2231 + norm_x = dc_fixpt_mul(norm_factor, hw_x);
2232 + index = dc_fixpt_floor(norm_x);
2233 + if (index < 0 || index > 255)
2234 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2235 +index 32bb6b1d95261..d13e455c8827e 100644
2236 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2237 ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2238 +@@ -368,6 +368,17 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
2239 + smu_baco->platform_support =
2240 + (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
2241 + false;
2242 ++
2243 ++ /*
2244 ++ * Disable BACO entry/exit completely on below SKUs to
2245 ++ * avoid hardware intermittent failures.
2246 ++ */
2247 ++ if (((adev->pdev->device == 0x73A1) &&
2248 ++ (adev->pdev->revision == 0x00)) ||
2249 ++ ((adev->pdev->device == 0x73BF) &&
2250 ++ (adev->pdev->revision == 0xCF)))
2251 ++ smu_baco->platform_support = false;
2252 ++
2253 + }
2254 + }
2255 +
2256 +diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
2257 +index dd32b484dd825..ce96234f3df20 100644
2258 +--- a/drivers/gpu/drm/gma500/cdv_device.c
2259 ++++ b/drivers/gpu/drm/gma500/cdv_device.c
2260 +@@ -581,11 +581,9 @@ static const struct psb_offset cdv_regmap[2] = {
2261 + static int cdv_chip_setup(struct drm_device *dev)
2262 + {
2263 + struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
2264 +- struct pci_dev *pdev = to_pci_dev(dev->dev);
2265 + INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
2266 +
2267 +- if (pci_enable_msi(pdev))
2268 +- dev_warn(dev->dev, "Enabling MSI failed!\n");
2269 ++ dev_priv->use_msi = true;
2270 + dev_priv->regmap = cdv_regmap;
2271 + gma_get_core_freq(dev);
2272 + psb_intel_opregion_init(dev);
2273 +diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
2274 +index dffe37490206d..4b7627a726378 100644
2275 +--- a/drivers/gpu/drm/gma500/gem.c
2276 ++++ b/drivers/gpu/drm/gma500/gem.c
2277 +@@ -112,12 +112,12 @@ static void psb_gem_free_object(struct drm_gem_object *obj)
2278 + {
2279 + struct psb_gem_object *pobj = to_psb_gem_object(obj);
2280 +
2281 +- drm_gem_object_release(obj);
2282 +-
2283 + /* Undo the mmap pin if we are destroying the object */
2284 + if (pobj->mmapping)
2285 + psb_gem_unpin(pobj);
2286 +
2287 ++ drm_gem_object_release(obj);
2288 ++
2289 + WARN_ON(pobj->in_gart && !pobj->stolen);
2290 +
2291 + release_resource(&pobj->resource);
2292 +diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
2293 +index 34ec3fca09ba6..12287c9bb4d80 100644
2294 +--- a/drivers/gpu/drm/gma500/gma_display.c
2295 ++++ b/drivers/gpu/drm/gma500/gma_display.c
2296 +@@ -531,15 +531,18 @@ int gma_crtc_page_flip(struct drm_crtc *crtc,
2297 + WARN_ON(drm_crtc_vblank_get(crtc) != 0);
2298 +
2299 + gma_crtc->page_flip_event = event;
2300 ++ spin_unlock_irqrestore(&dev->event_lock, flags);
2301 +
2302 + /* Call this locked if we want an event at vblank interrupt. */
2303 + ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
2304 + if (ret) {
2305 +- gma_crtc->page_flip_event = NULL;
2306 +- drm_crtc_vblank_put(crtc);
2307 ++ spin_lock_irqsave(&dev->event_lock, flags);
2308 ++ if (gma_crtc->page_flip_event) {
2309 ++ gma_crtc->page_flip_event = NULL;
2310 ++ drm_crtc_vblank_put(crtc);
2311 ++ }
2312 ++ spin_unlock_irqrestore(&dev->event_lock, flags);
2313 + }
2314 +-
2315 +- spin_unlock_irqrestore(&dev->event_lock, flags);
2316 + } else {
2317 + ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
2318 + }
2319 +diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
2320 +index 5923a9c893122..f90e628cb482c 100644
2321 +--- a/drivers/gpu/drm/gma500/oaktrail_device.c
2322 ++++ b/drivers/gpu/drm/gma500/oaktrail_device.c
2323 +@@ -501,12 +501,9 @@ static const struct psb_offset oaktrail_regmap[2] = {
2324 + static int oaktrail_chip_setup(struct drm_device *dev)
2325 + {
2326 + struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
2327 +- struct pci_dev *pdev = to_pci_dev(dev->dev);
2328 + int ret;
2329 +
2330 +- if (pci_enable_msi(pdev))
2331 +- dev_warn(dev->dev, "Enabling MSI failed!\n");
2332 +-
2333 ++ dev_priv->use_msi = true;
2334 + dev_priv->regmap = oaktrail_regmap;
2335 +
2336 + ret = mid_chip_setup(dev);
2337 +diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
2338 +index b91de6d36e412..66873085d4505 100644
2339 +--- a/drivers/gpu/drm/gma500/power.c
2340 ++++ b/drivers/gpu/drm/gma500/power.c
2341 +@@ -139,8 +139,6 @@ static void gma_suspend_pci(struct pci_dev *pdev)
2342 + dev_priv->regs.saveBSM = bsm;
2343 + pci_read_config_dword(pdev, 0xFC, &vbt);
2344 + dev_priv->regs.saveVBT = vbt;
2345 +- pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
2346 +- pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
2347 +
2348 + pci_disable_device(pdev);
2349 + pci_set_power_state(pdev, PCI_D3hot);
2350 +@@ -168,9 +166,6 @@ static bool gma_resume_pci(struct pci_dev *pdev)
2351 + pci_restore_state(pdev);
2352 + pci_write_config_dword(pdev, 0x5c, dev_priv->regs.saveBSM);
2353 + pci_write_config_dword(pdev, 0xFC, dev_priv->regs.saveVBT);
2354 +- /* restoring MSI address and data in PCIx space */
2355 +- pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
2356 +- pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
2357 + ret = pci_enable_device(pdev);
2358 +
2359 + if (ret != 0)
2360 +@@ -223,8 +218,7 @@ int gma_power_resume(struct device *_dev)
2361 + mutex_lock(&power_mutex);
2362 + gma_resume_pci(pdev);
2363 + gma_resume_display(pdev);
2364 +- gma_irq_preinstall(dev);
2365 +- gma_irq_postinstall(dev);
2366 ++ gma_irq_install(dev);
2367 + mutex_unlock(&power_mutex);
2368 + return 0;
2369 + }
2370 +diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
2371 +index 1d8744f3e7020..54e756b486060 100644
2372 +--- a/drivers/gpu/drm/gma500/psb_drv.c
2373 ++++ b/drivers/gpu/drm/gma500/psb_drv.c
2374 +@@ -383,7 +383,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
2375 + PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
2376 + spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
2377 +
2378 +- gma_irq_install(dev, pdev->irq);
2379 ++ gma_irq_install(dev);
2380 +
2381 + dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
2382 +
2383 +diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
2384 +index 0ddfec1a0851d..4c3fc5eaf6ad5 100644
2385 +--- a/drivers/gpu/drm/gma500/psb_drv.h
2386 ++++ b/drivers/gpu/drm/gma500/psb_drv.h
2387 +@@ -490,6 +490,7 @@ struct drm_psb_private {
2388 + int rpm_enabled;
2389 +
2390 + /* MID specific */
2391 ++ bool use_msi;
2392 + bool has_gct;
2393 + struct oaktrail_gct_data gct_data;
2394 +
2395 +@@ -499,10 +500,6 @@ struct drm_psb_private {
2396 + /* Register state */
2397 + struct psb_save_area regs;
2398 +
2399 +- /* MSI reg save */
2400 +- uint32_t msi_addr;
2401 +- uint32_t msi_data;
2402 +-
2403 + /* Hotplug handling */
2404 + struct work_struct hotplug_work;
2405 +
2406 +diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
2407 +index e6e6d61bbeab6..038f18ed0a95e 100644
2408 +--- a/drivers/gpu/drm/gma500/psb_irq.c
2409 ++++ b/drivers/gpu/drm/gma500/psb_irq.c
2410 +@@ -316,17 +316,24 @@ void gma_irq_postinstall(struct drm_device *dev)
2411 + spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
2412 + }
2413 +
2414 +-int gma_irq_install(struct drm_device *dev, unsigned int irq)
2415 ++int gma_irq_install(struct drm_device *dev)
2416 + {
2417 ++ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
2418 ++ struct pci_dev *pdev = to_pci_dev(dev->dev);
2419 + int ret;
2420 +
2421 +- if (irq == IRQ_NOTCONNECTED)
2422 ++ if (dev_priv->use_msi && pci_enable_msi(pdev)) {
2423 ++ dev_warn(dev->dev, "Enabling MSI failed!\n");
2424 ++ dev_priv->use_msi = false;
2425 ++ }
2426 ++
2427 ++ if (pdev->irq == IRQ_NOTCONNECTED)
2428 + return -ENOTCONN;
2429 +
2430 + gma_irq_preinstall(dev);
2431 +
2432 + /* PCI devices require shared interrupts. */
2433 +- ret = request_irq(irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
2434 ++ ret = request_irq(pdev->irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
2435 + if (ret)
2436 + return ret;
2437 +
2438 +@@ -369,6 +376,8 @@ void gma_irq_uninstall(struct drm_device *dev)
2439 + spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
2440 +
2441 + free_irq(pdev->irq, dev);
2442 ++ if (dev_priv->use_msi)
2443 ++ pci_disable_msi(pdev);
2444 + }
2445 +
2446 + int gma_crtc_enable_vblank(struct drm_crtc *crtc)
2447 +diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
2448 +index b51e395194fff..7648f69824a5d 100644
2449 +--- a/drivers/gpu/drm/gma500/psb_irq.h
2450 ++++ b/drivers/gpu/drm/gma500/psb_irq.h
2451 +@@ -17,7 +17,7 @@ struct drm_device;
2452 +
2453 + void gma_irq_preinstall(struct drm_device *dev);
2454 + void gma_irq_postinstall(struct drm_device *dev);
2455 +-int gma_irq_install(struct drm_device *dev, unsigned int irq);
2456 ++int gma_irq_install(struct drm_device *dev);
2457 + void gma_irq_uninstall(struct drm_device *dev);
2458 +
2459 + int gma_crtc_enable_vblank(struct drm_crtc *crtc);
2460 +diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
2461 +index 073adfe438ddd..4e41c144a2902 100644
2462 +--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
2463 ++++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
2464 +@@ -2,6 +2,7 @@
2465 + config DRM_HISI_HIBMC
2466 + tristate "DRM Support for Hisilicon Hibmc"
2467 + depends on DRM && PCI && (ARM64 || COMPILE_TEST)
2468 ++ depends on MMU
2469 + select DRM_KMS_HELPER
2470 + select DRM_VRAM_HELPER
2471 + select DRM_TTM
2472 +diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
2473 +index 5a957acebfd62..82ad8fe7440c0 100644
2474 +--- a/drivers/gpu/drm/i915/display/g4x_dp.c
2475 ++++ b/drivers/gpu/drm/i915/display/g4x_dp.c
2476 +@@ -395,26 +395,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
2477 + intel_dotclock_calculate(pipe_config->port_clock,
2478 + &pipe_config->dp_m_n);
2479 +
2480 +- if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
2481 +- pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
2482 +- /*
2483 +- * This is a big fat ugly hack.
2484 +- *
2485 +- * Some machines in UEFI boot mode provide us a VBT that has 18
2486 +- * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2487 +- * unknown we fail to light up. Yet the same BIOS boots up with
2488 +- * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2489 +- * max, not what it tells us to use.
2490 +- *
2491 +- * Note: This will still be broken if the eDP panel is not lit
2492 +- * up by the BIOS, and thus we can't get the mode at module
2493 +- * load.
2494 +- */
2495 +- drm_dbg_kms(&dev_priv->drm,
2496 +- "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2497 +- pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
2498 +- dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
2499 +- }
2500 ++ if (intel_dp_is_edp(intel_dp))
2501 ++ intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp);
2502 + }
2503 +
2504 + static void
2505 +diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
2506 +index 5508ebb9eb434..f416499dad6f3 100644
2507 +--- a/drivers/gpu/drm/i915/display/icl_dsi.c
2508 ++++ b/drivers/gpu/drm/i915/display/icl_dsi.c
2509 +@@ -1864,7 +1864,8 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
2510 + {
2511 + struct drm_device *dev = intel_dsi->base.base.dev;
2512 + struct drm_i915_private *dev_priv = to_i915(dev);
2513 +- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
2514 ++ struct intel_connector *connector = intel_dsi->attached_connector;
2515 ++ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
2516 + u32 tlpx_ns;
2517 + u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
2518 + u32 ths_prepare_ns, tclk_trail_ns;
2519 +@@ -2051,6 +2052,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
2520 + /* attach connector to encoder */
2521 + intel_connector_attach_encoder(intel_connector, encoder);
2522 +
2523 ++ intel_bios_init_panel(dev_priv, &intel_connector->panel);
2524 ++
2525 + mutex_lock(&dev->mode_config.mutex);
2526 + intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
2527 + mutex_unlock(&dev->mode_config.mutex);
2528 +@@ -2064,13 +2067,20 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
2529 +
2530 + intel_backlight_setup(intel_connector, INVALID_PIPE);
2531 +
2532 +- if (dev_priv->vbt.dsi.config->dual_link)
2533 ++ if (intel_connector->panel.vbt.dsi.config->dual_link)
2534 + intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
2535 + else
2536 + intel_dsi->ports = BIT(port);
2537 +
2538 +- intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
2539 +- intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;
2540 ++ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
2541 ++ intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
2542 ++
2543 ++ intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports;
2544 ++
2545 ++ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
2546 ++ intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
2547 ++
2548 ++ intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports;
2549 +
2550 + for_each_dsi_port(port, intel_dsi->ports) {
2551 + struct intel_dsi_host *host;
2552 +diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
2553 +index 3e200a2e4ba29..5182bb66bd289 100644
2554 +--- a/drivers/gpu/drm/i915/display/intel_backlight.c
2555 ++++ b/drivers/gpu/drm/i915/display/intel_backlight.c
2556 +@@ -1158,9 +1158,10 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
2557 + return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
2558 + }
2559 +
2560 +-static u16 get_vbt_pwm_freq(struct drm_i915_private *dev_priv)
2561 ++static u16 get_vbt_pwm_freq(struct intel_connector *connector)
2562 + {
2563 +- u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
2564 ++ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2565 ++ u16 pwm_freq_hz = connector->panel.vbt.backlight.pwm_freq_hz;
2566 +
2567 + if (pwm_freq_hz) {
2568 + drm_dbg_kms(&dev_priv->drm,
2569 +@@ -1180,7 +1181,7 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
2570 + {
2571 + struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2572 + struct intel_panel *panel = &connector->panel;
2573 +- u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv);
2574 ++ u16 pwm_freq_hz = get_vbt_pwm_freq(connector);
2575 + u32 pwm;
2576 +
2577 + if (!panel->backlight.pwm_funcs->hz_to_pwm) {
2578 +@@ -1217,11 +1218,11 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
2579 + * against this by letting the minimum be at most (arbitrarily chosen)
2580 + * 25% of the max.
2581 + */
2582 +- min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
2583 +- if (min != dev_priv->vbt.backlight.min_brightness) {
2584 ++ min = clamp_t(int, connector->panel.vbt.backlight.min_brightness, 0, 64);
2585 ++ if (min != connector->panel.vbt.backlight.min_brightness) {
2586 + drm_dbg_kms(&dev_priv->drm,
2587 + "clamping VBT min backlight %d/255 to %d/255\n",
2588 +- dev_priv->vbt.backlight.min_brightness, min);
2589 ++ connector->panel.vbt.backlight.min_brightness, min);
2590 + }
2591 +
2592 + /* vbt value is a coefficient in range [0..255] */
2593 +@@ -1410,7 +1411,7 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
2594 + struct intel_panel *panel = &connector->panel;
2595 + u32 pwm_ctl, val;
2596 +
2597 +- panel->backlight.controller = dev_priv->vbt.backlight.controller;
2598 ++ panel->backlight.controller = connector->panel.vbt.backlight.controller;
2599 +
2600 + pwm_ctl = intel_de_read(dev_priv,
2601 + BXT_BLC_PWM_CTL(panel->backlight.controller));
2602 +@@ -1483,7 +1484,7 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
2603 + u32 level;
2604 +
2605 + /* Get the right PWM chip for DSI backlight according to VBT */
2606 +- if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
2607 ++ if (connector->panel.vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
2608 + panel->backlight.pwm = pwm_get(dev->dev, "pwm_pmic_backlight");
2609 + desc = "PMIC";
2610 + } else {
2611 +@@ -1512,11 +1513,11 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
2612 +
2613 + drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
2614 + NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
2615 +- get_vbt_pwm_freq(dev_priv), level);
2616 ++ get_vbt_pwm_freq(connector), level);
2617 + } else {
2618 + /* Set period from VBT frequency, leave other settings at 0. */
2619 + panel->backlight.pwm_state.period =
2620 +- NSEC_PER_SEC / get_vbt_pwm_freq(dev_priv);
2621 ++ NSEC_PER_SEC / get_vbt_pwm_freq(connector);
2622 + }
2623 +
2624 + drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n",
2625 +@@ -1601,7 +1602,7 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
2626 + struct intel_panel *panel = &connector->panel;
2627 + int ret;
2628 +
2629 +- if (!dev_priv->vbt.backlight.present) {
2630 ++ if (!connector->panel.vbt.backlight.present) {
2631 + if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
2632 + drm_dbg_kms(&dev_priv->drm,
2633 + "no backlight present per VBT, but present per quirk\n");
2634 +diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
2635 +index 91caf4523b34d..b5de61fe9cc67 100644
2636 +--- a/drivers/gpu/drm/i915/display/intel_bios.c
2637 ++++ b/drivers/gpu/drm/i915/display/intel_bios.c
2638 +@@ -682,7 +682,8 @@ static int get_panel_type(struct drm_i915_private *i915)
2639 +
2640 + /* Parse general panel options */
2641 + static void
2642 +-parse_panel_options(struct drm_i915_private *i915)
2643 ++parse_panel_options(struct drm_i915_private *i915,
2644 ++ struct intel_panel *panel)
2645 + {
2646 + const struct bdb_lvds_options *lvds_options;
2647 + int panel_type;
2648 +@@ -692,11 +693,11 @@ parse_panel_options(struct drm_i915_private *i915)
2649 + if (!lvds_options)
2650 + return;
2651 +
2652 +- i915->vbt.lvds_dither = lvds_options->pixel_dither;
2653 ++ panel->vbt.lvds_dither = lvds_options->pixel_dither;
2654 +
2655 + panel_type = get_panel_type(i915);
2656 +
2657 +- i915->vbt.panel_type = panel_type;
2658 ++ panel->vbt.panel_type = panel_type;
2659 +
2660 + drrs_mode = (lvds_options->dps_panel_type_bits
2661 + >> (panel_type * 2)) & MODE_MASK;
2662 +@@ -707,16 +708,16 @@ parse_panel_options(struct drm_i915_private *i915)
2663 + */
2664 + switch (drrs_mode) {
2665 + case 0:
2666 +- i915->vbt.drrs_type = DRRS_TYPE_STATIC;
2667 ++ panel->vbt.drrs_type = DRRS_TYPE_STATIC;
2668 + drm_dbg_kms(&i915->drm, "DRRS supported mode is static\n");
2669 + break;
2670 + case 2:
2671 +- i915->vbt.drrs_type = DRRS_TYPE_SEAMLESS;
2672 ++ panel->vbt.drrs_type = DRRS_TYPE_SEAMLESS;
2673 + drm_dbg_kms(&i915->drm,
2674 + "DRRS supported mode is seamless\n");
2675 + break;
2676 + default:
2677 +- i915->vbt.drrs_type = DRRS_TYPE_NONE;
2678 ++ panel->vbt.drrs_type = DRRS_TYPE_NONE;
2679 + drm_dbg_kms(&i915->drm,
2680 + "DRRS not supported (VBT input)\n");
2681 + break;
2682 +@@ -725,13 +726,14 @@ parse_panel_options(struct drm_i915_private *i915)
2683 +
2684 + static void
2685 + parse_lfp_panel_dtd(struct drm_i915_private *i915,
2686 ++ struct intel_panel *panel,
2687 + const struct bdb_lvds_lfp_data *lvds_lfp_data,
2688 + const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs)
2689 + {
2690 + const struct lvds_dvo_timing *panel_dvo_timing;
2691 + const struct lvds_fp_timing *fp_timing;
2692 + struct drm_display_mode *panel_fixed_mode;
2693 +- int panel_type = i915->vbt.panel_type;
2694 ++ int panel_type = panel->vbt.panel_type;
2695 +
2696 + panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
2697 + lvds_lfp_data_ptrs,
2698 +@@ -743,7 +745,7 @@ parse_lfp_panel_dtd(struct drm_i915_private *i915,
2699 +
2700 + fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
2701 +
2702 +- i915->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
2703 ++ panel->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
2704 +
2705 + drm_dbg_kms(&i915->drm,
2706 + "Found panel mode in BIOS VBT legacy lfp table: " DRM_MODE_FMT "\n",
2707 +@@ -756,20 +758,21 @@ parse_lfp_panel_dtd(struct drm_i915_private *i915,
2708 + /* check the resolution, just to be sure */
2709 + if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
2710 + fp_timing->y_res == panel_fixed_mode->vdisplay) {
2711 +- i915->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
2712 ++ panel->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
2713 + drm_dbg_kms(&i915->drm,
2714 + "VBT initial LVDS value %x\n",
2715 +- i915->vbt.bios_lvds_val);
2716 ++ panel->vbt.bios_lvds_val);
2717 + }
2718 + }
2719 +
2720 + static void
2721 +-parse_lfp_data(struct drm_i915_private *i915)
2722 ++parse_lfp_data(struct drm_i915_private *i915,
2723 ++ struct intel_panel *panel)
2724 + {
2725 + const struct bdb_lvds_lfp_data *data;
2726 + const struct bdb_lvds_lfp_data_tail *tail;
2727 + const struct bdb_lvds_lfp_data_ptrs *ptrs;
2728 +- int panel_type = i915->vbt.panel_type;
2729 ++ int panel_type = panel->vbt.panel_type;
2730 +
2731 + ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
2732 + if (!ptrs)
2733 +@@ -779,24 +782,25 @@ parse_lfp_data(struct drm_i915_private *i915)
2734 + if (!data)
2735 + return;
2736 +
2737 +- if (!i915->vbt.lfp_lvds_vbt_mode)
2738 +- parse_lfp_panel_dtd(i915, data, ptrs);
2739 ++ if (!panel->vbt.lfp_lvds_vbt_mode)
2740 ++ parse_lfp_panel_dtd(i915, panel, data, ptrs);
2741 +
2742 + tail = get_lfp_data_tail(data, ptrs);
2743 + if (!tail)
2744 + return;
2745 +
2746 + if (i915->vbt.version >= 188) {
2747 +- i915->vbt.seamless_drrs_min_refresh_rate =
2748 ++ panel->vbt.seamless_drrs_min_refresh_rate =
2749 + tail->seamless_drrs_min_refresh_rate[panel_type];
2750 + drm_dbg_kms(&i915->drm,
2751 + "Seamless DRRS min refresh rate: %d Hz\n",
2752 +- i915->vbt.seamless_drrs_min_refresh_rate);
2753 ++ panel->vbt.seamless_drrs_min_refresh_rate);
2754 + }
2755 + }
2756 +
2757 + static void
2758 +-parse_generic_dtd(struct drm_i915_private *i915)
2759 ++parse_generic_dtd(struct drm_i915_private *i915,
2760 ++ struct intel_panel *panel)
2761 + {
2762 + const struct bdb_generic_dtd *generic_dtd;
2763 + const struct generic_dtd_entry *dtd;
2764 +@@ -831,14 +835,14 @@ parse_generic_dtd(struct drm_i915_private *i915)
2765 +
2766 + num_dtd = (get_blocksize(generic_dtd) -
2767 + sizeof(struct bdb_generic_dtd)) / generic_dtd->gdtd_size;
2768 +- if (i915->vbt.panel_type >= num_dtd) {
2769 ++ if (panel->vbt.panel_type >= num_dtd) {
2770 + drm_err(&i915->drm,
2771 + "Panel type %d not found in table of %d DTD's\n",
2772 +- i915->vbt.panel_type, num_dtd);
2773 ++ panel->vbt.panel_type, num_dtd);
2774 + return;
2775 + }
2776 +
2777 +- dtd = &generic_dtd->dtd[i915->vbt.panel_type];
2778 ++ dtd = &generic_dtd->dtd[panel->vbt.panel_type];
2779 +
2780 + panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
2781 + if (!panel_fixed_mode)
2782 +@@ -881,15 +885,16 @@ parse_generic_dtd(struct drm_i915_private *i915)
2783 + "Found panel mode in BIOS VBT generic dtd table: " DRM_MODE_FMT "\n",
2784 + DRM_MODE_ARG(panel_fixed_mode));
2785 +
2786 +- i915->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
2787 ++ panel->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
2788 + }
2789 +
2790 + static void
2791 +-parse_lfp_backlight(struct drm_i915_private *i915)
2792 ++parse_lfp_backlight(struct drm_i915_private *i915,
2793 ++ struct intel_panel *panel)
2794 + {
2795 + const struct bdb_lfp_backlight_data *backlight_data;
2796 + const struct lfp_backlight_data_entry *entry;
2797 +- int panel_type = i915->vbt.panel_type;
2798 ++ int panel_type = panel->vbt.panel_type;
2799 + u16 level;
2800 +
2801 + backlight_data = find_section(i915, BDB_LVDS_BACKLIGHT);
2802 +@@ -905,15 +910,15 @@ parse_lfp_backlight(struct drm_i915_private *i915)
2803 +
2804 + entry = &backlight_data->data[panel_type];
2805 +
2806 +- i915->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
2807 +- if (!i915->vbt.backlight.present) {
2808 ++ panel->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
2809 ++ if (!panel->vbt.backlight.present) {
2810 + drm_dbg_kms(&i915->drm,
2811 + "PWM backlight not present in VBT (type %u)\n",
2812 + entry->type);
2813 + return;
2814 + }
2815 +
2816 +- i915->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
2817 ++ panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
2818 + if (i915->vbt.version >= 191) {
2819 + size_t exp_size;
2820 +
2821 +@@ -928,13 +933,13 @@ parse_lfp_backlight(struct drm_i915_private *i915)
2822 + const struct lfp_backlight_control_method *method;
2823 +
2824 + method = &backlight_data->backlight_control[panel_type];
2825 +- i915->vbt.backlight.type = method->type;
2826 +- i915->vbt.backlight.controller = method->controller;
2827 ++ panel->vbt.backlight.type = method->type;
2828 ++ panel->vbt.backlight.controller = method->controller;
2829 + }
2830 + }
2831 +
2832 +- i915->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
2833 +- i915->vbt.backlight.active_low_pwm = entry->active_low_pwm;
2834 ++ panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
2835 ++ panel->vbt.backlight.active_low_pwm = entry->active_low_pwm;
2836 +
2837 + if (i915->vbt.version >= 234) {
2838 + u16 min_level;
2839 +@@ -955,28 +960,29 @@ parse_lfp_backlight(struct drm_i915_private *i915)
2840 + drm_warn(&i915->drm, "Brightness min level > 255\n");
2841 + level = 255;
2842 + }
2843 +- i915->vbt.backlight.min_brightness = min_level;
2844 ++ panel->vbt.backlight.min_brightness = min_level;
2845 +
2846 +- i915->vbt.backlight.brightness_precision_bits =
2847 ++ panel->vbt.backlight.brightness_precision_bits =
2848 + backlight_data->brightness_precision_bits[panel_type];
2849 + } else {
2850 + level = backlight_data->level[panel_type];
2851 +- i915->vbt.backlight.min_brightness = entry->min_brightness;
2852 ++ panel->vbt.backlight.min_brightness = entry->min_brightness;
2853 + }
2854 +
2855 + drm_dbg_kms(&i915->drm,
2856 + "VBT backlight PWM modulation frequency %u Hz, "
2857 + "active %s, min brightness %u, level %u, controller %u\n",
2858 +- i915->vbt.backlight.pwm_freq_hz,
2859 +- i915->vbt.backlight.active_low_pwm ? "low" : "high",
2860 +- i915->vbt.backlight.min_brightness,
2861 ++ panel->vbt.backlight.pwm_freq_hz,
2862 ++ panel->vbt.backlight.active_low_pwm ? "low" : "high",
2863 ++ panel->vbt.backlight.min_brightness,
2864 + level,
2865 +- i915->vbt.backlight.controller);
2866 ++ panel->vbt.backlight.controller);
2867 + }
2868 +
2869 + /* Try to find sdvo panel data */
2870 + static void
2871 +-parse_sdvo_panel_data(struct drm_i915_private *i915)
2872 ++parse_sdvo_panel_data(struct drm_i915_private *i915,
2873 ++ struct intel_panel *panel)
2874 + {
2875 + const struct bdb_sdvo_panel_dtds *dtds;
2876 + struct drm_display_mode *panel_fixed_mode;
2877 +@@ -1009,7 +1015,7 @@ parse_sdvo_panel_data(struct drm_i915_private *i915)
2878 +
2879 + fill_detail_timing_data(panel_fixed_mode, &dtds->dtds[index]);
2880 +
2881 +- i915->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
2882 ++ panel->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
2883 +
2884 + drm_dbg_kms(&i915->drm,
2885 + "Found SDVO panel mode in BIOS VBT tables: " DRM_MODE_FMT "\n",
2886 +@@ -1188,6 +1194,17 @@ parse_driver_features(struct drm_i915_private *i915)
2887 + driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
2888 + i915->vbt.int_lvds_support = 0;
2889 + }
2890 ++}
2891 ++
2892 ++static void
2893 ++parse_panel_driver_features(struct drm_i915_private *i915,
2894 ++ struct intel_panel *panel)
2895 ++{
2896 ++ const struct bdb_driver_features *driver;
2897 ++
2898 ++ driver = find_section(i915, BDB_DRIVER_FEATURES);
2899 ++ if (!driver)
2900 ++ return;
2901 +
2902 + if (i915->vbt.version < 228) {
2903 + drm_dbg_kms(&i915->drm, "DRRS State Enabled:%d\n",
2904 +@@ -1199,17 +1216,18 @@ parse_driver_features(struct drm_i915_private *i915)
2905 + * driver->drrs_enabled=false
2906 + */
2907 + if (!driver->drrs_enabled)
2908 +- i915->vbt.drrs_type = DRRS_TYPE_NONE;
2909 ++ panel->vbt.drrs_type = DRRS_TYPE_NONE;
2910 +
2911 +- i915->vbt.psr.enable = driver->psr_enabled;
2912 ++ panel->vbt.psr.enable = driver->psr_enabled;
2913 + }
2914 + }
2915 +
2916 + static void
2917 +-parse_power_conservation_features(struct drm_i915_private *i915)
2918 ++parse_power_conservation_features(struct drm_i915_private *i915,
2919 ++ struct intel_panel *panel)
2920 + {
2921 + const struct bdb_lfp_power *power;
2922 +- u8 panel_type = i915->vbt.panel_type;
2923 ++ u8 panel_type = panel->vbt.panel_type;
2924 +
2925 + if (i915->vbt.version < 228)
2926 + return;
2927 +@@ -1218,7 +1236,7 @@ parse_power_conservation_features(struct drm_i915_private *i915)
2928 + if (!power)
2929 + return;
2930 +
2931 +- i915->vbt.psr.enable = power->psr & BIT(panel_type);
2932 ++ panel->vbt.psr.enable = power->psr & BIT(panel_type);
2933 +
2934 + /*
2935 + * If DRRS is not supported, drrs_type has to be set to 0.
2936 +@@ -1227,19 +1245,20 @@ parse_power_conservation_features(struct drm_i915_private *i915)
2937 + * power->drrs & BIT(panel_type)=false
2938 + */
2939 + if (!(power->drrs & BIT(panel_type)))
2940 +- i915->vbt.drrs_type = DRRS_TYPE_NONE;
2941 ++ panel->vbt.drrs_type = DRRS_TYPE_NONE;
2942 +
2943 + if (i915->vbt.version >= 232)
2944 +- i915->vbt.edp.hobl = power->hobl & BIT(panel_type);
2945 ++ panel->vbt.edp.hobl = power->hobl & BIT(panel_type);
2946 + }
2947 +
2948 + static void
2949 +-parse_edp(struct drm_i915_private *i915)
2950 ++parse_edp(struct drm_i915_private *i915,
2951 ++ struct intel_panel *panel)
2952 + {
2953 + const struct bdb_edp *edp;
2954 + const struct edp_power_seq *edp_pps;
2955 + const struct edp_fast_link_params *edp_link_params;
2956 +- int panel_type = i915->vbt.panel_type;
2957 ++ int panel_type = panel->vbt.panel_type;
2958 +
2959 + edp = find_section(i915, BDB_EDP);
2960 + if (!edp)
2961 +@@ -1247,13 +1266,13 @@ parse_edp(struct drm_i915_private *i915)
2962 +
2963 + switch ((edp->color_depth >> (panel_type * 2)) & 3) {
2964 + case EDP_18BPP:
2965 +- i915->vbt.edp.bpp = 18;
2966 ++ panel->vbt.edp.bpp = 18;
2967 + break;
2968 + case EDP_24BPP:
2969 +- i915->vbt.edp.bpp = 24;
2970 ++ panel->vbt.edp.bpp = 24;
2971 + break;
2972 + case EDP_30BPP:
2973 +- i915->vbt.edp.bpp = 30;
2974 ++ panel->vbt.edp.bpp = 30;
2975 + break;
2976 + }
2977 +
2978 +@@ -1261,14 +1280,14 @@ parse_edp(struct drm_i915_private *i915)
2979 + edp_pps = &edp->power_seqs[panel_type];
2980 + edp_link_params = &edp->fast_link_params[panel_type];
2981 +
2982 +- i915->vbt.edp.pps = *edp_pps;
2983 ++ panel->vbt.edp.pps = *edp_pps;
2984 +
2985 + switch (edp_link_params->rate) {
2986 + case EDP_RATE_1_62:
2987 +- i915->vbt.edp.rate = DP_LINK_BW_1_62;
2988 ++ panel->vbt.edp.rate = DP_LINK_BW_1_62;
2989 + break;
2990 + case EDP_RATE_2_7:
2991 +- i915->vbt.edp.rate = DP_LINK_BW_2_7;
2992 ++ panel->vbt.edp.rate = DP_LINK_BW_2_7;
2993 + break;
2994 + default:
2995 + drm_dbg_kms(&i915->drm,
2996 +@@ -1279,13 +1298,13 @@ parse_edp(struct drm_i915_private *i915)
2997 +
2998 + switch (edp_link_params->lanes) {
2999 + case EDP_LANE_1:
3000 +- i915->vbt.edp.lanes = 1;
3001 ++ panel->vbt.edp.lanes = 1;
3002 + break;
3003 + case EDP_LANE_2:
3004 +- i915->vbt.edp.lanes = 2;
3005 ++ panel->vbt.edp.lanes = 2;
3006 + break;
3007 + case EDP_LANE_4:
3008 +- i915->vbt.edp.lanes = 4;
3009 ++ panel->vbt.edp.lanes = 4;
3010 + break;
3011 + default:
3012 + drm_dbg_kms(&i915->drm,
3013 +@@ -1296,16 +1315,16 @@ parse_edp(struct drm_i915_private *i915)
3014 +
3015 + switch (edp_link_params->preemphasis) {
3016 + case EDP_PREEMPHASIS_NONE:
3017 +- i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
3018 ++ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
3019 + break;
3020 + case EDP_PREEMPHASIS_3_5dB:
3021 +- i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
3022 ++ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
3023 + break;
3024 + case EDP_PREEMPHASIS_6dB:
3025 +- i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
3026 ++ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
3027 + break;
3028 + case EDP_PREEMPHASIS_9_5dB:
3029 +- i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
3030 ++ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
3031 + break;
3032 + default:
3033 + drm_dbg_kms(&i915->drm,
3034 +@@ -1316,16 +1335,16 @@ parse_edp(struct drm_i915_private *i915)
3035 +
3036 + switch (edp_link_params->vswing) {
3037 + case EDP_VSWING_0_4V:
3038 +- i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
3039 ++ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
3040 + break;
3041 + case EDP_VSWING_0_6V:
3042 +- i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
3043 ++ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
3044 + break;
3045 + case EDP_VSWING_0_8V:
3046 +- i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3047 ++ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3048 + break;
3049 + case EDP_VSWING_1_2V:
3050 +- i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3051 ++ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3052 + break;
3053 + default:
3054 + drm_dbg_kms(&i915->drm,
3055 +@@ -1339,24 +1358,25 @@ parse_edp(struct drm_i915_private *i915)
3056 +
3057 + /* Don't read from VBT if module parameter has valid value*/
3058 + if (i915->params.edp_vswing) {
3059 +- i915->vbt.edp.low_vswing =
3060 ++ panel->vbt.edp.low_vswing =
3061 + i915->params.edp_vswing == 1;
3062 + } else {
3063 + vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
3064 +- i915->vbt.edp.low_vswing = vswing == 0;
3065 ++ panel->vbt.edp.low_vswing = vswing == 0;
3066 + }
3067 + }
3068 +
3069 +- i915->vbt.edp.drrs_msa_timing_delay =
3070 ++ panel->vbt.edp.drrs_msa_timing_delay =
3071 + (edp->sdrrs_msa_timing_delay >> (panel_type * 2)) & 3;
3072 + }
3073 +
3074 + static void
3075 +-parse_psr(struct drm_i915_private *i915)
3076 ++parse_psr(struct drm_i915_private *i915,
3077 ++ struct intel_panel *panel)
3078 + {
3079 + const struct bdb_psr *psr;
3080 + const struct psr_table *psr_table;
3081 +- int panel_type = i915->vbt.panel_type;
3082 ++ int panel_type = panel->vbt.panel_type;
3083 +
3084 + psr = find_section(i915, BDB_PSR);
3085 + if (!psr) {
3086 +@@ -1366,11 +1386,11 @@ parse_psr(struct drm_i915_private *i915)
3087 +
3088 + psr_table = &psr->psr_table[panel_type];
3089 +
3090 +- i915->vbt.psr.full_link = psr_table->full_link;
3091 +- i915->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
3092 ++ panel->vbt.psr.full_link = psr_table->full_link;
3093 ++ panel->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
3094 +
3095 + /* Allowed VBT values goes from 0 to 15 */
3096 +- i915->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
3097 ++ panel->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
3098 + psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;
3099 +
3100 + /*
3101 +@@ -1381,13 +1401,13 @@ parse_psr(struct drm_i915_private *i915)
3102 + (DISPLAY_VER(i915) >= 9 && !IS_BROXTON(i915))) {
3103 + switch (psr_table->tp1_wakeup_time) {
3104 + case 0:
3105 +- i915->vbt.psr.tp1_wakeup_time_us = 500;
3106 ++ panel->vbt.psr.tp1_wakeup_time_us = 500;
3107 + break;
3108 + case 1:
3109 +- i915->vbt.psr.tp1_wakeup_time_us = 100;
3110 ++ panel->vbt.psr.tp1_wakeup_time_us = 100;
3111 + break;
3112 + case 3:
3113 +- i915->vbt.psr.tp1_wakeup_time_us = 0;
3114 ++ panel->vbt.psr.tp1_wakeup_time_us = 0;
3115 + break;
3116 + default:
3117 + drm_dbg_kms(&i915->drm,
3118 +@@ -1395,19 +1415,19 @@ parse_psr(struct drm_i915_private *i915)
3119 + psr_table->tp1_wakeup_time);
3120 + fallthrough;
3121 + case 2:
3122 +- i915->vbt.psr.tp1_wakeup_time_us = 2500;
3123 ++ panel->vbt.psr.tp1_wakeup_time_us = 2500;
3124 + break;
3125 + }
3126 +
3127 + switch (psr_table->tp2_tp3_wakeup_time) {
3128 + case 0:
3129 +- i915->vbt.psr.tp2_tp3_wakeup_time_us = 500;
3130 ++ panel->vbt.psr.tp2_tp3_wakeup_time_us = 500;
3131 + break;
3132 + case 1:
3133 +- i915->vbt.psr.tp2_tp3_wakeup_time_us = 100;
3134 ++ panel->vbt.psr.tp2_tp3_wakeup_time_us = 100;
3135 + break;
3136 + case 3:
3137 +- i915->vbt.psr.tp2_tp3_wakeup_time_us = 0;
3138 ++ panel->vbt.psr.tp2_tp3_wakeup_time_us = 0;
3139 + break;
3140 + default:
3141 + drm_dbg_kms(&i915->drm,
3142 +@@ -1415,12 +1435,12 @@ parse_psr(struct drm_i915_private *i915)
3143 + psr_table->tp2_tp3_wakeup_time);
3144 + fallthrough;
3145 + case 2:
3146 +- i915->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
3147 ++ panel->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
3148 + break;
3149 + }
3150 + } else {
3151 +- i915->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
3152 +- i915->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
3153 ++ panel->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
3154 ++ panel->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
3155 + }
3156 +
3157 + if (i915->vbt.version >= 226) {
3158 +@@ -1442,62 +1462,66 @@ parse_psr(struct drm_i915_private *i915)
3159 + wakeup_time = 2500;
3160 + break;
3161 + }
3162 +- i915->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time;
3163 ++ panel->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time;
3164 + } else {
3165 + /* Reusing PSR1 wakeup time for PSR2 in older VBTs */
3166 +- i915->vbt.psr.psr2_tp2_tp3_wakeup_time_us = i915->vbt.psr.tp2_tp3_wakeup_time_us;
3167 ++ panel->vbt.psr.psr2_tp2_tp3_wakeup_time_us = panel->vbt.psr.tp2_tp3_wakeup_time_us;
3168 + }
3169 + }
3170 +
3171 + static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
3172 +- u16 version, enum port port)
3173 ++ struct intel_panel *panel,
3174 ++ enum port port)
3175 + {
3176 +- if (!i915->vbt.dsi.config->dual_link || version < 197) {
3177 +- i915->vbt.dsi.bl_ports = BIT(port);
3178 +- if (i915->vbt.dsi.config->cabc_supported)
3179 +- i915->vbt.dsi.cabc_ports = BIT(port);
3180 ++ enum port port_bc = DISPLAY_VER(i915) >= 11 ? PORT_B : PORT_C;
3181 ++
3182 ++ if (!panel->vbt.dsi.config->dual_link || i915->vbt.version < 197) {
3183 ++ panel->vbt.dsi.bl_ports = BIT(port);
3184 ++ if (panel->vbt.dsi.config->cabc_supported)
3185 ++ panel->vbt.dsi.cabc_ports = BIT(port);
3186 +
3187 + return;
3188 + }
3189 +
3190 +- switch (i915->vbt.dsi.config->dl_dcs_backlight_ports) {
3191 ++ switch (panel->vbt.dsi.config->dl_dcs_backlight_ports) {
3192 + case DL_DCS_PORT_A:
3193 +- i915->vbt.dsi.bl_ports = BIT(PORT_A);
3194 ++ panel->vbt.dsi.bl_ports = BIT(PORT_A);
3195 + break;
3196 + case DL_DCS_PORT_C:
3197 +- i915->vbt.dsi.bl_ports = BIT(PORT_C);
3198 ++ panel->vbt.dsi.bl_ports = BIT(port_bc);
3199 + break;
3200 + default:
3201 + case DL_DCS_PORT_A_AND_C:
3202 +- i915->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
3203 ++ panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(port_bc);
3204 + break;
3205 + }
3206 +
3207 +- if (!i915->vbt.dsi.config->cabc_supported)
3208 ++ if (!panel->vbt.dsi.config->cabc_supported)
3209 + return;
3210 +
3211 +- switch (i915->vbt.dsi.config->dl_dcs_cabc_ports) {
3212 ++ switch (panel->vbt.dsi.config->dl_dcs_cabc_ports) {
3213 + case DL_DCS_PORT_A:
3214 +- i915->vbt.dsi.cabc_ports = BIT(PORT_A);
3215 ++ panel->vbt.dsi.cabc_ports = BIT(PORT_A);
3216 + break;
3217 + case DL_DCS_PORT_C:
3218 +- i915->vbt.dsi.cabc_ports = BIT(PORT_C);
3219 ++ panel->vbt.dsi.cabc_ports = BIT(port_bc);
3220 + break;
3221 + default:
3222 + case DL_DCS_PORT_A_AND_C:
3223 +- i915->vbt.dsi.cabc_ports =
3224 +- BIT(PORT_A) | BIT(PORT_C);
3225 ++ panel->vbt.dsi.cabc_ports =
3226 ++ BIT(PORT_A) | BIT(port_bc);
3227 + break;
3228 + }
3229 + }
3230 +
3231 + static void
3232 +-parse_mipi_config(struct drm_i915_private *i915)
3233 ++parse_mipi_config(struct drm_i915_private *i915,
3234 ++ struct intel_panel *panel)
3235 + {
3236 + const struct bdb_mipi_config *start;
3237 + const struct mipi_config *config;
3238 + const struct mipi_pps_data *pps;
3239 +- int panel_type = i915->vbt.panel_type;
3240 ++ int panel_type = panel->vbt.panel_type;
3241 + enum port port;
3242 +
3243 + /* parse MIPI blocks only if LFP type is MIPI */
3244 +@@ -1505,7 +1529,7 @@ parse_mipi_config(struct drm_i915_private *i915)
3245 + return;
3246 +
3247 + /* Initialize this to undefined indicating no generic MIPI support */
3248 +- i915->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
3249 ++ panel->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
3250 +
3251 + /* Block #40 is already parsed and panel_fixed_mode is
3252 + * stored in i915->lfp_lvds_vbt_mode
3253 +@@ -1532,17 +1556,17 @@ parse_mipi_config(struct drm_i915_private *i915)
3254 + pps = &start->pps[panel_type];
3255 +
3256 + /* store as of now full data. Trim when we realise all is not needed */
3257 +- i915->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
3258 +- if (!i915->vbt.dsi.config)
3259 ++ panel->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
3260 ++ if (!panel->vbt.dsi.config)
3261 + return;
3262 +
3263 +- i915->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
3264 +- if (!i915->vbt.dsi.pps) {
3265 +- kfree(i915->vbt.dsi.config);
3266 ++ panel->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
3267 ++ if (!panel->vbt.dsi.pps) {
3268 ++ kfree(panel->vbt.dsi.config);
3269 + return;
3270 + }
3271 +
3272 +- parse_dsi_backlight_ports(i915, i915->vbt.version, port);
3273 ++ parse_dsi_backlight_ports(i915, panel, port);
3274 +
3275 + /* FIXME is the 90 vs. 270 correct? */
3276 + switch (config->rotation) {
3277 +@@ -1551,25 +1575,25 @@ parse_mipi_config(struct drm_i915_private *i915)
3278 + * Most (all?) VBTs claim 0 degrees despite having
3279 + * an upside down panel, thus we do not trust this.
3280 + */
3281 +- i915->vbt.dsi.orientation =
3282 ++ panel->vbt.dsi.orientation =
3283 + DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
3284 + break;
3285 + case ENABLE_ROTATION_90:
3286 +- i915->vbt.dsi.orientation =
3287 ++ panel->vbt.dsi.orientation =
3288 + DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
3289 + break;
3290 + case ENABLE_ROTATION_180:
3291 +- i915->vbt.dsi.orientation =
3292 ++ panel->vbt.dsi.orientation =
3293 + DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
3294 + break;
3295 + case ENABLE_ROTATION_270:
3296 +- i915->vbt.dsi.orientation =
3297 ++ panel->vbt.dsi.orientation =
3298 + DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
3299 + break;
3300 + }
3301 +
3302 + /* We have mandatory mipi config blocks. Initialize as generic panel */
3303 +- i915->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
3304 ++ panel->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
3305 + }
3306 +
3307 + /* Find the sequence block and size for the given panel. */
3308 +@@ -1732,13 +1756,14 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
3309 + * Get len of pre-fixed deassert fragment from a v1 init OTP sequence,
3310 + * skip all delay + gpio operands and stop at the first DSI packet op.
3311 + */
3312 +-static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915)
3313 ++static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915,
3314 ++ struct intel_panel *panel)
3315 + {
3316 +- const u8 *data = i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
3317 ++ const u8 *data = panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
3318 + int index, len;
3319 +
3320 + if (drm_WARN_ON(&i915->drm,
3321 +- !data || i915->vbt.dsi.seq_version != 1))
3322 ++ !data || panel->vbt.dsi.seq_version != 1))
3323 + return 0;
3324 +
3325 + /* index = 1 to skip sequence byte */
3326 +@@ -1766,7 +1791,8 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915)
3327 + * these devices we split the init OTP sequence into a deassert sequence and
3328 + * the actual init OTP part.
3329 + */
3330 +-static void fixup_mipi_sequences(struct drm_i915_private *i915)
3331 ++static void fixup_mipi_sequences(struct drm_i915_private *i915,
3332 ++ struct intel_panel *panel)
3333 + {
3334 + u8 *init_otp;
3335 + int len;
3336 +@@ -1776,18 +1802,18 @@ static void fixup_mipi_sequences(struct drm_i915_private *i915)
3337 + return;
3338 +
3339 + /* Limit this to v1 vid-mode sequences */
3340 +- if (i915->vbt.dsi.config->is_cmd_mode ||
3341 +- i915->vbt.dsi.seq_version != 1)
3342 ++ if (panel->vbt.dsi.config->is_cmd_mode ||
3343 ++ panel->vbt.dsi.seq_version != 1)
3344 + return;
3345 +
3346 + /* Only do this if there are otp and assert seqs and no deassert seq */
3347 +- if (!i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
3348 +- !i915->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
3349 +- i915->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
3350 ++ if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
3351 ++ !panel->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
3352 ++ panel->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
3353 + return;
3354 +
3355 + /* The deassert-sequence ends at the first DSI packet */
3356 +- len = get_init_otp_deassert_fragment_len(i915);
3357 ++ len = get_init_otp_deassert_fragment_len(i915, panel);
3358 + if (!len)
3359 + return;
3360 +
3361 +@@ -1795,25 +1821,26 @@ static void fixup_mipi_sequences(struct drm_i915_private *i915)
3362 + "Using init OTP fragment to deassert reset\n");
3363 +
3364 + /* Copy the fragment, update seq byte and terminate it */
3365 +- init_otp = (u8 *)i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
3366 +- i915->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
3367 +- if (!i915->vbt.dsi.deassert_seq)
3368 ++ init_otp = (u8 *)panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
3369 ++ panel->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
3370 ++ if (!panel->vbt.dsi.deassert_seq)
3371 + return;
3372 +- i915->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
3373 +- i915->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
3374 ++ panel->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
3375 ++ panel->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
3376 + /* Use the copy for deassert */
3377 +- i915->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
3378 +- i915->vbt.dsi.deassert_seq;
3379 ++ panel->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
3380 ++ panel->vbt.dsi.deassert_seq;
3381 + /* Replace the last byte of the fragment with init OTP seq byte */
3382 + init_otp[len - 1] = MIPI_SEQ_INIT_OTP;
3383 + /* And make MIPI_MIPI_SEQ_INIT_OTP point to it */
3384 +- i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
3385 ++ panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
3386 + }
3387 +
3388 + static void
3389 +-parse_mipi_sequence(struct drm_i915_private *i915)
3390 ++parse_mipi_sequence(struct drm_i915_private *i915,
3391 ++ struct intel_panel *panel)
3392 + {
3393 +- int panel_type = i915->vbt.panel_type;
3394 ++ int panel_type = panel->vbt.panel_type;
3395 + const struct bdb_mipi_sequence *sequence;
3396 + const u8 *seq_data;
3397 + u32 seq_size;
3398 +@@ -1821,7 +1848,7 @@ parse_mipi_sequence(struct drm_i915_private *i915)
3399 + int index = 0;
3400 +
3401 + /* Only our generic panel driver uses the sequence block. */
3402 +- if (i915->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
3403 ++ if (panel->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
3404 + return;
3405 +
3406 + sequence = find_section(i915, BDB_MIPI_SEQUENCE);
3407 +@@ -1867,7 +1894,7 @@ parse_mipi_sequence(struct drm_i915_private *i915)
3408 + drm_dbg_kms(&i915->drm,
3409 + "Unsupported sequence %u\n", seq_id);
3410 +
3411 +- i915->vbt.dsi.sequence[seq_id] = data + index;
3412 ++ panel->vbt.dsi.sequence[seq_id] = data + index;
3413 +
3414 + if (sequence->version >= 3)
3415 + index = goto_next_sequence_v3(data, index, seq_size);
3416 +@@ -1880,18 +1907,18 @@ parse_mipi_sequence(struct drm_i915_private *i915)
3417 + }
3418 + }
3419 +
3420 +- i915->vbt.dsi.data = data;
3421 +- i915->vbt.dsi.size = seq_size;
3422 +- i915->vbt.dsi.seq_version = sequence->version;
3423 ++ panel->vbt.dsi.data = data;
3424 ++ panel->vbt.dsi.size = seq_size;
3425 ++ panel->vbt.dsi.seq_version = sequence->version;
3426 +
3427 +- fixup_mipi_sequences(i915);
3428 ++ fixup_mipi_sequences(i915, panel);
3429 +
3430 + drm_dbg(&i915->drm, "MIPI related VBT parsing complete\n");
3431 + return;
3432 +
3433 + err:
3434 + kfree(data);
3435 +- memset(i915->vbt.dsi.sequence, 0, sizeof(i915->vbt.dsi.sequence));
3436 ++ memset(panel->vbt.dsi.sequence, 0, sizeof(panel->vbt.dsi.sequence));
3437 + }
3438 +
3439 + static void
3440 +@@ -2645,15 +2672,6 @@ init_vbt_defaults(struct drm_i915_private *i915)
3441 + {
3442 + i915->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
3443 +
3444 +- /* Default to having backlight */
3445 +- i915->vbt.backlight.present = true;
3446 +-
3447 +- /* LFP panel data */
3448 +- i915->vbt.lvds_dither = 1;
3449 +-
3450 +- /* SDVO panel data */
3451 +- i915->vbt.sdvo_lvds_vbt_mode = NULL;
3452 +-
3453 + /* general features */
3454 + i915->vbt.int_tv_support = 1;
3455 + i915->vbt.int_crt_support = 1;
3456 +@@ -2673,6 +2691,17 @@ init_vbt_defaults(struct drm_i915_private *i915)
3457 + i915->vbt.lvds_ssc_freq);
3458 + }
3459 +
3460 ++/* Common defaults which may be overridden by VBT. */
3461 ++static void
3462 ++init_vbt_panel_defaults(struct intel_panel *panel)
3463 ++{
3464 ++ /* Default to having backlight */
3465 ++ panel->vbt.backlight.present = true;
3466 ++
3467 ++ /* LFP panel data */
3468 ++ panel->vbt.lvds_dither = true;
3469 ++}
3470 ++
3471 + /* Defaults to initialize only if there is no VBT. */
3472 + static void
3473 + init_vbt_missing_defaults(struct drm_i915_private *i915)
3474 +@@ -2959,17 +2988,7 @@ void intel_bios_init(struct drm_i915_private *i915)
3475 + /* Grab useful general definitions */
3476 + parse_general_features(i915);
3477 + parse_general_definitions(i915);
3478 +- parse_panel_options(i915);
3479 +- parse_generic_dtd(i915);
3480 +- parse_lfp_data(i915);
3481 +- parse_lfp_backlight(i915);
3482 +- parse_sdvo_panel_data(i915);
3483 + parse_driver_features(i915);
3484 +- parse_power_conservation_features(i915);
3485 +- parse_edp(i915);
3486 +- parse_psr(i915);
3487 +- parse_mipi_config(i915);
3488 +- parse_mipi_sequence(i915);
3489 +
3490 + /* Depends on child device list */
3491 + parse_compression_parameters(i915);
3492 +@@ -2988,6 +3007,24 @@ out:
3493 + kfree(oprom_vbt);
3494 + }
3495 +
3496 ++void intel_bios_init_panel(struct drm_i915_private *i915,
3497 ++ struct intel_panel *panel)
3498 ++{
3499 ++ init_vbt_panel_defaults(panel);
3500 ++
3501 ++ parse_panel_options(i915, panel);
3502 ++ parse_generic_dtd(i915, panel);
3503 ++ parse_lfp_data(i915, panel);
3504 ++ parse_lfp_backlight(i915, panel);
3505 ++ parse_sdvo_panel_data(i915, panel);
3506 ++ parse_panel_driver_features(i915, panel);
3507 ++ parse_power_conservation_features(i915, panel);
3508 ++ parse_edp(i915, panel);
3509 ++ parse_psr(i915, panel);
3510 ++ parse_mipi_config(i915, panel);
3511 ++ parse_mipi_sequence(i915, panel);
3512 ++}
3513 ++
3514 + /**
3515 + * intel_bios_driver_remove - Free any resources allocated by intel_bios_init()
3516 + * @i915: i915 device instance
3517 +@@ -3007,19 +3044,22 @@ void intel_bios_driver_remove(struct drm_i915_private *i915)
3518 + list_del(&entry->node);
3519 + kfree(entry);
3520 + }
3521 ++}
3522 +
3523 +- kfree(i915->vbt.sdvo_lvds_vbt_mode);
3524 +- i915->vbt.sdvo_lvds_vbt_mode = NULL;
3525 +- kfree(i915->vbt.lfp_lvds_vbt_mode);
3526 +- i915->vbt.lfp_lvds_vbt_mode = NULL;
3527 +- kfree(i915->vbt.dsi.data);
3528 +- i915->vbt.dsi.data = NULL;
3529 +- kfree(i915->vbt.dsi.pps);
3530 +- i915->vbt.dsi.pps = NULL;
3531 +- kfree(i915->vbt.dsi.config);
3532 +- i915->vbt.dsi.config = NULL;
3533 +- kfree(i915->vbt.dsi.deassert_seq);
3534 +- i915->vbt.dsi.deassert_seq = NULL;
3535 ++void intel_bios_fini_panel(struct intel_panel *panel)
3536 ++{
3537 ++ kfree(panel->vbt.sdvo_lvds_vbt_mode);
3538 ++ panel->vbt.sdvo_lvds_vbt_mode = NULL;
3539 ++ kfree(panel->vbt.lfp_lvds_vbt_mode);
3540 ++ panel->vbt.lfp_lvds_vbt_mode = NULL;
3541 ++ kfree(panel->vbt.dsi.data);
3542 ++ panel->vbt.dsi.data = NULL;
3543 ++ kfree(panel->vbt.dsi.pps);
3544 ++ panel->vbt.dsi.pps = NULL;
3545 ++ kfree(panel->vbt.dsi.config);
3546 ++ panel->vbt.dsi.config = NULL;
3547 ++ kfree(panel->vbt.dsi.deassert_seq);
3548 ++ panel->vbt.dsi.deassert_seq = NULL;
3549 + }
3550 +
3551 + /**
3552 +diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
3553 +index 4709c4d298059..86129f015718d 100644
3554 +--- a/drivers/gpu/drm/i915/display/intel_bios.h
3555 ++++ b/drivers/gpu/drm/i915/display/intel_bios.h
3556 +@@ -36,6 +36,7 @@ struct drm_i915_private;
3557 + struct intel_bios_encoder_data;
3558 + struct intel_crtc_state;
3559 + struct intel_encoder;
3560 ++struct intel_panel;
3561 + enum port;
3562 +
3563 + enum intel_backlight_type {
3564 +@@ -230,6 +231,9 @@ struct mipi_pps_data {
3565 + } __packed;
3566 +
3567 + void intel_bios_init(struct drm_i915_private *dev_priv);
3568 ++void intel_bios_init_panel(struct drm_i915_private *dev_priv,
3569 ++ struct intel_panel *panel);
3570 ++void intel_bios_fini_panel(struct intel_panel *panel);
3571 + void intel_bios_driver_remove(struct drm_i915_private *dev_priv);
3572 + bool intel_bios_is_valid_vbt(const void *buf, size_t size);
3573 + bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
3574 +diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
3575 +index 9e6fa59eabba7..333871cf3a2c5 100644
3576 +--- a/drivers/gpu/drm/i915/display/intel_ddi.c
3577 ++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
3578 +@@ -3433,26 +3433,8 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
3579 + pipe_config->has_audio =
3580 + intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
3581 +
3582 +- if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp &&
3583 +- pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
3584 +- /*
3585 +- * This is a big fat ugly hack.
3586 +- *
3587 +- * Some machines in UEFI boot mode provide us a VBT that has 18
3588 +- * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
3589 +- * unknown we fail to light up. Yet the same BIOS boots up with
3590 +- * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
3591 +- * max, not what it tells us to use.
3592 +- *
3593 +- * Note: This will still be broken if the eDP panel is not lit
3594 +- * up by the BIOS, and thus we can't get the mode at module
3595 +- * load.
3596 +- */
3597 +- drm_dbg_kms(&dev_priv->drm,
3598 +- "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
3599 +- pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
3600 +- dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
3601 +- }
3602 ++ if (encoder->type == INTEL_OUTPUT_EDP)
3603 ++ intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp);
3604 +
3605 + ddi_dotclock_get(pipe_config);
3606 +
3607 +diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
3608 +index 85f58dd3df722..b490acd0ab691 100644
3609 +--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
3610 ++++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
3611 +@@ -1062,17 +1062,18 @@ bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table)
3612 +
3613 + static bool use_edp_hobl(struct intel_encoder *encoder)
3614 + {
3615 +- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3616 + struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3617 ++ struct intel_connector *connector = intel_dp->attached_connector;
3618 +
3619 +- return i915->vbt.edp.hobl && !intel_dp->hobl_failed;
3620 ++ return connector->panel.vbt.edp.hobl && !intel_dp->hobl_failed;
3621 + }
3622 +
3623 + static bool use_edp_low_vswing(struct intel_encoder *encoder)
3624 + {
3625 +- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3626 ++ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3627 ++ struct intel_connector *connector = intel_dp->attached_connector;
3628 +
3629 +- return i915->vbt.edp.low_vswing;
3630 ++ return connector->panel.vbt.edp.low_vswing;
3631 + }
3632 +
3633 + static const struct intel_ddi_buf_trans *
3634 +diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
3635 +index 408152f9f46a4..e2561c5d4953c 100644
3636 +--- a/drivers/gpu/drm/i915/display/intel_display_types.h
3637 ++++ b/drivers/gpu/drm/i915/display/intel_display_types.h
3638 +@@ -279,6 +279,73 @@ struct intel_panel_bl_funcs {
3639 + u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
3640 + };
3641 +
3642 ++enum drrs_type {
3643 ++ DRRS_TYPE_NONE,
3644 ++ DRRS_TYPE_STATIC,
3645 ++ DRRS_TYPE_SEAMLESS,
3646 ++};
3647 ++
3648 ++struct intel_vbt_panel_data {
3649 ++ struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
3650 ++ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
3651 ++
3652 ++ /* Feature bits */
3653 ++ unsigned int panel_type:4;
3654 ++ unsigned int lvds_dither:1;
3655 ++ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
3656 ++
3657 ++ u8 seamless_drrs_min_refresh_rate;
3658 ++ enum drrs_type drrs_type;
3659 ++
3660 ++ struct {
3661 ++ int rate;
3662 ++ int lanes;
3663 ++ int preemphasis;
3664 ++ int vswing;
3665 ++ int bpp;
3666 ++ struct edp_power_seq pps;
3667 ++ u8 drrs_msa_timing_delay;
3668 ++ bool low_vswing;
3669 ++ bool initialized;
3670 ++ bool hobl;
3671 ++ } edp;
3672 ++
3673 ++ struct {
3674 ++ bool enable;
3675 ++ bool full_link;
3676 ++ bool require_aux_wakeup;
3677 ++ int idle_frames;
3678 ++ int tp1_wakeup_time_us;
3679 ++ int tp2_tp3_wakeup_time_us;
3680 ++ int psr2_tp2_tp3_wakeup_time_us;
3681 ++ } psr;
3682 ++
3683 ++ struct {
3684 ++ u16 pwm_freq_hz;
3685 ++ u16 brightness_precision_bits;
3686 ++ bool present;
3687 ++ bool active_low_pwm;
3688 ++ u8 min_brightness; /* min_brightness/255 of max */
3689 ++ u8 controller; /* brightness controller number */
3690 ++ enum intel_backlight_type type;
3691 ++ } backlight;
3692 ++
3693 ++ /* MIPI DSI */
3694 ++ struct {
3695 ++ u16 panel_id;
3696 ++ struct mipi_config *config;
3697 ++ struct mipi_pps_data *pps;
3698 ++ u16 bl_ports;
3699 ++ u16 cabc_ports;
3700 ++ u8 seq_version;
3701 ++ u32 size;
3702 ++ u8 *data;
3703 ++ const u8 *sequence[MIPI_SEQ_MAX];
3704 ++ u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
3705 ++ enum drm_panel_orientation orientation;
3706 ++ } dsi;
3707 ++};
3708 ++
3709 + struct intel_panel {
3710 + struct list_head fixed_modes;
3711 +
3712 +@@ -318,6 +385,8 @@ struct intel_panel {
3713 + const struct intel_panel_bl_funcs *pwm_funcs;
3714 + void (*power)(struct intel_connector *, bool enable);
3715 + } backlight;
3716 ++
3717 ++ struct intel_vbt_panel_data vbt;
3718 + };
3719 +
3720 + struct intel_digital_port;
3721 +diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
3722 +index fe8b6b72970a2..0efec6023fbe8 100644
3723 +--- a/drivers/gpu/drm/i915/display/intel_dp.c
3724 ++++ b/drivers/gpu/drm/i915/display/intel_dp.c
3725 +@@ -1246,11 +1246,12 @@ static int intel_dp_max_bpp(struct intel_dp *intel_dp,
3726 + if (intel_dp_is_edp(intel_dp)) {
3727 + /* Get bpp from vbt only for panels that dont have bpp in edid */
3728 + if (intel_connector->base.display_info.bpc == 0 &&
3729 +- dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
3730 ++ intel_connector->panel.vbt.edp.bpp &&
3731 ++ intel_connector->panel.vbt.edp.bpp < bpp) {
3732 + drm_dbg_kms(&dev_priv->drm,
3733 + "clamping bpp for eDP panel to BIOS-provided %i\n",
3734 +- dev_priv->vbt.edp.bpp);
3735 +- bpp = dev_priv->vbt.edp.bpp;
3736 ++ intel_connector->panel.vbt.edp.bpp);
3737 ++ bpp = intel_connector->panel.vbt.edp.bpp;
3738 + }
3739 + }
3740 +
3741 +@@ -1907,7 +1908,7 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
3742 + }
3743 +
3744 + if (IS_IRONLAKE(i915) || IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915))
3745 +- pipe_config->msa_timing_delay = i915->vbt.edp.drrs_msa_timing_delay;
3746 ++ pipe_config->msa_timing_delay = connector->panel.vbt.edp.drrs_msa_timing_delay;
3747 +
3748 + pipe_config->has_drrs = true;
3749 +
3750 +@@ -2737,6 +2738,33 @@ static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
3751 + DRM_MODE_ARG(mode));
3752 + }
3753 +
3754 ++void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
3755 ++{
3756 ++ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3757 ++ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3758 ++ struct intel_connector *connector = intel_dp->attached_connector;
3759 ++
3760 ++ if (connector->panel.vbt.edp.bpp && pipe_bpp > connector->panel.vbt.edp.bpp) {
3761 ++ /*
3762 ++ * This is a big fat ugly hack.
3763 ++ *
3764 ++ * Some machines in UEFI boot mode provide us a VBT that has 18
3765 ++ * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
3766 ++ * unknown we fail to light up. Yet the same BIOS boots up with
3767 ++ * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
3768 ++ * max, not what it tells us to use.
3769 ++ *
3770 ++ * Note: This will still be broken if the eDP panel is not lit
3771 ++ * up by the BIOS, and thus we can't get the mode at module
3772 ++ * load.
3773 ++ */
3774 ++ drm_dbg_kms(&dev_priv->drm,
3775 ++ "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
3776 ++ pipe_bpp, connector->panel.vbt.edp.bpp);
3777 ++ connector->panel.vbt.edp.bpp = pipe_bpp;
3778 ++ }
3779 ++}
3780 ++
3781 + static void intel_edp_mso_init(struct intel_dp *intel_dp)
3782 + {
3783 + struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3784 +@@ -5212,8 +5240,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3785 + }
3786 + intel_connector->edid = edid;
3787 +
3788 ++ intel_bios_init_panel(dev_priv, &intel_connector->panel);
3789 ++
3790 + intel_panel_add_edid_fixed_modes(intel_connector,
3791 +- dev_priv->vbt.drrs_type != DRRS_TYPE_NONE);
3792 ++ intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE);
3793 +
3794 + /* MSO requires information from the EDID */
3795 + intel_edp_mso_init(intel_dp);
3796 +diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
3797 +index d457e17bdc57e..a54902c713a34 100644
3798 +--- a/drivers/gpu/drm/i915/display/intel_dp.h
3799 ++++ b/drivers/gpu/drm/i915/display/intel_dp.h
3800 +@@ -29,6 +29,7 @@ struct link_config_limits {
3801 + int min_bpp, max_bpp;
3802 + };
3803 +
3804 ++void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp);
3805 + void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
3806 + struct intel_crtc_state *pipe_config,
3807 + struct link_config_limits *limits);
3808 +@@ -63,6 +64,7 @@ enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
3809 + void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
3810 + const struct drm_connector_state *conn_state);
3811 + void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
3812 ++void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp);
3813 + void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
3814 + void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
3815 + int intel_dp_max_link_rate(struct intel_dp *intel_dp);
3816 +diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
3817 +index fb6cf30ee6281..c92d5bb2326a3 100644
3818 +--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
3819 ++++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
3820 +@@ -370,7 +370,7 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
3821 + int ret;
3822 +
3823 + ret = drm_edp_backlight_init(&intel_dp->aux, &panel->backlight.edp.vesa.info,
3824 +- i915->vbt.backlight.pwm_freq_hz, intel_dp->edp_dpcd,
3825 ++ panel->vbt.backlight.pwm_freq_hz, intel_dp->edp_dpcd,
3826 + &current_level, &current_mode);
3827 + if (ret < 0)
3828 + return ret;
3829 +@@ -454,7 +454,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
3830 + case INTEL_DP_AUX_BACKLIGHT_OFF:
3831 + return -ENODEV;
3832 + case INTEL_DP_AUX_BACKLIGHT_AUTO:
3833 +- switch (i915->vbt.backlight.type) {
3834 ++ switch (panel->vbt.backlight.type) {
3835 + case INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE:
3836 + try_vesa_interface = true;
3837 + break;
3838 +@@ -466,7 +466,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
3839 + }
3840 + break;
3841 + case INTEL_DP_AUX_BACKLIGHT_ON:
3842 +- if (i915->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE)
3843 ++ if (panel->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE)
3844 + try_intel_interface = true;
3845 +
3846 + try_vesa_interface = true;
3847 +diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
3848 +index 166caf293f7bc..7da4a9cbe4ba4 100644
3849 +--- a/drivers/gpu/drm/i915/display/intel_drrs.c
3850 ++++ b/drivers/gpu/drm/i915/display/intel_drrs.c
3851 +@@ -217,9 +217,6 @@ static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv,
3852 + {
3853 + struct intel_crtc *crtc;
3854 +
3855 +- if (dev_priv->vbt.drrs_type != DRRS_TYPE_SEAMLESS)
3856 +- return;
3857 +-
3858 + for_each_intel_crtc(&dev_priv->drm, crtc) {
3859 + unsigned int frontbuffer_bits;
3860 +
3861 +diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
3862 +index 389a8c24cdc1e..35e121cd226c5 100644
3863 +--- a/drivers/gpu/drm/i915/display/intel_dsi.c
3864 ++++ b/drivers/gpu/drm/i915/display/intel_dsi.c
3865 +@@ -102,7 +102,7 @@ intel_dsi_get_panel_orientation(struct intel_connector *connector)
3866 + struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
3867 + enum drm_panel_orientation orientation;
3868 +
3869 +- orientation = dev_priv->vbt.dsi.orientation;
3870 ++ orientation = connector->panel.vbt.dsi.orientation;
3871 + if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
3872 + return orientation;
3873 +
3874 +diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
3875 +index 7d234429e71ef..1bc7118c56a2a 100644
3876 +--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
3877 ++++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
3878 +@@ -160,12 +160,10 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
3879 + static int dcs_setup_backlight(struct intel_connector *connector,
3880 + enum pipe unused)
3881 + {
3882 +- struct drm_device *dev = connector->base.dev;
3883 +- struct drm_i915_private *dev_priv = to_i915(dev);
3884 + struct intel_panel *panel = &connector->panel;
3885 +
3886 +- if (dev_priv->vbt.backlight.brightness_precision_bits > 8)
3887 +- panel->backlight.max = (1 << dev_priv->vbt.backlight.brightness_precision_bits) - 1;
3888 ++ if (panel->vbt.backlight.brightness_precision_bits > 8)
3889 ++ panel->backlight.max = (1 << panel->vbt.backlight.brightness_precision_bits) - 1;
3890 + else
3891 + panel->backlight.max = PANEL_PWM_MAX_VALUE;
3892 +
3893 +@@ -185,11 +183,10 @@ static const struct intel_panel_bl_funcs dcs_bl_funcs = {
3894 + int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
3895 + {
3896 + struct drm_device *dev = intel_connector->base.dev;
3897 +- struct drm_i915_private *dev_priv = to_i915(dev);
3898 + struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
3899 + struct intel_panel *panel = &intel_connector->panel;
3900 +
3901 +- if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
3902 ++ if (panel->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
3903 + return -ENODEV;
3904 +
3905 + if (drm_WARN_ON(dev, encoder->type != INTEL_OUTPUT_DSI))
3906 +diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
3907 +index dd24aef925f2e..75e8cc4337c93 100644
3908 +--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
3909 ++++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
3910 +@@ -240,9 +240,10 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
3911 + return data;
3912 + }
3913 +
3914 +-static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
3915 ++static void vlv_exec_gpio(struct intel_connector *connector,
3916 + u8 gpio_source, u8 gpio_index, bool value)
3917 + {
3918 ++ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
3919 + struct gpio_map *map;
3920 + u16 pconf0, padval;
3921 + u32 tmp;
3922 +@@ -256,7 +257,7 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
3923 +
3924 + map = &vlv_gpio_table[gpio_index];
3925 +
3926 +- if (dev_priv->vbt.dsi.seq_version >= 3) {
3927 ++ if (connector->panel.vbt.dsi.seq_version >= 3) {
3928 + /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
3929 + port = IOSF_PORT_GPIO_NC;
3930 + } else {
3931 +@@ -287,14 +288,15 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
3932 + vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
3933 + }
3934 +
3935 +-static void chv_exec_gpio(struct drm_i915_private *dev_priv,
3936 ++static void chv_exec_gpio(struct intel_connector *connector,
3937 + u8 gpio_source, u8 gpio_index, bool value)
3938 + {
3939 ++ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
3940 + u16 cfg0, cfg1;
3941 + u16 family_num;
3942 + u8 port;
3943 +
3944 +- if (dev_priv->vbt.dsi.seq_version >= 3) {
3945 ++ if (connector->panel.vbt.dsi.seq_version >= 3) {
3946 + if (gpio_index >= CHV_GPIO_IDX_START_SE) {
3947 + /* XXX: it's unclear whether 255->57 is part of SE. */
3948 + gpio_index -= CHV_GPIO_IDX_START_SE;
3949 +@@ -340,9 +342,10 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
3950 + vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
3951 + }
3952 +
3953 +-static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
3954 ++static void bxt_exec_gpio(struct intel_connector *connector,
3955 + u8 gpio_source, u8 gpio_index, bool value)
3956 + {
3957 ++ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
3958 + /* XXX: this table is a quick ugly hack. */
3959 + static struct gpio_desc *bxt_gpio_table[U8_MAX + 1];
3960 + struct gpio_desc *gpio_desc = bxt_gpio_table[gpio_index];
3961 +@@ -366,9 +369,11 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
3962 + gpiod_set_value(gpio_desc, value);
3963 + }
3964 +
3965 +-static void icl_exec_gpio(struct drm_i915_private *dev_priv,
3966 ++static void icl_exec_gpio(struct intel_connector *connector,
3967 + u8 gpio_source, u8 gpio_index, bool value)
3968 + {
3969 ++ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
3970 ++
3971 + drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n");
3972 + }
3973 +
3974 +@@ -376,18 +381,19 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
3975 + {
3976 + struct drm_device *dev = intel_dsi->base.base.dev;
3977 + struct drm_i915_private *dev_priv = to_i915(dev);
3978 ++ struct intel_connector *connector = intel_dsi->attached_connector;
3979 + u8 gpio_source, gpio_index = 0, gpio_number;
3980 + bool value;
3981 +
3982 + drm_dbg_kms(&dev_priv->drm, "\n");
3983 +
3984 +- if (dev_priv->vbt.dsi.seq_version >= 3)
3985 ++ if (connector->panel.vbt.dsi.seq_version >= 3)
3986 + gpio_index = *data++;
3987 +
3988 + gpio_number = *data++;
3989 +
3990 + /* gpio source in sequence v2 only */
3991 +- if (dev_priv->vbt.dsi.seq_version == 2)
3992 ++ if (connector->panel.vbt.dsi.seq_version == 2)
3993 + gpio_source = (*data >> 1) & 3;
3994 + else
3995 + gpio_source = 0;
3996 +@@ -396,13 +402,13 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
3997 + value = *data++ & 1;
3998 +
3999 + if (DISPLAY_VER(dev_priv) >= 11)
4000 +- icl_exec_gpio(dev_priv, gpio_source, gpio_index, value);
4001 ++ icl_exec_gpio(connector, gpio_source, gpio_index, value);
4002 + else if (IS_VALLEYVIEW(dev_priv))
4003 +- vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
4004 ++ vlv_exec_gpio(connector, gpio_source, gpio_number, value);
4005 + else if (IS_CHERRYVIEW(dev_priv))
4006 +- chv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
4007 ++ chv_exec_gpio(connector, gpio_source, gpio_number, value);
4008 + else
4009 +- bxt_exec_gpio(dev_priv, gpio_source, gpio_index, value);
4010 ++ bxt_exec_gpio(connector, gpio_source, gpio_index, value);
4011 +
4012 + return data;
4013 + }
4014 +@@ -585,14 +591,15 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
4015 + enum mipi_seq seq_id)
4016 + {
4017 + struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
4018 ++ struct intel_connector *connector = intel_dsi->attached_connector;
4019 + const u8 *data;
4020 + fn_mipi_elem_exec mipi_elem_exec;
4021 +
4022 + if (drm_WARN_ON(&dev_priv->drm,
4023 +- seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence)))
4024 ++ seq_id >= ARRAY_SIZE(connector->panel.vbt.dsi.sequence)))
4025 + return;
4026 +
4027 +- data = dev_priv->vbt.dsi.sequence[seq_id];
4028 ++ data = connector->panel.vbt.dsi.sequence[seq_id];
4029 + if (!data)
4030 + return;
4031 +
4032 +@@ -605,7 +612,7 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
4033 + data++;
4034 +
4035 + /* Skip Size of Sequence. */
4036 +- if (dev_priv->vbt.dsi.seq_version >= 3)
4037 ++ if (connector->panel.vbt.dsi.seq_version >= 3)
4038 + data += 4;
4039 +
4040 + while (1) {
4041 +@@ -621,7 +628,7 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
4042 + mipi_elem_exec = NULL;
4043 +
4044 + /* Size of Operation. */
4045 +- if (dev_priv->vbt.dsi.seq_version >= 3)
4046 ++ if (connector->panel.vbt.dsi.seq_version >= 3)
4047 + operation_size = *data++;
4048 +
4049 + if (mipi_elem_exec) {
4050 +@@ -669,10 +676,10 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
4051 +
4052 + void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
4053 + {
4054 +- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
4055 ++ struct intel_connector *connector = intel_dsi->attached_connector;
4056 +
4057 + /* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
4058 +- if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
4059 ++ if (is_vid_mode(intel_dsi) && connector->panel.vbt.dsi.seq_version >= 3)
4060 + return;
4061 +
4062 + msleep(msec);
4063 +@@ -734,9 +741,10 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
4064 + {
4065 + struct drm_device *dev = intel_dsi->base.base.dev;
4066 + struct drm_i915_private *dev_priv = to_i915(dev);
4067 +- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
4068 +- struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
4069 +- struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
4070 ++ struct intel_connector *connector = intel_dsi->attached_connector;
4071 ++ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
4072 ++ struct mipi_pps_data *pps = connector->panel.vbt.dsi.pps;
4073 ++ struct drm_display_mode *mode = connector->panel.vbt.lfp_lvds_vbt_mode;
4074 + u16 burst_mode_ratio;
4075 + enum port port;
4076 +
4077 +@@ -872,7 +880,8 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
4078 + {
4079 + struct drm_device *dev = intel_dsi->base.base.dev;
4080 + struct drm_i915_private *dev_priv = to_i915(dev);
4081 +- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
4082 ++ struct intel_connector *connector = intel_dsi->attached_connector;
4083 ++ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
4084 + enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
4085 + bool want_backlight_gpio = false;
4086 + bool want_panel_gpio = false;
4087 +@@ -927,7 +936,8 @@ void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi)
4088 + {
4089 + struct drm_device *dev = intel_dsi->base.base.dev;
4090 + struct drm_i915_private *dev_priv = to_i915(dev);
4091 +- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
4092 ++ struct intel_connector *connector = intel_dsi->attached_connector;
4093 ++ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
4094 +
4095 + if (intel_dsi->gpio_panel) {
4096 + gpiod_put(intel_dsi->gpio_panel);
4097 +diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
4098 +index e8478161f8b9b..9f250a70519aa 100644
4099 +--- a/drivers/gpu/drm/i915/display/intel_lvds.c
4100 ++++ b/drivers/gpu/drm/i915/display/intel_lvds.c
4101 +@@ -809,7 +809,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
4102 + else
4103 + val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK);
4104 + if (val == 0)
4105 +- val = dev_priv->vbt.bios_lvds_val;
4106 ++ val = connector->panel.vbt.bios_lvds_val;
4107 +
4108 + return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
4109 + }
4110 +@@ -967,9 +967,11 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
4111 + }
4112 + intel_connector->edid = edid;
4113 +
4114 ++ intel_bios_init_panel(dev_priv, &intel_connector->panel);
4115 ++
4116 + /* Try EDID first */
4117 + intel_panel_add_edid_fixed_modes(intel_connector,
4118 +- dev_priv->vbt.drrs_type != DRRS_TYPE_NONE);
4119 ++ intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE);
4120 +
4121 + /* Failed to get EDID, what about VBT? */
4122 + if (!intel_panel_preferred_fixed_mode(intel_connector))
4123 +diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
4124 +index d1d1b59102d69..d055e41185582 100644
4125 +--- a/drivers/gpu/drm/i915/display/intel_panel.c
4126 ++++ b/drivers/gpu/drm/i915/display/intel_panel.c
4127 +@@ -75,9 +75,8 @@ const struct drm_display_mode *
4128 + intel_panel_downclock_mode(struct intel_connector *connector,
4129 + const struct drm_display_mode *adjusted_mode)
4130 + {
4131 +- struct drm_i915_private *i915 = to_i915(connector->base.dev);
4132 + const struct drm_display_mode *fixed_mode, *best_mode = NULL;
4133 +- int min_vrefresh = i915->vbt.seamless_drrs_min_refresh_rate;
4134 ++ int min_vrefresh = connector->panel.vbt.seamless_drrs_min_refresh_rate;
4135 + int max_vrefresh = drm_mode_vrefresh(adjusted_mode);
4136 +
4137 + /* pick the fixed_mode with the lowest refresh rate */
4138 +@@ -113,13 +112,11 @@ int intel_panel_get_modes(struct intel_connector *connector)
4139 +
4140 + enum drrs_type intel_panel_drrs_type(struct intel_connector *connector)
4141 + {
4142 +- struct drm_i915_private *i915 = to_i915(connector->base.dev);
4143 +-
4144 + if (list_empty(&connector->panel.fixed_modes) ||
4145 + list_is_singular(&connector->panel.fixed_modes))
4146 + return DRRS_TYPE_NONE;
4147 +
4148 +- return i915->vbt.drrs_type;
4149 ++ return connector->panel.vbt.drrs_type;
4150 + }
4151 +
4152 + int intel_panel_compute_config(struct intel_connector *connector,
4153 +@@ -260,7 +257,7 @@ void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector)
4154 + struct drm_i915_private *i915 = to_i915(connector->base.dev);
4155 + const struct drm_display_mode *mode;
4156 +
4157 +- mode = i915->vbt.lfp_lvds_vbt_mode;
4158 ++ mode = connector->panel.vbt.lfp_lvds_vbt_mode;
4159 + if (!mode)
4160 + return;
4161 +
4162 +@@ -274,7 +271,7 @@ void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector)
4163 + struct drm_i915_private *i915 = to_i915(connector->base.dev);
4164 + const struct drm_display_mode *mode;
4165 +
4166 +- mode = i915->vbt.sdvo_lvds_vbt_mode;
4167 ++ mode = connector->panel.vbt.sdvo_lvds_vbt_mode;
4168 + if (!mode)
4169 + return;
4170 +
4171 +@@ -639,6 +636,8 @@ void intel_panel_fini(struct intel_connector *connector)
4172 +
4173 + intel_backlight_destroy(panel);
4174 +
4175 ++ intel_bios_fini_panel(panel);
4176 ++
4177 + list_for_each_entry_safe(fixed_mode, next, &panel->fixed_modes, head) {
4178 + list_del(&fixed_mode->head);
4179 + drm_mode_destroy(connector->base.dev, fixed_mode);
4180 +diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
4181 +index 5a598dd060391..a226e4e5c5698 100644
4182 +--- a/drivers/gpu/drm/i915/display/intel_pps.c
4183 ++++ b/drivers/gpu/drm/i915/display/intel_pps.c
4184 +@@ -209,7 +209,8 @@ static int
4185 + bxt_power_sequencer_idx(struct intel_dp *intel_dp)
4186 + {
4187 + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4188 +- int backlight_controller = dev_priv->vbt.backlight.controller;
4189 ++ struct intel_connector *connector = intel_dp->attached_connector;
4190 ++ int backlight_controller = connector->panel.vbt.backlight.controller;
4191 +
4192 + lockdep_assert_held(&dev_priv->pps_mutex);
4193 +
4194 +@@ -1159,53 +1160,84 @@ intel_pps_verify_state(struct intel_dp *intel_dp)
4195 + }
4196 + }
4197 +
4198 +-static void pps_init_delays(struct intel_dp *intel_dp)
4199 ++static void pps_init_delays_cur(struct intel_dp *intel_dp,
4200 ++ struct edp_power_seq *cur)
4201 + {
4202 + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4203 +- struct edp_power_seq cur, vbt, spec,
4204 +- *final = &intel_dp->pps.pps_delays;
4205 +
4206 + lockdep_assert_held(&dev_priv->pps_mutex);
4207 +
4208 +- /* already initialized? */
4209 +- if (final->t11_t12 != 0)
4210 +- return;
4211 ++ intel_pps_readout_hw_state(intel_dp, cur);
4212 ++
4213 ++ intel_pps_dump_state(intel_dp, "cur", cur);
4214 ++}
4215 +
4216 +- intel_pps_readout_hw_state(intel_dp, &cur);
4217 ++static void pps_init_delays_vbt(struct intel_dp *intel_dp,
4218 ++ struct edp_power_seq *vbt)
4219 ++{
4220 ++ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4221 ++ struct intel_connector *connector = intel_dp->attached_connector;
4222 +
4223 +- intel_pps_dump_state(intel_dp, "cur", &cur);
4224 ++ *vbt = connector->panel.vbt.edp.pps;
4225 +
4226 +- vbt = dev_priv->vbt.edp.pps;
4227 + /* On Toshiba Satellite P50-C-18C system the VBT T12 delay
4228 + * of 500ms appears to be too short. Occasionally the panel
4229 + * just fails to power back on. Increasing the delay to 800ms
4230 + * seems sufficient to avoid this problem.
4231 + */
4232 + if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
4233 +- vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
4234 ++ vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10);
4235 + drm_dbg_kms(&dev_priv->drm,
4236 + "Increasing T12 panel delay as per the quirk to %d\n",
4237 +- vbt.t11_t12);
4238 ++ vbt->t11_t12);
4239 + }
4240 ++
4241 + /* T11_T12 delay is special and actually in units of 100ms, but zero
4242 + * based in the hw (so we need to add 100 ms). But the sw vbt
4243 + * table multiplies it with 1000 to make it in units of 100usec,
4244 + * too. */
4245 +- vbt.t11_t12 += 100 * 10;
4246 ++ vbt->t11_t12 += 100 * 10;
4247 ++
4248 ++ intel_pps_dump_state(intel_dp, "vbt", vbt);
4249 ++}
4250 ++
4251 ++static void pps_init_delays_spec(struct intel_dp *intel_dp,
4252 ++ struct edp_power_seq *spec)
4253 ++{
4254 ++ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4255 ++
4256 ++ lockdep_assert_held(&dev_priv->pps_mutex);
4257 +
4258 + /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4259 + * our hw here, which are all in 100usec. */
4260 +- spec.t1_t3 = 210 * 10;
4261 +- spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4262 +- spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4263 +- spec.t10 = 500 * 10;
4264 ++ spec->t1_t3 = 210 * 10;
4265 ++ spec->t8 = 50 * 10; /* no limit for t8, use t7 instead */
4266 ++ spec->t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4267 ++ spec->t10 = 500 * 10;
4268 + /* This one is special and actually in units of 100ms, but zero
4269 + * based in the hw (so we need to add 100 ms). But the sw vbt
4270 + * table multiplies it with 1000 to make it in units of 100usec,
4271 + * too. */
4272 +- spec.t11_t12 = (510 + 100) * 10;
4273 ++ spec->t11_t12 = (510 + 100) * 10;
4274 ++
4275 ++ intel_pps_dump_state(intel_dp, "spec", spec);
4276 ++}
4277 ++
4278 ++static void pps_init_delays(struct intel_dp *intel_dp)
4279 ++{
4280 ++ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4281 ++ struct edp_power_seq cur, vbt, spec,
4282 ++ *final = &intel_dp->pps.pps_delays;
4283 ++
4284 ++ lockdep_assert_held(&dev_priv->pps_mutex);
4285 ++
4286 ++ /* already initialized? */
4287 ++ if (final->t11_t12 != 0)
4288 ++ return;
4289 +
4290 +- intel_pps_dump_state(intel_dp, "vbt", &vbt);
4291 ++ pps_init_delays_cur(intel_dp, &cur);
4292 ++ pps_init_delays_vbt(intel_dp, &vbt);
4293 ++ pps_init_delays_spec(intel_dp, &spec);
4294 +
4295 + /* Use the max of the register settings and vbt. If both are
4296 + * unset, fall back to the spec limits. */
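
A minimal standalone sketch of the per-field merge rule those three new helpers feed (hypothetical name; the driver applies it to each edp_power_seq field, in the 100 usec units noted above): prefer the larger of the HW readout and the VBT value, and fall back to the spec limit only when both are unset.

#include <stdint.h>

static uint16_t merge_delay(uint16_t hw, uint16_t vbt, uint16_t spec)
{
    uint16_t v = hw > vbt ? hw : vbt;   /* larger of HW readout and VBT */

    return v ? v : spec;                /* both unset: eDP spec limit */
}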
4297 +diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
4298 +index 06db407e2749f..8f09203e0cf03 100644
4299 +--- a/drivers/gpu/drm/i915/display/intel_psr.c
4300 ++++ b/drivers/gpu/drm/i915/display/intel_psr.c
4301 +@@ -86,10 +86,13 @@
4302 +
4303 + static bool psr_global_enabled(struct intel_dp *intel_dp)
4304 + {
4305 ++ struct intel_connector *connector = intel_dp->attached_connector;
4306 + struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4307 +
4308 + switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
4309 + case I915_PSR_DEBUG_DEFAULT:
4310 ++ if (i915->params.enable_psr == -1)
4311 ++ return connector->panel.vbt.psr.enable;
4312 + return i915->params.enable_psr;
4313 + case I915_PSR_DEBUG_DISABLE:
4314 + return false;
4315 +@@ -399,6 +402,7 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
4316 +
4317 + static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
4318 + {
4319 ++ struct intel_connector *connector = intel_dp->attached_connector;
4320 + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4321 + u32 val = 0;
4322 +
4323 +@@ -411,20 +415,20 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
4324 + goto check_tp3_sel;
4325 + }
4326 +
4327 +- if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
4328 ++ if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
4329 + val |= EDP_PSR_TP1_TIME_0us;
4330 +- else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
4331 ++ else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
4332 + val |= EDP_PSR_TP1_TIME_100us;
4333 +- else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
4334 ++ else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
4335 + val |= EDP_PSR_TP1_TIME_500us;
4336 + else
4337 + val |= EDP_PSR_TP1_TIME_2500us;
4338 +
4339 +- if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
4340 ++ if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
4341 + val |= EDP_PSR_TP2_TP3_TIME_0us;
4342 +- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
4343 ++ else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
4344 + val |= EDP_PSR_TP2_TP3_TIME_100us;
4345 +- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
4346 ++ else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
4347 + val |= EDP_PSR_TP2_TP3_TIME_500us;
4348 + else
4349 + val |= EDP_PSR_TP2_TP3_TIME_2500us;
4350 +@@ -441,13 +445,14 @@ check_tp3_sel:
4351 +
4352 + static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
4353 + {
4354 ++ struct intel_connector *connector = intel_dp->attached_connector;
4355 + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4356 + int idle_frames;
4357 +
4358 + /* Let's use 6 as the minimum to cover all known cases including the
4359 + * off-by-one issue that HW has in some cases.
4360 + */
4361 +- idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
4362 ++ idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
4363 + idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
4364 +
4365 + if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
4366 +@@ -483,18 +488,19 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
4367 +
4368 + static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
4369 + {
4370 ++ struct intel_connector *connector = intel_dp->attached_connector;
4371 + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4372 + u32 val = 0;
4373 +
4374 + if (dev_priv->params.psr_safest_params)
4375 + return EDP_PSR2_TP2_TIME_2500us;
4376 +
4377 +- if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
4378 +- dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
4379 ++ if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
4380 ++ connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
4381 + val |= EDP_PSR2_TP2_TIME_50us;
4382 +- else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
4383 ++ else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
4384 + val |= EDP_PSR2_TP2_TIME_100us;
4385 +- else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
4386 ++ else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
4387 + val |= EDP_PSR2_TP2_TIME_500us;
4388 + else
4389 + val |= EDP_PSR2_TP2_TIME_2500us;
4390 +@@ -2344,6 +2350,7 @@ unlock:
4391 + */
4392 + void intel_psr_init(struct intel_dp *intel_dp)
4393 + {
4394 ++ struct intel_connector *connector = intel_dp->attached_connector;
4395 + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4396 + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4397 +
4398 +@@ -2367,14 +2374,10 @@ void intel_psr_init(struct intel_dp *intel_dp)
4399 +
4400 + intel_dp->psr.source_support = true;
4401 +
4402 +- if (dev_priv->params.enable_psr == -1)
4403 +- if (!dev_priv->vbt.psr.enable)
4404 +- dev_priv->params.enable_psr = 0;
4405 +-
4406 + /* Set link_standby x link_off defaults */
4407 + if (DISPLAY_VER(dev_priv) < 12)
4408 + /* For new platforms up to TGL let's respect VBT back again */
4409 +- intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;
4410 ++ intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
4411 +
4412 + INIT_WORK(&intel_dp->psr.work, intel_psr_work);
4413 + INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
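
The hunk above removes the init-time write-back of the VBT default into the module parameter; psr_global_enabled() now resolves it per connector at query time instead. A standalone sketch of that tri-state handling (hypothetical name; -1 is the i915.enable_psr "auto" value):

#include <stdbool.h>

static bool psr_enabled(int param, bool vbt_enable)
{
    if (param == -1)            /* auto: follow this panel's VBT flag */
        return vbt_enable;
    return param != 0;          /* 0/1: explicit user override */
}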
4414 +diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
4415 +index d81855d57cdc9..14a64bd61176d 100644
4416 +--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
4417 ++++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
4418 +@@ -2869,6 +2869,7 @@ static bool
4419 + intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
4420 + {
4421 + struct drm_encoder *encoder = &intel_sdvo->base.base;
4422 ++ struct drm_i915_private *i915 = to_i915(encoder->dev);
4423 + struct drm_connector *connector;
4424 + struct intel_connector *intel_connector;
4425 + struct intel_sdvo_connector *intel_sdvo_connector;
4426 +@@ -2900,6 +2901,8 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
4427 + if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
4428 + goto err;
4429 +
4430 ++ intel_bios_init_panel(i915, &intel_connector->panel);
4431 ++
4432 + /*
4433 + * Fetch modes from VBT. For SDVO prefer the VBT mode since some
4434 + * SDVO->LVDS transcoders can't cope with the EDID mode.
4435 +diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
4436 +index 1954f07f0d3ec..02f75e95b2ec1 100644
4437 +--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
4438 ++++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
4439 +@@ -782,6 +782,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
4440 + {
4441 + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
4442 + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
4443 ++ struct intel_connector *connector = to_intel_connector(conn_state->connector);
4444 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4445 + enum pipe pipe = crtc->pipe;
4446 + enum port port;
4447 +@@ -838,7 +839,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
4448 + * the delay in that case. If there is no deassert-seq, then an
4449 + * unconditional msleep is used to give the panel time to power-on.
4450 + */
4451 +- if (dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
4452 ++ if (connector->panel.vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
4453 + intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
4454 + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
4455 + } else {
4456 +@@ -1690,7 +1691,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
4457 + {
4458 + struct drm_device *dev = intel_dsi->base.base.dev;
4459 + struct drm_i915_private *dev_priv = to_i915(dev);
4460 +- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
4461 ++ struct intel_connector *connector = intel_dsi->attached_connector;
4462 ++ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
4463 + u32 tlpx_ns, extra_byte_count, tlpx_ui;
4464 + u32 ui_num, ui_den;
4465 + u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
4466 +@@ -1924,13 +1926,22 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
4467 +
4468 + intel_dsi->panel_power_off_time = ktime_get_boottime();
4469 +
4470 +- if (dev_priv->vbt.dsi.config->dual_link)
4471 ++ intel_bios_init_panel(dev_priv, &intel_connector->panel);
4472 ++
4473 ++ if (intel_connector->panel.vbt.dsi.config->dual_link)
4474 + intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
4475 + else
4476 + intel_dsi->ports = BIT(port);
4477 +
4478 +- intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
4479 +- intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;
4480 ++ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
4481 ++ intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
4482 ++
4483 ++ intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports;
4484 ++
4485 ++ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
4486 ++ intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
4487 ++
4488 ++ intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports;
4489 +
4490 + /* Create a DSI host (and a device) for each port. */
4491 + for_each_dsi_port(port, intel_dsi->ports) {
4492 +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
4493 +index 321af109d484f..8da42af0256ab 100644
4494 +--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
4495 ++++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
4496 +@@ -1269,6 +1269,10 @@ static void i915_gem_context_release_work(struct work_struct *work)
4497 + trace_i915_context_free(ctx);
4498 + GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
4499 +
4500 ++ spin_lock(&ctx->i915->gem.contexts.lock);
4501 ++ list_del(&ctx->link);
4502 ++ spin_unlock(&ctx->i915->gem.contexts.lock);
4503 ++
4504 + if (ctx->syncobj)
4505 + drm_syncobj_put(ctx->syncobj);
4506 +
4507 +@@ -1514,10 +1518,6 @@ static void context_close(struct i915_gem_context *ctx)
4508 +
4509 + ctx->file_priv = ERR_PTR(-EBADF);
4510 +
4511 +- spin_lock(&ctx->i915->gem.contexts.lock);
4512 +- list_del(&ctx->link);
4513 +- spin_unlock(&ctx->i915->gem.contexts.lock);
4514 +-
4515 + client = ctx->client;
4516 + if (client) {
4517 + spin_lock(&client->ctx_lock);
4518 +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
4519 +index 5184d70d48382..554d79bc0312d 100644
4520 +--- a/drivers/gpu/drm/i915/i915_drv.h
4521 ++++ b/drivers/gpu/drm/i915/i915_drv.h
4522 +@@ -194,12 +194,6 @@ struct drm_i915_display_funcs {
4523 +
4524 + #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
4525 +
4526 +-enum drrs_type {
4527 +- DRRS_TYPE_NONE,
4528 +- DRRS_TYPE_STATIC,
4529 +- DRRS_TYPE_SEAMLESS,
4530 +-};
4531 +-
4532 + #define QUIRK_LVDS_SSC_DISABLE (1<<1)
4533 + #define QUIRK_INVERT_BRIGHTNESS (1<<2)
4534 + #define QUIRK_BACKLIGHT_PRESENT (1<<3)
4535 +@@ -308,76 +302,19 @@ struct intel_vbt_data {
4536 + /* bdb version */
4537 + u16 version;
4538 +
4539 +- struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
4540 +- struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
4541 +-
4542 + /* Feature bits */
4543 + unsigned int int_tv_support:1;
4544 +- unsigned int lvds_dither:1;
4545 + unsigned int int_crt_support:1;
4546 + unsigned int lvds_use_ssc:1;
4547 + unsigned int int_lvds_support:1;
4548 + unsigned int display_clock_mode:1;
4549 + unsigned int fdi_rx_polarity_inverted:1;
4550 +- unsigned int panel_type:4;
4551 + int lvds_ssc_freq;
4552 +- unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
4553 + enum drm_panel_orientation orientation;
4554 +
4555 + bool override_afc_startup;
4556 + u8 override_afc_startup_val;
4557 +
4558 +- u8 seamless_drrs_min_refresh_rate;
4559 +- enum drrs_type drrs_type;
4560 +-
4561 +- struct {
4562 +- int rate;
4563 +- int lanes;
4564 +- int preemphasis;
4565 +- int vswing;
4566 +- int bpp;
4567 +- struct edp_power_seq pps;
4568 +- u8 drrs_msa_timing_delay;
4569 +- bool low_vswing;
4570 +- bool initialized;
4571 +- bool hobl;
4572 +- } edp;
4573 +-
4574 +- struct {
4575 +- bool enable;
4576 +- bool full_link;
4577 +- bool require_aux_wakeup;
4578 +- int idle_frames;
4579 +- int tp1_wakeup_time_us;
4580 +- int tp2_tp3_wakeup_time_us;
4581 +- int psr2_tp2_tp3_wakeup_time_us;
4582 +- } psr;
4583 +-
4584 +- struct {
4585 +- u16 pwm_freq_hz;
4586 +- u16 brightness_precision_bits;
4587 +- bool present;
4588 +- bool active_low_pwm;
4589 +- u8 min_brightness; /* min_brightness/255 of max */
4590 +- u8 controller; /* brightness controller number */
4591 +- enum intel_backlight_type type;
4592 +- } backlight;
4593 +-
4594 +- /* MIPI DSI */
4595 +- struct {
4596 +- u16 panel_id;
4597 +- struct mipi_config *config;
4598 +- struct mipi_pps_data *pps;
4599 +- u16 bl_ports;
4600 +- u16 cabc_ports;
4601 +- u8 seq_version;
4602 +- u32 size;
4603 +- u8 *data;
4604 +- const u8 *sequence[MIPI_SEQ_MAX];
4605 +- u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
4606 +- enum drm_panel_orientation orientation;
4607 +- } dsi;
4608 +-
4609 + int crt_ddc_pin;
4610 +
4611 + struct list_head display_devices;
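
The fields deleted above all describe a single panel, which is the point of the series: panel data moves from the device-global VBT struct to a per-connector copy filled by intel_bios_init_panel(). A deliberately simplified sketch of the resulting split (hypothetical struct shapes, not the real definitions):

/* Per connector: two panels on one device may now differ. */
struct panel_vbt {
    int seq_version;
    /* ... psr, backlight, edp and dsi blocks move here ... */
};

/* Per device: only the non-panel fields remain. */
struct device_vbt {
    int lvds_ssc_freq;
    /* ... */
};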
4612 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
4613 +index 702e5b89be226..b605d0ceaefad 100644
4614 +--- a/drivers/gpu/drm/i915/i915_gem.c
4615 ++++ b/drivers/gpu/drm/i915/i915_gem.c
4616 +@@ -1191,7 +1191,8 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
4617 +
4618 + intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
4619 +
4620 +- i915_gem_drain_freed_objects(dev_priv);
4621 ++ /* Flush any outstanding work, including i915_gem_context.release_work. */
4622 ++ i915_gem_drain_workqueue(dev_priv);
4623 +
4624 + drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
4625 + }
4626 +diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
4627 +index 5d7504a72b11c..e244aa408d9d4 100644
4628 +--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
4629 ++++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
4630 +@@ -151,7 +151,7 @@ static void mtk_dither_config(struct device *dev, unsigned int w,
4631 + {
4632 + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
4633 +
4634 +- mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE);
4635 ++ mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE);
4636 + mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs,
4637 + DISP_REG_DITHER_CFG);
4638 + mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_REG_DITHER_CFG,
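
The one-line fix above swaps the operand order; per the fix, DISP_REG_DITHER_SIZE packs width in the high half-word and height in the low one. A tiny standalone restatement (hypothetical helper name):

#include <stdint.h>

static uint32_t dither_size(uint32_t w, uint32_t h)
{
    return (w << 16) | (h & 0xffff);    /* width 31:16, height 15:0 */
}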
4639 +diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
4640 +index af2f123e9a9a9..9a3b86c29b503 100644
4641 +--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
4642 ++++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
4643 +@@ -685,6 +685,16 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
4644 + if (--dsi->refcount != 0)
4645 + return;
4646 +
4647 ++ /*
4648 ++ * mtk_dsi_stop() and mtk_dsi_start() are asymmetric, since
4649 ++ * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
4650 ++ * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
4651 ++ * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
4652 ++ * after dsi is fully set.
4653 ++ */
4654 ++ mtk_dsi_stop(dsi);
4655 ++
4656 ++ mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
4657 + mtk_dsi_reset_engine(dsi);
4658 + mtk_dsi_lane0_ulp_mode_enter(dsi);
4659 + mtk_dsi_clk_ulp_mode_enter(dsi);
4660 +@@ -735,17 +745,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
4661 + if (!dsi->enabled)
4662 + return;
4663 +
4664 +- /*
4665 +- * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
4666 +- * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
4667 +- * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
4668 +- * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
4669 +- * after dsi is fully set.
4670 +- */
4671 +- mtk_dsi_stop(dsi);
4672 +-
4673 +- mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
4674 +-
4675 + dsi->enabled = false;
4676 + }
4677 +
4678 +@@ -808,10 +807,13 @@ static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
4679 +
4680 + static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
4681 + .attach = mtk_dsi_bridge_attach,
4682 ++ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
4683 + .atomic_disable = mtk_dsi_bridge_atomic_disable,
4684 ++ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
4685 + .atomic_enable = mtk_dsi_bridge_atomic_enable,
4686 + .atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable,
4687 + .atomic_post_disable = mtk_dsi_bridge_atomic_post_disable,
4688 ++ .atomic_reset = drm_atomic_helper_bridge_reset,
4689 + .mode_set = mtk_dsi_bridge_mode_set,
4690 + };
4691 +
4692 +diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
4693 +index 4a2e580a2f7b7..0e001ce8a40fd 100644
4694 +--- a/drivers/gpu/drm/panel/panel-simple.c
4695 ++++ b/drivers/gpu/drm/panel/panel-simple.c
4696 +@@ -2136,7 +2136,7 @@ static const struct panel_desc innolux_g121i1_l01 = {
4697 + .enable = 200,
4698 + .disable = 20,
4699 + },
4700 +- .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
4701 ++ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
4702 + .connector_type = DRM_MODE_CONNECTOR_LVDS,
4703 + };
4704 +
4705 +diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
4706 +index c204e9b95c1f7..518ee13b1d6f4 100644
4707 +--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
4708 ++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
4709 +@@ -283,8 +283,9 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
4710 + return ret;
4711 + }
4712 +
4713 +-static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
4714 +- struct drm_display_mode *mode)
4715 ++static enum drm_mode_status
4716 ++cdn_dp_connector_mode_valid(struct drm_connector *connector,
4717 ++ struct drm_display_mode *mode)
4718 + {
4719 + struct cdn_dp_device *dp = connector_to_dp(connector);
4720 + struct drm_display_info *display_info = &dp->connector.display_info;
4721 +diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
4722 +index 547ae334e5cd8..027029efb0088 100644
4723 +--- a/drivers/hv/vmbus_drv.c
4724 ++++ b/drivers/hv/vmbus_drv.c
4725 +@@ -2309,7 +2309,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
4726 + bool fb_overlap_ok)
4727 + {
4728 + struct resource *iter, *shadow;
4729 +- resource_size_t range_min, range_max, start;
4730 ++ resource_size_t range_min, range_max, start, end;
4731 + const char *dev_n = dev_name(&device_obj->device);
4732 + int retval;
4733 +
4734 +@@ -2344,6 +2344,14 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
4735 + range_max = iter->end;
4736 + start = (range_min + align - 1) & ~(align - 1);
4737 + for (; start + size - 1 <= range_max; start += align) {
4738 ++ end = start + size - 1;
4739 ++
4740 ++ /* Skip the whole fb_mmio region if not fb_overlap_ok */
4741 ++ if (!fb_overlap_ok && fb_mmio &&
4742 ++ (((start >= fb_mmio->start) && (start <= fb_mmio->end)) ||
4743 ++ ((end >= fb_mmio->start) && (end <= fb_mmio->end))))
4744 ++ continue;
4745 ++
4746 + shadow = __request_region(iter, start, size, NULL,
4747 + IORESOURCE_BUSY);
4748 + if (!shadow)
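
The new skip is a plain interval test: a candidate [start, end] is rejected when either endpoint lands inside the reserved framebuffer window, and since the scan advances start in align-sized steps, the continue simply slides the search past fb_mmio. The same predicate, restated standalone (hypothetical name):

#include <stdbool.h>

static bool hits_fb(unsigned long start, unsigned long end,
                    unsigned long fb_start, unsigned long fb_end)
{
    return (start >= fb_start && start <= fb_end) ||
           (end   >= fb_start && end   <= fb_end);
}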
4749 +diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
4750 +index e47fa34656717..3082183bd66a4 100644
4751 +--- a/drivers/i2c/busses/i2c-imx.c
4752 ++++ b/drivers/i2c/busses/i2c-imx.c
4753 +@@ -1583,7 +1583,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
4754 + if (i2c_imx->dma)
4755 + i2c_imx_dma_free(i2c_imx);
4756 +
4757 +- if (ret == 0) {
4758 ++ if (ret >= 0) {
4759 + /* setup chip registers to defaults */
4760 + imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
4761 + imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
4762 +diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
4763 +index 8716032f030a0..ad5efd7497d1c 100644
4764 +--- a/drivers/i2c/busses/i2c-mlxbf.c
4765 ++++ b/drivers/i2c/busses/i2c-mlxbf.c
4766 +@@ -6,6 +6,7 @@
4767 + */
4768 +
4769 + #include <linux/acpi.h>
4770 ++#include <linux/bitfield.h>
4771 + #include <linux/delay.h>
4772 + #include <linux/err.h>
4773 + #include <linux/interrupt.h>
4774 +@@ -63,13 +64,14 @@
4775 + */
4776 + #define MLXBF_I2C_TYU_PLL_OUT_FREQ (400 * 1000 * 1000)
4777 + /* Reference clock for Bluefield - 156 MHz. */
4778 +-#define MLXBF_I2C_PLL_IN_FREQ (156 * 1000 * 1000)
4779 ++#define MLXBF_I2C_PLL_IN_FREQ 156250000ULL
4780 +
4781 + /* Constant used to determine the PLL frequency. */
4782 +-#define MLNXBF_I2C_COREPLL_CONST 16384
4783 ++#define MLNXBF_I2C_COREPLL_CONST 16384ULL
4784 ++
4785 ++#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000ULL
4786 +
4787 + /* PLL registers. */
4788 +-#define MLXBF_I2C_CORE_PLL_REG0 0x0
4789 + #define MLXBF_I2C_CORE_PLL_REG1 0x4
4790 + #define MLXBF_I2C_CORE_PLL_REG2 0x8
4791 +
4792 +@@ -181,22 +183,15 @@
4793 + #define MLXBF_I2C_COREPLL_FREQ MLXBF_I2C_TYU_PLL_OUT_FREQ
4794 +
4795 + /* Core PLL TYU configuration. */
4796 +-#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(12, 0)
4797 +-#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(3, 0)
4798 +-#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(5, 0)
4799 +-
4800 +-#define MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT 3
4801 +-#define MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT 16
4802 +-#define MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT 20
4803 ++#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(15, 3)
4804 ++#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(19, 16)
4805 ++#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(25, 20)
4806 +
4807 + /* Core PLL YU configuration. */
4808 + #define MLXBF_I2C_COREPLL_CORE_F_YU_MASK GENMASK(25, 0)
4809 + #define MLXBF_I2C_COREPLL_CORE_OD_YU_MASK GENMASK(3, 0)
4810 +-#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(5, 0)
4811 ++#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(31, 26)
4812 +
4813 +-#define MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT 0
4814 +-#define MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT 1
4815 +-#define MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT 26
4816 +
4817 + /* Core PLL frequency. */
4818 + static u64 mlxbf_i2c_corepll_frequency;
4819 +@@ -479,8 +474,6 @@ static struct mutex mlxbf_i2c_bus_lock;
4820 + #define MLXBF_I2C_MASK_8 GENMASK(7, 0)
4821 + #define MLXBF_I2C_MASK_16 GENMASK(15, 0)
4822 +
4823 +-#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000
4824 +-
4825 + /*
4826 + * Function to poll a set of bits at a specific address; it checks whether
4827 + * the bits are equal to zero when eq_zero is set to 'true', and not equal
4828 +@@ -669,7 +662,7 @@ static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave,
4829 + /* Clear status bits. */
4830 + writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_STATUS);
4831 + /* Set the cause data. */
4832 +- writel(~0x0, priv->smbus->io + MLXBF_I2C_CAUSE_OR_CLEAR);
4833 ++ writel(~0x0, priv->mst_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR);
4834 + /* Zero PEC byte. */
4835 + writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_PEC);
4836 + /* Zero byte count. */
4837 +@@ -738,6 +731,9 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
4838 + if (flags & MLXBF_I2C_F_WRITE) {
4839 + write_en = 1;
4840 + write_len += operation->length;
4841 ++ if (data_idx + operation->length >
4842 ++ MLXBF_I2C_MASTER_DATA_DESC_SIZE)
4843 ++ return -ENOBUFS;
4844 + memcpy(data_desc + data_idx,
4845 + operation->buffer, operation->length);
4846 + data_idx += operation->length;
4847 +@@ -1407,24 +1403,19 @@ static int mlxbf_i2c_init_master(struct platform_device *pdev,
4848 + return 0;
4849 + }
4850 +
4851 +-static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
4852 ++static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
4853 + {
4854 +- u64 core_frequency, pad_frequency;
4855 ++ u64 core_frequency;
4856 + u8 core_od, core_r;
4857 + u32 corepll_val;
4858 + u16 core_f;
4859 +
4860 +- pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
4861 +-
4862 + corepll_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
4863 +
4864 + /* Get Core PLL configuration bits. */
4865 +- core_f = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT) &
4866 +- MLXBF_I2C_COREPLL_CORE_F_TYU_MASK;
4867 +- core_od = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT) &
4868 +- MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK;
4869 +- core_r = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT) &
4870 +- MLXBF_I2C_COREPLL_CORE_R_TYU_MASK;
4871 ++ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_TYU_MASK, corepll_val);
4872 ++ core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK, corepll_val);
4873 ++ core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_TYU_MASK, corepll_val);
4874 +
4875 + /*
4876 + * Compute PLL output frequency as follows:
4877 +@@ -1436,31 +1427,26 @@ static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
4878 + * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
4879 + * and PadFrequency, respectively.
4880 + */
4881 +- core_frequency = pad_frequency * (++core_f);
4882 ++ core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f);
4883 + core_frequency /= (++core_r) * (++core_od);
4884 +
4885 + return core_frequency;
4886 + }
4887 +
4888 +-static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
4889 ++static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
4890 + {
4891 + u32 corepll_reg1_val, corepll_reg2_val;
4892 +- u64 corepll_frequency, pad_frequency;
4893 ++ u64 corepll_frequency;
4894 + u8 core_od, core_r;
4895 + u32 core_f;
4896 +
4897 +- pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
4898 +-
4899 + corepll_reg1_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
4900 + corepll_reg2_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG2);
4901 +
4902 + /* Get Core PLL configuration bits */
4903 +- core_f = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT) &
4904 +- MLXBF_I2C_COREPLL_CORE_F_YU_MASK;
4905 +- core_r = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT) &
4906 +- MLXBF_I2C_COREPLL_CORE_R_YU_MASK;
4907 +- core_od = rol32(corepll_reg2_val, MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT) &
4908 +- MLXBF_I2C_COREPLL_CORE_OD_YU_MASK;
4909 ++ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_YU_MASK, corepll_reg1_val);
4910 ++ core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_YU_MASK, corepll_reg1_val);
4911 ++ core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_YU_MASK, corepll_reg2_val);
4912 +
4913 + /*
4914 + * Compute PLL output frequency as follows:
4915 +@@ -1472,7 +1458,7 @@ static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
4916 + * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
4917 + * and PadFrequency, respectively.
4918 + */
4919 +- corepll_frequency = (pad_frequency * core_f) / MLNXBF_I2C_COREPLL_CONST;
4920 ++ corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST;
4921 + corepll_frequency /= (++core_r) * (++core_od);
4922 +
4923 + return corepll_frequency;
4924 +@@ -2180,14 +2166,14 @@ static struct mlxbf_i2c_chip_info mlxbf_i2c_chip[] = {
4925 + [1] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_1],
4926 + [2] = &mlxbf_i2c_gpio_res[MLXBF_I2C_CHIP_TYPE_1]
4927 + },
4928 +- .calculate_freq = mlxbf_calculate_freq_from_tyu
4929 ++ .calculate_freq = mlxbf_i2c_calculate_freq_from_tyu
4930 + },
4931 + [MLXBF_I2C_CHIP_TYPE_2] = {
4932 + .type = MLXBF_I2C_CHIP_TYPE_2,
4933 + .shared_res = {
4934 + [0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_2]
4935 + },
4936 +- .calculate_freq = mlxbf_calculate_freq_from_yu
4937 ++ .calculate_freq = mlxbf_i2c_calculate_freq_from_yu
4938 + }
4939 + };
4940 +
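
A standalone worked example of the corrected TYU extraction plus the frequency formula quoted in the comments above; field_get() models the kernel's FIELD_GET(), and the register value is made up for illustration:

#include <stdint.h>
#include <stdio.h>

static uint32_t field_get(uint32_t mask, uint32_t val)
{
    return (val & mask) / (mask & -mask);   /* shift down to bit 0 */
}

int main(void)
{
    uint32_t reg = 0x00520418;                  /* hypothetical CORE_PLL_REG1 */
    uint64_t f  = field_get(0x0000fff8, reg);   /* GENMASK(15, 3)  */
    uint64_t od = field_get(0x000f0000, reg);   /* GENMASK(19, 16) */
    uint64_t r  = field_get(0x03f00000, reg);   /* GENMASK(25, 20) */

    /* PLL_OUT = PLL_IN * (F + 1) / ((R + 1) * (OD + 1)), PLL_IN = 156.25 MHz */
    printf("%llu Hz\n",
           (unsigned long long)(156250000ULL * (f + 1) / ((r + 1) * (od + 1))));
    return 0;
}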
4941 +diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
4942 +index 774507b54b57b..313904be5f3bd 100644
4943 +--- a/drivers/i2c/i2c-mux.c
4944 ++++ b/drivers/i2c/i2c-mux.c
4945 +@@ -243,9 +243,10 @@ struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
4946 + int (*deselect)(struct i2c_mux_core *, u32))
4947 + {
4948 + struct i2c_mux_core *muxc;
4949 ++ size_t mux_size;
4950 +
4951 +- muxc = devm_kzalloc(dev, struct_size(muxc, adapter, max_adapters)
4952 +- + sizeof_priv, GFP_KERNEL);
4953 ++ mux_size = struct_size(muxc, adapter, max_adapters);
4954 ++ muxc = devm_kzalloc(dev, size_add(mux_size, sizeof_priv), GFP_KERNEL);
4955 + if (!muxc)
4956 + return NULL;
4957 + if (sizeof_priv)
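
Why size_add() rather than a plain +: like struct_size(), it saturates at SIZE_MAX on overflow, and an allocation of SIZE_MAX can never succeed, so a miscomputed size surfaces as a clean NULL instead of wrapping to a small buffer. A standalone model of that behaviour:

#include <stdint.h>
#include <stddef.h>

static size_t size_add_sat(size_t a, size_t b)
{
    size_t s = a + b;

    return s < a ? SIZE_MAX : s;    /* saturate instead of wrapping */
}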
4958 +diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
4959 +index 861a239d905a4..3ed15e8ca6775 100644
4960 +--- a/drivers/iommu/intel/iommu.c
4961 ++++ b/drivers/iommu/intel/iommu.c
4962 +@@ -419,7 +419,7 @@ static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
4963 + {
4964 + unsigned long fl_sagaw, sl_sagaw;
4965 +
4966 +- fl_sagaw = BIT(2) | (cap_fl1gp_support(iommu->cap) ? BIT(3) : 0);
4967 ++ fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0);
4968 + sl_sagaw = cap_sagaw(iommu->cap);
4969 +
4970 + /* Second level only. */
4971 +diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
4972 +index 7835bb0f32fc3..e012b21c4fd7a 100644
4973 +--- a/drivers/media/usb/b2c2/flexcop-usb.c
4974 ++++ b/drivers/media/usb/b2c2/flexcop-usb.c
4975 +@@ -511,7 +511,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
4976 +
4977 + if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1)
4978 + return -ENODEV;
4979 +- if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[1].desc))
4980 ++ if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[0].desc))
4981 + return -ENODEV;
4982 +
4983 + switch (fc_usb->udev->speed) {
4984 +diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
4985 +index f8fdf88fb240c..ecbc46714e681 100644
4986 +--- a/drivers/memstick/core/ms_block.c
4987 ++++ b/drivers/memstick/core/ms_block.c
4988 +@@ -2188,7 +2188,6 @@ static void msb_remove(struct memstick_dev *card)
4989 +
4990 + /* Remove the disk */
4991 + del_gendisk(msb->disk);
4992 +- blk_cleanup_queue(msb->queue);
4993 + blk_mq_free_tag_set(&msb->tag_set);
4994 + msb->queue = NULL;
4995 +
4996 +diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
4997 +index 725ba74ded308..72e91c06c618b 100644
4998 +--- a/drivers/memstick/core/mspro_block.c
4999 ++++ b/drivers/memstick/core/mspro_block.c
5000 +@@ -1294,7 +1294,6 @@ static void mspro_block_remove(struct memstick_dev *card)
5001 + del_gendisk(msb->disk);
5002 + dev_dbg(&card->dev, "mspro block remove\n");
5003 +
5004 +- blk_cleanup_queue(msb->queue);
5005 + blk_mq_free_tag_set(&msb->tag_set);
5006 + msb->queue = NULL;
5007 +
5008 +diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
5009 +index 912a398a9a764..2f89ae55c1773 100644
5010 +--- a/drivers/mmc/core/block.c
5011 ++++ b/drivers/mmc/core/block.c
5012 +@@ -2509,7 +2509,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
5013 + return md;
5014 +
5015 + err_cleanup_queue:
5016 +- blk_cleanup_queue(md->disk->queue);
5017 + blk_mq_free_tag_set(&md->queue.tag_set);
5018 + err_kfree:
5019 + kfree(md);
5020 +diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
5021 +index fa5324ceeebe4..f824cfdab75ac 100644
5022 +--- a/drivers/mmc/core/queue.c
5023 ++++ b/drivers/mmc/core/queue.c
5024 +@@ -494,7 +494,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
5025 + if (blk_queue_quiesced(q))
5026 + blk_mq_unquiesce_queue(q);
5027 +
5028 +- blk_cleanup_queue(q);
5029 + blk_mq_free_tag_set(&mq->tag_set);
5030 +
5031 + /*
5032 +diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
5033 +index 1f0120cbe9e80..8ad095c19f271 100644
5034 +--- a/drivers/net/bonding/bond_3ad.c
5035 ++++ b/drivers/net/bonding/bond_3ad.c
5036 +@@ -87,8 +87,9 @@ static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
5037 + static u16 ad_ticks_per_sec;
5038 + static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
5039 +
5040 +-static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
5041 +- MULTICAST_LACPDU_ADDR;
5042 ++const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = {
5043 ++ 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02
5044 ++};
5045 +
5046 + /* ================= main 802.3ad protocol functions ================== */
5047 + static int ad_lacpdu_send(struct port *port);
5048 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
5049 +index bff0bfd10e235..ab7cb48f8dfdd 100644
5050 +--- a/drivers/net/bonding/bond_main.c
5051 ++++ b/drivers/net/bonding/bond_main.c
5052 +@@ -865,12 +865,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
5053 + dev_uc_unsync(slave_dev, bond_dev);
5054 + dev_mc_unsync(slave_dev, bond_dev);
5055 +
5056 +- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
5057 +- /* del lacpdu mc addr from mc list */
5058 +- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
5059 +-
5060 +- dev_mc_del(slave_dev, lacpdu_multicast);
5061 +- }
5062 ++ if (BOND_MODE(bond) == BOND_MODE_8023AD)
5063 ++ dev_mc_del(slave_dev, lacpdu_mcast_addr);
5064 + }
5065 +
5066 + /*--------------------------- Active slave change ---------------------------*/
5067 +@@ -890,7 +886,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
5068 + if (bond->dev->flags & IFF_ALLMULTI)
5069 + dev_set_allmulti(old_active->dev, -1);
5070 +
5071 +- bond_hw_addr_flush(bond->dev, old_active->dev);
5072 ++ if (bond->dev->flags & IFF_UP)
5073 ++ bond_hw_addr_flush(bond->dev, old_active->dev);
5074 + }
5075 +
5076 + if (new_active) {
5077 +@@ -901,10 +898,12 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
5078 + if (bond->dev->flags & IFF_ALLMULTI)
5079 + dev_set_allmulti(new_active->dev, 1);
5080 +
5081 +- netif_addr_lock_bh(bond->dev);
5082 +- dev_uc_sync(new_active->dev, bond->dev);
5083 +- dev_mc_sync(new_active->dev, bond->dev);
5084 +- netif_addr_unlock_bh(bond->dev);
5085 ++ if (bond->dev->flags & IFF_UP) {
5086 ++ netif_addr_lock_bh(bond->dev);
5087 ++ dev_uc_sync(new_active->dev, bond->dev);
5088 ++ dev_mc_sync(new_active->dev, bond->dev);
5089 ++ netif_addr_unlock_bh(bond->dev);
5090 ++ }
5091 + }
5092 + }
5093 +
5094 +@@ -2139,16 +2138,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
5095 + }
5096 + }
5097 +
5098 +- netif_addr_lock_bh(bond_dev);
5099 +- dev_mc_sync_multiple(slave_dev, bond_dev);
5100 +- dev_uc_sync_multiple(slave_dev, bond_dev);
5101 +- netif_addr_unlock_bh(bond_dev);
5102 +-
5103 +- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
5104 +- /* add lacpdu mc addr to mc list */
5105 +- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
5106 ++ if (bond_dev->flags & IFF_UP) {
5107 ++ netif_addr_lock_bh(bond_dev);
5108 ++ dev_mc_sync_multiple(slave_dev, bond_dev);
5109 ++ dev_uc_sync_multiple(slave_dev, bond_dev);
5110 ++ netif_addr_unlock_bh(bond_dev);
5111 +
5112 +- dev_mc_add(slave_dev, lacpdu_multicast);
5113 ++ if (BOND_MODE(bond) == BOND_MODE_8023AD)
5114 ++ dev_mc_add(slave_dev, lacpdu_mcast_addr);
5115 + }
5116 + }
5117 +
5118 +@@ -2420,7 +2417,8 @@ static int __bond_release_one(struct net_device *bond_dev,
5119 + if (old_flags & IFF_ALLMULTI)
5120 + dev_set_allmulti(slave_dev, -1);
5121 +
5122 +- bond_hw_addr_flush(bond_dev, slave_dev);
5123 ++ if (old_flags & IFF_UP)
5124 ++ bond_hw_addr_flush(bond_dev, slave_dev);
5125 + }
5126 +
5127 + slave_disable_netpoll(slave);
5128 +@@ -4157,6 +4155,12 @@ static int bond_open(struct net_device *bond_dev)
5129 + struct list_head *iter;
5130 + struct slave *slave;
5131 +
5132 ++ if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) {
5133 ++ bond->rr_tx_counter = alloc_percpu(u32);
5134 ++ if (!bond->rr_tx_counter)
5135 ++ return -ENOMEM;
5136 ++ }
5137 ++
5138 + /* reset slave->backup and slave->inactive */
5139 + if (bond_has_slaves(bond)) {
5140 + bond_for_each_slave(bond, slave, iter) {
5141 +@@ -4194,6 +4198,9 @@ static int bond_open(struct net_device *bond_dev)
5142 + /* register to receive LACPDUs */
5143 + bond->recv_probe = bond_3ad_lacpdu_recv;
5144 + bond_3ad_initiate_agg_selection(bond, 1);
5145 ++
5146 ++ bond_for_each_slave(bond, slave, iter)
5147 ++ dev_mc_add(slave->dev, lacpdu_mcast_addr);
5148 + }
5149 +
5150 + if (bond_mode_can_use_xmit_hash(bond))
5151 +@@ -4205,6 +4212,7 @@ static int bond_open(struct net_device *bond_dev)
5152 + static int bond_close(struct net_device *bond_dev)
5153 + {
5154 + struct bonding *bond = netdev_priv(bond_dev);
5155 ++ struct slave *slave;
5156 +
5157 + bond_work_cancel_all(bond);
5158 + bond->send_peer_notif = 0;
5159 +@@ -4212,6 +4220,19 @@ static int bond_close(struct net_device *bond_dev)
5160 + bond_alb_deinitialize(bond);
5161 + bond->recv_probe = NULL;
5162 +
5163 ++ if (bond_uses_primary(bond)) {
5164 ++ rcu_read_lock();
5165 ++ slave = rcu_dereference(bond->curr_active_slave);
5166 ++ if (slave)
5167 ++ bond_hw_addr_flush(bond_dev, slave->dev);
5168 ++ rcu_read_unlock();
5169 ++ } else {
5170 ++ struct list_head *iter;
5171 ++
5172 ++ bond_for_each_slave(bond, slave, iter)
5173 ++ bond_hw_addr_flush(bond_dev, slave->dev);
5174 ++ }
5175 ++
5176 + return 0;
5177 + }
5178 +
5179 +@@ -6195,15 +6216,6 @@ static int bond_init(struct net_device *bond_dev)
5180 + if (!bond->wq)
5181 + return -ENOMEM;
5182 +
5183 +- if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) {
5184 +- bond->rr_tx_counter = alloc_percpu(u32);
5185 +- if (!bond->rr_tx_counter) {
5186 +- destroy_workqueue(bond->wq);
5187 +- bond->wq = NULL;
5188 +- return -ENOMEM;
5189 +- }
5190 +- }
5191 +-
5192 + spin_lock_init(&bond->stats_lock);
5193 + netdev_lockdep_set_classes(bond_dev);
5194 +
5195 +diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
5196 +index d060088047f16..131467d37a45b 100644
5197 +--- a/drivers/net/can/flexcan/flexcan-core.c
5198 ++++ b/drivers/net/can/flexcan/flexcan-core.c
5199 +@@ -941,11 +941,6 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
5200 + u32 reg_ctrl, reg_id, reg_iflag1;
5201 + int i;
5202 +
5203 +- if (unlikely(drop)) {
5204 +- skb = ERR_PTR(-ENOBUFS);
5205 +- goto mark_as_read;
5206 +- }
5207 +-
5208 + mb = flexcan_get_mb(priv, n);
5209 +
5210 + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
5211 +@@ -974,6 +969,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
5212 + reg_ctrl = priv->read(&mb->can_ctrl);
5213 + }
5214 +
5215 ++ if (unlikely(drop)) {
5216 ++ skb = ERR_PTR(-ENOBUFS);
5217 ++ goto mark_as_read;
5218 ++ }
5219 ++
5220 + if (reg_ctrl & FLEXCAN_MB_CNT_EDL)
5221 + skb = alloc_canfd_skb(offload->dev, &cfd);
5222 + else
5223 +diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
5224 +index d3a658b444b5f..092cd51b3926e 100644
5225 +--- a/drivers/net/can/usb/gs_usb.c
5226 ++++ b/drivers/net/can/usb/gs_usb.c
5227 +@@ -824,6 +824,7 @@ static int gs_can_open(struct net_device *netdev)
5228 + flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
5229 +
5230 + /* finally start device */
5231 ++ dev->can.state = CAN_STATE_ERROR_ACTIVE;
5232 + dm->mode = cpu_to_le32(GS_CAN_MODE_START);
5233 + dm->flags = cpu_to_le32(flags);
5234 + rc = usb_control_msg(interface_to_usbdev(dev->iface),
5235 +@@ -835,13 +836,12 @@ static int gs_can_open(struct net_device *netdev)
5236 + if (rc < 0) {
5237 + netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
5238 + kfree(dm);
5239 ++ dev->can.state = CAN_STATE_STOPPED;
5240 + return rc;
5241 + }
5242 +
5243 + kfree(dm);
5244 +
5245 +- dev->can.state = CAN_STATE_ERROR_ACTIVE;
5246 +-
5247 + parent->active_channels++;
5248 + if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
5249 + netif_start_queue(netdev);
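
The gs_usb reordering follows a general rule: mark the device active before issuing the start command, since error events can arrive the instant the command is accepted, and roll the state back if the command fails. A standalone sketch (hypothetical names):

enum can_state { STOPPED, ERROR_ACTIVE };

struct can_dev { enum can_state state; };

static int start_hw(struct can_dev *d) { return 0; }   /* stub: 0 = ok */

static int can_open(struct can_dev *d)
{
    d->state = ERROR_ACTIVE;    /* events may fire right after start */
    if (start_hw(d) < 0) {
        d->state = STOPPED;     /* undo on failure */
        return -1;
    }
    return 0;
}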
5250 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
5251 +index 964354536f9ce..111a952f880ee 100644
5252 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
5253 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
5254 +@@ -662,7 +662,6 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
5255 +
5256 + for (i = 0; i < nr_pkts; i++) {
5257 + struct bnxt_sw_tx_bd *tx_buf;
5258 +- bool compl_deferred = false;
5259 + struct sk_buff *skb;
5260 + int j, last;
5261 +
5262 +@@ -671,6 +670,8 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
5263 + skb = tx_buf->skb;
5264 + tx_buf->skb = NULL;
5265 +
5266 ++ tx_bytes += skb->len;
5267 ++
5268 + if (tx_buf->is_push) {
5269 + tx_buf->is_push = 0;
5270 + goto next_tx_int;
5271 +@@ -691,8 +692,9 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
5272 + }
5273 + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
5274 + if (bp->flags & BNXT_FLAG_CHIP_P5) {
5275 ++ /* PTP worker takes ownership of the skb */
5276 + if (!bnxt_get_tx_ts_p5(bp, skb))
5277 +- compl_deferred = true;
5278 ++ skb = NULL;
5279 + else
5280 + atomic_inc(&bp->ptp_cfg->tx_avail);
5281 + }
5282 +@@ -701,9 +703,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
5283 + next_tx_int:
5284 + cons = NEXT_TX(cons);
5285 +
5286 +- tx_bytes += skb->len;
5287 +- if (!compl_deferred)
5288 +- dev_kfree_skb_any(skb);
5289 ++ dev_kfree_skb_any(skb);
5290 + }
5291 +
5292 + netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
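
The tx_bytes move and the skb = NULL handoff above are one pattern: account the length while the buffer is still guaranteed valid, then clear the local pointer once another owner (the PTP worker) takes it, so the single free at the loop tail stays correct. A runnable userspace model (stand-in names, not the bnxt API):

#include <stdio.h>
#include <stdlib.h>

struct pkt { size_t len; int needs_ts; };

static struct pkt *ptp_slot;                /* stands in for the PTP worker */

static int hand_off_to_ptp(struct pkt *p)   /* 0 = worker took ownership */
{
    if (ptp_slot)
        return -1;                          /* busy: caller keeps the pkt */
    ptp_slot = p;
    return 0;
}

static size_t complete_tx(struct pkt *p)
{
    size_t bytes = p->len;                  /* account before any handoff */

    if (p->needs_ts && hand_off_to_ptp(p) == 0)
        p = NULL;                           /* worker frees it later */
    free(p);                                /* free(NULL) is a no-op */
    return bytes;
}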
5293 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
5294 +index 7f3c0875b6f58..8e316367f6ced 100644
5295 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
5296 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
5297 +@@ -317,9 +317,9 @@ void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp)
5298 +
5299 + if (!(bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) && (ptp->tstamp_filters &
5300 + (PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
5301 +- PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE))) {
5302 ++ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE))) {
5303 + ptp->tstamp_filters &= ~(PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
5304 +- PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE);
5305 ++ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE);
5306 + netdev_warn(bp->dev, "Unsupported FW for all RX pkts timestamp filter\n");
5307 + }
5308 +
5309 +diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
5310 +index a139f2e9d59f0..e0e8dfd137930 100644
5311 +--- a/drivers/net/ethernet/freescale/enetc/Makefile
5312 ++++ b/drivers/net/ethernet/freescale/enetc/Makefile
5313 +@@ -9,7 +9,6 @@ fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
5314 +
5315 + obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
5316 + fsl-enetc-vf-y := enetc_vf.o $(common-objs)
5317 +-fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
5318 +
5319 + obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o
5320 + fsl-enetc-ierb-y := enetc_ierb.o
5321 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
5322 +index 4470a4a3e4c3e..9f5b921039bd4 100644
5323 +--- a/drivers/net/ethernet/freescale/enetc/enetc.c
5324 ++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
5325 +@@ -2432,7 +2432,7 @@ int enetc_close(struct net_device *ndev)
5326 + return 0;
5327 + }
5328 +
5329 +-static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
5330 ++int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
5331 + {
5332 + struct enetc_ndev_priv *priv = netdev_priv(ndev);
5333 + struct tc_mqprio_qopt *mqprio = type_data;
5334 +@@ -2486,25 +2486,6 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
5335 + return 0;
5336 + }
5337 +
5338 +-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
5339 +- void *type_data)
5340 +-{
5341 +- switch (type) {
5342 +- case TC_SETUP_QDISC_MQPRIO:
5343 +- return enetc_setup_tc_mqprio(ndev, type_data);
5344 +- case TC_SETUP_QDISC_TAPRIO:
5345 +- return enetc_setup_tc_taprio(ndev, type_data);
5346 +- case TC_SETUP_QDISC_CBS:
5347 +- return enetc_setup_tc_cbs(ndev, type_data);
5348 +- case TC_SETUP_QDISC_ETF:
5349 +- return enetc_setup_tc_txtime(ndev, type_data);
5350 +- case TC_SETUP_BLOCK:
5351 +- return enetc_setup_tc_psfp(ndev, type_data);
5352 +- default:
5353 +- return -EOPNOTSUPP;
5354 +- }
5355 +-}
5356 +-
5357 + static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
5358 + struct netlink_ext_ack *extack)
5359 + {
5360 +@@ -2600,29 +2581,6 @@ static int enetc_set_rss(struct net_device *ndev, int en)
5361 + return 0;
5362 + }
5363 +
5364 +-static int enetc_set_psfp(struct net_device *ndev, int en)
5365 +-{
5366 +- struct enetc_ndev_priv *priv = netdev_priv(ndev);
5367 +- int err;
5368 +-
5369 +- if (en) {
5370 +- err = enetc_psfp_enable(priv);
5371 +- if (err)
5372 +- return err;
5373 +-
5374 +- priv->active_offloads |= ENETC_F_QCI;
5375 +- return 0;
5376 +- }
5377 +-
5378 +- err = enetc_psfp_disable(priv);
5379 +- if (err)
5380 +- return err;
5381 +-
5382 +- priv->active_offloads &= ~ENETC_F_QCI;
5383 +-
5384 +- return 0;
5385 +-}
5386 +-
5387 + static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
5388 + {
5389 + struct enetc_ndev_priv *priv = netdev_priv(ndev);
5390 +@@ -2641,11 +2599,9 @@ static void enetc_enable_txvlan(struct net_device *ndev, bool en)
5391 + enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
5392 + }
5393 +
5394 +-int enetc_set_features(struct net_device *ndev,
5395 +- netdev_features_t features)
5396 ++void enetc_set_features(struct net_device *ndev, netdev_features_t features)
5397 + {
5398 + netdev_features_t changed = ndev->features ^ features;
5399 +- int err = 0;
5400 +
5401 + if (changed & NETIF_F_RXHASH)
5402 + enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
5403 +@@ -2657,11 +2613,6 @@ int enetc_set_features(struct net_device *ndev,
5404 + if (changed & NETIF_F_HW_VLAN_CTAG_TX)
5405 + enetc_enable_txvlan(ndev,
5406 + !!(features & NETIF_F_HW_VLAN_CTAG_TX));
5407 +-
5408 +- if (changed & NETIF_F_HW_TC)
5409 +- err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
5410 +-
5411 +- return err;
5412 + }
5413 +
5414 + #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
5415 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
5416 +index 29922c20531f0..2cfe6944ebd32 100644
5417 +--- a/drivers/net/ethernet/freescale/enetc/enetc.h
5418 ++++ b/drivers/net/ethernet/freescale/enetc/enetc.h
5419 +@@ -393,11 +393,9 @@ void enetc_start(struct net_device *ndev);
5420 + void enetc_stop(struct net_device *ndev);
5421 + netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
5422 + struct net_device_stats *enetc_get_stats(struct net_device *ndev);
5423 +-int enetc_set_features(struct net_device *ndev,
5424 +- netdev_features_t features);
5425 ++void enetc_set_features(struct net_device *ndev, netdev_features_t features);
5426 + int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
5427 +-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
5428 +- void *type_data);
5429 ++int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data);
5430 + int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp);
5431 + int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
5432 + struct xdp_frame **frames, u32 flags);
5433 +@@ -465,6 +463,7 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
5434 + int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
5435 + int enetc_psfp_init(struct enetc_ndev_priv *priv);
5436 + int enetc_psfp_clean(struct enetc_ndev_priv *priv);
5437 ++int enetc_set_psfp(struct net_device *ndev, bool en);
5438 +
5439 + static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
5440 + {
5441 +@@ -540,4 +539,9 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
5442 + {
5443 + return 0;
5444 + }
5445 ++
5446 ++static inline int enetc_set_psfp(struct net_device *ndev, bool en)
5447 ++{
5448 ++ return 0;
5449 ++}
5450 + #endif
5451 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
5452 +index c4a0e836d4f09..bb7750222691d 100644
5453 +--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
5454 ++++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
5455 +@@ -709,6 +709,13 @@ static int enetc_pf_set_features(struct net_device *ndev,
5456 + {
5457 + netdev_features_t changed = ndev->features ^ features;
5458 + struct enetc_ndev_priv *priv = netdev_priv(ndev);
5459 ++ int err;
5460 ++
5461 ++ if (changed & NETIF_F_HW_TC) {
5462 ++ err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
5463 ++ if (err)
5464 ++ return err;
5465 ++ }
5466 +
5467 + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
5468 + struct enetc_pf *pf = enetc_si_priv(priv->si);
5469 +@@ -722,7 +729,28 @@ static int enetc_pf_set_features(struct net_device *ndev,
5470 + if (changed & NETIF_F_LOOPBACK)
5471 + enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
5472 +
5473 +- return enetc_set_features(ndev, features);
5474 ++ enetc_set_features(ndev, features);
5475 ++
5476 ++ return 0;
5477 ++}
5478 ++
5479 ++static int enetc_pf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
5480 ++ void *type_data)
5481 ++{
5482 ++ switch (type) {
5483 ++ case TC_SETUP_QDISC_MQPRIO:
5484 ++ return enetc_setup_tc_mqprio(ndev, type_data);
5485 ++ case TC_SETUP_QDISC_TAPRIO:
5486 ++ return enetc_setup_tc_taprio(ndev, type_data);
5487 ++ case TC_SETUP_QDISC_CBS:
5488 ++ return enetc_setup_tc_cbs(ndev, type_data);
5489 ++ case TC_SETUP_QDISC_ETF:
5490 ++ return enetc_setup_tc_txtime(ndev, type_data);
5491 ++ case TC_SETUP_BLOCK:
5492 ++ return enetc_setup_tc_psfp(ndev, type_data);
5493 ++ default:
5494 ++ return -EOPNOTSUPP;
5495 ++ }
5496 + }
5497 +
5498 + static const struct net_device_ops enetc_ndev_ops = {
5499 +@@ -739,7 +767,7 @@ static const struct net_device_ops enetc_ndev_ops = {
5500 + .ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk,
5501 + .ndo_set_features = enetc_pf_set_features,
5502 + .ndo_eth_ioctl = enetc_ioctl,
5503 +- .ndo_setup_tc = enetc_setup_tc,
5504 ++ .ndo_setup_tc = enetc_pf_setup_tc,
5505 + .ndo_bpf = enetc_setup_bpf,
5506 + .ndo_xdp_xmit = enetc_xdp_xmit,
5507 + };
5508 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
5509 +index 582a663ed0ba4..f8a2f02ce22de 100644
5510 +--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
5511 ++++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
5512 +@@ -1517,6 +1517,29 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
5513 + }
5514 + }
5515 +
5516 ++int enetc_set_psfp(struct net_device *ndev, bool en)
5517 ++{
5518 ++ struct enetc_ndev_priv *priv = netdev_priv(ndev);
5519 ++ int err;
5520 ++
5521 ++ if (en) {
5522 ++ err = enetc_psfp_enable(priv);
5523 ++ if (err)
5524 ++ return err;
5525 ++
5526 ++ priv->active_offloads |= ENETC_F_QCI;
5527 ++ return 0;
5528 ++ }
5529 ++
5530 ++ err = enetc_psfp_disable(priv);
5531 ++ if (err)
5532 ++ return err;
5533 ++
5534 ++ priv->active_offloads &= ~ENETC_F_QCI;
5535 ++
5536 ++ return 0;
5537 ++}
5538 ++
5539 + int enetc_psfp_init(struct enetc_ndev_priv *priv)
5540 + {
5541 + if (epsfp.psfp_sfi_bitmap)
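
The enetc_set_psfp() helper above follows a common enable/disable shape: attempt the hardware operation first, and only flip the software flag (ENETC_F_QCI in active_offloads) once it succeeds, so the flag never claims an offload the hardware refused. A minimal user-space sketch of that shape; the hook names and the flag value are stand-ins, not the driver's:

#include <stdio.h>

#define F_QCI (1u << 3)                /* stand-in for ENETC_F_QCI */

static unsigned int active_offloads;

/* Hypothetical hardware hooks; the driver calls
 * enetc_psfp_enable()/enetc_psfp_disable() here. */
static int hw_psfp_enable(void)  { return 0; }
static int hw_psfp_disable(void) { return 0; }

static int set_psfp(int en)
{
	int err;

	if (en) {
		err = hw_psfp_enable();
		if (err)
			return err;       /* flag untouched on failure */
		active_offloads |= F_QCI;
		return 0;
	}

	err = hw_psfp_disable();
	if (err)
		return err;
	active_offloads &= ~F_QCI;

	return 0;
}

int main(void)
{
	set_psfp(1);
	printf("QCI active: %u\n", !!(active_offloads & F_QCI));
	set_psfp(0);
	printf("QCI active: %u\n", !!(active_offloads & F_QCI));
	return 0;
}

The PF's ndo_set_features handler calls this before touching any other feature bit, so a failed PSFP toggle aborts the whole request.
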
5542 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
5543 +index 17924305afa2f..dfcaac302e245 100644
5544 +--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
5545 ++++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
5546 +@@ -88,7 +88,20 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
5547 + static int enetc_vf_set_features(struct net_device *ndev,
5548 + netdev_features_t features)
5549 + {
5550 +- return enetc_set_features(ndev, features);
5551 ++ enetc_set_features(ndev, features);
5552 ++
5553 ++ return 0;
5554 ++}
5555 ++
5556 ++static int enetc_vf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
5557 ++ void *type_data)
5558 ++{
5559 ++ switch (type) {
5560 ++ case TC_SETUP_QDISC_MQPRIO:
5561 ++ return enetc_setup_tc_mqprio(ndev, type_data);
5562 ++ default:
5563 ++ return -EOPNOTSUPP;
5564 ++ }
5565 + }
5566 +
5567 + /* Probing/ Init */
5568 +@@ -100,7 +113,7 @@ static const struct net_device_ops enetc_ndev_ops = {
5569 + .ndo_set_mac_address = enetc_vf_set_mac_addr,
5570 + .ndo_set_features = enetc_vf_set_features,
5571 + .ndo_eth_ioctl = enetc_ioctl,
5572 +- .ndo_setup_tc = enetc_setup_tc,
5573 ++ .ndo_setup_tc = enetc_vf_setup_tc,
5574 + };
5575 +
5576 + static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
5577 +diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
5578 +index 8c939628e2d85..2e6461b0ea8bc 100644
5579 +--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
5580 ++++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
5581 +@@ -157,7 +157,7 @@ static int gve_alloc_page_dqo(struct gve_priv *priv,
5582 + int err;
5583 +
5584 + err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
5585 +- &buf_state->addr, DMA_FROM_DEVICE, GFP_KERNEL);
5586 ++ &buf_state->addr, DMA_FROM_DEVICE, GFP_ATOMIC);
5587 + if (err)
5588 + return err;
5589 +
5590 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
5591 +index 1aaf0c5ddf6cf..57e27f2024d38 100644
5592 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
5593 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
5594 +@@ -5785,6 +5785,26 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi)
5595 + }
5596 + }
5597 +
5598 ++/**
5599 ++ * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
5600 ++ * @vsi: Pointer to vsi structure
5601 ++ * @max_tx_rate: max TX rate in bytes to be converted into Mbits
5602 ++ *
5603 ++ * Helper function to convert units before send to set BW limit
5604 ++ **/
5605 ++static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
5606 ++{
5607 ++ if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
5608 ++ dev_warn(&vsi->back->pdev->dev,
5609 ++ "Setting max tx rate to minimum usable value of 50Mbps.\n");
5610 ++ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
5611 ++ } else {
5612 ++ do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
5613 ++ }
5614 ++
5615 ++ return max_tx_rate;
5616 ++}
5617 ++
5618 + /**
5619 + * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5620 + * @vsi: VSI to be configured
5621 +@@ -5807,10 +5827,10 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5622 + max_tx_rate, seid);
5623 + return -EINVAL;
5624 + }
5625 +- if (max_tx_rate && max_tx_rate < 50) {
5626 ++ if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
5627 + dev_warn(&pf->pdev->dev,
5628 + "Setting max tx rate to minimum usable value of 50Mbps.\n");
5629 +- max_tx_rate = 50;
5630 ++ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
5631 + }
5632 +
5633 + /* Tx rate credits are in values of 50Mbps, 0 is disabled */
5634 +@@ -8101,9 +8121,9 @@ config_tc:
5635 +
5636 + if (i40e_is_tc_mqprio_enabled(pf)) {
5637 + if (vsi->mqprio_qopt.max_rate[0]) {
5638 +- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
5639 ++ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
5640 ++ vsi->mqprio_qopt.max_rate[0]);
5641 +
5642 +- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
5643 + ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
5644 + if (!ret) {
5645 + u64 credits = max_tx_rate;
5646 +@@ -10848,10 +10868,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
5647 + }
5648 +
5649 + if (vsi->mqprio_qopt.max_rate[0]) {
5650 +- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
5651 ++ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
5652 ++ vsi->mqprio_qopt.max_rate[0]);
5653 + u64 credits = 0;
5654 +
5655 +- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
5656 + ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
5657 + if (ret)
5658 + goto end_unlock;
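
The new i40e_bw_bytes_to_mbits() helper centralizes a conversion that both call sites previously open-coded: mqprio hands the driver max_rate in bytes per second, while the firmware wants megabits per second. A standalone sketch of the arithmetic, assuming I40E_BW_MBPS_DIVISOR is 125000 (bytes/s in one Mbit/s) and I40E_BW_CREDIT_DIVISOR is 50 (the 50 Mbps floor); both values are assumptions here, the patch itself does not show them:

#include <stdint.h>
#include <stdio.h>

#define BW_MBPS_DIVISOR   125000ULL  /* assumed: bytes/s per Mbit/s */
#define BW_CREDIT_DIVISOR 50ULL      /* assumed: minimum rate, Mbps */

/* Mirror of the helper above: rates below one Mbit/s would divide
 * to zero, so they are raised to the 50 Mbps minimum instead. */
static uint64_t bw_bytes_to_mbits(uint64_t max_tx_rate)
{
	if (max_tx_rate < BW_MBPS_DIVISOR)
		return BW_CREDIT_DIVISOR;
	return max_tx_rate / BW_MBPS_DIVISOR;
}

int main(void)
{
	/* 1 Gbit/s expressed in bytes per second. */
	printf("%llu Mbps\n",
	       (unsigned long long)bw_bytes_to_mbits(125000000ULL));
	/* A tiny rate is clamped up to the floor. */
	printf("%llu Mbps\n",
	       (unsigned long long)bw_bytes_to_mbits(1000ULL));
	return 0;
}

Rates between 1 and 50 Mbps pass through this helper unchanged and are caught by the second clamp in i40e_set_bw_limit(), visible in the hunk above.
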
5659 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
5660 +index 86b0f21287dc8..67fbaaad39859 100644
5661 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
5662 ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
5663 +@@ -2038,6 +2038,25 @@ static void i40e_del_qch(struct i40e_vf *vf)
5664 + }
5665 + }
5666 +
5667 ++/**
5668 ++ * i40e_vc_get_max_frame_size
5669 ++ * @vf: pointer to the VF
5670 ++ *
5671 ++ * Max frame size is determined based on the current port's max frame size and
5672 ++ * whether a port VLAN is configured on this VF. The VF is not aware whether
5673 ++ * it's in a port VLAN so the PF needs to account for this in max frame size
5674 ++ * checks and sending the max frame size to the VF.
5675 ++ **/
5676 ++static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
5677 ++{
5678 ++ u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
5679 ++
5680 ++ if (vf->port_vlan_id)
5681 ++ max_frame_size -= VLAN_HLEN;
5682 ++
5683 ++ return max_frame_size;
5684 ++}
5685 ++
5686 + /**
5687 + * i40e_vc_get_vf_resources_msg
5688 + * @vf: pointer to the VF info
5689 +@@ -2139,6 +2158,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
5690 + vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
5691 + vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
5692 + vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
5693 ++ vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
5694 +
5695 + if (vf->lan_vsi_idx) {
5696 + vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
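
i40e_vc_get_max_frame_size() encodes a detail the VF cannot discover on its own: when the PF has configured a port VLAN, every frame the VF sends grows by a 4-byte 802.1Q tag (VLAN_HLEN), so the limit advertised through max_mtu must shrink by that much. A minimal sketch:

#include <stdint.h>
#include <stdio.h>

#define VLAN_HLEN 4  /* 802.1Q tag: 2-byte TPID + 2-byte TCI */

static uint16_t vf_max_frame_size(uint16_t port_max_frame,
				  uint16_t port_vlan_id)
{
	/* The VF does not know it sits in a port VLAN, so the PF
	 * reserves room for the tag on the VF's behalf. */
	if (port_vlan_id)
		port_max_frame -= VLAN_HLEN;

	return port_max_frame;
}

int main(void)
{
	printf("%u\n", vf_max_frame_size(9728, 0));    /* untagged: 9728 */
	printf("%u\n", vf_max_frame_size(9728, 100));  /* tagged:   9724 */
	return 0;
}
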
5697 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
5698 +index 06d18797d25a2..18b6a702a1d6d 100644
5699 +--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
5700 ++++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
5701 +@@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
5702 + {
5703 + u32 head, tail;
5704 +
5705 ++ /* underlying hardware might not allow access and/or always return
5706 ++ * 0 for the head/tail registers so just use the cached values
5707 ++ */
5708 + head = ring->next_to_clean;
5709 +- tail = readl(ring->tail);
5710 ++ tail = ring->next_to_use;
5711 +
5712 + if (head != tail)
5713 + return (head < tail) ?
5714 +@@ -1390,7 +1393,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
5715 + #endif
5716 + struct sk_buff *skb;
5717 +
5718 +- if (!rx_buffer)
5719 ++ if (!rx_buffer || !size)
5720 + return NULL;
5721 + /* prefetch first cache line of first page */
5722 + va = page_address(rx_buffer->page) + rx_buffer->page_offset;
5723 +@@ -1548,7 +1551,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
5724 + /* exit if we failed to retrieve a buffer */
5725 + if (!skb) {
5726 + rx_ring->rx_stats.alloc_buff_failed++;
5727 +- if (rx_buffer)
5728 ++ if (rx_buffer && size)
5729 + rx_buffer->pagecnt_bias++;
5730 + break;
5731 + }
5732 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
5733 +index 1603e99bae4af..498797a0a0a95 100644
5734 +--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
5735 ++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
5736 +@@ -273,11 +273,14 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
5737 + void iavf_configure_queues(struct iavf_adapter *adapter)
5738 + {
5739 + struct virtchnl_vsi_queue_config_info *vqci;
5740 +- struct virtchnl_queue_pair_info *vqpi;
5741 ++ int i, max_frame = adapter->vf_res->max_mtu;
5742 + int pairs = adapter->num_active_queues;
5743 +- int i, max_frame = IAVF_MAX_RXBUFFER;
5744 ++ struct virtchnl_queue_pair_info *vqpi;
5745 + size_t len;
5746 +
5747 ++ if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
5748 ++ max_frame = IAVF_MAX_RXBUFFER;
5749 ++
5750 + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
5751 + /* bail because we already have a command pending */
5752 + dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
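
On the VF side, iavf now starts from the max_mtu the PF advertises (the virtchnl field added above) instead of always assuming the ring limit, but it still clamps: zero means an older PF never filled the field in, and anything larger than the Rx buffer cannot be honored anyway. A sketch of the clamp, with 9728 assumed for IAVF_MAX_RXBUFFER:

#include <stdio.h>

#define MAX_RXBUFFER 9728  /* assumed value of IAVF_MAX_RXBUFFER */

static int effective_max_frame(int advertised)
{
	/* 0: PF reported nothing; too large: Rx buffers can't hold it. */
	if (advertised > MAX_RXBUFFER || !advertised)
		return MAX_RXBUFFER;

	return advertised;
}

int main(void)
{
	printf("%d\n", effective_max_frame(0));      /* legacy PF -> 9728 */
	printf("%d\n", effective_max_frame(1518));   /* honored   -> 1518 */
	printf("%d\n", effective_max_frame(65536));  /* clamped   -> 9728 */
	return 0;
}
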
5753 +diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
5754 +index 6c4e1d45235ef..1169fd7811b09 100644
5755 +--- a/drivers/net/ethernet/intel/ice/ice_lib.c
5756 ++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
5757 +@@ -911,7 +911,7 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
5758 + */
5759 + static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
5760 + {
5761 +- u16 offset = 0, qmap = 0, tx_count = 0, pow = 0;
5762 ++ u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
5763 + u16 num_txq_per_tc, num_rxq_per_tc;
5764 + u16 qcount_tx = vsi->alloc_txq;
5765 + u16 qcount_rx = vsi->alloc_rxq;
5766 +@@ -978,23 +978,25 @@ static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
5767 + * at least 1)
5768 + */
5769 + if (offset)
5770 +- vsi->num_rxq = offset;
5771 ++ rx_count = offset;
5772 + else
5773 +- vsi->num_rxq = num_rxq_per_tc;
5774 ++ rx_count = num_rxq_per_tc;
5775 +
5776 +- if (vsi->num_rxq > vsi->alloc_rxq) {
5777 ++ if (rx_count > vsi->alloc_rxq) {
5778 + dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
5779 +- vsi->num_rxq, vsi->alloc_rxq);
5780 ++ rx_count, vsi->alloc_rxq);
5781 + return -EINVAL;
5782 + }
5783 +
5784 +- vsi->num_txq = tx_count;
5785 +- if (vsi->num_txq > vsi->alloc_txq) {
5786 ++ if (tx_count > vsi->alloc_txq) {
5787 + dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
5788 +- vsi->num_txq, vsi->alloc_txq);
5789 ++ tx_count, vsi->alloc_txq);
5790 + return -EINVAL;
5791 + }
5792 +
5793 ++ vsi->num_txq = tx_count;
5794 ++ vsi->num_rxq = rx_count;
5795 ++
5796 + if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
5797 + dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
5798 + /* since there is a chance that num_rxq could have been changed
5799 +@@ -3487,6 +3489,7 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
5800 + u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
5801 + u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
5802 + int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
5803 ++ u16 new_txq, new_rxq;
5804 + u8 netdev_tc = 0;
5805 + int i;
5806 +
5807 +@@ -3527,21 +3530,24 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
5808 + }
5809 + }
5810 +
5811 +- /* Set actual Tx/Rx queue pairs */
5812 +- vsi->num_txq = offset + qcount_tx;
5813 +- if (vsi->num_txq > vsi->alloc_txq) {
5814 ++ new_txq = offset + qcount_tx;
5815 ++ if (new_txq > vsi->alloc_txq) {
5816 + dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
5817 +- vsi->num_txq, vsi->alloc_txq);
5818 ++ new_txq, vsi->alloc_txq);
5819 + return -EINVAL;
5820 + }
5821 +
5822 +- vsi->num_rxq = offset + qcount_rx;
5823 +- if (vsi->num_rxq > vsi->alloc_rxq) {
5824 ++ new_rxq = offset + qcount_rx;
5825 ++ if (new_rxq > vsi->alloc_rxq) {
5826 + dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
5827 +- vsi->num_rxq, vsi->alloc_rxq);
5828 ++ new_rxq, vsi->alloc_rxq);
5829 + return -EINVAL;
5830 + }
5831 +
5832 ++ /* Set actual Tx/Rx queue pairs */
5833 ++ vsi->num_txq = new_txq;
5834 ++ vsi->num_rxq = new_rxq;
5835 ++
5836 + /* Setup queue TC[0].qmap for given VSI context */
5837 + ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
5838 + ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
5839 +@@ -3573,6 +3579,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
5840 + {
5841 + u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
5842 + struct ice_pf *pf = vsi->back;
5843 ++ struct ice_tc_cfg old_tc_cfg;
5844 + struct ice_vsi_ctx *ctx;
5845 + struct device *dev;
5846 + int i, ret = 0;
5847 +@@ -3597,6 +3604,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
5848 + max_txqs[i] = vsi->num_txq;
5849 + }
5850 +
5851 ++ memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
5852 + vsi->tc_cfg.ena_tc = ena_tc;
5853 + vsi->tc_cfg.numtc = num_tc;
5854 +
5855 +@@ -3613,8 +3621,10 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
5856 + else
5857 + ret = ice_vsi_setup_q_map(vsi, ctx);
5858 +
5859 +- if (ret)
5860 ++ if (ret) {
5861 ++ memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
5862 + goto out;
5863 ++ }
5864 +
5865 + /* must to indicate which section of VSI context are being modified */
5866 + ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
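
Both ice_lib.c hunks apply the same defensive pattern: compute the new Tx/Rx queue counts into locals, validate them against what was allocated, and only then commit to vsi->num_txq/num_rxq; ice_vsi_cfg_tc() additionally snapshots the old TC config so a failed queue-map setup can restore it. A compact sketch of validate-then-commit:

#include <stdio.h>
#include <string.h>

struct vsi {
	unsigned int num_txq, num_rxq;    /* committed state */
	unsigned int alloc_txq, alloc_rxq;
};

/* On failure the vsi is untouched, mirroring the tx_count/rx_count
 * locals introduced in the hunks above. */
static int setup_q_map(struct vsi *vsi, unsigned int tx_count,
		       unsigned int rx_count)
{
	if (tx_count > vsi->alloc_txq || rx_count > vsi->alloc_rxq)
		return -1;                /* reject before mutating */

	vsi->num_txq = tx_count;
	vsi->num_rxq = rx_count;
	return 0;
}

int main(void)
{
	struct vsi v = { .num_txq = 4, .num_rxq = 4,
			 .alloc_txq = 8, .alloc_rxq = 8 };
	struct vsi saved = v;             /* like the old_tc_cfg copy */

	if (setup_q_map(&v, 16, 16))      /* too many: fails cleanly */
		memcpy(&v, &saved, sizeof(v));

	printf("txq=%u rxq=%u\n", v.num_txq, v.num_rxq);  /* 4 4 */
	return 0;
}
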
5867 +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
5868 +index 4c6bb7482b362..48befe1e2872c 100644
5869 +--- a/drivers/net/ethernet/intel/ice/ice_main.c
5870 ++++ b/drivers/net/ethernet/intel/ice/ice_main.c
5871 +@@ -2399,8 +2399,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
5872 + return -EBUSY;
5873 + }
5874 +
5875 +- ice_unplug_aux_dev(pf);
5876 +-
5877 + switch (reset) {
5878 + case ICE_RESET_PFR:
5879 + set_bit(ICE_PFR_REQ, pf->state);
5880 +@@ -6629,7 +6627,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
5881 + */
5882 + int ice_down(struct ice_vsi *vsi)
5883 + {
5884 +- int i, tx_err, rx_err, link_err = 0, vlan_err = 0;
5885 ++ int i, tx_err, rx_err, vlan_err = 0;
5886 +
5887 + WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
5888 +
5889 +@@ -6663,20 +6661,13 @@ int ice_down(struct ice_vsi *vsi)
5890 +
5891 + ice_napi_disable_all(vsi);
5892 +
5893 +- if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
5894 +- link_err = ice_force_phys_link_state(vsi, false);
5895 +- if (link_err)
5896 +- netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
5897 +- vsi->vsi_num, link_err);
5898 +- }
5899 +-
5900 + ice_for_each_txq(vsi, i)
5901 + ice_clean_tx_ring(vsi->tx_rings[i]);
5902 +
5903 + ice_for_each_rxq(vsi, i)
5904 + ice_clean_rx_ring(vsi->rx_rings[i]);
5905 +
5906 +- if (tx_err || rx_err || link_err || vlan_err) {
5907 ++ if (tx_err || rx_err || vlan_err) {
5908 + netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
5909 + vsi->vsi_num, vsi->vsw->sw_id);
5910 + return -EIO;
5911 +@@ -6838,6 +6829,8 @@ int ice_vsi_open(struct ice_vsi *vsi)
5912 + if (err)
5913 + goto err_setup_rx;
5914 +
5915 ++ ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
5916 ++
5917 + if (vsi->type == ICE_VSI_PF) {
5918 + /* Notify the stack of the actual queue counts. */
5919 + err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
5920 +@@ -8876,6 +8869,16 @@ int ice_stop(struct net_device *netdev)
5921 + return -EBUSY;
5922 + }
5923 +
5924 ++ if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
5925 ++ int link_err = ice_force_phys_link_state(vsi, false);
5926 ++
5927 ++ if (link_err) {
5928 ++ netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
5929 ++ vsi->vsi_num, link_err);
5930 ++ return -EIO;
5931 ++ }
5932 ++ }
5933 ++
5934 + ice_vsi_close(vsi);
5935 +
5936 + return 0;
5937 +diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
5938 +index 836dce8407124..97453d1dfafed 100644
5939 +--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
5940 ++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
5941 +@@ -610,7 +610,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
5942 + if (test_bit(ICE_VSI_DOWN, vsi->state))
5943 + return -ENETDOWN;
5944 +
5945 +- if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
5946 ++ if (!ice_is_xdp_ena_vsi(vsi))
5947 + return -ENXIO;
5948 +
5949 + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
5950 +@@ -621,6 +621,9 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
5951 + xdp_ring = vsi->xdp_rings[queue_index];
5952 + spin_lock(&xdp_ring->tx_lock);
5953 + } else {
5954 ++ /* Generally, should not happen */
5955 ++ if (unlikely(queue_index >= vsi->num_xdp_txq))
5956 ++ return -ENXIO;
5957 + xdp_ring = vsi->xdp_rings[queue_index];
5958 + }
5959 +
5960 +diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
5961 +index 85155cd9405c5..4aeb927c37153 100644
5962 +--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
5963 ++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
5964 +@@ -179,6 +179,9 @@ static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg)
5965 + /* Only return ad bits of the gw register */
5966 + ret &= MLXBF_GIGE_MDIO_GW_AD_MASK;
5967 +
5968 ++ /* The MDIO lock is set on read. To release it, clear gw register */
5969 ++ writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
5970 ++
5971 + return ret;
5972 + }
5973 +
5974 +@@ -203,6 +206,9 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
5975 + temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK),
5976 + 5, 1000000);
5977 +
5978 ++ /* The MDIO lock is set on read. To release it, clear gw register */
5979 ++ writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
5980 ++
5981 + return ret;
5982 + }
5983 +
5984 +diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
5985 +index 49b85ca578b01..9820efce72ffe 100644
5986 +--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
5987 ++++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
5988 +@@ -370,6 +370,11 @@ static void mana_gd_process_eq_events(void *arg)
5989 + break;
5990 + }
5991 +
5992 ++ /* Per GDMA spec, rmb is necessary after checking owner_bits, before
5993 ++ * reading eqe.
5994 ++ */
5995 ++ rmb();
5996 ++
5997 + mana_gd_process_eqe(eq);
5998 +
5999 + eq->head++;
6000 +@@ -1107,6 +1112,11 @@ static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
6001 + if (WARN_ON_ONCE(owner_bits != new_bits))
6002 + return -1;
6003 +
6004 ++ /* Per GDMA spec, rmb is necessary after checking owner_bits, before
6005 ++ * reading completion info
6006 ++ */
6007 ++ rmb();
6008 ++
6009 + comp->wq_num = cqe->cqe_info.wq_num;
6010 + comp->is_sq = cqe->cqe_info.is_sq;
6011 + memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);
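
Both GDMA hunks insert a read barrier between the owner-bit check and the reads of the descriptor body: without it, the CPU may load the EQE/CQE payload speculatively, before ownership is confirmed, and observe a stale entry. A user-space analogue using C11 fences; the kernel uses rmb(), and the point here is the placement of the barrier, not the exact primitive:

#include <stdatomic.h>
#include <stdio.h>

struct entry {
	_Atomic unsigned int owner;  /* written last by the producer */
	unsigned int payload;
};

static int consume(struct entry *e, unsigned int expected_owner)
{
	if (atomic_load_explicit(&e->owner,
				 memory_order_relaxed) != expected_owner)
		return -1;           /* not ours yet */

	/* Analogue of the rmb(): order the owner check before the
	 * payload read so a half-written entry is never observed. */
	atomic_thread_fence(memory_order_acquire);

	printf("payload=%u\n", e->payload);
	return 0;
}

int main(void)
{
	struct entry e = { .payload = 42 };

	/* Producer publishes the payload first, owner bits last. */
	atomic_store_explicit(&e.owner, 1, memory_order_release);

	return consume(&e, 1);
}
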
6012 +diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
6013 +index b357ac4c56c59..7e32b04eb0c75 100644
6014 +--- a/drivers/net/ethernet/renesas/ravb_main.c
6015 ++++ b/drivers/net/ethernet/renesas/ravb_main.c
6016 +@@ -1449,6 +1449,8 @@ static int ravb_phy_init(struct net_device *ndev)
6017 + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
6018 + }
6019 +
6020 ++ /* Indicate that the MAC is responsible for managing PHY PM */
6021 ++ phydev->mac_managed_pm = true;
6022 + phy_attached_info(phydev);
6023 +
6024 + return 0;
6025 +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
6026 +index 67ade78fb7671..7fd8828d3a846 100644
6027 +--- a/drivers/net/ethernet/renesas/sh_eth.c
6028 ++++ b/drivers/net/ethernet/renesas/sh_eth.c
6029 +@@ -2029,6 +2029,8 @@ static int sh_eth_phy_init(struct net_device *ndev)
6030 + if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
6031 + phy_set_max_speed(phydev, SPEED_100);
6032 +
6033 ++ /* Indicate that the MAC is responsible for managing PHY PM */
6034 ++ phydev->mac_managed_pm = true;
6035 + phy_attached_info(phydev);
6036 +
6037 + return 0;
6038 +diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
6039 +index 032b8c0bd7889..5b4d661ab9867 100644
6040 +--- a/drivers/net/ethernet/sfc/efx_channels.c
6041 ++++ b/drivers/net/ethernet/sfc/efx_channels.c
6042 +@@ -319,7 +319,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
6043 + efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
6044 + efx->n_rx_channels = 1;
6045 + efx->n_tx_channels = 1;
6046 +- efx->tx_channel_offset = 1;
6047 ++ efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0;
6048 + efx->n_xdp_channels = 0;
6049 + efx->xdp_channel_offset = efx->n_channels;
6050 + efx->legacy_irq = efx->pci_dev->irq;
6051 +diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
6052 +index 017212a40df38..f54ebd0072868 100644
6053 +--- a/drivers/net/ethernet/sfc/siena/efx_channels.c
6054 ++++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
6055 +@@ -320,7 +320,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
6056 + efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 1 : 0);
6057 + efx->n_rx_channels = 1;
6058 + efx->n_tx_channels = 1;
6059 +- efx->tx_channel_offset = 1;
6060 ++ efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0;
6061 + efx->n_xdp_channels = 0;
6062 + efx->xdp_channel_offset = efx->n_channels;
6063 + efx->legacy_irq = efx->pci_dev->irq;
6064 +diff --git a/drivers/net/ethernet/sfc/siena/tx.c b/drivers/net/ethernet/sfc/siena/tx.c
6065 +index e166dcb9b99ce..91e87594ed1ea 100644
6066 +--- a/drivers/net/ethernet/sfc/siena/tx.c
6067 ++++ b/drivers/net/ethernet/sfc/siena/tx.c
6068 +@@ -336,7 +336,7 @@ netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb,
6069 + * previous packets out.
6070 + */
6071 + if (!netdev_xmit_more())
6072 +- efx_tx_send_pending(tx_queue->channel);
6073 ++ efx_tx_send_pending(efx_get_tx_channel(efx, index));
6074 + return NETDEV_TX_OK;
6075 + }
6076 +
6077 +diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
6078 +index 138bca6113415..80ed7f760bd30 100644
6079 +--- a/drivers/net/ethernet/sfc/tx.c
6080 ++++ b/drivers/net/ethernet/sfc/tx.c
6081 +@@ -549,7 +549,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
6082 + * previous packets out.
6083 + */
6084 + if (!netdev_xmit_more())
6085 +- efx_tx_send_pending(tx_queue->channel);
6086 ++ efx_tx_send_pending(efx_get_tx_channel(efx, index));
6087 + return NETDEV_TX_OK;
6088 + }
6089 +
6090 +diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
6091 +index 8594ee839628b..88aa0d310aeef 100644
6092 +--- a/drivers/net/ethernet/sun/sunhme.c
6093 ++++ b/drivers/net/ethernet/sun/sunhme.c
6094 +@@ -2020,9 +2020,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
6095 +
6096 + skb_reserve(copy_skb, 2);
6097 + skb_put(copy_skb, len);
6098 +- dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
6099 ++ dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
6100 + skb_copy_from_linear_data(skb, copy_skb->data, len);
6101 +- dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
6102 ++ dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
6103 + /* Reuse original ring buffer. */
6104 + hme_write_rxd(hp, this,
6105 + (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
6106 +diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
6107 +index ec010cf2e816a..6f874f99b910c 100644
6108 +--- a/drivers/net/ipa/ipa_qmi.c
6109 ++++ b/drivers/net/ipa/ipa_qmi.c
6110 +@@ -308,12 +308,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
6111 + mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE);
6112 + req.v4_route_tbl_info_valid = 1;
6113 + req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
6114 +- req.v4_route_tbl_info.count = mem->size / sizeof(__le64);
6115 ++ req.v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
6116 +
6117 + mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE);
6118 + req.v6_route_tbl_info_valid = 1;
6119 + req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
6120 +- req.v6_route_tbl_info.count = mem->size / sizeof(__le64);
6121 ++ req.v6_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
6122 +
6123 + mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER);
6124 + req.v4_filter_tbl_start_valid = 1;
6125 +@@ -352,7 +352,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
6126 + req.v4_hash_route_tbl_info_valid = 1;
6127 + req.v4_hash_route_tbl_info.start =
6128 + ipa->mem_offset + mem->offset;
6129 +- req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64);
6130 ++ req.v4_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
6131 + }
6132 +
6133 + mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED);
6134 +@@ -360,7 +360,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
6135 + req.v6_hash_route_tbl_info_valid = 1;
6136 + req.v6_hash_route_tbl_info.start =
6137 + ipa->mem_offset + mem->offset;
6138 +- req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64);
6139 ++ req.v6_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
6140 + }
6141 +
6142 + mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED);
6143 +diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
6144 +index 6838e8065072b..75d3fc0092e92 100644
6145 +--- a/drivers/net/ipa/ipa_qmi_msg.c
6146 ++++ b/drivers/net/ipa/ipa_qmi_msg.c
6147 +@@ -311,7 +311,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
6148 + .tlv_type = 0x12,
6149 + .offset = offsetof(struct ipa_init_modem_driver_req,
6150 + v4_route_tbl_info),
6151 +- .ei_array = ipa_mem_array_ei,
6152 ++ .ei_array = ipa_mem_bounds_ei,
6153 + },
6154 + {
6155 + .data_type = QMI_OPT_FLAG,
6156 +@@ -332,7 +332,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
6157 + .tlv_type = 0x13,
6158 + .offset = offsetof(struct ipa_init_modem_driver_req,
6159 + v6_route_tbl_info),
6160 +- .ei_array = ipa_mem_array_ei,
6161 ++ .ei_array = ipa_mem_bounds_ei,
6162 + },
6163 + {
6164 + .data_type = QMI_OPT_FLAG,
6165 +@@ -496,7 +496,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
6166 + .tlv_type = 0x1b,
6167 + .offset = offsetof(struct ipa_init_modem_driver_req,
6168 + v4_hash_route_tbl_info),
6169 +- .ei_array = ipa_mem_array_ei,
6170 ++ .ei_array = ipa_mem_bounds_ei,
6171 + },
6172 + {
6173 + .data_type = QMI_OPT_FLAG,
6174 +@@ -517,7 +517,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
6175 + .tlv_type = 0x1c,
6176 + .offset = offsetof(struct ipa_init_modem_driver_req,
6177 + v6_hash_route_tbl_info),
6178 +- .ei_array = ipa_mem_array_ei,
6179 ++ .ei_array = ipa_mem_bounds_ei,
6180 + },
6181 + {
6182 + .data_type = QMI_OPT_FLAG,
6183 +diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
6184 +index 495e85abe50bd..9651aa59b5968 100644
6185 +--- a/drivers/net/ipa/ipa_qmi_msg.h
6186 ++++ b/drivers/net/ipa/ipa_qmi_msg.h
6187 +@@ -86,9 +86,11 @@ enum ipa_platform_type {
6188 + IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 0x5, /* QNX MSM */
6189 + };
6190 +
6191 +-/* This defines the start and end offset of a range of memory. Both
6192 +- * fields are offsets relative to the start of IPA shared memory.
6193 +- * The end value is the last addressable byte *within* the range.
6194 ++/* This defines the start and end offset of a range of memory. The start
6195 ++ * value is a byte offset relative to the start of IPA shared memory. The
6196 ++ * end value is the last addressable unit *within* the range. Typically
6197 ++ * the end value is in units of bytes, however it can also be a maximum
6198 ++ * array index value.
6199 + */
6200 + struct ipa_mem_bounds {
6201 + u32 start;
6202 +@@ -129,18 +131,19 @@ struct ipa_init_modem_driver_req {
6203 + u8 hdr_tbl_info_valid;
6204 + struct ipa_mem_bounds hdr_tbl_info;
6205 +
6206 +- /* Routing table information. These define the location and size of
6207 +- * non-hashable IPv4 and IPv6 filter tables. The start values are
6208 +- * offsets relative to the start of IPA shared memory.
6209 ++ /* Routing table information. These define the location and maximum
6210 ++ * *index* (not byte) for the modem portion of non-hashable IPv4 and
6211 ++ * IPv6 routing tables. The start values are byte offsets relative
6212 ++ * to the start of IPA shared memory.
6213 + */
6214 + u8 v4_route_tbl_info_valid;
6215 +- struct ipa_mem_array v4_route_tbl_info;
6216 ++ struct ipa_mem_bounds v4_route_tbl_info;
6217 + u8 v6_route_tbl_info_valid;
6218 +- struct ipa_mem_array v6_route_tbl_info;
6219 ++ struct ipa_mem_bounds v6_route_tbl_info;
6220 +
6221 + /* Filter table information. These define the location of the
6222 + * non-hashable IPv4 and IPv6 filter tables. The start values are
6223 +- * offsets relative to the start of IPA shared memory.
6224 ++ * byte offsets relative to the start of IPA shared memory.
6225 + */
6226 + u8 v4_filter_tbl_start_valid;
6227 + u32 v4_filter_tbl_start;
6228 +@@ -181,18 +184,20 @@ struct ipa_init_modem_driver_req {
6229 + u8 zip_tbl_info_valid;
6230 + struct ipa_mem_bounds zip_tbl_info;
6231 +
6232 +- /* Routing table information. These define the location and size
6233 +- * of hashable IPv4 and IPv6 filter tables. The start values are
6234 +- * offsets relative to the start of IPA shared memory.
6235 ++ /* Routing table information. These define the location and maximum
6236 ++ * *index* (not byte) for the modem portion of hashable IPv4 and IPv6
6237 ++ * routing tables (if supported by hardware). The start values are
6238 ++ * byte offsets relative to the start of IPA shared memory.
6239 + */
6240 + u8 v4_hash_route_tbl_info_valid;
6241 +- struct ipa_mem_array v4_hash_route_tbl_info;
6242 ++ struct ipa_mem_bounds v4_hash_route_tbl_info;
6243 + u8 v6_hash_route_tbl_info_valid;
6244 +- struct ipa_mem_array v6_hash_route_tbl_info;
6245 ++ struct ipa_mem_bounds v6_hash_route_tbl_info;
6246 +
6247 + /* Filter table information. These define the location and size
6248 +- * of hashable IPv4 and IPv6 filter tables. The start values are
6249 +- * offsets relative to the start of IPA shared memory.
6250 ++ * of hashable IPv4 and IPv6 filter tables (if supported by hardware).
6251 ++ * The start values are byte offsets relative to the start of IPA
6252 ++ * shared memory.
6253 + */
6254 + u8 v4_hash_filter_tbl_start_valid;
6255 + u32 v4_hash_filter_tbl_start;
6256 +diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
6257 +index 2f5a58bfc529a..69efe672ca528 100644
6258 +--- a/drivers/net/ipa/ipa_table.c
6259 ++++ b/drivers/net/ipa/ipa_table.c
6260 +@@ -108,8 +108,6 @@
6261 +
6262 + /* Assignment of route table entries to the modem and AP */
6263 + #define IPA_ROUTE_MODEM_MIN 0
6264 +-#define IPA_ROUTE_MODEM_COUNT 8
6265 +-
6266 + #define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT
6267 + #define IPA_ROUTE_AP_COUNT \
6268 + (IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
6269 +diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
6270 +index b6a9a0d79d68e..1538e2e1732fe 100644
6271 +--- a/drivers/net/ipa/ipa_table.h
6272 ++++ b/drivers/net/ipa/ipa_table.h
6273 +@@ -13,6 +13,9 @@ struct ipa;
6274 + /* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
6275 + #define IPA_FILTER_COUNT_MAX 14
6276 +
6277 ++/* The number of route table entries allotted to the modem */
6278 ++#define IPA_ROUTE_MODEM_COUNT 8
6279 ++
6280 + /* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
6281 + #define IPA_ROUTE_COUNT_MAX 15
6282 +
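
The IPA change swaps a (start, count) array descriptor for a (start, end) bounds structure and, just as importantly, reports only the modem's share of the route table rather than the whole table size: end is the last usable index, IPA_ROUTE_MODEM_COUNT - 1, not a byte count. A sketch of the off-by-one-prone conversion; the start offset below is hypothetical:

#include <stdint.h>
#include <stdio.h>

#define ROUTE_MODEM_COUNT 8  /* entries allotted to the modem */

struct mem_bounds {
	uint32_t start;  /* byte offset into IPA shared memory */
	uint32_t end;    /* last addressable unit *within* the range */
};

int main(void)
{
	struct mem_bounds route = {
		.start = 0x100,                 /* hypothetical offset */
		.end   = ROUTE_MODEM_COUNT - 1, /* max index, not size */
	};

	/* An inclusive end of N - 1 describes exactly N entries. */
	printf("modem route entries: %u\n", route.end + 1);
	printf("start=0x%x end=%u\n", route.start, route.end);
	return 0;
}
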
6283 +diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
6284 +index 6ffb27419e64b..c58123e136896 100644
6285 +--- a/drivers/net/ipvlan/ipvlan_core.c
6286 ++++ b/drivers/net/ipvlan/ipvlan_core.c
6287 +@@ -495,7 +495,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
6288 +
6289 + static int ipvlan_process_outbound(struct sk_buff *skb)
6290 + {
6291 +- struct ethhdr *ethh = eth_hdr(skb);
6292 + int ret = NET_XMIT_DROP;
6293 +
6294 + /* The ipvlan is a pseudo-L2 device, so the packets that we receive
6295 +@@ -505,6 +504,8 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
6296 + if (skb_mac_header_was_set(skb)) {
6297 + /* In this mode we dont care about
6298 + * multicast and broadcast traffic */
6299 ++ struct ethhdr *ethh = eth_hdr(skb);
6300 ++
6301 + if (is_multicast_ether_addr(ethh->h_dest)) {
6302 + pr_debug_ratelimited(
6303 + "Dropped {multi|broad}cast of type=[%x]\n",
6304 +@@ -589,7 +590,7 @@ out:
6305 + static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
6306 + {
6307 + const struct ipvl_dev *ipvlan = netdev_priv(dev);
6308 +- struct ethhdr *eth = eth_hdr(skb);
6309 ++ struct ethhdr *eth = skb_eth_hdr(skb);
6310 + struct ipvl_addr *addr;
6311 + void *lyr3h;
6312 + int addr_type;
6313 +@@ -619,6 +620,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
6314 + return dev_forward_skb(ipvlan->phy_dev, skb);
6315 +
6316 + } else if (is_multicast_ether_addr(eth->h_dest)) {
6317 ++ skb_reset_mac_header(skb);
6318 + ipvlan_skb_crossing_ns(skb, NULL);
6319 + ipvlan_multicast_enqueue(ipvlan->port, skb, true);
6320 + return NET_XMIT_SUCCESS;
6321 +diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
6322 +index 9e3c815a070f1..796e9c7857d09 100644
6323 +--- a/drivers/net/mdio/of_mdio.c
6324 ++++ b/drivers/net/mdio/of_mdio.c
6325 +@@ -231,6 +231,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
6326 + return 0;
6327 +
6328 + unregister:
6329 ++ of_node_put(child);
6330 + mdiobus_unregister(mdio);
6331 + return rc;
6332 + }
6333 +diff --git a/drivers/net/netdevsim/hwstats.c b/drivers/net/netdevsim/hwstats.c
6334 +index 605a38e16db05..0e58aa7f0374e 100644
6335 +--- a/drivers/net/netdevsim/hwstats.c
6336 ++++ b/drivers/net/netdevsim/hwstats.c
6337 +@@ -433,11 +433,11 @@ int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev)
6338 + goto err_remove_hwstats_recursive;
6339 + }
6340 +
6341 +- debugfs_create_file("enable_ifindex", 0600, hwstats->l3_ddir, hwstats,
6342 ++ debugfs_create_file("enable_ifindex", 0200, hwstats->l3_ddir, hwstats,
6343 + &nsim_dev_hwstats_l3_enable_fops.fops);
6344 +- debugfs_create_file("disable_ifindex", 0600, hwstats->l3_ddir, hwstats,
6345 ++ debugfs_create_file("disable_ifindex", 0200, hwstats->l3_ddir, hwstats,
6346 + &nsim_dev_hwstats_l3_disable_fops.fops);
6347 +- debugfs_create_file("fail_next_enable", 0600, hwstats->l3_ddir, hwstats,
6348 ++ debugfs_create_file("fail_next_enable", 0200, hwstats->l3_ddir, hwstats,
6349 + &nsim_dev_hwstats_l3_fail_fops.fops);
6350 +
6351 + INIT_DELAYED_WORK(&hwstats->traffic_dw,
6352 +diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
6353 +index c7047f5d7a9b0..8bc0957a0f6d3 100644
6354 +--- a/drivers/net/phy/aquantia_main.c
6355 ++++ b/drivers/net/phy/aquantia_main.c
6356 +@@ -90,6 +90,9 @@
6357 + #define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8)
6358 + #define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0)
6359 +
6360 ++#define VEND1_GLOBAL_GEN_STAT2 0xc831
6361 ++#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15)
6362 ++
6363 + #define VEND1_GLOBAL_RSVD_STAT1 0xc885
6364 + #define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4)
6365 + #define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0)
6366 +@@ -124,6 +127,12 @@
6367 + #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
6368 + #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
6369 +
6370 ++/* Sleep and timeout for checking if the Processor-Intensive
6371 ++ * MDIO operation is finished
6372 ++ */
6373 ++#define AQR107_OP_IN_PROG_SLEEP 1000
6374 ++#define AQR107_OP_IN_PROG_TIMEOUT 100000
6375 ++
6376 + struct aqr107_hw_stat {
6377 + const char *name;
6378 + int reg;
6379 +@@ -596,16 +605,52 @@ static void aqr107_link_change_notify(struct phy_device *phydev)
6380 + phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
6381 + }
6382 +
6383 ++static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
6384 ++{
6385 ++ int val, err;
6386 ++
6387 ++ /* The datasheet notes to wait at least 1ms after issuing a
6388 ++ * processor intensive operation before checking.
6389 ++ * We cannot use the 'sleep_before_read' parameter of read_poll_timeout
6390 ++ * because that just determines the maximum time slept, not the minimum.
6391 ++ */
6392 ++ usleep_range(1000, 5000);
6393 ++
6394 ++ err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
6395 ++ VEND1_GLOBAL_GEN_STAT2, val,
6396 ++ !(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG),
6397 ++ AQR107_OP_IN_PROG_SLEEP,
6398 ++ AQR107_OP_IN_PROG_TIMEOUT, false);
6399 ++ if (err) {
6400 ++ phydev_err(phydev, "timeout: processor-intensive MDIO operation\n");
6401 ++ return err;
6402 ++ }
6403 ++
6404 ++ return 0;
6405 ++}
6406 ++
6407 + static int aqr107_suspend(struct phy_device *phydev)
6408 + {
6409 +- return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
6410 +- MDIO_CTRL1_LPOWER);
6411 ++ int err;
6412 ++
6413 ++ err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
6414 ++ MDIO_CTRL1_LPOWER);
6415 ++ if (err)
6416 ++ return err;
6417 ++
6418 ++ return aqr107_wait_processor_intensive_op(phydev);
6419 + }
6420 +
6421 + static int aqr107_resume(struct phy_device *phydev)
6422 + {
6423 +- return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
6424 +- MDIO_CTRL1_LPOWER);
6425 ++ int err;
6426 ++
6427 ++ err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
6428 ++ MDIO_CTRL1_LPOWER);
6429 ++ if (err)
6430 ++ return err;
6431 ++
6432 ++ return aqr107_wait_processor_intensive_op(phydev);
6433 + }
6434 +
6435 + static int aqr107_probe(struct phy_device *phydev)
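
aqr107_wait_processor_intensive_op() above is a poll-until-clear loop with a mandatory settle time before the first read, since the PHY's status bit is meaningless for the first millisecond after the operation is issued. A self-contained model of that shape; the fake register below stands in for phy_read_mmd_poll_timeout() and the sleep is only counted, not performed:

#include <stdio.h>

#define OP_IN_PROG  (1u << 15)  /* models VEND1_GLOBAL_GEN_STAT2 bit 15 */
#define SLEEP_US    1000        /* AQR107_OP_IN_PROG_SLEEP */
#define TIMEOUT_US  100000      /* AQR107_OP_IN_PROG_TIMEOUT */

static int busy_polls = 3;      /* pretend hardware: busy for 3 reads */

static unsigned int read_status(void)
{
	return busy_polls-- > 0 ? OP_IN_PROG : 0;
}

static int wait_op_done(void)
{
	unsigned int waited = 0;

	/* The driver first sleeps >= 1 ms unconditionally (see the
	 * usleep_range() above) before polling at all. */
	for (;;) {
		if (!(read_status() & OP_IN_PROG))
			return 0;
		if (waited >= TIMEOUT_US)
			return -1;      /* timed out */
		waited += SLEEP_US;     /* stand-in for a real sleep */
	}
}

int main(void)
{
	printf("wait_op_done: %d\n", wait_op_done());
	return 0;
}

Wiring this wait into both suspend and resume means a low-power transition is never reported as complete while the PHY firmware is still busy.
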
6436 +diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
6437 +index 34483a4bd688a..e8e1101911b2f 100644
6438 +--- a/drivers/net/phy/micrel.c
6439 ++++ b/drivers/net/phy/micrel.c
6440 +@@ -2662,16 +2662,19 @@ static int lan8804_config_init(struct phy_device *phydev)
6441 + static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
6442 + {
6443 + int irq_status, tsu_irq_status;
6444 ++ int ret = IRQ_NONE;
6445 +
6446 + irq_status = phy_read(phydev, LAN8814_INTS);
6447 +- if (irq_status > 0 && (irq_status & LAN8814_INT_LINK))
6448 +- phy_trigger_machine(phydev);
6449 +-
6450 + if (irq_status < 0) {
6451 + phy_error(phydev);
6452 + return IRQ_NONE;
6453 + }
6454 +
6455 ++ if (irq_status & LAN8814_INT_LINK) {
6456 ++ phy_trigger_machine(phydev);
6457 ++ ret = IRQ_HANDLED;
6458 ++ }
6459 ++
6460 + while (1) {
6461 + tsu_irq_status = lanphy_read_page_reg(phydev, 4,
6462 + LAN8814_INTR_STS_REG);
6463 +@@ -2680,12 +2683,15 @@ static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
6464 + (tsu_irq_status & (LAN8814_INTR_STS_REG_1588_TSU0_ |
6465 + LAN8814_INTR_STS_REG_1588_TSU1_ |
6466 + LAN8814_INTR_STS_REG_1588_TSU2_ |
6467 +- LAN8814_INTR_STS_REG_1588_TSU3_)))
6468 ++ LAN8814_INTR_STS_REG_1588_TSU3_))) {
6469 + lan8814_handle_ptp_interrupt(phydev);
6470 +- else
6471 ++ ret = IRQ_HANDLED;
6472 ++ } else {
6473 + break;
6474 ++ }
6475 + }
6476 +- return IRQ_HANDLED;
6477 ++
6478 ++ return ret;
6479 + }
6480 +
6481 + static int lan8814_ack_interrupt(struct phy_device *phydev)
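
The lan8814 handler used to return IRQ_HANDLED unconditionally, which hides spurious interrupts from the IRQ core's stuck-line detection; after the change it returns IRQ_NONE unless it actually serviced a link or PTP event. A sketch of the accumulate-then-report pattern:

#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

#define INT_LINK (1u << 0)
#define INT_PTP  (1u << 1)

static enum irqreturn handle_interrupt(unsigned int irq_status)
{
	enum irqreturn ret = IRQ_NONE;

	if (irq_status & INT_LINK) {
		/* ... service the link change ... */
		ret = IRQ_HANDLED;
	}
	if (irq_status & INT_PTP) {
		/* ... drain the timestamp FIFO ... */
		ret = IRQ_HANDLED;
	}

	/* Nothing serviced: IRQ_NONE lets the core notice a stuck or
	 * misrouted interrupt line instead of spinning forever. */
	return ret;
}

int main(void)
{
	printf("%d\n", handle_interrupt(0));         /* IRQ_NONE (0)    */
	printf("%d\n", handle_interrupt(INT_LINK));  /* IRQ_HANDLED (1) */
	return 0;
}
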
6482 +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
6483 +index b07dde6f0abf2..b9899913d2467 100644
6484 +--- a/drivers/net/team/team.c
6485 ++++ b/drivers/net/team/team.c
6486 +@@ -1275,10 +1275,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
6487 + }
6488 + }
6489 +
6490 +- netif_addr_lock_bh(dev);
6491 +- dev_uc_sync_multiple(port_dev, dev);
6492 +- dev_mc_sync_multiple(port_dev, dev);
6493 +- netif_addr_unlock_bh(dev);
6494 ++ if (dev->flags & IFF_UP) {
6495 ++ netif_addr_lock_bh(dev);
6496 ++ dev_uc_sync_multiple(port_dev, dev);
6497 ++ dev_mc_sync_multiple(port_dev, dev);
6498 ++ netif_addr_unlock_bh(dev);
6499 ++ }
6500 +
6501 + port->index = -1;
6502 + list_add_tail_rcu(&port->list, &team->port_list);
6503 +@@ -1349,8 +1351,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
6504 + netdev_rx_handler_unregister(port_dev);
6505 + team_port_disable_netpoll(port);
6506 + vlan_vids_del_by_dev(port_dev, dev);
6507 +- dev_uc_unsync(port_dev, dev);
6508 +- dev_mc_unsync(port_dev, dev);
6509 ++ if (dev->flags & IFF_UP) {
6510 ++ dev_uc_unsync(port_dev, dev);
6511 ++ dev_mc_unsync(port_dev, dev);
6512 ++ }
6513 + dev_close(port_dev);
6514 + team_port_leave(team, port);
6515 +
6516 +@@ -1700,6 +1704,14 @@ static int team_open(struct net_device *dev)
6517 +
6518 + static int team_close(struct net_device *dev)
6519 + {
6520 ++ struct team *team = netdev_priv(dev);
6521 ++ struct team_port *port;
6522 ++
6523 ++ list_for_each_entry(port, &team->port_list, list) {
6524 ++ dev_uc_unsync(port->dev, dev);
6525 ++ dev_mc_unsync(port->dev, dev);
6526 ++ }
6527 ++
6528 + return 0;
6529 + }
6530 +
6531 +diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
6532 +index d0f3b6d7f4089..5c804bcabfe6b 100644
6533 +--- a/drivers/net/wireguard/netlink.c
6534 ++++ b/drivers/net/wireguard/netlink.c
6535 +@@ -436,14 +436,13 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs)
6536 + if (attrs[WGPEER_A_ENDPOINT]) {
6537 + struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]);
6538 + size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]);
6539 ++ struct endpoint endpoint = { { { 0 } } };
6540 +
6541 +- if ((len == sizeof(struct sockaddr_in) &&
6542 +- addr->sa_family == AF_INET) ||
6543 +- (len == sizeof(struct sockaddr_in6) &&
6544 +- addr->sa_family == AF_INET6)) {
6545 +- struct endpoint endpoint = { { { 0 } } };
6546 +-
6547 +- memcpy(&endpoint.addr, addr, len);
6548 ++ if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) {
6549 ++ endpoint.addr4 = *(struct sockaddr_in *)addr;
6550 ++ wg_socket_set_peer_endpoint(peer, &endpoint);
6551 ++ } else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) {
6552 ++ endpoint.addr6 = *(struct sockaddr_in6 *)addr;
6553 + wg_socket_set_peer_endpoint(peer, &endpoint);
6554 + }
6555 + }
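
The WireGuard netlink fix replaces a variable-sized memcpy into the endpoint union with fixed-size typed assignments: the length/family validation is unchanged, but each copy is now exactly the size of the struct it claims to be, which is both easier to reason about and friendlier to fortified memcpy checking. A user-space sketch of validate-then-assign:

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

union endpoint {
	struct sockaddr_in  addr4;
	struct sockaddr_in6 addr6;
};

/* Fills *ep and returns 0 only when len and sa_family agree. */
static int set_endpoint(union endpoint *ep,
			const struct sockaddr *addr, size_t len)
{
	if (len == sizeof(struct sockaddr_in) &&
	    addr->sa_family == AF_INET) {
		ep->addr4 = *(const struct sockaddr_in *)addr;
		return 0;
	}
	if (len == sizeof(struct sockaddr_in6) &&
	    addr->sa_family == AF_INET6) {
		ep->addr6 = *(const struct sockaddr_in6 *)addr;
		return 0;
	}
	return -1;  /* mismatched length/family: silently ignored in
		     * the driver, rejected here for visibility */
}

int main(void)
{
	struct sockaddr_in sin = { .sin_family = AF_INET };
	union endpoint ep;

	printf("%d\n", set_endpoint(&ep, (struct sockaddr *)&sin,
				    sizeof(sin)));          /*  0 */
	printf("%d\n", set_endpoint(&ep, (struct sockaddr *)&sin,
				    sizeof(sin) - 1));      /* -1 */
	return 0;
}
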
6556 +diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c
6557 +index ba87d294604fe..d4bb40a695ab6 100644
6558 +--- a/drivers/net/wireguard/selftest/ratelimiter.c
6559 ++++ b/drivers/net/wireguard/selftest/ratelimiter.c
6560 +@@ -6,29 +6,28 @@
6561 + #ifdef DEBUG
6562 +
6563 + #include <linux/jiffies.h>
6564 +-#include <linux/hrtimer.h>
6565 +
6566 + static const struct {
6567 + bool result;
6568 +- u64 nsec_to_sleep_before;
6569 ++ unsigned int msec_to_sleep_before;
6570 + } expected_results[] __initconst = {
6571 + [0 ... PACKETS_BURSTABLE - 1] = { true, 0 },
6572 + [PACKETS_BURSTABLE] = { false, 0 },
6573 +- [PACKETS_BURSTABLE + 1] = { true, NSEC_PER_SEC / PACKETS_PER_SECOND },
6574 ++ [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND },
6575 + [PACKETS_BURSTABLE + 2] = { false, 0 },
6576 +- [PACKETS_BURSTABLE + 3] = { true, (NSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
6577 ++ [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
6578 + [PACKETS_BURSTABLE + 4] = { true, 0 },
6579 + [PACKETS_BURSTABLE + 5] = { false, 0 }
6580 + };
6581 +
6582 + static __init unsigned int maximum_jiffies_at_index(int index)
6583 + {
6584 +- u64 total_nsecs = 2 * NSEC_PER_SEC / PACKETS_PER_SECOND / 3;
6585 ++ unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3;
6586 + int i;
6587 +
6588 + for (i = 0; i <= index; ++i)
6589 +- total_nsecs += expected_results[i].nsec_to_sleep_before;
6590 +- return nsecs_to_jiffies(total_nsecs);
6591 ++ total_msecs += expected_results[i].msec_to_sleep_before;
6592 ++ return msecs_to_jiffies(total_msecs);
6593 + }
6594 +
6595 + static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
6596 +@@ -43,12 +42,8 @@ static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
6597 + loop_start_time = jiffies;
6598 +
6599 + for (i = 0; i < ARRAY_SIZE(expected_results); ++i) {
6600 +- if (expected_results[i].nsec_to_sleep_before) {
6601 +- ktime_t timeout = ktime_add(ktime_add_ns(ktime_get_coarse_boottime(), TICK_NSEC * 4 / 3),
6602 +- ns_to_ktime(expected_results[i].nsec_to_sleep_before));
6603 +- set_current_state(TASK_UNINTERRUPTIBLE);
6604 +- schedule_hrtimeout_range_clock(&timeout, 0, HRTIMER_MODE_ABS, CLOCK_BOOTTIME);
6605 +- }
6606 ++ if (expected_results[i].msec_to_sleep_before)
6607 ++ msleep(expected_results[i].msec_to_sleep_before);
6608 +
6609 + if (time_is_before_jiffies(loop_start_time +
6610 + maximum_jiffies_at_index(i)))
6611 +@@ -132,7 +127,7 @@ bool __init wg_ratelimiter_selftest(void)
6612 + if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN))
6613 + return true;
6614 +
6615 +- BUILD_BUG_ON(NSEC_PER_SEC % PACKETS_PER_SECOND != 0);
6616 ++ BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0);
6617 +
6618 + if (wg_ratelimiter_init())
6619 + goto out;
6620 +@@ -172,7 +167,7 @@ bool __init wg_ratelimiter_selftest(void)
6621 + ++test;
6622 + #endif
6623 +
6624 +- for (trials = TRIALS_BEFORE_GIVING_UP;;) {
6625 ++ for (trials = TRIALS_BEFORE_GIVING_UP; IS_ENABLED(DEBUG_RATELIMITER_TIMINGS);) {
6626 + int test_count = 0, ret;
6627 +
6628 + ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count);
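
The selftest moves from nanosecond hrtimer sleeps to plain msleep() granularity, and the BUILD_BUG_ON is retargeted accordingly: MSEC_PER_SEC / PACKETS_PER_SECOND must divide evenly or the expected-results table would accumulate rounding drift. The same compile-time guard in portable C11; the PACKETS_PER_SECOND value below is assumed, it lives in the ratelimiter proper and not in this patch:

#include <assert.h>
#include <stdio.h>

#define MSEC_PER_SEC       1000u
#define PACKETS_PER_SECOND 20u  /* assumed value */

/* Compile-time analogue of BUILD_BUG_ON(): a fractional per-packet
 * interval in ms would skew every timing in expected_results[]. */
static_assert(MSEC_PER_SEC % PACKETS_PER_SECOND == 0,
	      "packet interval is not a whole number of milliseconds");

int main(void)
{
	printf("per-packet interval: %u ms\n",
	       MSEC_PER_SEC / PACKETS_PER_SECOND);
	return 0;
}
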
6629 +diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
6630 +index a647a406b87be..b20409f8c13ab 100644
6631 +--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
6632 ++++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
6633 +@@ -140,6 +140,7 @@ config IWLMEI
6634 + depends on INTEL_MEI
6635 + depends on PM
6636 + depends on CFG80211
6637 ++ depends on BROKEN
6638 + help
6639 + Enables the iwlmei kernel module.
6640 +
6641 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
6642 +index 9e832b27170fe..a4eb025f504f3 100644
6643 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
6644 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
6645 +@@ -1138,7 +1138,7 @@ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid)
6646 + offset %= 32;
6647 +
6648 + val = mt76_rr(dev, addr);
6649 +- val >>= (tid % 32);
6650 ++ val >>= offset;
6651 +
6652 + if (offset > 20) {
6653 + addr += 4;
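
The mt7615 fix is a classic bit-extraction slip: the code computed the bit offset of the sequence number within a 32-bit word but then shifted by tid % 32 instead of by that offset, so any TID where the two values differed read back the wrong SN. A standalone sketch of pulling a 12-bit field that may straddle a word boundary:

#include <stdint.h>
#include <stdio.h>

/* Extract a 12-bit field starting at absolute bit position 'bit'
 * from an array of little-endian 32-bit words; the field may span
 * two adjacent words. */
static unsigned int get_field12(const uint32_t *words, unsigned int bit)
{
	unsigned int idx = bit / 32;
	unsigned int off = bit % 32;   /* the shift must use this */
	uint32_t val = words[idx] >> off;

	if (off > 20)                  /* spills into words[idx + 1] */
		val |= words[idx + 1] << (32 - off);

	return val & 0xfff;
}

int main(void)
{
	uint32_t w[2] = { 0, 0 };

	/* Plant 0xabc at bit 28: low 4 bits in w[0], high 8 in w[1]. */
	w[0] |= (0xabcu & 0xf) << 28;
	w[1] |= 0xabcu >> 4;

	printf("0x%x\n", get_field12(w, 28));  /* prints 0xabc */
	return 0;
}
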
6654 +diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
6655 +index 629d10fcf53b2..b9f1a8e9f88cb 100644
6656 +--- a/drivers/nvdimm/pmem.c
6657 ++++ b/drivers/nvdimm/pmem.c
6658 +@@ -45,7 +45,7 @@ static struct nd_region *to_region(struct pmem_device *pmem)
6659 + return to_nd_region(to_dev(pmem)->parent);
6660 + }
6661 +
6662 +-static phys_addr_t to_phys(struct pmem_device *pmem, phys_addr_t offset)
6663 ++static phys_addr_t pmem_to_phys(struct pmem_device *pmem, phys_addr_t offset)
6664 + {
6665 + return pmem->phys_addr + offset;
6666 + }
6667 +@@ -63,7 +63,7 @@ static phys_addr_t to_offset(struct pmem_device *pmem, sector_t sector)
6668 + static void pmem_mkpage_present(struct pmem_device *pmem, phys_addr_t offset,
6669 + unsigned int len)
6670 + {
6671 +- phys_addr_t phys = to_phys(pmem, offset);
6672 ++ phys_addr_t phys = pmem_to_phys(pmem, offset);
6673 + unsigned long pfn_start, pfn_end, pfn;
6674 +
6675 + /* only pmem in the linear map supports HWPoison */
6676 +@@ -97,7 +97,7 @@ static void pmem_clear_bb(struct pmem_device *pmem, sector_t sector, long blks)
6677 + static long __pmem_clear_poison(struct pmem_device *pmem,
6678 + phys_addr_t offset, unsigned int len)
6679 + {
6680 +- phys_addr_t phys = to_phys(pmem, offset);
6681 ++ phys_addr_t phys = pmem_to_phys(pmem, offset);
6682 + long cleared = nvdimm_clear_poison(to_dev(pmem), phys, len);
6683 +
6684 + if (cleared > 0) {
6685 +diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
6686 +index d702d7d60235d..2d23b7d41f7e6 100644
6687 +--- a/drivers/nvme/host/apple.c
6688 ++++ b/drivers/nvme/host/apple.c
6689 +@@ -1502,7 +1502,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
6690 +
6691 + if (!blk_get_queue(anv->ctrl.admin_q)) {
6692 + nvme_start_admin_queue(&anv->ctrl);
6693 +- blk_cleanup_queue(anv->ctrl.admin_q);
6694 ++ blk_mq_destroy_queue(anv->ctrl.admin_q);
6695 + anv->ctrl.admin_q = NULL;
6696 + ret = -ENODEV;
6697 + goto put_dev;
6698 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
6699 +index 2f965356f3453..6d76fc608b741 100644
6700 +--- a/drivers/nvme/host/core.c
6701 ++++ b/drivers/nvme/host/core.c
6702 +@@ -4105,7 +4105,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
6703 + if (!nvme_ns_head_multipath(ns->head))
6704 + nvme_cdev_del(&ns->cdev, &ns->cdev_device);
6705 + del_gendisk(ns->disk);
6706 +- blk_cleanup_queue(ns->queue);
6707 +
6708 + down_write(&ns->ctrl->namespaces_rwsem);
6709 + list_del_init(&ns->list);
6710 +diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
6711 +index 4aff83b1b0c05..9a5ce70d7f215 100644
6712 +--- a/drivers/nvme/host/fc.c
6713 ++++ b/drivers/nvme/host/fc.c
6714 +@@ -2392,7 +2392,7 @@ nvme_fc_ctrl_free(struct kref *ref)
6715 + unsigned long flags;
6716 +
6717 + if (ctrl->ctrl.tagset) {
6718 +- blk_cleanup_queue(ctrl->ctrl.connect_q);
6719 ++ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
6720 + blk_mq_free_tag_set(&ctrl->tag_set);
6721 + }
6722 +
6723 +@@ -2402,8 +2402,8 @@ nvme_fc_ctrl_free(struct kref *ref)
6724 + spin_unlock_irqrestore(&ctrl->rport->lock, flags);
6725 +
6726 + nvme_start_admin_queue(&ctrl->ctrl);
6727 +- blk_cleanup_queue(ctrl->ctrl.admin_q);
6728 +- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
6729 ++ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
6730 ++ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
6731 + blk_mq_free_tag_set(&ctrl->admin_tag_set);
6732 +
6733 + kfree(ctrl->queues);
6734 +@@ -2953,7 +2953,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
6735 + out_delete_hw_queues:
6736 + nvme_fc_delete_hw_io_queues(ctrl);
6737 + out_cleanup_blk_queue:
6738 +- blk_cleanup_queue(ctrl->ctrl.connect_q);
6739 ++ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
6740 + out_free_tag_set:
6741 + blk_mq_free_tag_set(&ctrl->tag_set);
6742 + nvme_fc_free_io_queues(ctrl);
6743 +@@ -3642,9 +3642,9 @@ fail_ctrl:
6744 + return ERR_PTR(-EIO);
6745 +
6746 + out_cleanup_admin_q:
6747 +- blk_cleanup_queue(ctrl->ctrl.admin_q);
6748 ++ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
6749 + out_cleanup_fabrics_q:
6750 +- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
6751 ++ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
6752 + out_free_admin_tag_set:
6753 + blk_mq_free_tag_set(&ctrl->admin_tag_set);
6754 + out_free_queues:
6755 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
6756 +index 9f6614f7dbeb1..3516678d37541 100644
6757 +--- a/drivers/nvme/host/pci.c
6758 ++++ b/drivers/nvme/host/pci.c
6759 +@@ -1760,7 +1760,7 @@ static void nvme_dev_remove_admin(struct nvme_dev *dev)
6760 + * queue to flush these to completion.
6761 + */
6762 + nvme_start_admin_queue(&dev->ctrl);
6763 +- blk_cleanup_queue(dev->ctrl.admin_q);
6764 ++ blk_mq_destroy_queue(dev->ctrl.admin_q);
6765 + blk_mq_free_tag_set(&dev->admin_tagset);
6766 + }
6767 + }
6768 +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
6769 +index 46c2dcf72f7ea..240024dd5d857 100644
6770 +--- a/drivers/nvme/host/rdma.c
6771 ++++ b/drivers/nvme/host/rdma.c
6772 +@@ -840,8 +840,8 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
6773 + bool remove)
6774 + {
6775 + if (remove) {
6776 +- blk_cleanup_queue(ctrl->ctrl.admin_q);
6777 +- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
6778 ++ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
6779 ++ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
6780 + blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
6781 + }
6782 + if (ctrl->async_event_sqe.data) {
6783 +@@ -935,10 +935,10 @@ out_stop_queue:
6784 + nvme_cancel_admin_tagset(&ctrl->ctrl);
6785 + out_cleanup_queue:
6786 + if (new)
6787 +- blk_cleanup_queue(ctrl->ctrl.admin_q);
6788 ++ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
6789 + out_cleanup_fabrics_q:
6790 + if (new)
6791 +- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
6792 ++ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
6793 + out_free_tagset:
6794 + if (new)
6795 + blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
6796 +@@ -957,7 +957,7 @@ static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
6797 + bool remove)
6798 + {
6799 + if (remove) {
6800 +- blk_cleanup_queue(ctrl->ctrl.connect_q);
6801 ++ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
6802 + blk_mq_free_tag_set(ctrl->ctrl.tagset);
6803 + }
6804 + nvme_rdma_free_io_queues(ctrl);
6805 +@@ -1012,7 +1012,7 @@ out_wait_freeze_timed_out:
6806 + out_cleanup_connect_q:
6807 + nvme_cancel_tagset(&ctrl->ctrl);
6808 + if (new)
6809 +- blk_cleanup_queue(ctrl->ctrl.connect_q);
6810 ++ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
6811 + out_free_tag_set:
6812 + if (new)
6813 + blk_mq_free_tag_set(ctrl->ctrl.tagset);
6814 +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
6815 +index daa0e160e1212..d7e5bbdb9b75a 100644
6816 +--- a/drivers/nvme/host/tcp.c
6817 ++++ b/drivers/nvme/host/tcp.c
6818 +@@ -1881,7 +1881,7 @@ static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
6819 + {
6820 + nvme_tcp_stop_io_queues(ctrl);
6821 + if (remove) {
6822 +- blk_cleanup_queue(ctrl->connect_q);
6823 ++ blk_mq_destroy_queue(ctrl->connect_q);
6824 + blk_mq_free_tag_set(ctrl->tagset);
6825 + }
6826 + nvme_tcp_free_io_queues(ctrl);
6827 +@@ -1936,7 +1936,7 @@ out_wait_freeze_timed_out:
6828 + out_cleanup_connect_q:
6829 + nvme_cancel_tagset(ctrl);
6830 + if (new)
6831 +- blk_cleanup_queue(ctrl->connect_q);
6832 ++ blk_mq_destroy_queue(ctrl->connect_q);
6833 + out_free_tag_set:
6834 + if (new)
6835 + blk_mq_free_tag_set(ctrl->tagset);
6836 +@@ -1949,8 +1949,8 @@ static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
6837 + {
6838 + nvme_tcp_stop_queue(ctrl, 0);
6839 + if (remove) {
6840 +- blk_cleanup_queue(ctrl->admin_q);
6841 +- blk_cleanup_queue(ctrl->fabrics_q);
6842 ++ blk_mq_destroy_queue(ctrl->admin_q);
6843 ++ blk_mq_destroy_queue(ctrl->fabrics_q);
6844 + blk_mq_free_tag_set(ctrl->admin_tagset);
6845 + }
6846 + nvme_tcp_free_admin_queue(ctrl);
6847 +@@ -2008,10 +2008,10 @@ out_stop_queue:
6848 + nvme_cancel_admin_tagset(ctrl);
6849 + out_cleanup_queue:
6850 + if (new)
6851 +- blk_cleanup_queue(ctrl->admin_q);
6852 ++ blk_mq_destroy_queue(ctrl->admin_q);
6853 + out_cleanup_fabrics_q:
6854 + if (new)
6855 +- blk_cleanup_queue(ctrl->fabrics_q);
6856 ++ blk_mq_destroy_queue(ctrl->fabrics_q);
6857 + out_free_tagset:
6858 + if (new)
6859 + blk_mq_free_tag_set(ctrl->admin_tagset);
6860 +diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
6861 +index 59024af2da2e3..0f5c77e22a0a9 100644
6862 +--- a/drivers/nvme/target/loop.c
6863 ++++ b/drivers/nvme/target/loop.c
6864 +@@ -266,8 +266,8 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
6865 + if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
6866 + return;
6867 + nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
6868 +- blk_cleanup_queue(ctrl->ctrl.admin_q);
6869 +- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
6870 ++ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
6871 ++ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
6872 + blk_mq_free_tag_set(&ctrl->admin_tag_set);
6873 + }
6874 +
6875 +@@ -283,7 +283,7 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
6876 + mutex_unlock(&nvme_loop_ctrl_mutex);
6877 +
6878 + if (nctrl->tagset) {
6879 +- blk_cleanup_queue(ctrl->ctrl.connect_q);
6880 ++ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
6881 + blk_mq_free_tag_set(&ctrl->tag_set);
6882 + }
6883 + kfree(ctrl->queues);
6884 +@@ -410,9 +410,9 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
6885 +
6886 + out_cleanup_queue:
6887 + clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
6888 +- blk_cleanup_queue(ctrl->ctrl.admin_q);
6889 ++ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
6890 + out_cleanup_fabrics_q:
6891 +- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
6892 ++ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
6893 + out_free_tagset:
6894 + blk_mq_free_tag_set(&ctrl->admin_tag_set);
6895 + out_free_sq:
6896 +@@ -554,7 +554,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
6897 + return 0;
6898 +
6899 + out_cleanup_connect_q:
6900 +- blk_cleanup_queue(ctrl->ctrl.connect_q);
6901 ++ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
6902 + out_free_tagset:
6903 + blk_mq_free_tag_set(&ctrl->tag_set);
6904 + out_destroy_queues:
6905 +diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
6906 +index 80d8309652a4d..b80a9b74662b1 100644
6907 +--- a/drivers/perf/arm-cmn.c
6908 ++++ b/drivers/perf/arm-cmn.c
6909 +@@ -36,7 +36,7 @@
6910 + #define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0)
6911 + #define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16)
6912 +
6913 +-#define CMN_CHILD_NODE_ADDR GENMASK(27, 0)
6914 ++#define CMN_CHILD_NODE_ADDR GENMASK(29, 0)
6915 + #define CMN_CHILD_NODE_EXTERNAL BIT(31)
6916 +
6917 + #define CMN_MAX_DIMENSION 12
6918 +diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
6919 +index a4d7d9bd100d3..67712c77d806f 100644
6920 +--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
6921 ++++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
6922 +@@ -274,7 +274,6 @@ struct mvebu_a3700_comphy_lane {
6923 + int submode;
6924 + bool invert_tx;
6925 + bool invert_rx;
6926 +- bool needs_reset;
6927 + };
6928 +
6929 + struct gbe_phy_init_data_fix {
6930 +@@ -1097,40 +1096,12 @@ mvebu_a3700_comphy_pcie_power_off(struct mvebu_a3700_comphy_lane *lane)
6931 + 0x0, PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT);
6932 + }
6933 +
6934 +-static int mvebu_a3700_comphy_reset(struct phy *phy)
6935 ++static void mvebu_a3700_comphy_usb3_power_off(struct mvebu_a3700_comphy_lane *lane)
6936 + {
6937 +- struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
6938 +- u16 mask, data;
6939 +-
6940 +- dev_dbg(lane->dev, "resetting lane %d\n", lane->id);
6941 +-
6942 +- /* COMPHY reset for internal logic */
6943 +- comphy_lane_reg_set(lane, COMPHY_SFT_RESET,
6944 +- SFT_RST_NO_REG, SFT_RST_NO_REG);
6945 +-
6946 +- /* COMPHY register reset (cleared automatically) */
6947 +- comphy_lane_reg_set(lane, COMPHY_SFT_RESET, SFT_RST, SFT_RST);
6948 +-
6949 +- /* PIPE soft and register reset */
6950 +- data = PIPE_SOFT_RESET | PIPE_REG_RESET;
6951 +- mask = data;
6952 +- comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, data, mask);
6953 +-
6954 +- /* Release PIPE register reset */
6955 +- comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL,
6956 +- 0x0, PIPE_REG_RESET);
6957 +-
6958 +- /* Reset SB configuration register (only for lanes 0 and 1) */
6959 +- if (lane->id == 0 || lane->id == 1) {
6960 +- u32 mask, data;
6961 +-
6962 +- data = PIN_RESET_CORE_BIT | PIN_RESET_COMPHY_BIT |
6963 +- PIN_PU_PLL_BIT | PIN_PU_RX_BIT | PIN_PU_TX_BIT;
6964 +- mask = data | PIN_PU_IVREF_BIT | PIN_TX_IDLE_BIT;
6965 +- comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask);
6966 +- }
6967 +-
6968 +- return 0;
6969 ++ /*
6970 ++ * The USB3 MAC sets the USB3 PHY to low state, so we do not
6971 ++ * need to power off USB3 PHY again.
6972 ++ */
6973 + }
6974 +
6975 + static bool mvebu_a3700_comphy_check_mode(int lane,
6976 +@@ -1171,10 +1142,6 @@ static int mvebu_a3700_comphy_set_mode(struct phy *phy, enum phy_mode mode,
6977 + (lane->mode != mode || lane->submode != submode))
6978 + return -EBUSY;
6979 +
6980 +- /* If changing mode, ensure reset is called */
6981 +- if (lane->mode != PHY_MODE_INVALID && lane->mode != mode)
6982 +- lane->needs_reset = true;
6983 +-
6984 + /* Just remember the mode, ->power_on() will do the real setup */
6985 + lane->mode = mode;
6986 + lane->submode = submode;
6987 +@@ -1185,7 +1152,6 @@ static int mvebu_a3700_comphy_set_mode(struct phy *phy, enum phy_mode mode,
6988 + static int mvebu_a3700_comphy_power_on(struct phy *phy)
6989 + {
6990 + struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
6991 +- int ret;
6992 +
6993 + if (!mvebu_a3700_comphy_check_mode(lane->id, lane->mode,
6994 + lane->submode)) {
6995 +@@ -1193,14 +1159,6 @@ static int mvebu_a3700_comphy_power_on(struct phy *phy)
6996 + return -EINVAL;
6997 + }
6998 +
6999 +- if (lane->needs_reset) {
7000 +- ret = mvebu_a3700_comphy_reset(phy);
7001 +- if (ret)
7002 +- return ret;
7003 +-
7004 +- lane->needs_reset = false;
7005 +- }
7006 +-
7007 + switch (lane->mode) {
7008 + case PHY_MODE_USB_HOST_SS:
7009 + dev_dbg(lane->dev, "set lane %d to USB3 host mode\n", lane->id);
7010 +@@ -1224,38 +1182,28 @@ static int mvebu_a3700_comphy_power_off(struct phy *phy)
7011 + {
7012 + struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
7013 +
7014 +- switch (lane->mode) {
7015 +- case PHY_MODE_USB_HOST_SS:
7016 +- /*
7017 +- * The USB3 MAC sets the USB3 PHY to low state, so we do not
7018 +- * need to power off USB3 PHY again.
7019 +- */
7020 +- break;
7021 +-
7022 +- case PHY_MODE_SATA:
7023 +- mvebu_a3700_comphy_sata_power_off(lane);
7024 +- break;
7025 +-
7026 +- case PHY_MODE_ETHERNET:
7027 ++ switch (lane->id) {
7028 ++ case 0:
7029 ++ mvebu_a3700_comphy_usb3_power_off(lane);
7030 + mvebu_a3700_comphy_ethernet_power_off(lane);
7031 +- break;
7032 +-
7033 +- case PHY_MODE_PCIE:
7034 ++ return 0;
7035 ++ case 1:
7036 + mvebu_a3700_comphy_pcie_power_off(lane);
7037 +- break;
7038 +-
7039 ++ mvebu_a3700_comphy_ethernet_power_off(lane);
7040 ++ return 0;
7041 ++ case 2:
7042 ++ mvebu_a3700_comphy_usb3_power_off(lane);
7043 ++ mvebu_a3700_comphy_sata_power_off(lane);
7044 ++ return 0;
7045 + default:
7046 + dev_err(lane->dev, "invalid COMPHY mode\n");
7047 + return -EINVAL;
7048 + }
7049 +-
7050 +- return 0;
7051 + }
7052 +
7053 + static const struct phy_ops mvebu_a3700_comphy_ops = {
7054 + .power_on = mvebu_a3700_comphy_power_on,
7055 + .power_off = mvebu_a3700_comphy_power_off,
7056 +- .reset = mvebu_a3700_comphy_reset,
7057 + .set_mode = mvebu_a3700_comphy_set_mode,
7058 + .owner = THIS_MODULE,
7059 + };
7060 +@@ -1393,8 +1341,7 @@ static int mvebu_a3700_comphy_probe(struct platform_device *pdev)
7061 + * To avoid relying on the bootloader/firmware configuration,
7062 + * power off all comphys.
7063 + */
7064 +- mvebu_a3700_comphy_reset(phy);
7065 +- lane->needs_reset = false;
7066 ++ mvebu_a3700_comphy_power_off(phy);
7067 + }
7068 +
7069 + provider = devm_of_phy_provider_register(&pdev->dev,
7070 +diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
7071 +index ba6d787896606..e8489331f12b8 100644
7072 +--- a/drivers/s390/block/dasd.c
7073 ++++ b/drivers/s390/block/dasd.c
7074 +@@ -3280,7 +3280,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
7075 + static void dasd_free_queue(struct dasd_block *block)
7076 + {
7077 + if (block->request_queue) {
7078 +- blk_cleanup_queue(block->request_queue);
7079 ++ blk_mq_destroy_queue(block->request_queue);
7080 + blk_mq_free_tag_set(&block->tag_set);
7081 + block->request_queue = NULL;
7082 + }
7083 +diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
7084 +index dc78a523a69f2..b6b938aa66158 100644
7085 +--- a/drivers/s390/block/dasd_alias.c
7086 ++++ b/drivers/s390/block/dasd_alias.c
7087 +@@ -675,12 +675,12 @@ int dasd_alias_remove_device(struct dasd_device *device)
7088 + struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
7089 + {
7090 + struct dasd_eckd_private *alias_priv, *private = base_device->private;
7091 +- struct alias_pav_group *group = private->pavgroup;
7092 + struct alias_lcu *lcu = private->lcu;
7093 + struct dasd_device *alias_device;
7094 ++ struct alias_pav_group *group;
7095 + unsigned long flags;
7096 +
7097 +- if (!group || !lcu)
7098 ++ if (!lcu)
7099 + return NULL;
7100 + if (lcu->pav == NO_PAV ||
7101 + lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
7102 +@@ -697,6 +697,11 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
7103 + }
7104 +
7105 + spin_lock_irqsave(&lcu->lock, flags);
7106 ++ group = private->pavgroup;
7107 ++ if (!group) {
7108 ++ spin_unlock_irqrestore(&lcu->lock, flags);
7109 ++ return NULL;
7110 ++ }
7111 + alias_device = group->next;
7112 + if (!alias_device) {
7113 + if (list_empty(&group->aliaslist)) {
7114 +diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
7115 +index a7a33ebf4bbe9..5a83f0a39901b 100644
7116 +--- a/drivers/s390/block/dasd_genhd.c
7117 ++++ b/drivers/s390/block/dasd_genhd.c
7118 +@@ -41,8 +41,8 @@ int dasd_gendisk_alloc(struct dasd_block *block)
7119 + if (base->devindex >= DASD_PER_MAJOR)
7120 + return -EBUSY;
7121 +
7122 +- gdp = __alloc_disk_node(block->request_queue, NUMA_NO_NODE,
7123 +- &dasd_bio_compl_lkclass);
7124 ++ gdp = blk_mq_alloc_disk_for_queue(block->request_queue,
7125 ++ &dasd_bio_compl_lkclass);
7126 + if (!gdp)
7127 + return -ENOMEM;
7128 +
7129 +diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
7130 +index 8352f90d997df..ae9a107c520d0 100644
7131 +--- a/drivers/scsi/hosts.c
7132 ++++ b/drivers/scsi/hosts.c
7133 +@@ -182,6 +182,15 @@ void scsi_remove_host(struct Scsi_Host *shost)
7134 + mutex_unlock(&shost->scan_mutex);
7135 + scsi_proc_host_rm(shost);
7136 +
7137 ++ /*
7138 ++ * New SCSI devices cannot be attached anymore because of the SCSI host
7139 ++ * state so drop the tag set refcnt. Wait until the tag set refcnt drops
7140 ++ * to zero because .exit_cmd_priv implementations may need the host
7141 ++ * pointer.
7142 ++ */
7143 ++ kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
7144 ++ wait_for_completion(&shost->tagset_freed);
7145 ++
7146 + spin_lock_irqsave(shost->host_lock, flags);
7147 + if (scsi_host_set_state(shost, SHOST_DEL))
7148 + BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
7149 +@@ -240,6 +249,9 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
7150 + if (error)
7151 + goto fail;
7152 +
7153 ++ kref_init(&shost->tagset_refcnt);
7154 ++ init_completion(&shost->tagset_freed);
7155 ++
7156 + /*
7157 + * Increase usage count temporarily here so that calling
7158 + * scsi_autopm_put_host() will trigger runtime idle if there is
7159 +@@ -312,6 +324,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
7160 + pm_runtime_disable(&shost->shost_gendev);
7161 + pm_runtime_set_suspended(&shost->shost_gendev);
7162 + pm_runtime_put_noidle(&shost->shost_gendev);
7163 ++ kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
7164 + fail:
7165 + return error;
7166 + }
7167 +@@ -345,9 +358,6 @@ static void scsi_host_dev_release(struct device *dev)
7168 + kfree(dev_name(&shost->shost_dev));
7169 + }
7170 +
7171 +- if (shost->tag_set.tags)
7172 +- scsi_mq_destroy_tags(shost);
7173 +-
7174 + kfree(shost->shost_data);
7175 +
7176 + ida_simple_remove(&host_index_ida, shost->host_no);
7177 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
7178 +index 9a1ae52bb621d..a6d3471a61057 100644
7179 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
7180 ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
7181 +@@ -2993,7 +2993,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
7182 +
7183 + if (ioc->is_mcpu_endpoint ||
7184 + sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
7185 +- dma_get_required_mask(&pdev->dev) <= 32)
7186 ++ dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32))
7187 + ioc->dma_mask = 32;
7188 + /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
7189 + else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
7190 +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
7191 +index 62666df1a59eb..4acff4e84b909 100644
7192 +--- a/drivers/scsi/qla2xxx/qla_target.c
7193 ++++ b/drivers/scsi/qla2xxx/qla_target.c
7194 +@@ -2151,8 +2151,10 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
7195 +
7196 + abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
7197 + le32_to_cpu(abts->exchange_addr_to_abort));
7198 +- if (!abort_cmd)
7199 ++ if (!abort_cmd) {
7200 ++ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
7201 + return -EIO;
7202 ++ }
7203 + mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun;
7204 +
7205 + if (abort_cmd->qpair) {
7206 +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
7207 +index f5c876d03c1ad..7e990f7a9f164 100644
7208 +--- a/drivers/scsi/scsi_lib.c
7209 ++++ b/drivers/scsi/scsi_lib.c
7210 +@@ -168,7 +168,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
7211 + * Requeue this command. It will go before all other commands
7212 + * that are already in the queue. Schedule requeue work under
7213 + * lock such that the kblockd_schedule_work() call happens
7214 +- * before blk_cleanup_queue() finishes.
7215 ++ * before blk_mq_destroy_queue() finishes.
7216 + */
7217 + cmd->result = 0;
7218 +
7219 +@@ -429,9 +429,9 @@ static void scsi_starved_list_run(struct Scsi_Host *shost)
7220 + * it and the queue. Mitigate by taking a reference to the
7221 + * queue and never touching the sdev again after we drop the
7222 + * host lock. Note: if __scsi_remove_device() invokes
7223 +- * blk_cleanup_queue() before the queue is run from this
7224 ++ * blk_mq_destroy_queue() before the queue is run from this
7225 + * function then blk_run_queue() will return immediately since
7226 +- * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
7227 ++ * blk_mq_destroy_queue() marks the queue with QUEUE_FLAG_DYING.
7228 + */
7229 + slq = sdev->request_queue;
7230 + if (!blk_get_queue(slq))
7231 +@@ -1995,9 +1995,13 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
7232 + return blk_mq_alloc_tag_set(tag_set);
7233 + }
7234 +
7235 +-void scsi_mq_destroy_tags(struct Scsi_Host *shost)
7236 ++void scsi_mq_free_tags(struct kref *kref)
7237 + {
7238 ++ struct Scsi_Host *shost = container_of(kref, typeof(*shost),
7239 ++ tagset_refcnt);
7240 ++
7241 + blk_mq_free_tag_set(&shost->tag_set);
7242 ++ complete(&shost->tagset_freed);
7243 + }
7244 +
7245 + /**
7246 +diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
7247 +index 5c4786310a31d..a0ee31d55f5f1 100644
7248 +--- a/drivers/scsi/scsi_priv.h
7249 ++++ b/drivers/scsi/scsi_priv.h
7250 +@@ -94,7 +94,7 @@ extern void scsi_run_host_queues(struct Scsi_Host *shost);
7251 + extern void scsi_requeue_run_queue(struct work_struct *work);
7252 + extern void scsi_start_queue(struct scsi_device *sdev);
7253 + extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
7254 +-extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
7255 ++extern void scsi_mq_free_tags(struct kref *kref);
7256 + extern void scsi_exit_queue(void);
7257 + extern void scsi_evt_thread(struct work_struct *work);
7258 +
7259 +diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
7260 +index 91ac901a66826..5d27f5196de6f 100644
7261 +--- a/drivers/scsi/scsi_scan.c
7262 ++++ b/drivers/scsi/scsi_scan.c
7263 +@@ -340,6 +340,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
7264 + kfree(sdev);
7265 + goto out;
7266 + }
7267 ++ kref_get(&sdev->host->tagset_refcnt);
7268 + sdev->request_queue = q;
7269 + q->queuedata = sdev;
7270 + __scsi_init_queue(sdev->host, q);
7271 +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
7272 +index 43949798a2e47..5d61f58399dca 100644
7273 +--- a/drivers/scsi/scsi_sysfs.c
7274 ++++ b/drivers/scsi/scsi_sysfs.c
7275 +@@ -1475,7 +1475,8 @@ void __scsi_remove_device(struct scsi_device *sdev)
7276 + scsi_device_set_state(sdev, SDEV_DEL);
7277 + mutex_unlock(&sdev->state_mutex);
7278 +
7279 +- blk_cleanup_queue(sdev->request_queue);
7280 ++ blk_mq_destroy_queue(sdev->request_queue);
7281 ++ kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags);
7282 + cancel_work_sync(&sdev->requeue_work);
7283 +
7284 + if (sdev->host->hostt->slave_destroy)
7285 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
7286 +index a1a2ac09066fd..cb587e488601c 100644
7287 +--- a/drivers/scsi/sd.c
7288 ++++ b/drivers/scsi/sd.c
7289 +@@ -3440,8 +3440,8 @@ static int sd_probe(struct device *dev)
7290 + if (!sdkp)
7291 + goto out;
7292 +
7293 +- gd = __alloc_disk_node(sdp->request_queue, NUMA_NO_NODE,
7294 +- &sd_bio_compl_lkclass);
7295 ++ gd = blk_mq_alloc_disk_for_queue(sdp->request_queue,
7296 ++ &sd_bio_compl_lkclass);
7297 + if (!gd)
7298 + goto out_free;
7299 +
7300 +diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
7301 +index 32d3b8274f148..a278b739d0c5f 100644
7302 +--- a/drivers/scsi/sr.c
7303 ++++ b/drivers/scsi/sr.c
7304 +@@ -624,8 +624,8 @@ static int sr_probe(struct device *dev)
7305 + if (!cd)
7306 + goto fail;
7307 +
7308 +- disk = __alloc_disk_node(sdev->request_queue, NUMA_NO_NODE,
7309 +- &sr_bio_compl_lkclass);
7310 ++ disk = blk_mq_alloc_disk_for_queue(sdev->request_queue,
7311 ++ &sr_bio_compl_lkclass);
7312 + if (!disk)
7313 + goto fail_free;
7314 + mutex_init(&cd->lock);
7315 +diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
7316 +index fff0c740c8f33..6f088dd0ba4f3 100644
7317 +--- a/drivers/thunderbolt/icm.c
7318 ++++ b/drivers/thunderbolt/icm.c
7319 +@@ -2527,6 +2527,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
7320 + tb->cm_ops = &icm_icl_ops;
7321 + break;
7322 +
7323 ++ case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI:
7324 + case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI:
7325 + icm->is_supported = icm_tgl_is_supported;
7326 + icm->get_mode = icm_ar_get_mode;
7327 +diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
7328 +index 69083aab2736c..5091677b3f4ba 100644
7329 +--- a/drivers/thunderbolt/nhi.h
7330 ++++ b/drivers/thunderbolt/nhi.h
7331 +@@ -55,6 +55,7 @@ extern const struct tb_nhi_ops icl_nhi_ops;
7332 + * need for the PCI quirk anymore as we will use ICM also on Apple
7333 + * hardware.
7334 + */
7335 ++#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI 0x1134
7336 + #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI 0x1137
7337 + #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI 0x157d
7338 + #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE 0x157e
7339 +diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
7340 +index 2945c1b890880..cb83c66bd8a82 100644
7341 +--- a/drivers/tty/serial/fsl_lpuart.c
7342 ++++ b/drivers/tty/serial/fsl_lpuart.c
7343 +@@ -2706,14 +2706,15 @@ static int lpuart_probe(struct platform_device *pdev)
7344 + lpuart_reg.cons = LPUART_CONSOLE;
7345 + handler = lpuart_int;
7346 + }
7347 +- ret = uart_add_one_port(&lpuart_reg, &sport->port);
7348 +- if (ret)
7349 +- goto failed_attach_port;
7350 +
7351 + ret = lpuart_global_reset(sport);
7352 + if (ret)
7353 + goto failed_reset;
7354 +
7355 ++ ret = uart_add_one_port(&lpuart_reg, &sport->port);
7356 ++ if (ret)
7357 ++ goto failed_attach_port;
7358 ++
7359 + ret = uart_get_rs485_mode(&sport->port);
7360 + if (ret)
7361 + goto failed_get_rs485;
7362 +@@ -2736,9 +2737,9 @@ static int lpuart_probe(struct platform_device *pdev)
7363 +
7364 + failed_irq_request:
7365 + failed_get_rs485:
7366 +-failed_reset:
7367 + uart_remove_one_port(&lpuart_reg, &sport->port);
7368 + failed_attach_port:
7369 ++failed_reset:
7370 + lpuart_disable_clks(sport);
7371 + return ret;
7372 + }
7373 +diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
7374 +index d942ab152f5a4..24aa1dcc5ef7a 100644
7375 +--- a/drivers/tty/serial/serial-tegra.c
7376 ++++ b/drivers/tty/serial/serial-tegra.c
7377 +@@ -525,7 +525,7 @@ static void tegra_uart_tx_dma_complete(void *args)
7378 + count = tup->tx_bytes_requested - state.residue;
7379 + async_tx_ack(tup->tx_dma_desc);
7380 + spin_lock_irqsave(&tup->uport.lock, flags);
7381 +- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
7382 ++ uart_xmit_advance(&tup->uport, count);
7383 + tup->tx_in_progress = 0;
7384 + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
7385 + uart_write_wakeup(&tup->uport);
7386 +@@ -613,7 +613,6 @@ static unsigned int tegra_uart_tx_empty(struct uart_port *u)
7387 + static void tegra_uart_stop_tx(struct uart_port *u)
7388 + {
7389 + struct tegra_uart_port *tup = to_tegra_uport(u);
7390 +- struct circ_buf *xmit = &tup->uport.state->xmit;
7391 + struct dma_tx_state state;
7392 + unsigned int count;
7393 +
7394 +@@ -624,7 +623,7 @@ static void tegra_uart_stop_tx(struct uart_port *u)
7395 + dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
7396 + count = tup->tx_bytes_requested - state.residue;
7397 + async_tx_ack(tup->tx_dma_desc);
7398 +- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
7399 ++ uart_xmit_advance(&tup->uport, count);
7400 + tup->tx_in_progress = 0;
7401 + }
7402 +
7403 +diff --git a/drivers/tty/serial/tegra-tcu.c b/drivers/tty/serial/tegra-tcu.c
7404 +index 4877c54c613d1..889b701ba7c62 100644
7405 +--- a/drivers/tty/serial/tegra-tcu.c
7406 ++++ b/drivers/tty/serial/tegra-tcu.c
7407 +@@ -101,7 +101,7 @@ static void tegra_tcu_uart_start_tx(struct uart_port *port)
7408 + break;
7409 +
7410 + tegra_tcu_write(tcu, &xmit->buf[xmit->tail], count);
7411 +- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
7412 ++ uart_xmit_advance(port, count);
7413 + }
7414 +
7415 + uart_write_wakeup(port);
7416 +diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
7417 +index 829da9cb14a86..55bb0d0422d52 100644
7418 +--- a/drivers/ufs/core/ufshcd.c
7419 ++++ b/drivers/ufs/core/ufshcd.c
7420 +@@ -9519,7 +9519,7 @@ void ufshcd_remove(struct ufs_hba *hba)
7421 + ufs_bsg_remove(hba);
7422 + ufshpb_remove(hba);
7423 + ufs_sysfs_remove_nodes(hba->dev);
7424 +- blk_cleanup_queue(hba->tmf_queue);
7425 ++ blk_mq_destroy_queue(hba->tmf_queue);
7426 + blk_mq_free_tag_set(&hba->tmf_tag_set);
7427 + scsi_remove_host(hba->host);
7428 + /* disable interrupts */
7429 +@@ -9815,7 +9815,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
7430 + return 0;
7431 +
7432 + free_tmf_queue:
7433 +- blk_cleanup_queue(hba->tmf_queue);
7434 ++ blk_mq_destroy_queue(hba->tmf_queue);
7435 + free_tmf_tag_set:
7436 + blk_mq_free_tag_set(&hba->tmf_tag_set);
7437 + out_remove_scsi_host:
7438 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
7439 +index dfef85a18eb55..80b29f937c605 100644
7440 +--- a/drivers/usb/core/hub.c
7441 ++++ b/drivers/usb/core/hub.c
7442 +@@ -6049,7 +6049,7 @@ re_enumerate:
7443 + *
7444 + * Return: The same as for usb_reset_and_verify_device().
7445 + * However, if a reset is already in progress (for instance, if a
7446 +- * driver doesn't have pre_ or post_reset() callbacks, and while
7447 ++ * driver doesn't have pre_reset() or post_reset() callbacks, and while
7448 + * being unbound or re-bound during the ongoing reset its disconnect()
7449 + * or probe() routine tries to perform a second, nested reset), the
7450 + * routine returns -EINPROGRESS.
7451 +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
7452 +index 1db9f51f98aef..08ca65ffe57b7 100644
7453 +--- a/drivers/usb/dwc3/core.c
7454 ++++ b/drivers/usb/dwc3/core.c
7455 +@@ -1718,12 +1718,6 @@ static int dwc3_probe(struct platform_device *pdev)
7456 +
7457 + dwc3_get_properties(dwc);
7458 +
7459 +- if (!dwc->sysdev_is_parent) {
7460 +- ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
7461 +- if (ret)
7462 +- return ret;
7463 +- }
7464 +-
7465 + dwc->reset = devm_reset_control_array_get_optional_shared(dev);
7466 + if (IS_ERR(dwc->reset))
7467 + return PTR_ERR(dwc->reset);
7468 +@@ -1789,6 +1783,13 @@ static int dwc3_probe(struct platform_device *pdev)
7469 + platform_set_drvdata(pdev, dwc);
7470 + dwc3_cache_hwparams(dwc);
7471 +
7472 ++ if (!dwc->sysdev_is_parent &&
7473 ++ DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) {
7474 ++ ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
7475 ++ if (ret)
7476 ++ goto disable_clks;
7477 ++ }
7478 ++
7479 + spin_lock_init(&dwc->lock);
7480 + mutex_init(&dwc->mutex);
7481 +
7482 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
7483 +index a5e8374a8d710..697683e3fbffa 100644
7484 +--- a/drivers/usb/serial/option.c
7485 ++++ b/drivers/usb/serial/option.c
7486 +@@ -256,6 +256,7 @@ static void option_instat_callback(struct urb *urb);
7487 + #define QUECTEL_PRODUCT_EM060K 0x030b
7488 + #define QUECTEL_PRODUCT_EM12 0x0512
7489 + #define QUECTEL_PRODUCT_RM500Q 0x0800
7490 ++#define QUECTEL_PRODUCT_RM520N 0x0801
7491 + #define QUECTEL_PRODUCT_EC200S_CN 0x6002
7492 + #define QUECTEL_PRODUCT_EC200T 0x6026
7493 + #define QUECTEL_PRODUCT_RM500K 0x7001
7494 +@@ -1138,6 +1139,8 @@ static const struct usb_device_id option_ids[] = {
7495 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
7496 + .driver_info = NUMEP2 },
7497 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
7498 ++ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0203, 0xff), /* BG95-M3 */
7499 ++ .driver_info = ZLP },
7500 + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
7501 + .driver_info = RSVD(4) },
7502 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
7503 +@@ -1159,6 +1162,9 @@ static const struct usb_device_id option_ids[] = {
7504 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
7505 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
7506 + .driver_info = ZLP },
7507 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
7508 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
7509 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
7510 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
7511 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
7512 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
7513 +diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
7514 +index d5f3f763717ea..d4b2519257962 100644
7515 +--- a/drivers/xen/xenbus/xenbus_client.c
7516 ++++ b/drivers/xen/xenbus/xenbus_client.c
7517 +@@ -382,9 +382,10 @@ int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
7518 + unsigned long ring_size = nr_pages * XEN_PAGE_SIZE;
7519 + grant_ref_t gref_head;
7520 + unsigned int i;
7521 ++ void *addr;
7522 + int ret;
7523 +
7524 +- *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
7525 ++ addr = *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
7526 + if (!*vaddr) {
7527 + ret = -ENOMEM;
7528 + goto err;
7529 +@@ -401,13 +402,15 @@ int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
7530 + unsigned long gfn;
7531 +
7532 + if (is_vmalloc_addr(*vaddr))
7533 +- gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr[i]));
7534 ++ gfn = pfn_to_gfn(vmalloc_to_pfn(addr));
7535 + else
7536 +- gfn = virt_to_gfn(vaddr[i]);
7537 ++ gfn = virt_to_gfn(addr);
7538 +
7539 + grefs[i] = gnttab_claim_grant_reference(&gref_head);
7540 + gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
7541 + gfn, 0);
7542 ++
7543 ++ addr += XEN_PAGE_SIZE;
7544 + }
7545 +
7546 + return 0;
7547 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
7548 +index 781952c5a5c23..20ad619a8a973 100644
7549 +--- a/fs/btrfs/disk-io.c
7550 ++++ b/fs/btrfs/disk-io.c
7551 +@@ -4586,6 +4586,17 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
7552 +
7553 + set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
7554 +
7555 ++ /*
7556 ++ * If we had UNFINISHED_DROPS we could still be processing them, so
7557 ++ * clear that bit and wake up relocation so it can stop.
7558 ++ * We must do this before stopping the block group reclaim task, because
7559 ++ * at btrfs_relocate_block_group() we wait for this bit, and after the
7560 ++ * wait we stop with -EINTR if btrfs_fs_closing() returns non-zero - we
7561 ++ * have just set BTRFS_FS_CLOSING_START, so btrfs_fs_closing() will
7562 ++ * return 1.
7563 ++ */
7564 ++ btrfs_wake_unfinished_drop(fs_info);
7565 ++
7566 + /*
7567 + * We may have the reclaim task running and relocating a data block group,
7568 + * in which case it may create delayed iputs. So stop it before we park
7569 +@@ -4604,12 +4615,6 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
7570 + */
7571 + kthread_park(fs_info->cleaner_kthread);
7572 +
7573 +- /*
7574 +- * If we had UNFINISHED_DROPS we could still be processing them, so
7575 +- * clear that bit and wake up relocation so it can stop.
7576 +- */
7577 +- btrfs_wake_unfinished_drop(fs_info);
7578 +-
7579 + /* wait for the qgroup rescan worker to stop */
7580 + btrfs_qgroup_wait_for_completion(fs_info, false);
7581 +
7582 +@@ -4632,6 +4637,31 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
7583 + /* clear out the rbtree of defraggable inodes */
7584 + btrfs_cleanup_defrag_inodes(fs_info);
7585 +
7586 ++ /*
7587 ++ * After we parked the cleaner kthread, ordered extents may have
7588 ++ * completed and created new delayed iputs. If one of the async reclaim
7589 ++ * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we
7590 ++ * can hang forever trying to stop it, because if a delayed iput is
7591 ++ * added after it ran btrfs_run_delayed_iputs() and before it called
7592 ++ * btrfs_wait_on_delayed_iputs(), it will hang forever since there is
7593 ++ * no one else to run iputs.
7594 ++ *
7595 ++ * So wait for all ongoing ordered extents to complete and then run
7596 ++ * delayed iputs. This works because once we reach this point no one
7597 ++ * can either create new ordered extents nor create delayed iputs
7598 ++ * through some other means.
7599 ++ *
7600 ++ * Also note that btrfs_wait_ordered_roots() is not safe here, because
7601 ++ * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent,
7602 ++ * but the delayed iput for the respective inode is made only when doing
7603 ++ * the final btrfs_put_ordered_extent() (which must happen at
7604 ++ * btrfs_finish_ordered_io() when we are unmounting).
7605 ++ */
7606 ++ btrfs_flush_workqueue(fs_info->endio_write_workers);
7607 ++ /* Ordered extents for free space inodes. */
7608 ++ btrfs_flush_workqueue(fs_info->endio_freespace_worker);
7609 ++ btrfs_run_delayed_iputs(fs_info);
7610 ++
7611 + cancel_work_sync(&fs_info->async_reclaim_work);
7612 + cancel_work_sync(&fs_info->async_data_reclaim_work);
7613 + cancel_work_sync(&fs_info->preempt_reclaim_work);
7614 +diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
7615 +index 1386362fad3b8..4448b7b6ea221 100644
7616 +--- a/fs/btrfs/zoned.c
7617 ++++ b/fs/btrfs/zoned.c
7618 +@@ -1918,10 +1918,44 @@ out_unlock:
7619 + return ret;
7620 + }
7621 +
7622 ++static void wait_eb_writebacks(struct btrfs_block_group *block_group)
7623 ++{
7624 ++ struct btrfs_fs_info *fs_info = block_group->fs_info;
7625 ++ const u64 end = block_group->start + block_group->length;
7626 ++ struct radix_tree_iter iter;
7627 ++ struct extent_buffer *eb;
7628 ++ void __rcu **slot;
7629 ++
7630 ++ rcu_read_lock();
7631 ++ radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter,
7632 ++ block_group->start >> fs_info->sectorsize_bits) {
7633 ++ eb = radix_tree_deref_slot(slot);
7634 ++ if (!eb)
7635 ++ continue;
7636 ++ if (radix_tree_deref_retry(eb)) {
7637 ++ slot = radix_tree_iter_retry(&iter);
7638 ++ continue;
7639 ++ }
7640 ++
7641 ++ if (eb->start < block_group->start)
7642 ++ continue;
7643 ++ if (eb->start >= end)
7644 ++ break;
7645 ++
7646 ++ slot = radix_tree_iter_resume(slot, &iter);
7647 ++ rcu_read_unlock();
7648 ++ wait_on_extent_buffer_writeback(eb);
7649 ++ rcu_read_lock();
7650 ++ }
7651 ++ rcu_read_unlock();
7652 ++}
7653 ++
7654 + static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
7655 + {
7656 + struct btrfs_fs_info *fs_info = block_group->fs_info;
7657 + struct map_lookup *map;
7658 ++ const bool is_metadata = (block_group->flags &
7659 ++ (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
7660 + int ret = 0;
7661 + int i;
7662 +
7663 +@@ -1932,8 +1966,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
7664 + }
7665 +
7666 + /* Check if we have unwritten allocated space */
7667 +- if ((block_group->flags &
7668 +- (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)) &&
7669 ++ if (is_metadata &&
7670 + block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) {
7671 + spin_unlock(&block_group->lock);
7672 + return -EAGAIN;
7673 +@@ -1958,6 +1991,9 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
7674 + /* No need to wait for NOCOW writers. Zoned mode does not allow that */
7675 + btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
7676 + block_group->length);
7677 ++ /* Wait for extent buffers to be written. */
7678 ++ if (is_metadata)
7679 ++ wait_eb_writebacks(block_group);
7680 +
7681 + spin_lock(&block_group->lock);
7682 +
7683 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
7684 +index 8f2e003e05907..97278c43f8dc0 100644
7685 +--- a/fs/cifs/cifsfs.c
7686 ++++ b/fs/cifs/cifsfs.c
7687 +@@ -1232,6 +1232,12 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
7688 + lock_two_nondirectories(target_inode, src_inode);
7689 +
7690 + cifs_dbg(FYI, "about to flush pages\n");
7691 ++
7692 ++ rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
7693 ++ off + len - 1);
7694 ++ if (rc)
7695 ++ goto out;
7696 ++
7697 + /* should we flush first and last page first */
7698 + truncate_inode_pages(&target_inode->i_data, 0);
7699 +
7700 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
7701 +index e8a8daa82ed76..cc180d37b8ce1 100644
7702 +--- a/fs/cifs/smb2ops.c
7703 ++++ b/fs/cifs/smb2ops.c
7704 +@@ -1886,17 +1886,8 @@ smb2_copychunk_range(const unsigned int xid,
7705 + int chunks_copied = 0;
7706 + bool chunk_sizes_updated = false;
7707 + ssize_t bytes_written, total_bytes_written = 0;
7708 +- struct inode *inode;
7709 +
7710 + pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
7711 +-
7712 +- /*
7713 +- * We need to flush all unwritten data before we can send the
7714 +- * copychunk ioctl to the server.
7715 +- */
7716 +- inode = d_inode(trgtfile->dentry);
7717 +- filemap_write_and_wait(inode->i_mapping);
7718 +-
7719 + if (pcchunk == NULL)
7720 + return -ENOMEM;
7721 +
7722 +@@ -3961,39 +3952,50 @@ static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
7723 + {
7724 + int rc;
7725 + unsigned int xid;
7726 +- struct inode *inode;
7727 ++ struct inode *inode = file_inode(file);
7728 + struct cifsFileInfo *cfile = file->private_data;
7729 +- struct cifsInodeInfo *cifsi;
7730 ++ struct cifsInodeInfo *cifsi = CIFS_I(inode);
7731 + __le64 eof;
7732 ++ loff_t old_eof;
7733 +
7734 + xid = get_xid();
7735 +
7736 +- inode = d_inode(cfile->dentry);
7737 +- cifsi = CIFS_I(inode);
7738 ++ inode_lock(inode);
7739 +
7740 +- if (off >= i_size_read(inode) ||
7741 +- off + len >= i_size_read(inode)) {
7742 ++ old_eof = i_size_read(inode);
7743 ++ if ((off >= old_eof) ||
7744 ++ off + len >= old_eof) {
7745 + rc = -EINVAL;
7746 + goto out;
7747 + }
7748 +
7749 ++ filemap_invalidate_lock(inode->i_mapping);
7750 ++ rc = filemap_write_and_wait_range(inode->i_mapping, off, old_eof - 1);
7751 ++ if (rc < 0)
7752 ++ goto out_2;
7753 ++
7754 ++ truncate_pagecache_range(inode, off, old_eof);
7755 ++
7756 + rc = smb2_copychunk_range(xid, cfile, cfile, off + len,
7757 +- i_size_read(inode) - off - len, off);
7758 ++ old_eof - off - len, off);
7759 + if (rc < 0)
7760 +- goto out;
7761 ++ goto out_2;
7762 +
7763 +- eof = cpu_to_le64(i_size_read(inode) - len);
7764 ++ eof = cpu_to_le64(old_eof - len);
7765 + rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
7766 + cfile->fid.volatile_fid, cfile->pid, &eof);
7767 + if (rc < 0)
7768 +- goto out;
7769 ++ goto out_2;
7770 +
7771 + rc = 0;
7772 +
7773 + cifsi->server_eof = i_size_read(inode) - len;
7774 + truncate_setsize(inode, cifsi->server_eof);
7775 + fscache_resize_cookie(cifs_inode_cookie(inode), cifsi->server_eof);
7776 ++out_2:
7777 ++ filemap_invalidate_unlock(inode->i_mapping);
7778 + out:
7779 ++ inode_unlock(inode);
7780 + free_xid(xid);
7781 + return rc;
7782 + }
7783 +@@ -4004,34 +4006,47 @@ static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
7784 + int rc;
7785 + unsigned int xid;
7786 + struct cifsFileInfo *cfile = file->private_data;
7787 ++ struct inode *inode = file_inode(file);
7788 + __le64 eof;
7789 +- __u64 count;
7790 ++ __u64 count, old_eof;
7791 +
7792 + xid = get_xid();
7793 +
7794 +- if (off >= i_size_read(file->f_inode)) {
7795 ++ inode_lock(inode);
7796 ++
7797 ++ old_eof = i_size_read(inode);
7798 ++ if (off >= old_eof) {
7799 + rc = -EINVAL;
7800 + goto out;
7801 + }
7802 +
7803 +- count = i_size_read(file->f_inode) - off;
7804 +- eof = cpu_to_le64(i_size_read(file->f_inode) + len);
7805 ++ count = old_eof - off;
7806 ++ eof = cpu_to_le64(old_eof + len);
7807 ++
7808 ++ filemap_invalidate_lock(inode->i_mapping);
7809 ++ rc = filemap_write_and_wait_range(inode->i_mapping, off, old_eof + len - 1);
7810 ++ if (rc < 0)
7811 ++ goto out_2;
7812 ++ truncate_pagecache_range(inode, off, old_eof);
7813 +
7814 + rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
7815 + cfile->fid.volatile_fid, cfile->pid, &eof);
7816 + if (rc < 0)
7817 +- goto out;
7818 ++ goto out_2;
7819 +
7820 + rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len);
7821 + if (rc < 0)
7822 +- goto out;
7823 ++ goto out_2;
7824 +
7825 +- rc = smb3_zero_range(file, tcon, off, len, 1);
7826 ++ rc = smb3_zero_data(file, tcon, off, len, xid);
7827 + if (rc < 0)
7828 +- goto out;
7829 ++ goto out_2;
7830 +
7831 + rc = 0;
7832 ++out_2:
7833 ++ filemap_invalidate_unlock(inode->i_mapping);
7834 + out:
7835 ++ inode_unlock(inode);
7836 + free_xid(xid);
7837 + return rc;
7838 + }
7839 +diff --git a/fs/dax.c b/fs/dax.c
7840 +index 4155a6107fa10..7ab248ed21aa3 100644
7841 +--- a/fs/dax.c
7842 ++++ b/fs/dax.c
7843 +@@ -1241,6 +1241,9 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
7844 + loff_t done = 0;
7845 + int ret;
7846 +
7847 ++ if (!iomi.len)
7848 ++ return 0;
7849 ++
7850 + if (iov_iter_rw(iter) == WRITE) {
7851 + lockdep_assert_held_write(&iomi.inode->i_rwsem);
7852 + iomi.flags |= IOMAP_WRITE;
7853 +diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
7854 +index 9de6a6b844c9e..e541a004f8efa 100644
7855 +--- a/fs/exfat/fatent.c
7856 ++++ b/fs/exfat/fatent.c
7857 +@@ -270,8 +270,7 @@ int exfat_zeroed_cluster(struct inode *dir, unsigned int clu)
7858 + struct super_block *sb = dir->i_sb;
7859 + struct exfat_sb_info *sbi = EXFAT_SB(sb);
7860 + struct buffer_head *bh;
7861 +- sector_t blknr, last_blknr;
7862 +- int i;
7863 ++ sector_t blknr, last_blknr, i;
7864 +
7865 + blknr = exfat_cluster_to_sector(sbi, clu);
7866 + last_blknr = blknr + sbi->sect_per_clus;
7867 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
7868 +index adfc30ee4b7be..0d86931269bfc 100644
7869 +--- a/fs/ext4/ext4.h
7870 ++++ b/fs/ext4/ext4.h
7871 +@@ -167,8 +167,6 @@ enum SHIFT_DIRECTION {
7872 + #define EXT4_MB_CR0_OPTIMIZED 0x8000
7873 + /* Avg fragment size rb tree lookup succeeded at least once for cr = 1 */
7874 + #define EXT4_MB_CR1_OPTIMIZED 0x00010000
7875 +-/* Perform linear traversal for one group */
7876 +-#define EXT4_MB_SEARCH_NEXT_LINEAR 0x00020000
7877 + struct ext4_allocation_request {
7878 + /* target inode for block we're allocating */
7879 + struct inode *inode;
7880 +@@ -1589,8 +1587,8 @@ struct ext4_sb_info {
7881 + struct list_head s_discard_list;
7882 + struct work_struct s_discard_work;
7883 + atomic_t s_retry_alloc_pending;
7884 +- struct rb_root s_mb_avg_fragment_size_root;
7885 +- rwlock_t s_mb_rb_lock;
7886 ++ struct list_head *s_mb_avg_fragment_size;
7887 ++ rwlock_t *s_mb_avg_fragment_size_locks;
7888 + struct list_head *s_mb_largest_free_orders;
7889 + rwlock_t *s_mb_largest_free_orders_locks;
7890 +
7891 +@@ -3402,6 +3400,8 @@ struct ext4_group_info {
7892 + ext4_grpblk_t bb_first_free; /* first free block */
7893 + ext4_grpblk_t bb_free; /* total free blocks */
7894 + ext4_grpblk_t bb_fragments; /* nr of freespace fragments */
7895 ++ int bb_avg_fragment_size_order; /* order of average
7896 ++ fragment in BG */
7897 + ext4_grpblk_t bb_largest_free_order;/* order of largest frag in BG */
7898 + ext4_group_t bb_group; /* Group number */
7899 + struct list_head bb_prealloc_list;
7900 +@@ -3409,7 +3409,7 @@ struct ext4_group_info {
7901 + void *bb_bitmap;
7902 + #endif
7903 + struct rw_semaphore alloc_sem;
7904 +- struct rb_node bb_avg_fragment_size_rb;
7905 ++ struct list_head bb_avg_fragment_size_node;
7906 + struct list_head bb_largest_free_order_node;
7907 + ext4_grpblk_t bb_counters[]; /* Nr of free power-of-two-block
7908 + * regions, index is order.
7909 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
7910 +index c148bb97b5273..5235974126bd3 100644
7911 +--- a/fs/ext4/extents.c
7912 ++++ b/fs/ext4/extents.c
7913 +@@ -460,6 +460,10 @@ static int __ext4_ext_check(const char *function, unsigned int line,
7914 + error_msg = "invalid eh_entries";
7915 + goto corrupted;
7916 + }
7917 ++ if (unlikely((eh->eh_entries == 0) && (depth > 0))) {
7918 ++ error_msg = "eh_entries is 0 but eh_depth is > 0";
7919 ++ goto corrupted;
7920 ++ }
7921 + if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
7922 + error_msg = "invalid extent entries";
7923 + goto corrupted;
7924 +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
7925 +index f73e5eb43eae1..208b87ce88588 100644
7926 +--- a/fs/ext4/ialloc.c
7927 ++++ b/fs/ext4/ialloc.c
7928 +@@ -510,7 +510,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
7929 + goto fallback;
7930 + }
7931 +
7932 +- max_dirs = ndirs / ngroups + inodes_per_group / 16;
7933 ++ max_dirs = ndirs / ngroups + inodes_per_group*flex_size / 16;
7934 + min_inodes = avefreei - inodes_per_group*flex_size / 4;
7935 + if (min_inodes < 1)
7936 + min_inodes = 1;
7937 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
7938 +index 38e7dc2531b17..fd29e15d1c3b5 100644
7939 +--- a/fs/ext4/mballoc.c
7940 ++++ b/fs/ext4/mballoc.c
7941 +@@ -140,13 +140,15 @@
7942 + * number of buddy bitmap orders possible) number of lists. Group-infos are
7943 + * placed in appropriate lists.
7944 + *
7945 +- * 2) Average fragment size rb tree (sbi->s_mb_avg_fragment_size_root)
7946 ++ * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
7947 + *
7948 +- * Locking: sbi->s_mb_rb_lock (rwlock)
7949 ++ * Locking: sbi->s_mb_avg_fragment_size_locks(array of rw locks)
7950 + *
7951 +- * This is a red black tree consisting of group infos and the tree is sorted
7952 +- * by average fragment sizes (which is calculated as ext4_group_info->bb_free
7953 +- * / ext4_group_info->bb_fragments).
7954 ++ * This is an array of lists where in the i-th list there are groups with
7955 ++ * average fragment size >= 2^i and < 2^(i+1). The average fragment size
7956 ++ * is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
7957 ++ * Note that we don't bother with a special list for completely empty groups
7958 ++ * so we only have MB_NUM_ORDERS(sb) lists.
7959 + *
7960 + * When "mb_optimize_scan" mount option is set, mballoc consults the above data
7961 + * structures to decide the order in which groups are to be traversed for
7962 +@@ -160,7 +162,8 @@
7963 + *
7964 + * At CR = 1, we only consider groups where average fragment size > request
7965 + * size. So, we lookup a group which has average fragment size just above or
7966 +- * equal to request size using our rb tree (data structure 2) in O(log N) time.
7967 ++ * equal to request size using our average fragment size group lists (data
7968 ++ * structure 2) in O(1) time.
7969 + *
7970 + * If "mb_optimize_scan" mount option is not set, mballoc traverses groups in
7971 + * linear order which requires O(N) search time for each CR 0 and CR 1 phase.
7972 +@@ -802,65 +805,51 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
7973 + }
7974 + }
7975 +
7976 +-static void ext4_mb_rb_insert(struct rb_root *root, struct rb_node *new,
7977 +- int (*cmp)(struct rb_node *, struct rb_node *))
7978 ++static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
7979 + {
7980 +- struct rb_node **iter = &root->rb_node, *parent = NULL;
7981 ++ int order;
7982 +
7983 +- while (*iter) {
7984 +- parent = *iter;
7985 +- if (cmp(new, *iter) > 0)
7986 +- iter = &((*iter)->rb_left);
7987 +- else
7988 +- iter = &((*iter)->rb_right);
7989 +- }
7990 +-
7991 +- rb_link_node(new, parent, iter);
7992 +- rb_insert_color(new, root);
7993 +-}
7994 +-
7995 +-static int
7996 +-ext4_mb_avg_fragment_size_cmp(struct rb_node *rb1, struct rb_node *rb2)
7997 +-{
7998 +- struct ext4_group_info *grp1 = rb_entry(rb1,
7999 +- struct ext4_group_info,
8000 +- bb_avg_fragment_size_rb);
8001 +- struct ext4_group_info *grp2 = rb_entry(rb2,
8002 +- struct ext4_group_info,
8003 +- bb_avg_fragment_size_rb);
8004 +- int num_frags_1, num_frags_2;
8005 +-
8006 +- num_frags_1 = grp1->bb_fragments ?
8007 +- grp1->bb_free / grp1->bb_fragments : 0;
8008 +- num_frags_2 = grp2->bb_fragments ?
8009 +- grp2->bb_free / grp2->bb_fragments : 0;
8010 +-
8011 +- return (num_frags_2 - num_frags_1);
8012 ++ /*
8013 ++ * We don't bother with a special lists groups with only 1 block free
8014 ++ * extents and for completely empty groups.
8015 ++ */
8016 ++ order = fls(len) - 2;
8017 ++ if (order < 0)
8018 ++ return 0;
8019 ++ if (order == MB_NUM_ORDERS(sb))
8020 ++ order--;
8021 ++ return order;
8022 + }
8023 +
8024 +-/*
8025 +- * Reinsert grpinfo into the avg_fragment_size tree with new average
8026 +- * fragment size.
8027 +- */
8028 ++/* Move group to appropriate avg_fragment_size list */
8029 + static void
8030 + mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
8031 + {
8032 + struct ext4_sb_info *sbi = EXT4_SB(sb);
8033 ++ int new_order;
8034 +
8035 + if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
8036 + return;
8037 +
8038 +- write_lock(&sbi->s_mb_rb_lock);
8039 +- if (!RB_EMPTY_NODE(&grp->bb_avg_fragment_size_rb)) {
8040 +- rb_erase(&grp->bb_avg_fragment_size_rb,
8041 +- &sbi->s_mb_avg_fragment_size_root);
8042 +- RB_CLEAR_NODE(&grp->bb_avg_fragment_size_rb);
8043 +- }
8044 ++ new_order = mb_avg_fragment_size_order(sb,
8045 ++ grp->bb_free / grp->bb_fragments);
8046 ++ if (new_order == grp->bb_avg_fragment_size_order)
8047 ++ return;
8048 +
8049 +- ext4_mb_rb_insert(&sbi->s_mb_avg_fragment_size_root,
8050 +- &grp->bb_avg_fragment_size_rb,
8051 +- ext4_mb_avg_fragment_size_cmp);
8052 +- write_unlock(&sbi->s_mb_rb_lock);
8053 ++ if (grp->bb_avg_fragment_size_order != -1) {
8054 ++ write_lock(&sbi->s_mb_avg_fragment_size_locks[
8055 ++ grp->bb_avg_fragment_size_order]);
8056 ++ list_del(&grp->bb_avg_fragment_size_node);
8057 ++ write_unlock(&sbi->s_mb_avg_fragment_size_locks[
8058 ++ grp->bb_avg_fragment_size_order]);
8059 ++ }
8060 ++ grp->bb_avg_fragment_size_order = new_order;
8061 ++ write_lock(&sbi->s_mb_avg_fragment_size_locks[
8062 ++ grp->bb_avg_fragment_size_order]);
8063 ++ list_add_tail(&grp->bb_avg_fragment_size_node,
8064 ++ &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
8065 ++ write_unlock(&sbi->s_mb_avg_fragment_size_locks[
8066 ++ grp->bb_avg_fragment_size_order]);
8067 + }
8068 +
8069 + /*
8070 +@@ -909,86 +898,55 @@ static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
8071 + *new_cr = 1;
8072 + } else {
8073 + *group = grp->bb_group;
8074 +- ac->ac_last_optimal_group = *group;
8075 + ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED;
8076 + }
8077 + }
8078 +
8079 + /*
8080 +- * Choose next group by traversing average fragment size tree. Updates *new_cr
8081 +- * if cr lvel needs an update. Sets EXT4_MB_SEARCH_NEXT_LINEAR to indicate that
8082 +- * the linear search should continue for one iteration since there's lock
8083 +- * contention on the rb tree lock.
8084 ++ * Choose next group by traversing average fragment size list of suitable
8085 ++ * order. Updates *new_cr if cr level needs an update.
8086 + */
8087 + static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
8088 + int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
8089 + {
8090 + struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
8091 +- int avg_fragment_size, best_so_far;
8092 +- struct rb_node *node, *found;
8093 +- struct ext4_group_info *grp;
8094 +-
8095 +- /*
8096 +- * If there is contention on the lock, instead of waiting for the lock
8097 +- * to become available, just continue searching lineraly. We'll resume
8098 +- * our rb tree search later starting at ac->ac_last_optimal_group.
8099 +- */
8100 +- if (!read_trylock(&sbi->s_mb_rb_lock)) {
8101 +- ac->ac_flags |= EXT4_MB_SEARCH_NEXT_LINEAR;
8102 +- return;
8103 +- }
8104 ++ struct ext4_group_info *grp = NULL, *iter;
8105 ++ int i;
8106 +
8107 + if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
8108 + if (sbi->s_mb_stats)
8109 + atomic_inc(&sbi->s_bal_cr1_bad_suggestions);
8110 +- /* We have found something at CR 1 in the past */
8111 +- grp = ext4_get_group_info(ac->ac_sb, ac->ac_last_optimal_group);
8112 +- for (found = rb_next(&grp->bb_avg_fragment_size_rb); found != NULL;
8113 +- found = rb_next(found)) {
8114 +- grp = rb_entry(found, struct ext4_group_info,
8115 +- bb_avg_fragment_size_rb);
8116 ++ }
8117 ++
8118 ++ for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
8119 ++ i < MB_NUM_ORDERS(ac->ac_sb); i++) {
8120 ++ if (list_empty(&sbi->s_mb_avg_fragment_size[i]))
8121 ++ continue;
8122 ++ read_lock(&sbi->s_mb_avg_fragment_size_locks[i]);
8123 ++ if (list_empty(&sbi->s_mb_avg_fragment_size[i])) {
8124 ++ read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
8125 ++ continue;
8126 ++ }
8127 ++ list_for_each_entry(iter, &sbi->s_mb_avg_fragment_size[i],
8128 ++ bb_avg_fragment_size_node) {
8129 + if (sbi->s_mb_stats)
8130 + atomic64_inc(&sbi->s_bal_cX_groups_considered[1]);
8131 +- if (likely(ext4_mb_good_group(ac, grp->bb_group, 1)))
8132 ++ if (likely(ext4_mb_good_group(ac, iter->bb_group, 1))) {
8133 ++ grp = iter;
8134 + break;
8135 +- }
8136 +- goto done;
8137 +- }
8138 +-
8139 +- node = sbi->s_mb_avg_fragment_size_root.rb_node;
8140 +- best_so_far = 0;
8141 +- found = NULL;
8142 +-
8143 +- while (node) {
8144 +- grp = rb_entry(node, struct ext4_group_info,
8145 +- bb_avg_fragment_size_rb);
8146 +- avg_fragment_size = 0;
8147 +- if (ext4_mb_good_group(ac, grp->bb_group, 1)) {
8148 +- avg_fragment_size = grp->bb_fragments ?
8149 +- grp->bb_free / grp->bb_fragments : 0;
8150 +- if (!best_so_far || avg_fragment_size < best_so_far) {
8151 +- best_so_far = avg_fragment_size;
8152 +- found = node;
8153 + }
8154 + }
8155 +- if (avg_fragment_size > ac->ac_g_ex.fe_len)
8156 +- node = node->rb_right;
8157 +- else
8158 +- node = node->rb_left;
8159 ++ read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
8160 ++ if (grp)
8161 ++ break;
8162 + }
8163 +
8164 +-done:
8165 +- if (found) {
8166 +- grp = rb_entry(found, struct ext4_group_info,
8167 +- bb_avg_fragment_size_rb);
8168 ++ if (grp) {
8169 + *group = grp->bb_group;
8170 + ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED;
8171 + } else {
8172 + *new_cr = 2;
8173 + }
8174 +-
8175 +- read_unlock(&sbi->s_mb_rb_lock);
8176 +- ac->ac_last_optimal_group = *group;
8177 + }
8178 +
8179 + static inline int should_optimize_scan(struct ext4_allocation_context *ac)
8180 +@@ -1017,11 +975,6 @@ next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups)
8181 + goto inc_and_return;
8182 + }
8183 +
8184 +- if (ac->ac_flags & EXT4_MB_SEARCH_NEXT_LINEAR) {
8185 +- ac->ac_flags &= ~EXT4_MB_SEARCH_NEXT_LINEAR;
8186 +- goto inc_and_return;
8187 +- }
8188 +-
8189 + return group;
8190 + inc_and_return:
8191 + /*
8192 +@@ -1049,8 +1002,10 @@ static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
8193 + {
8194 + *new_cr = ac->ac_criteria;
8195 +
8196 +- if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining)
8197 ++ if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
8198 ++ *group = next_linear_group(ac, *group, ngroups);
8199 + return;
8200 ++ }
8201 +
8202 + if (*new_cr == 0) {
8203 + ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
8204 +@@ -1075,23 +1030,25 @@ mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
8205 + struct ext4_sb_info *sbi = EXT4_SB(sb);
8206 + int i;
8207 +
8208 +- if (test_opt2(sb, MB_OPTIMIZE_SCAN) && grp->bb_largest_free_order >= 0) {
8209 ++ for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
8210 ++ if (grp->bb_counters[i] > 0)
8211 ++ break;
8212 ++ /* No need to move between order lists? */
8213 ++ if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
8214 ++ i == grp->bb_largest_free_order) {
8215 ++ grp->bb_largest_free_order = i;
8216 ++ return;
8217 ++ }
8218 ++
8219 ++ if (grp->bb_largest_free_order >= 0) {
8220 + write_lock(&sbi->s_mb_largest_free_orders_locks[
8221 + grp->bb_largest_free_order]);
8222 + list_del_init(&grp->bb_largest_free_order_node);
8223 + write_unlock(&sbi->s_mb_largest_free_orders_locks[
8224 + grp->bb_largest_free_order]);
8225 + }
8226 +- grp->bb_largest_free_order = -1; /* uninit */
8227 +-
8228 +- for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) {
8229 +- if (grp->bb_counters[i] > 0) {
8230 +- grp->bb_largest_free_order = i;
8231 +- break;
8232 +- }
8233 +- }
8234 +- if (test_opt2(sb, MB_OPTIMIZE_SCAN) &&
8235 +- grp->bb_largest_free_order >= 0 && grp->bb_free) {
8236 ++ grp->bb_largest_free_order = i;
8237 ++ if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
8238 + write_lock(&sbi->s_mb_largest_free_orders_locks[
8239 + grp->bb_largest_free_order]);
8240 + list_add_tail(&grp->bb_largest_free_order_node,
8241 +@@ -1148,13 +1105,13 @@ void ext4_mb_generate_buddy(struct super_block *sb,
8242 + EXT4_GROUP_INFO_BBITMAP_CORRUPT);
8243 + }
8244 + mb_set_largest_free_order(sb, grp);
8245 ++ mb_update_avg_fragment_size(sb, grp);
8246 +
8247 + clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
8248 +
8249 + period = get_cycles() - period;
8250 + atomic_inc(&sbi->s_mb_buddies_generated);
8251 + atomic64_add(period, &sbi->s_mb_generation_time);
8252 +- mb_update_avg_fragment_size(sb, grp);
8253 + }
8254 +
8255 + /* The buddy information is attached the buddy cache inode
8256 +@@ -2630,7 +2587,7 @@ static noinline_for_stack int
8257 + ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
8258 + {
8259 + ext4_group_t prefetch_grp = 0, ngroups, group, i;
8260 +- int cr = -1;
8261 ++ int cr = -1, new_cr;
8262 + int err = 0, first_err = 0;
8263 + unsigned int nr = 0, prefetch_ios = 0;
8264 + struct ext4_sb_info *sbi;
8265 +@@ -2701,17 +2658,14 @@ repeat:
8266 + * from the goal value specified
8267 + */
8268 + group = ac->ac_g_ex.fe_group;
8269 +- ac->ac_last_optimal_group = group;
8270 + ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
8271 + prefetch_grp = group;
8272 +
8273 +- for (i = 0; i < ngroups; group = next_linear_group(ac, group, ngroups),
8274 +- i++) {
8275 +- int ret = 0, new_cr;
8276 ++ for (i = 0, new_cr = cr; i < ngroups; i++,
8277 ++ ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
8278 ++ int ret = 0;
8279 +
8280 + cond_resched();
8281 +-
8282 +- ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups);
8283 + if (new_cr != cr) {
8284 + cr = new_cr;
8285 + goto repeat;
8286 +@@ -2985,9 +2939,7 @@ __acquires(&EXT4_SB(sb)->s_mb_rb_lock)
8287 + struct super_block *sb = pde_data(file_inode(seq->file));
8288 + unsigned long position;
8289 +
8290 +- read_lock(&EXT4_SB(sb)->s_mb_rb_lock);
8291 +-
8292 +- if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1)
8293 ++ if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
8294 + return NULL;
8295 + position = *pos + 1;
8296 + return (void *) ((unsigned long) position);
8297 +@@ -2999,7 +2951,7 @@ static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, lof
8298 + unsigned long position;
8299 +
8300 + ++*pos;
8301 +- if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1)
8302 ++ if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
8303 + return NULL;
8304 + position = *pos + 1;
8305 + return (void *) ((unsigned long) position);
8306 +@@ -3011,29 +2963,22 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
8307 + struct ext4_sb_info *sbi = EXT4_SB(sb);
8308 + unsigned long position = ((unsigned long) v);
8309 + struct ext4_group_info *grp;
8310 +- struct rb_node *n;
8311 +- unsigned int count, min, max;
8312 ++ unsigned int count;
8313 +
8314 + position--;
8315 + if (position >= MB_NUM_ORDERS(sb)) {
8316 +- seq_puts(seq, "fragment_size_tree:\n");
8317 +- n = rb_first(&sbi->s_mb_avg_fragment_size_root);
8318 +- if (!n) {
8319 +- seq_puts(seq, "\ttree_min: 0\n\ttree_max: 0\n\ttree_nodes: 0\n");
8320 +- return 0;
8321 +- }
8322 +- grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb);
8323 +- min = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0;
8324 +- count = 1;
8325 +- while (rb_next(n)) {
8326 +- count++;
8327 +- n = rb_next(n);
8328 +- }
8329 +- grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb);
8330 +- max = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0;
8331 ++ position -= MB_NUM_ORDERS(sb);
8332 ++ if (position == 0)
8333 ++ seq_puts(seq, "avg_fragment_size_lists:\n");
8334 +
8335 +- seq_printf(seq, "\ttree_min: %u\n\ttree_max: %u\n\ttree_nodes: %u\n",
8336 +- min, max, count);
8337 ++ count = 0;
8338 ++ read_lock(&sbi->s_mb_avg_fragment_size_locks[position]);
8339 ++ list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position],
8340 ++ bb_avg_fragment_size_node)
8341 ++ count++;
8342 ++ read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]);
8343 ++ seq_printf(seq, "\tlist_order_%u_groups: %u\n",
8344 ++ (unsigned int)position, count);
8345 + return 0;
8346 + }
8347 +
8348 +@@ -3043,9 +2988,11 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
8349 + seq_puts(seq, "max_free_order_lists:\n");
8350 + }
8351 + count = 0;
8352 ++ read_lock(&sbi->s_mb_largest_free_orders_locks[position]);
8353 + list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
8354 + bb_largest_free_order_node)
8355 + count++;
8356 ++ read_unlock(&sbi->s_mb_largest_free_orders_locks[position]);
8357 + seq_printf(seq, "\tlist_order_%u_groups: %u\n",
8358 + (unsigned int)position, count);
8359 +
8360 +@@ -3053,11 +3000,7 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
8361 + }
8362 +
8363 + static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
8364 +-__releases(&EXT4_SB(sb)->s_mb_rb_lock)
8365 + {
8366 +- struct super_block *sb = pde_data(file_inode(seq->file));
8367 +-
8368 +- read_unlock(&EXT4_SB(sb)->s_mb_rb_lock);
8369 + }
8370 +
8371 + const struct seq_operations ext4_mb_seq_structs_summary_ops = {
8372 +@@ -3170,8 +3113,9 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
8373 + init_rwsem(&meta_group_info[i]->alloc_sem);
8374 + meta_group_info[i]->bb_free_root = RB_ROOT;
8375 + INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
8376 +- RB_CLEAR_NODE(&meta_group_info[i]->bb_avg_fragment_size_rb);
8377 ++ INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node);
8378 + meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
8379 ++ meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */
8380 + meta_group_info[i]->bb_group = group;
8381 +
8382 + mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
8383 +@@ -3420,7 +3364,24 @@ int ext4_mb_init(struct super_block *sb)
8384 + i++;
8385 + } while (i < MB_NUM_ORDERS(sb));
8386 +
8387 +- sbi->s_mb_avg_fragment_size_root = RB_ROOT;
8388 ++ sbi->s_mb_avg_fragment_size =
8389 ++ kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
8390 ++ GFP_KERNEL);
8391 ++ if (!sbi->s_mb_avg_fragment_size) {
8392 ++ ret = -ENOMEM;
8393 ++ goto out;
8394 ++ }
8395 ++ sbi->s_mb_avg_fragment_size_locks =
8396 ++ kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
8397 ++ GFP_KERNEL);
8398 ++ if (!sbi->s_mb_avg_fragment_size_locks) {
8399 ++ ret = -ENOMEM;
8400 ++ goto out;
8401 ++ }
8402 ++ for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
8403 ++ INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]);
8404 ++ rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]);
8405 ++ }
8406 + sbi->s_mb_largest_free_orders =
8407 + kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
8408 + GFP_KERNEL);
8409 +@@ -3439,7 +3400,6 @@ int ext4_mb_init(struct super_block *sb)
8410 + INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
8411 + rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
8412 + }
8413 +- rwlock_init(&sbi->s_mb_rb_lock);
8414 +
8415 + spin_lock_init(&sbi->s_md_lock);
8416 + sbi->s_mb_free_pending = 0;
8417 +@@ -3510,6 +3470,8 @@ out_free_locality_groups:
8418 + free_percpu(sbi->s_locality_groups);
8419 + sbi->s_locality_groups = NULL;
8420 + out:
8421 ++ kfree(sbi->s_mb_avg_fragment_size);
8422 ++ kfree(sbi->s_mb_avg_fragment_size_locks);
8423 + kfree(sbi->s_mb_largest_free_orders);
8424 + kfree(sbi->s_mb_largest_free_orders_locks);
8425 + kfree(sbi->s_mb_offsets);
8426 +@@ -3576,6 +3538,8 @@ int ext4_mb_release(struct super_block *sb)
8427 + kvfree(group_info);
8428 + rcu_read_unlock();
8429 + }
8430 ++ kfree(sbi->s_mb_avg_fragment_size);
8431 ++ kfree(sbi->s_mb_avg_fragment_size_locks);
8432 + kfree(sbi->s_mb_largest_free_orders);
8433 + kfree(sbi->s_mb_largest_free_orders_locks);
8434 + kfree(sbi->s_mb_offsets);
8435 +@@ -5187,6 +5151,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
8436 + struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
8437 + int bsbits = ac->ac_sb->s_blocksize_bits;
8438 + loff_t size, isize;
8439 ++ bool inode_pa_eligible, group_pa_eligible;
8440 +
8441 + if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
8442 + return;
8443 +@@ -5194,25 +5159,27 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
8444 + if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
8445 + return;
8446 +
8447 ++ group_pa_eligible = sbi->s_mb_group_prealloc > 0;
8448 ++ inode_pa_eligible = true;
8449 + size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
8450 + isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
8451 + >> bsbits;
8452 +
8453 ++ /* No point in using inode preallocation for closed files */
8454 + if ((size == isize) && !ext4_fs_is_busy(sbi) &&
8455 +- !inode_is_open_for_write(ac->ac_inode)) {
8456 +- ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
8457 +- return;
8458 +- }
8459 ++ !inode_is_open_for_write(ac->ac_inode))
8460 ++ inode_pa_eligible = false;
8461 +
8462 +- if (sbi->s_mb_group_prealloc <= 0) {
8463 +- ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
8464 +- return;
8465 +- }
8466 +-
8467 +- /* don't use group allocation for large files */
8468 + size = max(size, isize);
8469 +- if (size > sbi->s_mb_stream_request) {
8470 +- ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
8471 ++ /* Don't use group allocation for large files */
8472 ++ if (size > sbi->s_mb_stream_request)
8473 ++ group_pa_eligible = false;
8474 ++
8475 ++ if (!group_pa_eligible) {
8476 ++ if (inode_pa_eligible)
8477 ++ ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
8478 ++ else
8479 ++ ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
8480 + return;
8481 + }
8482 +
8483 +@@ -5559,6 +5526,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
8484 + ext4_fsblk_t block = 0;
8485 + unsigned int inquota = 0;
8486 + unsigned int reserv_clstrs = 0;
8487 ++ int retries = 0;
8488 + u64 seq;
8489 +
8490 + might_sleep();
8491 +@@ -5661,7 +5629,8 @@ repeat:
8492 + ar->len = ac->ac_b_ex.fe_len;
8493 + }
8494 + } else {
8495 +- if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
8496 ++ if (++retries < 3 &&
8497 ++ ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
8498 + goto repeat;
8499 + /*
8500 + * If block allocation fails then the pa allocated above
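The mballoc hunks above replace the single rb-tree (s_mb_avg_fragment_size_root, guarded by the global s_mb_rb_lock) with an array of per-order lists, each under its own rwlock, mirroring the existing s_mb_largest_free_orders layout; the /proc summary then just counts list entries under the matching read lock. A rough userspace sketch of that bucketing pattern follows -- names, sizes, and the pthread locking are illustrative, not the kernel's:

	#include <pthread.h>

	#define NUM_ORDERS 14	/* stand-in for MB_NUM_ORDERS(sb) */

	struct group {
		struct group *next, *prev;	/* list linkage */
		int order;			/* current bucket, -1 if in none */
	};

	static struct group heads[NUM_ORDERS];		/* circular sentinel heads */
	static pthread_rwlock_t locks[NUM_ORDERS];	/* one lock per bucket */

	static void buckets_init(void)
	{
		for (int i = 0; i < NUM_ORDERS; i++) {
			heads[i].next = heads[i].prev = &heads[i];
			pthread_rwlock_init(&locks[i], NULL);
		}
	}

	/* Re-bucket a group, touching at most two per-bucket locks --
	 * no global lock, so different orders never contend. */
	static void bucket_move(struct group *g, int new_order)
	{
		if (g->order == new_order)
			return;
		if (g->order >= 0) {
			pthread_rwlock_wrlock(&locks[g->order]);
			g->prev->next = g->next;
			g->next->prev = g->prev;
			pthread_rwlock_unlock(&locks[g->order]);
		}
		g->order = new_order;
		pthread_rwlock_wrlock(&locks[new_order]);
		g->next = heads[new_order].next;
		g->prev = &heads[new_order];
		g->next->prev = g;
		heads[new_order].next = g;
		pthread_rwlock_unlock(&locks[new_order]);
	}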
8501 +diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
8502 +index 39da92ceabf88..dcda2a943cee0 100644
8503 +--- a/fs/ext4/mballoc.h
8504 ++++ b/fs/ext4/mballoc.h
8505 +@@ -178,7 +178,6 @@ struct ext4_allocation_context {
8506 + /* copy of the best found extent taken before preallocation efforts */
8507 + struct ext4_free_extent ac_f_ex;
8508 +
8509 +- ext4_group_t ac_last_optimal_group;
8510 + __u32 ac_groups_considered;
8511 + __u32 ac_flags; /* allocation hints */
8512 + __u16 ac_groups_scanned;
8513 +diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
8514 +index 7515a465ec03a..7c90b1ab3e00d 100644
8515 +--- a/include/asm-generic/vmlinux.lds.h
8516 ++++ b/include/asm-generic/vmlinux.lds.h
8517 +@@ -543,10 +543,9 @@
8518 + */
8519 + #ifdef CONFIG_CFI_CLANG
8520 + #define TEXT_CFI_JT \
8521 +- . = ALIGN(PMD_SIZE); \
8522 ++ ALIGN_FUNCTION(); \
8523 + __cfi_jt_start = .; \
8524 + *(.text..L.cfi.jumptable .text..L.cfi.jumptable.*) \
8525 +- . = ALIGN(PMD_SIZE); \
8526 + __cfi_jt_end = .;
8527 + #else
8528 + #define TEXT_CFI_JT
8529 +diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
8530 +index e2d9daf7e8dd0..0fd96e92c6c65 100644
8531 +--- a/include/linux/blk-mq.h
8532 ++++ b/include/linux/blk-mq.h
8533 +@@ -686,10 +686,13 @@ struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
8534 + \
8535 + __blk_mq_alloc_disk(set, queuedata, &__key); \
8536 + })
8537 ++struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
8538 ++ struct lock_class_key *lkclass);
8539 + struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
8540 + int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
8541 + struct request_queue *q);
8542 + void blk_mq_unregister_dev(struct device *, struct request_queue *);
8543 ++void blk_mq_destroy_queue(struct request_queue *);
8544 +
8545 + int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
8546 + int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
8547 +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
8548 +index 62e3ff52ab033..83eb8869a8c94 100644
8549 +--- a/include/linux/blkdev.h
8550 ++++ b/include/linux/blkdev.h
8551 +@@ -148,6 +148,7 @@ struct gendisk {
8552 + #define GD_NATIVE_CAPACITY 3
8553 + #define GD_ADDED 4
8554 + #define GD_SUPPRESS_PART_SCAN 5
8555 ++#define GD_OWNS_QUEUE 6
8556 +
8557 + struct mutex open_mutex; /* open/close mutex */
8558 + unsigned open_partitions; /* number of open partitions */
8559 +@@ -559,7 +560,6 @@ struct request_queue {
8560 + #define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */
8561 + #define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
8562 + #define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
8563 +-#define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */
8564 + #define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
8565 + #define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
8566 + #define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
8567 +@@ -587,7 +587,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
8568 + #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
8569 + #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
8570 + #define blk_queue_has_srcu(q) test_bit(QUEUE_FLAG_HAS_SRCU, &(q)->queue_flags)
8571 +-#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
8572 + #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
8573 + #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
8574 + #define blk_queue_noxmerges(q) \
8575 +@@ -812,8 +811,6 @@ static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
8576 +
8577 + int bdev_disk_changed(struct gendisk *disk, bool invalidate);
8578 +
8579 +-struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
8580 +- struct lock_class_key *lkclass);
8581 + void put_disk(struct gendisk *disk);
8582 + struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);
8583 +
8584 +@@ -955,7 +952,6 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
8585 + /*
8586 + * Access functions for manipulating queue properties
8587 + */
8588 +-extern void blk_cleanup_queue(struct request_queue *);
8589 + void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
8590 + extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
8591 + extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
8592 +diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
8593 +index 4592d08459417..57aa459c6618a 100644
8594 +--- a/include/linux/cpumask.h
8595 ++++ b/include/linux/cpumask.h
8596 +@@ -1083,9 +1083,10 @@ cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
8597 + * cover a worst-case of every other cpu being on one of two nodes for a
8598 + * very large NR_CPUS.
8599 + *
8600 +- * Use PAGE_SIZE as a minimum for smaller configurations.
8601 ++ * Use PAGE_SIZE as a minimum for smaller configurations while avoiding
8602 ++ * unsigned comparison to -1.
8603 + */
8604 +-#define CPUMAP_FILE_MAX_BYTES ((((NR_CPUS * 9)/32 - 1) > PAGE_SIZE) \
8605 ++#define CPUMAP_FILE_MAX_BYTES (((NR_CPUS * 9)/32 > PAGE_SIZE) \
8606 + ? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
8607 + #define CPULIST_FILE_MAX_BYTES (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)
8608 +
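The cpumask.h hunk fixes a classic promotion bug: PAGE_SIZE is unsigned long, so on configs where (NR_CPUS * 9)/32 is zero the old "- 1" inside the comparison wrapped to ULONG_MAX and the wrong branch won. A standalone demo of the promotion, with stand-in constants:

	#include <stdio.h>

	#define FAKE_NR_CPUS	1	/* a tiny config */
	#define FAKE_PAGE_SIZE	4096UL	/* unsigned long, like the kernel's PAGE_SIZE */

	int main(void)
	{
		/* old form: (9/32 - 1) == -1 promotes to ULONG_MAX, so the test passes */
		unsigned long buggy = (((FAKE_NR_CPUS * 9) / 32 - 1) > FAKE_PAGE_SIZE)
					? (FAKE_NR_CPUS * 9) / 32 - 1 : FAKE_PAGE_SIZE;
		/* fixed form compares before subtracting, so tiny configs get PAGE_SIZE */
		unsigned long fixed = (((FAKE_NR_CPUS * 9) / 32) > FAKE_PAGE_SIZE)
					? (FAKE_NR_CPUS * 9) / 32 - 1 : FAKE_PAGE_SIZE;

		printf("buggy=%lu fixed=%lu\n", buggy, fixed);	/* buggy=18446744073709551615 on LP64 */
		return 0;
	}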
8609 +diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
8610 +index fde258b3decd5..037a8d81a66cf 100644
8611 +--- a/include/linux/serial_core.h
8612 ++++ b/include/linux/serial_core.h
8613 +@@ -302,6 +302,23 @@ struct uart_state {
8614 + /* number of characters left in xmit buffer before we ask for more */
8615 + #define WAKEUP_CHARS 256
8616 +
8617 ++/**
8618 ++ * uart_xmit_advance - Advance xmit buffer and account Tx'ed chars
8619 ++ * @up: uart_port structure describing the port
8620 ++ * @chars: number of characters sent
8621 ++ *
8622 ++ * This function advances the tail of circular xmit buffer by the number of
8623 ++ * @chars transmitted and handles accounting of transmitted bytes (into
8624 ++ * @up's icount.tx).
8625 ++ */
8626 ++static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars)
8627 ++{
8628 ++ struct circ_buf *xmit = &up->state->xmit;
8629 ++
8630 ++ xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1);
8631 ++ up->icount.tx += chars;
8632 ++}
8633 ++
8634 + struct module;
8635 + struct tty_driver;
8636 +
8637 +diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
8638 +index 184105d682942..f2273bd5a4c58 100644
8639 +--- a/include/net/bond_3ad.h
8640 ++++ b/include/net/bond_3ad.h
8641 +@@ -15,8 +15,6 @@
8642 + #define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW)
8643 + #define AD_TIMER_INTERVAL 100 /*msec*/
8644 +
8645 +-#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02}
8646 +-
8647 + #define AD_LACP_SLOW 0
8648 + #define AD_LACP_FAST 1
8649 +
8650 +diff --git a/include/net/bonding.h b/include/net/bonding.h
8651 +index 3b816ae8b1f3b..7ac1773b99224 100644
8652 +--- a/include/net/bonding.h
8653 ++++ b/include/net/bonding.h
8654 +@@ -785,6 +785,9 @@ extern struct rtnl_link_ops bond_link_ops;
8655 + /* exported from bond_sysfs_slave.c */
8656 + extern const struct sysfs_ops slave_sysfs_ops;
8657 +
8658 ++/* exported from bond_3ad.c */
8659 ++extern const u8 lacpdu_mcast_addr[];
8660 ++
8661 + static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
8662 + {
8663 + dev_core_stats_tx_dropped_inc(dev);
8664 +diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
8665 +index 667d889b92b52..3e1cea155049b 100644
8666 +--- a/include/scsi/scsi_host.h
8667 ++++ b/include/scsi/scsi_host.h
8668 +@@ -557,6 +557,8 @@ struct Scsi_Host {
8669 + struct scsi_host_template *hostt;
8670 + struct scsi_transport_template *transportt;
8671 +
8672 ++ struct kref tagset_refcnt;
8673 ++ struct completion tagset_freed;
8674 + /* Area to keep a shared tag map */
8675 + struct blk_mq_tag_set tag_set;
8676 +
8677 +diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
8678 +index 65e13a099b1a0..a9f5d884560ac 100644
8679 +--- a/include/uapi/linux/xfrm.h
8680 ++++ b/include/uapi/linux/xfrm.h
8681 +@@ -296,7 +296,7 @@ enum xfrm_attr_type_t {
8682 + XFRMA_ETIMER_THRESH,
8683 + XFRMA_SRCADDR, /* xfrm_address_t */
8684 + XFRMA_COADDR, /* xfrm_address_t */
8685 +- XFRMA_LASTUSED, /* unsigned long */
8686 ++ XFRMA_LASTUSED, /* __u64 */
8687 + XFRMA_POLICY_TYPE, /* struct xfrm_userpolicy_type */
8688 + XFRMA_MIGRATE,
8689 + XFRMA_ALG_AEAD, /* struct xfrm_algo_aead */
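The xfrm.h change is comment-only but worth spelling out: a UAPI attribute must have the same size for 32-bit and 64-bit userland, which "unsigned long" does not guarantee; __u64 does. A trivial demo:

	#include <stdio.h>
	#include <linux/types.h>

	int main(void)
	{
		printf("sizeof(unsigned long) = %zu\n", sizeof(unsigned long)); /* 4 on ILP32, 8 on LP64 */
		printf("sizeof(__u64)         = %zu\n", sizeof(__u64));         /* 8 everywhere */
		return 0;
	}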
8690 +diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
8691 +index 602da2cfd57c8..15a6f1e93e5af 100644
8692 +--- a/io_uring/io_uring.c
8693 ++++ b/io_uring/io_uring.c
8694 +@@ -10951,6 +10951,9 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
8695 + io_poll_remove_all(ctx, NULL, true);
8696 + /* if we failed setting up the ctx, we might not have any rings */
8697 + io_iopoll_try_reap_events(ctx);
8698 ++ /* drop cached put refs after potentially doing completions */
8699 ++ if (current->io_uring)
8700 ++ io_uring_drop_tctx_refs(current);
8701 + }
8702 +
8703 + INIT_WORK(&ctx->exit_work, io_ring_exit_work);
8704 +diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
8705 +index e702ca368539a..80c23f48f3b4b 100644
8706 +--- a/kernel/cgroup/cgroup.c
8707 ++++ b/kernel/cgroup/cgroup.c
8708 +@@ -6026,6 +6026,9 @@ struct cgroup *cgroup_get_from_id(u64 id)
8709 + if (!kn)
8710 + goto out;
8711 +
8712 ++ if (kernfs_type(kn) != KERNFS_DIR)
8713 ++ goto put;
8714 ++
8715 + rcu_read_lock();
8716 +
8717 + cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
8718 +@@ -6033,7 +6036,7 @@ struct cgroup *cgroup_get_from_id(u64 id)
8719 + cgrp = NULL;
8720 +
8721 + rcu_read_unlock();
8722 +-
8723 ++put:
8724 + kernfs_put(kn);
8725 + out:
8726 + return cgrp;
8727 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
8728 +index aa8a82bc67384..fc6e4f2523452 100644
8729 +--- a/kernel/workqueue.c
8730 ++++ b/kernel/workqueue.c
8731 +@@ -3066,10 +3066,8 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
8732 + if (WARN_ON(!work->func))
8733 + return false;
8734 +
8735 +- if (!from_cancel) {
8736 +- lock_map_acquire(&work->lockdep_map);
8737 +- lock_map_release(&work->lockdep_map);
8738 +- }
8739 ++ lock_map_acquire(&work->lockdep_map);
8740 ++ lock_map_release(&work->lockdep_map);
8741 +
8742 + if (start_flush_work(work, &barr, from_cancel)) {
8743 + wait_for_completion(&barr.done);
8744 +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
8745 +index 2e24db4bff192..c399ab486557f 100644
8746 +--- a/lib/Kconfig.debug
8747 ++++ b/lib/Kconfig.debug
8748 +@@ -264,8 +264,10 @@ config DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
8749 + config DEBUG_INFO_DWARF4
8750 + bool "Generate DWARF Version 4 debuginfo"
8751 + select DEBUG_INFO
8752 ++ depends on !CC_IS_CLANG || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
8753 + help
8754 +- Generate DWARF v4 debug info. This requires gcc 4.5+ and gdb 7.0+.
8755 ++ Generate DWARF v4 debug info. This requires gcc 4.5+, binutils 2.35.2
8756 ++ if using clang without clang's integrated assembler, and gdb 7.0+.
8757 +
8758 + If you have consumers of DWARF debug info that are not ready for
8759 + newer revisions of DWARF, you may wish to choose this or have your
8760 +diff --git a/mm/slab_common.c b/mm/slab_common.c
8761 +index dbd4b6f9b0e79..29ae1358d5f07 100644
8762 +--- a/mm/slab_common.c
8763 ++++ b/mm/slab_common.c
8764 +@@ -503,6 +503,7 @@ void slab_kmem_cache_release(struct kmem_cache *s)
8765 + void kmem_cache_destroy(struct kmem_cache *s)
8766 + {
8767 + int refcnt;
8768 ++ bool rcu_set;
8769 +
8770 + if (unlikely(!s) || !kasan_check_byte(s))
8771 + return;
8772 +@@ -510,6 +511,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
8773 + cpus_read_lock();
8774 + mutex_lock(&slab_mutex);
8775 +
8776 ++ rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
8777 ++
8778 + refcnt = --s->refcount;
8779 + if (refcnt)
8780 + goto out_unlock;
8781 +@@ -520,7 +523,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
8782 + out_unlock:
8783 + mutex_unlock(&slab_mutex);
8784 + cpus_read_unlock();
8785 +- if (!refcnt && !(s->flags & SLAB_TYPESAFE_BY_RCU))
8786 ++ if (!refcnt && !rcu_set)
8787 + kmem_cache_release(s);
8788 + }
8789 + EXPORT_SYMBOL(kmem_cache_destroy);
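The slab_common.c hunk snapshots SLAB_TYPESAFE_BY_RCU into rcu_set before the refcount drop, because once the last reference is gone and slab_mutex is released the kmem_cache may already be freed, making the old read of s->flags a potential use-after-free. A minimal, single-threaded sketch of that snapshot-before-release rule, with illustrative types:

	#include <stdbool.h>
	#include <stdlib.h>

	struct cache {
		int refcount;
		unsigned int flags;
	};
	#define CACHE_RCU 0x1U	/* stand-in for SLAB_TYPESAFE_BY_RCU */

	static void cache_put(struct cache *s)
	{
		/* snapshot first: once the count hits zero another path may
		 * free *s, so s->flags must not be read after the drop */
		bool rcu_set = s->flags & CACHE_RCU;
		int refcnt = --s->refcount;

		if (!refcnt && !rcu_set)
			free(s);
	}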
8790 +diff --git a/mm/slub.c b/mm/slub.c
8791 +index b1281b8654bd3..1eec942b8336c 100644
8792 +--- a/mm/slub.c
8793 ++++ b/mm/slub.c
8794 +@@ -310,6 +310,11 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
8795 + */
8796 + static nodemask_t slab_nodes;
8797 +
8798 ++/*
8799 ++ * Workqueue used for flush_cpu_slab().
8800 ++ */
8801 ++static struct workqueue_struct *flushwq;
8802 ++
8803 + /********************************************************************
8804 + * Core slab cache functions
8805 + *******************************************************************/
8806 +@@ -2730,7 +2735,7 @@ static void flush_all_cpus_locked(struct kmem_cache *s)
8807 + INIT_WORK(&sfw->work, flush_cpu_slab);
8808 + sfw->skip = false;
8809 + sfw->s = s;
8810 +- schedule_work_on(cpu, &sfw->work);
8811 ++ queue_work_on(cpu, flushwq, &sfw->work);
8812 + }
8813 +
8814 + for_each_online_cpu(cpu) {
8815 +@@ -4880,6 +4885,8 @@ void __init kmem_cache_init(void)
8816 +
8817 + void __init kmem_cache_init_late(void)
8818 + {
8819 ++ flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0);
8820 ++ WARN_ON(!flushwq);
8821 + }
8822 +
8823 + struct kmem_cache *
8824 +@@ -4950,6 +4957,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
8825 + /* Honor the call site pointer we received. */
8826 + trace_kmalloc(caller, ret, size, s->size, gfpflags);
8827 +
8828 ++ ret = kasan_kmalloc(s, ret, size, gfpflags);
8829 ++
8830 + return ret;
8831 + }
8832 + EXPORT_SYMBOL(__kmalloc_track_caller);
8833 +@@ -4981,6 +4990,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
8834 + /* Honor the call site pointer we received. */
8835 + trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
8836 +
8837 ++ ret = kasan_kmalloc(s, ret, size, gfpflags);
8838 ++
8839 + return ret;
8840 + }
8841 + EXPORT_SYMBOL(__kmalloc_node_track_caller);
8842 +@@ -5914,7 +5925,8 @@ static char *create_unique_id(struct kmem_cache *s)
8843 + char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
8844 + char *p = name;
8845 +
8846 +- BUG_ON(!name);
8847 ++ if (!name)
8848 ++ return ERR_PTR(-ENOMEM);
8849 +
8850 + *p++ = ':';
8851 + /*
8852 +@@ -5972,6 +5984,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
8853 + * for the symlinks.
8854 + */
8855 + name = create_unique_id(s);
8856 ++ if (IS_ERR(name))
8857 ++ return PTR_ERR(name);
8858 + }
8859 +
8860 + s->kobj.kset = kset;
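Two of the slub.c changes swap a BUG_ON(!name) for the kernel's pointer-or-errno convention: the allocator returns ERR_PTR(-ENOMEM) and the caller tests IS_ERR()/PTR_ERR() instead of crashing the machine on a failed kmalloc. A minimal sketch of that convention with an illustrative allocator, not the slub code itself:

	#include <linux/err.h>
	#include <linux/slab.h>

	static char *make_id(size_t len)
	{
		char *id = kmalloc(len, GFP_KERNEL);

		if (!id)
			return ERR_PTR(-ENOMEM);	/* report the failure... */
		id[0] = ':';
		return id;
	}

	static int use_id(void)
	{
		char *id = make_id(32);

		if (IS_ERR(id))
			return PTR_ERR(id);	/* ...and let the caller decide */
		/* ... build the sysfs name from id ... */
		kfree(id);
		return 0;
	}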
8861 +diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
8862 +index b8f8da7ee3dea..41c1ad33d009f 100644
8863 +--- a/net/batman-adv/hard-interface.c
8864 ++++ b/net/batman-adv/hard-interface.c
8865 +@@ -10,6 +10,7 @@
8866 + #include <linux/atomic.h>
8867 + #include <linux/byteorder/generic.h>
8868 + #include <linux/container_of.h>
8869 ++#include <linux/errno.h>
8870 + #include <linux/gfp.h>
8871 + #include <linux/if.h>
8872 + #include <linux/if_arp.h>
8873 +@@ -700,6 +701,9 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
8874 + int max_header_len = batadv_max_header_len();
8875 + int ret;
8876 +
8877 ++ if (hard_iface->net_dev->mtu < ETH_MIN_MTU + max_header_len)
8878 ++ return -EINVAL;
8879 ++
8880 + if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
8881 + goto out;
8882 +
8883 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
8884 +index 9a0ae59cdc500..4f385d52a1c49 100644
8885 +--- a/net/bridge/netfilter/ebtables.c
8886 ++++ b/net/bridge/netfilter/ebtables.c
8887 +@@ -1040,8 +1040,10 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
8888 + goto free_iterate;
8889 + }
8890 +
8891 +- if (repl->valid_hooks != t->valid_hooks)
8892 ++ if (repl->valid_hooks != t->valid_hooks) {
8893 ++ ret = -EINVAL;
8894 + goto free_unlock;
8895 ++ }
8896 +
8897 + if (repl->num_counters && repl->num_counters != t->private->nentries) {
8898 + ret = -EINVAL;
8899 +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
8900 +index 6aee04f75e3e4..bcba61ef5b378 100644
8901 +--- a/net/core/flow_dissector.c
8902 ++++ b/net/core/flow_dissector.c
8903 +@@ -1572,9 +1572,8 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
8904 +
8905 + switch (keys->control.addr_type) {
8906 + case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
8907 +- addr_diff = (__force u32)keys->addrs.v4addrs.dst -
8908 +- (__force u32)keys->addrs.v4addrs.src;
8909 +- if (addr_diff < 0)
8910 ++ if ((__force u32)keys->addrs.v4addrs.dst <
8911 ++ (__force u32)keys->addrs.v4addrs.src)
8912 + swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
8913 +
8914 + if ((__force u16)keys->ports.dst <
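The flow_dissector fix drops the subtract-and-test-sign idiom: the u32 difference wraps whenever the true distance exceeds 2^31, so its sign says nothing reliable about which address is larger; a direct unsigned comparison does. Standalone demo:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t dst = 0x00000001, src = 0xffffffff;

		int diff = (int)(dst - src);	/* wraps to 2: sign claims dst is larger */
		printf("diff=%d -> old code keeps the order (wrong)\n", diff);

		if (dst < src)			/* direct unsigned compare is always right */
			printf("direct compare: dst < src -> swap\n");
		return 0;
	}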
8915 +diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
8916 +index 9f6f4a41245d4..1012012a061fe 100644
8917 +--- a/net/ipv6/af_inet6.c
8918 ++++ b/net/ipv6/af_inet6.c
8919 +@@ -1069,13 +1069,13 @@ static int __init inet6_init(void)
8920 + for (r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
8921 + INIT_LIST_HEAD(r);
8922 +
8923 ++ raw_hashinfo_init(&raw_v6_hashinfo);
8924 ++
8925 + if (disable_ipv6_mod) {
8926 + pr_info("Loaded, but administratively disabled, reboot required to enable\n");
8927 + goto out;
8928 + }
8929 +
8930 +- raw_hashinfo_init(&raw_v6_hashinfo);
8931 +-
8932 + err = proto_register(&tcpv6_prot, 1);
8933 + if (err)
8934 + goto out;
8935 +diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
8936 +index 0d9332e9cf71a..617f744a2e3a3 100644
8937 +--- a/net/netfilter/nf_conntrack_ftp.c
8938 ++++ b/net/netfilter/nf_conntrack_ftp.c
8939 +@@ -33,6 +33,7 @@ MODULE_AUTHOR("Rusty Russell <rusty@××××××××××××.au>");
8940 + MODULE_DESCRIPTION("ftp connection tracking helper");
8941 + MODULE_ALIAS("ip_conntrack_ftp");
8942 + MODULE_ALIAS_NFCT_HELPER(HELPER_NAME);
8943 ++static DEFINE_SPINLOCK(nf_ftp_lock);
8944 +
8945 + #define MAX_PORTS 8
8946 + static u_int16_t ports[MAX_PORTS];
8947 +@@ -409,7 +410,8 @@ static int help(struct sk_buff *skb,
8948 + }
8949 + datalen = skb->len - dataoff;
8950 +
8951 +- spin_lock_bh(&ct->lock);
8952 ++ /* seqadj (nat) uses ct->lock internally, nf_nat_ftp would cause deadlock */
8953 ++ spin_lock_bh(&nf_ftp_lock);
8954 + fb_ptr = skb->data + dataoff;
8955 +
8956 + ends_in_nl = (fb_ptr[datalen - 1] == '\n');
8957 +@@ -538,7 +540,7 @@ out_update_nl:
8958 + if (ends_in_nl)
8959 + update_nl_seq(ct, seq, ct_ftp_info, dir, skb);
8960 + out:
8961 +- spin_unlock_bh(&ct->lock);
8962 ++ spin_unlock_bh(&nf_ftp_lock);
8963 + return ret;
8964 + }
8965 +
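The ftp helper now serializes on its own static nf_ftp_lock because the NAT/seqadj code it calls takes ct->lock itself, and acquiring the same non-recursive spinlock twice on one CPU self-deadlocks. A sketch of the shape of the problem and the fix, with illustrative names:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(helper_lock);	/* one lock for this helper's own state */

	/* stands in for the seqadj path, which takes the object's lock */
	static void inner_adjust(spinlock_t *obj_lock)
	{
		spin_lock_bh(obj_lock);
		/* ... adjust TCP sequence numbers ... */
		spin_unlock_bh(obj_lock);
	}

	static void helper(spinlock_t *obj_lock)
	{
		/* must NOT take obj_lock here: inner_adjust() takes it below,
		 * and a second acquisition on this CPU would deadlock */
		spin_lock_bh(&helper_lock);
		/* ... parse the command channel, update helper state ... */
		inner_adjust(obj_lock);
		spin_unlock_bh(&helper_lock);
	}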
8966 +diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
8967 +index 992decbcaa5c1..5703846bea3b6 100644
8968 +--- a/net/netfilter/nf_conntrack_irc.c
8969 ++++ b/net/netfilter/nf_conntrack_irc.c
8970 +@@ -157,15 +157,37 @@ static int help(struct sk_buff *skb, unsigned int protoff,
8971 + data = ib_ptr;
8972 + data_limit = ib_ptr + datalen;
8973 +
8974 +- /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
8975 +- * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */
8976 +- while (data < data_limit - (19 + MINMATCHLEN)) {
8977 +- if (memcmp(data, "\1DCC ", 5)) {
8978 ++ /* Skip any whitespace */
8979 ++ while (data < data_limit - 10) {
8980 ++ if (*data == ' ' || *data == '\r' || *data == '\n')
8981 ++ data++;
8982 ++ else
8983 ++ break;
8984 ++ }
8985 ++
8986 ++ /* strlen("PRIVMSG x ")=10 */
8987 ++ if (data < data_limit - 10) {
8988 ++ if (strncasecmp("PRIVMSG ", data, 8))
8989 ++ goto out;
8990 ++ data += 8;
8991 ++ }
8992 ++
8993 ++ /* strlen(" :\1DCC SENT t AAAAAAAA P\1\n")=26
8994 ++ * 7+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=26
8995 ++ */
8996 ++ while (data < data_limit - (21 + MINMATCHLEN)) {
8997 ++ /* Find first " :", the start of message */
8998 ++ if (memcmp(data, " :", 2)) {
8999 + data++;
9000 + continue;
9001 + }
9002 ++ data += 2;
9003 ++
9004 ++ /* then check that place only for the DCC command */
9005 ++ if (memcmp(data, "\1DCC ", 5))
9006 ++ goto out;
9007 + data += 5;
9008 +- /* we have at least (19+MINMATCHLEN)-5 bytes valid data left */
9009 ++ /* we have at least (21+MINMATCHLEN)-(2+5) bytes valid data left */
9010 +
9011 + iph = ip_hdr(skb);
9012 + pr_debug("DCC found in master %pI4:%u %pI4:%u\n",
9013 +@@ -181,7 +203,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
9014 + pr_debug("DCC %s detected\n", dccprotos[i]);
9015 +
9016 + /* we have at least
9017 +- * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
9018 ++ * (21+MINMATCHLEN)-7-dccprotos[i].matchlen bytes valid
9019 + * data left (== 14/13 bytes) */
9020 + if (parse_dcc(data, data_limit, &dcc_ip,
9021 + &dcc_port, &addr_beg_p, &addr_end_p)) {
9022 +diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
9023 +index b83dc9bf0a5dd..78fd9122b70c7 100644
9024 +--- a/net/netfilter/nf_conntrack_sip.c
9025 ++++ b/net/netfilter/nf_conntrack_sip.c
9026 +@@ -477,7 +477,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr,
9027 + return ret;
9028 + if (ret == 0)
9029 + break;
9030 +- dataoff += *matchoff;
9031 ++ dataoff = *matchoff;
9032 + }
9033 + *in_header = 0;
9034 + }
9035 +@@ -489,7 +489,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr,
9036 + break;
9037 + if (ret == 0)
9038 + return ret;
9039 +- dataoff += *matchoff;
9040 ++ dataoff = *matchoff;
9041 + }
9042 +
9043 + if (in_header)
9044 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
9045 +index 848cc81d69926..2fde193c3d26a 100644
9046 +--- a/net/netfilter/nf_tables_api.c
9047 ++++ b/net/netfilter/nf_tables_api.c
9048 +@@ -2197,7 +2197,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
9049 + struct netlink_ext_ack *extack)
9050 + {
9051 + const struct nlattr * const *nla = ctx->nla;
9052 +- struct nft_stats __percpu *stats = NULL;
9053 + struct nft_table *table = ctx->table;
9054 + struct nft_base_chain *basechain;
9055 + struct net *net = ctx->net;
9056 +@@ -2212,6 +2211,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
9057 + return -EOVERFLOW;
9058 +
9059 + if (nla[NFTA_CHAIN_HOOK]) {
9060 ++ struct nft_stats __percpu *stats = NULL;
9061 + struct nft_chain_hook hook;
9062 +
9063 + if (flags & NFT_CHAIN_BINDING)
9064 +@@ -2243,8 +2243,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
9065 + if (err < 0) {
9066 + nft_chain_release_hook(&hook);
9067 + kfree(basechain);
9068 ++ free_percpu(stats);
9069 + return err;
9070 + }
9071 ++ if (stats)
9072 ++ static_branch_inc(&nft_counters_enabled);
9073 + } else {
9074 + if (flags & NFT_CHAIN_BASE)
9075 + return -EINVAL;
9076 +@@ -2319,9 +2322,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
9077 + goto err_unregister_hook;
9078 + }
9079 +
9080 +- if (stats)
9081 +- static_branch_inc(&nft_counters_enabled);
9082 +-
9083 + table->use++;
9084 +
9085 + return 0;
9086 +diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
9087 +index 0fa2e20304272..ee6840bd59337 100644
9088 +--- a/net/netfilter/nfnetlink_osf.c
9089 ++++ b/net/netfilter/nfnetlink_osf.c
9090 +@@ -269,6 +269,7 @@ bool nf_osf_find(const struct sk_buff *skb,
9091 + struct nf_osf_hdr_ctx ctx;
9092 + const struct tcphdr *tcp;
9093 + struct tcphdr _tcph;
9094 ++ bool found = false;
9095 +
9096 + memset(&ctx, 0, sizeof(ctx));
9097 +
9098 +@@ -283,10 +284,11 @@ bool nf_osf_find(const struct sk_buff *skb,
9099 +
9100 + data->genre = f->genre;
9101 + data->version = f->version;
9102 ++ found = true;
9103 + break;
9104 + }
9105 +
9106 +- return true;
9107 ++ return found;
9108 + }
9109 + EXPORT_SYMBOL_GPL(nf_osf_find);
9110 +
9111 +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
9112 +index ac366c99086fd..7d7f7bac0216a 100644
9113 +--- a/net/sched/cls_api.c
9114 ++++ b/net/sched/cls_api.c
9115 +@@ -2136,6 +2136,7 @@ replay:
9116 + }
9117 +
9118 + if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
9119 ++ tfilter_put(tp, fh);
9120 + NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
9121 + err = -EINVAL;
9122 + goto errout;
9123 +diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
9124 +index 0b941dd63d268..86675a79da1e4 100644
9125 +--- a/net/sched/sch_taprio.c
9126 ++++ b/net/sched/sch_taprio.c
9127 +@@ -67,6 +67,7 @@ struct taprio_sched {
9128 + u32 flags;
9129 + enum tk_offsets tk_offset;
9130 + int clockid;
9131 ++ bool offloaded;
9132 + atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
9133 + * speeds it's sub-nanoseconds per byte
9134 + */
9135 +@@ -1279,6 +1280,8 @@ static int taprio_enable_offload(struct net_device *dev,
9136 + goto done;
9137 + }
9138 +
9139 ++ q->offloaded = true;
9140 ++
9141 + done:
9142 + taprio_offload_free(offload);
9143 +
9144 +@@ -1293,12 +1296,9 @@ static int taprio_disable_offload(struct net_device *dev,
9145 + struct tc_taprio_qopt_offload *offload;
9146 + int err;
9147 +
9148 +- if (!FULL_OFFLOAD_IS_ENABLED(q->flags))
9149 ++ if (!q->offloaded)
9150 + return 0;
9151 +
9152 +- if (!ops->ndo_setup_tc)
9153 +- return -EOPNOTSUPP;
9154 +-
9155 + offload = taprio_offload_alloc(0);
9156 + if (!offload) {
9157 + NL_SET_ERR_MSG(extack,
9158 +@@ -1314,6 +1314,8 @@ static int taprio_disable_offload(struct net_device *dev,
9159 + goto out;
9160 + }
9161 +
9162 ++ q->offloaded = false;
9163 ++
9164 + out:
9165 + taprio_offload_free(offload);
9166 +
9167 +@@ -1949,12 +1951,14 @@ start_error:
9168 +
9169 + static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
9170 + {
9171 +- struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
9172 ++ struct taprio_sched *q = qdisc_priv(sch);
9173 ++ struct net_device *dev = qdisc_dev(sch);
9174 ++ unsigned int ntx = cl - 1;
9175 +
9176 +- if (!dev_queue)
9177 ++ if (ntx >= dev->num_tx_queues)
9178 + return NULL;
9179 +
9180 +- return dev_queue->qdisc_sleeping;
9181 ++ return q->qdiscs[ntx];
9182 + }
9183 +
9184 + static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
9185 +diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
9186 +index 1f3bb1f6b1f7b..8095876b66eb6 100644
9187 +--- a/net/smc/smc_core.c
9188 ++++ b/net/smc/smc_core.c
9189 +@@ -2148,7 +2148,7 @@ static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
9190 + static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
9191 + struct smc_buf_desc *buf_desc, bool is_rmb)
9192 + {
9193 +- int i, rc = 0;
9194 ++ int i, rc = 0, cnt = 0;
9195 +
9196 + /* protect against parallel link reconfiguration */
9197 + mutex_lock(&lgr->llc_conf_mutex);
9198 +@@ -2161,9 +2161,12 @@ static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
9199 + rc = -ENOMEM;
9200 + goto out;
9201 + }
9202 ++ cnt++;
9203 + }
9204 + out:
9205 + mutex_unlock(&lgr->llc_conf_mutex);
9206 ++ if (!rc && !cnt)
9207 ++ rc = -EINVAL;
9208 + return rc;
9209 + }
9210 +
9211 +diff --git a/scripts/Makefile.debug b/scripts/Makefile.debug
9212 +index 9f39b0130551f..8cf1cb22dd934 100644
9213 +--- a/scripts/Makefile.debug
9214 ++++ b/scripts/Makefile.debug
9215 +@@ -1,20 +1,19 @@
9216 + DEBUG_CFLAGS :=
9217 ++debug-flags-y := -g
9218 +
9219 + ifdef CONFIG_DEBUG_INFO_SPLIT
9220 + DEBUG_CFLAGS += -gsplit-dwarf
9221 +-else
9222 +-DEBUG_CFLAGS += -g
9223 + endif
9224 +
9225 +-ifndef CONFIG_AS_IS_LLVM
9226 +-KBUILD_AFLAGS += -Wa,-gdwarf-2
9227 +-endif
9228 +-
9229 +-ifndef CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
9230 +-dwarf-version-$(CONFIG_DEBUG_INFO_DWARF4) := 4
9231 +-dwarf-version-$(CONFIG_DEBUG_INFO_DWARF5) := 5
9232 +-DEBUG_CFLAGS += -gdwarf-$(dwarf-version-y)
9233 ++debug-flags-$(CONFIG_DEBUG_INFO_DWARF4) += -gdwarf-4
9234 ++debug-flags-$(CONFIG_DEBUG_INFO_DWARF5) += -gdwarf-5
9235 ++ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_AS_IS_GNU),yy)
9236 ++# Clang does not pass -g or -gdwarf-* option down to GAS.
9237 ++# Add -Wa, prefix to explicitly specify the flags.
9238 ++KBUILD_AFLAGS += $(addprefix -Wa$(comma), $(debug-flags-y))
9239 + endif
9240 ++DEBUG_CFLAGS += $(debug-flags-y)
9241 ++KBUILD_AFLAGS += $(debug-flags-y)
9242 +
9243 + ifdef CONFIG_DEBUG_INFO_REDUCED
9244 + DEBUG_CFLAGS += -fno-var-tracking
9245 +@@ -29,5 +28,5 @@ KBUILD_AFLAGS += -gz=zlib
9246 + KBUILD_LDFLAGS += --compress-debug-sections=zlib
9247 + endif
9248 +
9249 +-KBUILD_CFLAGS += $(DEBUG_CFLAGS)
9250 ++KBUILD_CFLAGS += $(DEBUG_CFLAGS)
9251 + export DEBUG_CFLAGS
9252 +diff --git a/sound/core/init.c b/sound/core/init.c
9253 +index 726a8353201f8..4eacfafa41730 100644
9254 +--- a/sound/core/init.c
9255 ++++ b/sound/core/init.c
9256 +@@ -178,10 +178,8 @@ int snd_card_new(struct device *parent, int idx, const char *xid,
9257 + return -ENOMEM;
9258 +
9259 + err = snd_card_init(card, parent, idx, xid, module, extra_size);
9260 +- if (err < 0) {
9261 +- kfree(card);
9262 +- return err;
9263 +- }
9264 ++ if (err < 0)
9265 ++ return err; /* card is freed by error handler */
9266 +
9267 + *card_ret = card;
9268 + return 0;
9269 +@@ -231,7 +229,7 @@ int snd_devm_card_new(struct device *parent, int idx, const char *xid,
9270 + card->managed = true;
9271 + err = snd_card_init(card, parent, idx, xid, module, extra_size);
9272 + if (err < 0) {
9273 +- devres_free(card);
9274 ++ devres_free(card); /* in managed mode, we need to free manually */
9275 + return err;
9276 + }
9277 +
9278 +@@ -293,6 +291,8 @@ static int snd_card_init(struct snd_card *card, struct device *parent,
9279 + mutex_unlock(&snd_card_mutex);
9280 + dev_err(parent, "cannot find the slot for index %d (range 0-%i), error: %d\n",
9281 + idx, snd_ecards_limit - 1, err);
9282 ++ if (!card->managed)
9283 ++ kfree(card); /* manually free here, as no destructor called */
9284 + return err;
9285 + }
9286 + set_bit(idx, snd_cards_lock); /* lock it */
9287 +diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
9288 +index c572fb5886d5d..7af2515735957 100644
9289 +--- a/sound/pci/hda/hda_bind.c
9290 ++++ b/sound/pci/hda/hda_bind.c
9291 +@@ -157,10 +157,10 @@ static int hda_codec_driver_remove(struct device *dev)
9292 + return codec->bus->core.ext_ops->hdev_detach(&codec->core);
9293 + }
9294 +
9295 +- refcount_dec(&codec->pcm_ref);
9296 + snd_hda_codec_disconnect_pcms(codec);
9297 + snd_hda_jack_tbl_disconnect(codec);
9298 +- wait_event(codec->remove_sleep, !refcount_read(&codec->pcm_ref));
9299 ++ if (!refcount_dec_and_test(&codec->pcm_ref))
9300 ++ wait_event(codec->remove_sleep, !refcount_read(&codec->pcm_ref));
9301 + snd_power_sync_ref(codec->bus->card);
9302 +
9303 + if (codec->patch_ops.free)
9304 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
9305 +index b20694fd69dea..6f30c374f896e 100644
9306 +--- a/sound/pci/hda/hda_intel.c
9307 ++++ b/sound/pci/hda/hda_intel.c
9308 +@@ -2550,6 +2550,8 @@ static const struct pci_device_id azx_ids[] = {
9309 + /* 5 Series/3400 */
9310 + { PCI_DEVICE(0x8086, 0x3b56),
9311 + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
9312 ++ { PCI_DEVICE(0x8086, 0x3b57),
9313 ++ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
9314 + /* Poulsbo */
9315 + { PCI_DEVICE(0x8086, 0x811b),
9316 + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_BASE },
9317 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
9318 +index 6c209cd26c0ca..c9d9aa6351ecf 100644
9319 +--- a/sound/pci/hda/patch_hdmi.c
9320 ++++ b/sound/pci/hda/patch_hdmi.c
9321 +@@ -170,6 +170,8 @@ struct hdmi_spec {
9322 + bool dyn_pcm_no_legacy;
9323 + /* hdmi interrupt trigger control flag for Nvidia codec */
9324 + bool hdmi_intr_trig_ctrl;
9325 ++ bool nv_dp_workaround; /* workaround DP audio infoframe for Nvidia */
9326 ++
9327 + bool intel_hsw_fixup; /* apply Intel platform-specific fixups */
9328 + /*
9329 + * Non-generic VIA/NVIDIA specific
9330 +@@ -679,15 +681,24 @@ static void hdmi_pin_setup_infoframe(struct hda_codec *codec,
9331 + int ca, int active_channels,
9332 + int conn_type)
9333 + {
9334 ++ struct hdmi_spec *spec = codec->spec;
9335 + union audio_infoframe ai;
9336 +
9337 + memset(&ai, 0, sizeof(ai));
9338 +- if (conn_type == 0) { /* HDMI */
9339 ++ if ((conn_type == 0) || /* HDMI */
9340 ++ /* Nvidia DisplayPort: Nvidia HW expects same layout as HDMI */
9341 ++ (conn_type == 1 && spec->nv_dp_workaround)) {
9342 + struct hdmi_audio_infoframe *hdmi_ai = &ai.hdmi;
9343 +
9344 +- hdmi_ai->type = 0x84;
9345 +- hdmi_ai->ver = 0x01;
9346 +- hdmi_ai->len = 0x0a;
9347 ++ if (conn_type == 0) { /* HDMI */
9348 ++ hdmi_ai->type = 0x84;
9349 ++ hdmi_ai->ver = 0x01;
9350 ++ hdmi_ai->len = 0x0a;
9351 ++ } else {/* Nvidia DP */
9352 ++ hdmi_ai->type = 0x84;
9353 ++ hdmi_ai->ver = 0x1b;
9354 ++ hdmi_ai->len = 0x11 << 2;
9355 ++ }
9356 + hdmi_ai->CC02_CT47 = active_channels - 1;
9357 + hdmi_ai->CA = ca;
9358 + hdmi_checksum_audio_infoframe(hdmi_ai);
9359 +@@ -3617,6 +3628,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
9360 + spec->pcm_playback.rates = SUPPORTED_RATES;
9361 + spec->pcm_playback.maxbps = SUPPORTED_MAXBPS;
9362 + spec->pcm_playback.formats = SUPPORTED_FORMATS;
9363 ++ spec->nv_dp_workaround = true;
9364 + return 0;
9365 + }
9366 +
9367 +@@ -3756,6 +3768,7 @@ static int patch_nvhdmi(struct hda_codec *codec)
9368 + spec->chmap.ops.chmap_cea_alloc_validate_get_type =
9369 + nvhdmi_chmap_cea_alloc_validate_get_type;
9370 + spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
9371 ++ spec->nv_dp_workaround = true;
9372 +
9373 + codec->link_down_at_suspend = 1;
9374 +
9375 +@@ -3779,6 +3792,7 @@ static int patch_nvhdmi_legacy(struct hda_codec *codec)
9376 + spec->chmap.ops.chmap_cea_alloc_validate_get_type =
9377 + nvhdmi_chmap_cea_alloc_validate_get_type;
9378 + spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
9379 ++ spec->nv_dp_workaround = true;
9380 +
9381 + codec->link_down_at_suspend = 1;
9382 +
9383 +@@ -3984,6 +3998,7 @@ static int tegra_hdmi_init(struct hda_codec *codec)
9384 +
9385 + generic_hdmi_init_per_pins(codec);
9386 +
9387 ++ codec->depop_delay = 10;
9388 + codec->patch_ops.build_pcms = tegra_hdmi_build_pcms;
9389 + spec->chmap.ops.chmap_cea_alloc_validate_get_type =
9390 + nvhdmi_chmap_cea_alloc_validate_get_type;
9391 +@@ -3992,6 +4007,7 @@ static int tegra_hdmi_init(struct hda_codec *codec)
9392 + spec->chmap.ops.chmap_cea_alloc_validate_get_type =
9393 + nvhdmi_chmap_cea_alloc_validate_get_type;
9394 + spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
9395 ++ spec->nv_dp_workaround = true;
9396 +
9397 + return 0;
9398 + }
9399 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
9400 +index 799f6bf266dd0..9614b63415a8e 100644
9401 +--- a/sound/pci/hda/patch_realtek.c
9402 ++++ b/sound/pci/hda/patch_realtek.c
9403 +@@ -7037,6 +7037,8 @@ enum {
9404 + ALC294_FIXUP_ASUS_GU502_HP,
9405 + ALC294_FIXUP_ASUS_GU502_PINS,
9406 + ALC294_FIXUP_ASUS_GU502_VERBS,
9407 ++ ALC294_FIXUP_ASUS_G513_PINS,
9408 ++ ALC285_FIXUP_ASUS_G533Z_PINS,
9409 + ALC285_FIXUP_HP_GPIO_LED,
9410 + ALC285_FIXUP_HP_MUTE_LED,
9411 + ALC236_FIXUP_HP_GPIO_LED,
9412 +@@ -8374,6 +8376,24 @@ static const struct hda_fixup alc269_fixups[] = {
9413 + [ALC294_FIXUP_ASUS_GU502_HP] = {
9414 + .type = HDA_FIXUP_FUNC,
9415 + .v.func = alc294_fixup_gu502_hp,
9416 ++ },
9417 ++ [ALC294_FIXUP_ASUS_G513_PINS] = {
9418 ++ .type = HDA_FIXUP_PINS,
9419 ++ .v.pins = (const struct hda_pintbl[]) {
9420 ++ { 0x19, 0x03a11050 }, /* front HP mic */
9421 ++ { 0x1a, 0x03a11c30 }, /* rear external mic */
9422 ++ { 0x21, 0x03211420 }, /* front HP out */
9423 ++ { }
9424 ++ },
9425 ++ },
9426 ++ [ALC285_FIXUP_ASUS_G533Z_PINS] = {
9427 ++ .type = HDA_FIXUP_PINS,
9428 ++ .v.pins = (const struct hda_pintbl[]) {
9429 ++ { 0x14, 0x90170120 },
9430 ++ { }
9431 ++ },
9432 ++ .chained = true,
9433 ++ .chain_id = ALC294_FIXUP_ASUS_G513_PINS,
9434 + },
9435 + [ALC294_FIXUP_ASUS_COEF_1B] = {
9436 + .type = HDA_FIXUP_VERBS,
9437 +@@ -9114,6 +9134,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
9438 + SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
9439 + SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
9440 + SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
9441 ++ SND_PCI_QUIRK(0x1028, 0x087d, "Dell Precision 5530", ALC289_FIXUP_DUAL_SPK),
9442 + SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
9443 + SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
9444 + SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
9445 +@@ -9130,6 +9151,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
9446 + SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
9447 + SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
9448 + SND_PCI_QUIRK(0x1028, 0x0b19, "Dell XPS 15 9520", ALC289_FIXUP_DUAL_SPK),
9449 ++ SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK),
9450 + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
9451 + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
9452 + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
9453 +@@ -9257,6 +9279,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
9454 + SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
9455 + SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
9456 + SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
9457 ++ SND_PCI_QUIRK(0x103c, 0x8902, "HP OMEN 16", ALC285_FIXUP_HP_MUTE_LED),
9458 + SND_PCI_QUIRK(0x103c, 0x896e, "HP EliteBook x360 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
9459 + SND_PCI_QUIRK(0x103c, 0x8971, "HP EliteBook 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
9460 + SND_PCI_QUIRK(0x103c, 0x8972, "HP EliteBook 840 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
9461 +@@ -9304,10 +9327,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
9462 + SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
9463 + SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
9464 + SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
9465 ++ SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
9466 ++ SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
9467 + SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
9468 + SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
9469 + SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
9470 +- SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
9471 + SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
9472 + SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
9473 + SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
9474 +@@ -9323,14 +9347,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
9475 + SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
9476 + SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
9477 + SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
9478 ++ SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
9479 + SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
9480 ++ SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
9481 + SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
9482 + SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
9483 + SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
9484 ++ SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
9485 + SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
9486 ++ SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401),
9487 + SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
9488 +- SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
9489 +- SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
9490 + SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
9491 + SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
9492 + SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
9493 +@@ -9532,6 +9558,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
9494 + SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
9495 + SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK),
9496 + SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
9497 ++ SND_PCI_QUIRK(0x19e5, 0x320f, "Huawei WRT-WX9 ", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
9498 + SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
9499 + SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
9500 + SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
9501 +diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
9502 +index ff2aa13b7b26f..5d105c44b46df 100644
9503 +--- a/sound/usb/endpoint.c
9504 ++++ b/sound/usb/endpoint.c
9505 +@@ -758,8 +758,7 @@ bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip,
9506 + * The endpoint needs to be closed via snd_usb_endpoint_close() later.
9507 + *
9508 + * Note that this function doesn't configure the endpoint. The substream
9509 +- * needs to set it up later via snd_usb_endpoint_set_params() and
9510 +- * snd_usb_endpoint_prepare().
9511 ++ * needs to set it up later via snd_usb_endpoint_configure().
9512 + */
9513 + struct snd_usb_endpoint *
9514 + snd_usb_endpoint_open(struct snd_usb_audio *chip,
9515 +@@ -1293,13 +1292,12 @@ out_of_memory:
9516 + /*
9517 + * snd_usb_endpoint_set_params: configure an snd_usb_endpoint
9518 + *
9519 +- * It's called either from hw_params callback.
9520 + * Determine the number of URBs to be used on this endpoint.
9521 + * An endpoint must be configured before it can be started.
9522 + * An endpoint that is already running can not be reconfigured.
9523 + */
9524 +-int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
9525 +- struct snd_usb_endpoint *ep)
9526 ++static int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
9527 ++ struct snd_usb_endpoint *ep)
9528 + {
9529 + const struct audioformat *fmt = ep->cur_audiofmt;
9530 + int err;
9531 +@@ -1382,18 +1380,18 @@ static int init_sample_rate(struct snd_usb_audio *chip,
9532 + }
9533 +
9534 + /*
9535 +- * snd_usb_endpoint_prepare: Prepare the endpoint
9536 ++ * snd_usb_endpoint_configure: Configure the endpoint
9537 + *
9538 + * This function sets up the EP to be fully usable state.
9539 +- * It's called either from prepare callback.
9540 ++ * It's called either from hw_params or prepare callback.
9541 + * The function checks need_setup flag, and performs nothing unless needed,
9542 + * so it's safe to call this multiple times.
9543 + *
9544 + * This returns zero if unchanged, 1 if the configuration has changed,
9545 + * or a negative error code.
9546 + */
9547 +-int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
9548 +- struct snd_usb_endpoint *ep)
9549 ++int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
9550 ++ struct snd_usb_endpoint *ep)
9551 + {
9552 + bool iface_first;
9553 + int err = 0;
9554 +@@ -1414,6 +1412,9 @@ int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
9555 + if (err < 0)
9556 + goto unlock;
9557 + }
9558 ++ err = snd_usb_endpoint_set_params(chip, ep);
9559 ++ if (err < 0)
9560 ++ goto unlock;
9561 + goto done;
9562 + }
9563 +
9564 +@@ -1441,6 +1442,10 @@ int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
9565 + if (err < 0)
9566 + goto unlock;
9567 +
9568 ++ err = snd_usb_endpoint_set_params(chip, ep);
9569 ++ if (err < 0)
9570 ++ goto unlock;
9571 ++
9572 + err = snd_usb_select_mode_quirk(chip, ep->cur_audiofmt);
9573 + if (err < 0)
9574 + goto unlock;
9575 +diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
9576 +index e67ea28faa54f..6a9af04cf175a 100644
9577 +--- a/sound/usb/endpoint.h
9578 ++++ b/sound/usb/endpoint.h
9579 +@@ -17,10 +17,8 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip,
9580 + bool is_sync_ep);
9581 + void snd_usb_endpoint_close(struct snd_usb_audio *chip,
9582 + struct snd_usb_endpoint *ep);
9583 +-int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
9584 +- struct snd_usb_endpoint *ep);
9585 +-int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
9586 +- struct snd_usb_endpoint *ep);
9587 ++int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
9588 ++ struct snd_usb_endpoint *ep);
9589 + int snd_usb_endpoint_get_clock_rate(struct snd_usb_audio *chip, int clock);
9590 +
9591 + bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip,
9592 +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
9593 +index 02035b545f9dd..e692ae04436a5 100644
9594 +--- a/sound/usb/pcm.c
9595 ++++ b/sound/usb/pcm.c
9596 +@@ -443,17 +443,17 @@ static int configure_endpoints(struct snd_usb_audio *chip,
9597 + if (stop_endpoints(subs, false))
9598 + sync_pending_stops(subs);
9599 + if (subs->sync_endpoint) {
9600 +- err = snd_usb_endpoint_prepare(chip, subs->sync_endpoint);
9601 ++ err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
9602 + if (err < 0)
9603 + return err;
9604 + }
9605 +- err = snd_usb_endpoint_prepare(chip, subs->data_endpoint);
9606 ++ err = snd_usb_endpoint_configure(chip, subs->data_endpoint);
9607 + if (err < 0)
9608 + return err;
9609 + snd_usb_set_format_quirk(subs, subs->cur_audiofmt);
9610 + } else {
9611 + if (subs->sync_endpoint) {
9612 +- err = snd_usb_endpoint_prepare(chip, subs->sync_endpoint);
9613 ++ err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
9614 + if (err < 0)
9615 + return err;
9616 + }
9617 +@@ -551,13 +551,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
9618 + subs->cur_audiofmt = fmt;
9619 + mutex_unlock(&chip->mutex);
9620 +
9621 +- if (subs->sync_endpoint) {
9622 +- ret = snd_usb_endpoint_set_params(chip, subs->sync_endpoint);
9623 +- if (ret < 0)
9624 +- goto unlock;
9625 +- }
9626 +-
9627 +- ret = snd_usb_endpoint_set_params(chip, subs->data_endpoint);
9628 ++ ret = configure_endpoints(chip, subs);
9629 +
9630 + unlock:
9631 + if (ret < 0)
9632 +diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
9633 +index 6b1bafe267a42..8ec5b9f344e02 100644
9634 +--- a/tools/lib/perf/evlist.c
9635 ++++ b/tools/lib/perf/evlist.c
9636 +@@ -441,6 +441,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
9637 +
9638 + perf_evlist__for_each_entry(evlist, evsel) {
9639 + bool overwrite = evsel->attr.write_backward;
9640 ++ enum fdarray_flags flgs;
9641 + struct perf_mmap *map;
9642 + int *output, fd, cpu;
9643 +
9644 +@@ -504,8 +505,8 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
9645 +
9646 + revent = !overwrite ? POLLIN : 0;
9647 +
9648 +- if (!evsel->system_wide &&
9649 +- perf_evlist__add_pollfd(evlist, fd, map, revent, fdarray_flag__default) < 0) {
9650 ++ flgs = evsel->system_wide ? fdarray_flag__nonfilterable : fdarray_flag__default;
9651 ++ if (perf_evlist__add_pollfd(evlist, fd, map, revent, flgs) < 0) {
9652 + perf_mmap__put(map);
9653 + return -1;
9654 + }
9655 +diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c
9656 +index 63b9db6574425..97c69a249c6e4 100644
9657 +--- a/tools/perf/util/bpf_counter_cgroup.c
9658 ++++ b/tools/perf/util/bpf_counter_cgroup.c
9659 +@@ -95,7 +95,7 @@ static int bperf_load_program(struct evlist *evlist)
9660 +
9661 + perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
9662 + link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
9663 +- FD(cgrp_switch, cpu.cpu));
9664 ++ FD(cgrp_switch, i));
9665 + if (IS_ERR(link)) {
9666 + pr_err("Failed to attach cgroup program\n");
9667 + err = PTR_ERR(link);
9668 +@@ -123,7 +123,7 @@ static int bperf_load_program(struct evlist *evlist)
9669 +
9670 + map_fd = bpf_map__fd(skel->maps.events);
9671 + perf_cpu_map__for_each_cpu(cpu, j, evlist->core.all_cpus) {
9672 +- int fd = FD(evsel, cpu.cpu);
9673 ++ int fd = FD(evsel, j);
9674 + __u32 idx = evsel->core.idx * total_cpus + cpu.cpu;
9675 +
9676 + err = bpf_map_update_elem(map_fd, &idx, &fd,
9677 +diff --git a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
9678 +index 292c430768b52..c72f8ad96f751 100644
9679 +--- a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
9680 ++++ b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
9681 +@@ -176,7 +176,7 @@ static int bperf_cgroup_count(void)
9682 + }
9683 +
9684 + // This will be attached to cgroup-switches event for each cpu
9685 +-SEC("perf_events")
9686 ++SEC("perf_event")
9687 + int BPF_PROG(on_cgrp_switch)
9688 + {
9689 + return bperf_cgroup_count();
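
The one-character rename above matters because libbpf derives the BPF program type from the ELF section name, and it recognizes the prefix "perf_event", not "perf_events", for BPF_PROG_TYPE_PERF_EVENT; libbpf 1.0 made that matching strict, turning the typo into a load failure. SEC() itself is only a section attribute, roughly (paraphrasing libbpf's bpf_helpers.h):

    /* Paraphrase of libbpf's SEC(): it just names the ELF section that
     * the loader later inspects to choose the program type. */
    #define SEC(name) __attribute__((section(name), used))

    SEC("perf_event")               /* resolves; "perf_events" would not */
    int on_cgrp_switch_sketch(void *ctx)
    {
        (void)ctx;
        return 0;
    }
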
9690 +diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
9691 +index 953338b9e887e..02cd9f75e3d2f 100644
9692 +--- a/tools/perf/util/genelf.c
9693 ++++ b/tools/perf/util/genelf.c
9694 +@@ -251,6 +251,7 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
9695 + Elf_Data *d;
9696 + Elf_Scn *scn;
9697 + Elf_Ehdr *ehdr;
9698 ++ Elf_Phdr *phdr;
9699 + Elf_Shdr *shdr;
9700 + uint64_t eh_frame_base_offset;
9701 + char *strsym = NULL;
9702 +@@ -285,6 +286,19 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
9703 + ehdr->e_version = EV_CURRENT;
9704 + ehdr->e_shstrndx= unwinding ? 4 : 2; /* shdr index for section name */
9705 +
9706 ++ /*
9707 ++ * setup program header
9708 ++ */
9709 ++ phdr = elf_newphdr(e, 1);
9710 ++ phdr[0].p_type = PT_LOAD;
9711 ++ phdr[0].p_offset = 0;
9712 ++ phdr[0].p_vaddr = 0;
9713 ++ phdr[0].p_paddr = 0;
9714 ++ phdr[0].p_filesz = csize;
9715 ++ phdr[0].p_memsz = csize;
9716 ++ phdr[0].p_flags = PF_X | PF_R;
9717 ++ phdr[0].p_align = 8;
9718 ++
9719 + /*
9720 + * setup text section
9721 + */
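
The genelf.c hunk above gives the synthesized JIT image a program header table with one PT_LOAD entry covering the code, so the result looks like a normally loadable DSO to ELF consumers that expect at least one segment; the genelf.h hunk just below adds the matching 32/64-bit elf_newphdr/Elf_Phdr aliases. A minimal libelf sketch of the same step, assuming e is an Elf handle already opened for writing and csize is the size of the emitted code:

    #include <elf.h>
    #include <libelf.h>

    static int add_load_phdr(Elf *e, size_t csize)
    {
        /* One header; offsets and addresses stay zero, as in the patch. */
        Elf64_Phdr *phdr = elf64_newphdr(e, 1);

        if (!phdr)
            return -1;
        phdr[0].p_type   = PT_LOAD;
        phdr[0].p_flags  = PF_X | PF_R; /* executable and readable */
        phdr[0].p_filesz = csize;
        phdr[0].p_memsz  = csize;
        phdr[0].p_align  = 8;
        return 0;
    }
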
9722 +diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
9723 +index ae138afe6c563..b5c909546e3f2 100644
9724 +--- a/tools/perf/util/genelf.h
9725 ++++ b/tools/perf/util/genelf.h
9726 +@@ -53,8 +53,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
9727 +
9728 + #if GEN_ELF_CLASS == ELFCLASS64
9729 + #define elf_newehdr elf64_newehdr
9730 ++#define elf_newphdr elf64_newphdr
9731 + #define elf_getshdr elf64_getshdr
9732 + #define Elf_Ehdr Elf64_Ehdr
9733 ++#define Elf_Phdr Elf64_Phdr
9734 + #define Elf_Shdr Elf64_Shdr
9735 + #define Elf_Sym Elf64_Sym
9736 + #define ELF_ST_TYPE(a) ELF64_ST_TYPE(a)
9737 +@@ -62,8 +64,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
9738 + #define ELF_ST_VIS(a) ELF64_ST_VISIBILITY(a)
9739 + #else
9740 + #define elf_newehdr elf32_newehdr
9741 ++#define elf_newphdr elf32_newphdr
9742 + #define elf_getshdr elf32_getshdr
9743 + #define Elf_Ehdr Elf32_Ehdr
9744 ++#define Elf_Phdr Elf32_Phdr
9745 + #define Elf_Shdr Elf32_Shdr
9746 + #define Elf_Sym Elf32_Sym
9747 + #define ELF_ST_TYPE(a) ELF32_ST_TYPE(a)
9748 +diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
9749 +index 75bec32d4f571..647b7dff8ef36 100644
9750 +--- a/tools/perf/util/symbol-elf.c
9751 ++++ b/tools/perf/util/symbol-elf.c
9752 +@@ -2102,8 +2102,8 @@ static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
9753 + * unusual. One significant peculiarity is that the mapping (start -> pgoff)
9754 + * is not the same for the kernel map and the modules map. That happens because
9755 + * the data is copied adjacently whereas the original kcore has gaps. Finally,
9756 +- * kallsyms and modules files are compared with their copies to check that
9757 +- * modules have not been loaded or unloaded while the copies were taking place.
9758 ++ * kallsyms file is compared with its copy to check that modules have not been
9759 ++ * loaded or unloaded while the copies were taking place.
9760 + *
9761 + * Return: %0 on success, %-1 on failure.
9762 + */
9763 +@@ -2166,9 +2166,6 @@ int kcore_copy(const char *from_dir, const char *to_dir)
9764 + goto out_extract_close;
9765 + }
9766 +
9767 +- if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
9768 +- goto out_extract_close;
9769 +-
9770 + if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
9771 + goto out_extract_close;
9772 +
9773 +diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
9774 +index 84d17bd4efaed..64e273b2b1b21 100644
9775 +--- a/tools/perf/util/synthetic-events.c
9776 ++++ b/tools/perf/util/synthetic-events.c
9777 +@@ -367,13 +367,24 @@ static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
9778 + bool is_kernel)
9779 + {
9780 + struct build_id bid;
9781 ++ struct nsinfo *nsi;
9782 ++ struct nscookie nc;
9783 + int rc;
9784 +
9785 +- if (is_kernel)
9786 ++ if (is_kernel) {
9787 + rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
9788 +- else
9789 +- rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
9790 ++ goto out;
9791 ++ }
9792 ++
9793 ++ nsi = nsinfo__new(event->pid);
9794 ++ nsinfo__mountns_enter(nsi, &nc);
9795 +
9796 ++ rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
9797 ++
9798 ++ nsinfo__mountns_exit(&nc);
9799 ++ nsinfo__put(nsi);
9800 ++
9801 ++out:
9802 + if (rc == 0) {
9803 + memcpy(event->build_id, bid.data, sizeof(bid.data));
9804 + event->build_id_size = (u8) bid.size;
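
The synthetic-events.c hunk makes the build-id read namespace-aware: filename__read_build_id() now runs between nsinfo__mountns_enter() and nsinfo__mountns_exit(), so event->filename is resolved in the target process's mount namespace rather than perf's own. Without this, a path taken from a container's /proc/<pid>/maps may not exist, or may name a different file, in the tool's namespace. A rough stand-alone illustration of the underlying setns(2) mechanism; this is a hypothetical helper, not perf's nsinfo API, and it needs CAP_SYS_ADMIN:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sched.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Enter pid's mount namespace, probe a path there, then restore
     * our own namespace before returning. */
    static int probe_in_target_mountns(pid_t pid, const char *filename)
    {
        char ns_path[64];
        int self_fd, target_fd, ret = -1;

        self_fd = open("/proc/self/ns/mnt", O_RDONLY);
        snprintf(ns_path, sizeof(ns_path), "/proc/%d/ns/mnt", (int)pid);
        target_fd = open(ns_path, O_RDONLY);
        if (self_fd < 0 || target_fd < 0)
            goto out;

        if (setns(target_fd, CLONE_NEWNS) == 0) {
            /* filename now resolves as the target process sees it. */
            ret = access(filename, R_OK);
            setns(self_fd, CLONE_NEWNS);        /* switch back */
        }
    out:
        if (self_fd >= 0)
            close(self_fd);
        if (target_fd >= 0)
            close(target_fd);
        return ret;
    }
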
9805 +diff --git a/tools/testing/selftests/net/forwarding/sch_red.sh b/tools/testing/selftests/net/forwarding/sch_red.sh
9806 +index e714bae473fb4..81f31179ac887 100755
9807 +--- a/tools/testing/selftests/net/forwarding/sch_red.sh
9808 ++++ b/tools/testing/selftests/net/forwarding/sch_red.sh
9809 +@@ -1,3 +1,4 @@
9810 ++#!/bin/bash
9811 + # SPDX-License-Identifier: GPL-2.0
9812 +
9813 + # This test sends one stream of traffic from H1 through a TBF shaper, to a RED