
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.15 commit in: /
Date: Wed, 28 Sep 2022 09:33:39
Message-Id: 1664357601.5a844dd925581feb47903fa6a040b75e66fa2d31.mpagano@gentoo
commit: 5a844dd925581feb47903fa6a040b75e66fa2d31
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 28 09:33:21 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 28 09:33:21 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5a844dd9

Linux patch 5.15.71

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1070_linux-5.15.71.patch | 4762 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 4766 insertions(+)

diff --git a/0000_README b/0000_README
index 673caa1d..0489f33c 100644
--- a/0000_README
+++ b/0000_README
@@ -323,6 +323,10 @@ Patch: 1069_linux-5.15.70.patch
From: http://www.kernel.org
Desc: Linux 5.15.70

+Patch: 1070_linux-5.15.71.patch
+From: http://www.kernel.org
+Desc: Linux 5.15.71
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1070_linux-5.15.71.patch b/1070_linux-5.15.71.patch
new file mode 100644
index 00000000..8ea36f60
--- /dev/null
+++ b/1070_linux-5.15.71.patch
@@ -0,0 +1,4762 @@
+diff --git a/Makefile b/Makefile
+index e815677ec0112..4c06cbe89ece2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 15
+-SUBLEVEL = 70
++SUBLEVEL = 71
+ EXTRAVERSION =
+ NAME = Trick or Treat
+
+diff --git a/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi b/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi
+index 7249871530ab9..5eecbefa8a336 100644
+--- a/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi
++++ b/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi
+@@ -2,8 +2,8 @@
+ /*
+ * Copyright (c) 2020 Fuzhou Rockchip Electronics Co., Ltd
+ * Copyright (c) 2020 Engicam srl
+- * Copyright (c) 2020 Amarula Solutons
+- * Copyright (c) 2020 Amarula Solutons(India)
++ * Copyright (c) 2020 Amarula Solutions
++ * Copyright (c) 2020 Amarula Solutions(India)
+ */
+
+ #include <dt-bindings/gpio/gpio.h>
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
+index e6c1c94c8d69c..07737b65d7a3d 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
+@@ -87,3 +87,8 @@
+ };
+ };
+ };
++
++&wlan_host_wake_l {
++ /* Kevin has an external pull up, but Bob does not. */
++ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
++};
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
+index 1384dabbdf406..739937f70f8d0 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
+@@ -237,6 +237,14 @@
+ &edp {
+ status = "okay";
+
++ /*
++ * eDP PHY/clk don't sync reliably at anything other than 24 MHz. Only
++ * set this here, because rk3399-gru.dtsi ensures we can generate this
++ * off GPLL=600MHz, whereas some other RK3399 boards may not.
++ */
++ assigned-clocks = <&cru PCLK_EDP>;
++ assigned-clock-rates = <24000000>;
++
+ ports {
+ edp_out: port@1 {
+ reg = <1>;
+@@ -395,6 +403,7 @@ ap_i2c_tp: &i2c5 {
+ };
+
+ wlan_host_wake_l: wlan-host-wake-l {
++ /* Kevin has an external pull up, but Bob does not */
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+index 08fa00364b42f..7b27079fd6116 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+@@ -62,7 +62,6 @@
+ vcc5v0_host: vcc5v0-host-regulator {
+ compatible = "regulator-fixed";
+ gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>;
+- enable-active-low;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_host_en>;
+ regulator-name = "vcc5v0_host";
+diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
+index acf67ef4c505d..d844b5317d2d9 100644
+--- a/arch/arm64/kernel/topology.c
++++ b/arch/arm64/kernel/topology.c
+@@ -249,7 +249,7 @@ static void amu_fie_setup(const struct cpumask *cpus)
+ for_each_cpu(cpu, cpus) {
+ if (!freq_counters_valid(cpu) ||
+ freq_inv_set_max_ratio(cpu,
+- cpufreq_get_hw_max_freq(cpu) * 1000,
++ cpufreq_get_hw_max_freq(cpu) * 1000ULL,
+ arch_timer_get_rate()))
+ return;
+ }
+diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
+index 7a623684d9b5e..2d5a0bcb0cec1 100644
+--- a/arch/mips/lantiq/clk.c
++++ b/arch/mips/lantiq/clk.c
+@@ -50,6 +50,7 @@ struct clk *clk_get_io(void)
+ {
+ return &cpu_clk_generic[2];
+ }
++EXPORT_SYMBOL_GPL(clk_get_io);
+
+ struct clk *clk_get_ppe(void)
+ {
+diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
+index 794c96c2a4cdd..311dc1580bbde 100644
+--- a/arch/mips/loongson32/common/platform.c
++++ b/arch/mips/loongson32/common/platform.c
+@@ -98,7 +98,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
+ if (plat_dat->bus_id) {
+ __raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 |
+ GMAC1_USE_UART0, LS1X_MUX_CTRL0);
+- switch (plat_dat->interface) {
++ switch (plat_dat->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val &= ~(GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
+ break;
+@@ -107,12 +107,12 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
+ break;
+ default:
+ pr_err("unsupported mii mode %d\n",
+- plat_dat->interface);
++ plat_dat->phy_interface);
+ return -ENOTSUPP;
+ }
+ val &= ~GMAC1_SHUT;
+ } else {
+- switch (plat_dat->interface) {
++ switch (plat_dat->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val &= ~(GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
+ break;
+@@ -121,7 +121,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
+ break;
+ default:
+ pr_err("unsupported mii mode %d\n",
+- plat_dat->interface);
++ plat_dat->phy_interface);
+ return -ENOTSUPP;
+ }
+ val &= ~GMAC0_SHUT;
+@@ -131,7 +131,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
+ plat_dat = dev_get_platdata(&pdev->dev);
+
+ val &= ~PHY_INTF_SELI;
+- if (plat_dat->interface == PHY_INTERFACE_MODE_RMII)
++ if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII)
+ val |= 0x4 << PHY_INTF_SELI_SHIFT;
+ __raw_writel(val, LS1X_MUX_CTRL1);
+
+@@ -146,9 +146,9 @@ static struct plat_stmmacenet_data ls1x_eth0_pdata = {
+ .bus_id = 0,
+ .phy_addr = -1,
+ #if defined(CONFIG_LOONGSON1_LS1B)
+- .interface = PHY_INTERFACE_MODE_MII,
++ .phy_interface = PHY_INTERFACE_MODE_MII,
+ #elif defined(CONFIG_LOONGSON1_LS1C)
+- .interface = PHY_INTERFACE_MODE_RMII,
++ .phy_interface = PHY_INTERFACE_MODE_RMII,
+ #endif
+ .mdio_bus_data = &ls1x_mdio_bus_data,
+ .dma_cfg = &ls1x_eth_dma_cfg,
+@@ -186,7 +186,7 @@ struct platform_device ls1x_eth0_pdev = {
+ static struct plat_stmmacenet_data ls1x_eth1_pdata = {
+ .bus_id = 1,
+ .phy_addr = -1,
+- .interface = PHY_INTERFACE_MODE_MII,
++ .phy_interface = PHY_INTERFACE_MODE_MII,
+ .mdio_bus_data = &ls1x_mdio_bus_data,
+ .dma_cfg = &ls1x_eth_dma_cfg,
+ .has_gmac = 1,
+diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
+index c2d5ecbe55264..f8fb85dc94b7a 100644
+--- a/arch/riscv/kernel/signal.c
++++ b/arch/riscv/kernel/signal.c
+@@ -121,6 +121,8 @@ SYSCALL_DEFINE0(rt_sigreturn)
+ if (restore_altstack(&frame->uc.uc_stack))
+ goto badframe;
+
++ regs->cause = -1UL;
++
+ return regs->a0;
+
+ badframe:
+diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
+index 960f5c35ad1b6..8dc7ab1f3cd4e 100644
+--- a/arch/um/kernel/um_arch.c
++++ b/arch/um/kernel/um_arch.c
+@@ -31,7 +31,7 @@
+ #include <os.h>
+
+ #define DEFAULT_COMMAND_LINE_ROOT "root=98:0"
+-#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty"
++#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty0"
+
+ /* Changed in add_arg and setup_arch, which run before SMP is started */
+ static char __initdata command_line[COMMAND_LINE_SIZE] = { 0 };
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 8496ffc67c32d..3b4e1d8d239a2 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -4122,6 +4122,9 @@ static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
+ {
+ u32 eax, ecx, edx;
+
++ if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE))
++ return emulate_ud(ctxt);
++
+ eax = reg_read(ctxt, VCPU_REGS_RAX);
+ edx = reg_read(ctxt, VCPU_REGS_RDX);
+ ecx = reg_read(ctxt, VCPU_REGS_RCX);
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index f267cca9fe094..ba1749a770eb1 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -1071,20 +1071,6 @@ static bool rmap_can_add(struct kvm_vcpu *vcpu)
+ return kvm_mmu_memory_cache_nr_free_objects(mc);
+ }
+
+-static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
+-{
+- struct kvm_memory_slot *slot;
+- struct kvm_mmu_page *sp;
+- struct kvm_rmap_head *rmap_head;
+-
+- sp = sptep_to_sp(spte);
+- kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
+- slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+- rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
+- return pte_list_add(vcpu, spte, rmap_head);
+-}
+-
+-
+ static void rmap_remove(struct kvm *kvm, u64 *spte)
+ {
+ struct kvm_memslots *slots;
+@@ -1097,9 +1083,9 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
+ gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
+
+ /*
+- * Unlike rmap_add and rmap_recycle, rmap_remove does not run in the
+- * context of a vCPU so have to determine which memslots to use based
+- * on context information in sp->role.
++ * Unlike rmap_add, rmap_remove does not run in the context of a vCPU
++ * so we have to determine which memslots to use based on context
++ * information in sp->role.
+ */
+ slots = kvm_memslots_for_spte_role(kvm, sp->role);
+
+@@ -1639,19 +1625,24 @@ static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+
+ #define RMAP_RECYCLE_THRESHOLD 1000
+
+-static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
++static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
+ {
+ struct kvm_memory_slot *slot;
+- struct kvm_rmap_head *rmap_head;
+ struct kvm_mmu_page *sp;
++ struct kvm_rmap_head *rmap_head;
++ int rmap_count;
+
+ sp = sptep_to_sp(spte);
++ kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
+ slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+ rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
++ rmap_count = pte_list_add(vcpu, spte, rmap_head);
+
+- kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
+- kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
+- KVM_PAGES_PER_HPAGE(sp->role.level));
++ if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
++ kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
++ kvm_flush_remote_tlbs_with_address(
++ vcpu->kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
++ }
+ }
+
+ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+@@ -2718,7 +2709,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
+ bool host_writable)
+ {
+ int was_rmapped = 0;
+- int rmap_count;
+ int set_spte_ret;
+ int ret = RET_PF_FIXED;
+ bool flush = false;
+@@ -2778,9 +2768,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
+
+ if (!was_rmapped) {
+ kvm_update_page_stats(vcpu->kvm, level, 1);
+- rmap_count = rmap_add(vcpu, sptep, gfn);
+- if (rmap_count > RMAP_RECYCLE_THRESHOLD)
+- rmap_recycle(vcpu, sptep, gfn);
++ rmap_add(vcpu, sptep, gfn);
+ }
+
+ return ret;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 11e73d02fb3ae..8648799d48f8b 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1021,6 +1021,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+
+ int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
+ {
++ /* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */
+ if (static_call(kvm_x86_get_cpl)(vcpu) != 0 ||
+ __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
+ kvm_inject_gp(vcpu, 0);
+diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c
+index cb6401c9e9a4f..acf31cc1dbcca 100644
+--- a/drivers/dax/hmem/device.c
++++ b/drivers/dax/hmem/device.c
+@@ -15,6 +15,7 @@ void hmem_register_device(int target_nid, struct resource *r)
+ .start = r->start,
+ .end = r->end,
+ .flags = IORESOURCE_MEM,
++ .desc = IORES_DESC_SOFT_RESERVED,
+ };
+ struct platform_device *pdev;
+ struct memregion_info info;
+diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
+index aada84f40723c..3257b2f5157c3 100644
+--- a/drivers/dma/ti/k3-udma-private.c
++++ b/drivers/dma/ti/k3-udma-private.c
+@@ -31,14 +31,14 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
+ }
+
+ pdev = of_find_device_by_node(udma_node);
++ if (np != udma_node)
++ of_node_put(udma_node);
++
+ if (!pdev) {
+ pr_debug("UDMA device not found\n");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+- if (np != udma_node)
+- of_node_put(udma_node);
+-
+ ud = platform_get_drvdata(pdev);
+ if (!ud) {
+ pr_debug("UDMA has not been probed\n");
+diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
+index 9bf2478ec6d17..e80a782058458 100644
+--- a/drivers/firmware/arm_scmi/reset.c
++++ b/drivers/firmware/arm_scmi/reset.c
+@@ -152,9 +152,13 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
+ struct scmi_xfer *t;
+ struct scmi_msg_reset_domain_reset *dom;
+ struct scmi_reset_info *pi = ph->get_priv(ph);
+- struct reset_dom_info *rdom = pi->dom_info + domain;
++ struct reset_dom_info *rdom;
+
+- if (rdom->async_reset)
++ if (domain >= pi->num_domains)
++ return -EINVAL;
++
++ rdom = pi->dom_info + domain;
++ if (rdom->async_reset && flags & AUTONOMOUS_RESET)
+ flags |= ASYNCHRONOUS_RESET;
+
+ ret = ph->xops->xfer_get_init(ph, RESET, sizeof(*dom), 0, &t);
+@@ -166,7 +170,7 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
+ dom->flags = cpu_to_le32(flags);
+ dom->reset_state = cpu_to_le32(state);
+
+- if (rdom->async_reset)
++ if (flags & ASYNCHRONOUS_RESET)
+ ret = ph->xops->do_xfer_with_response(ph, t);
+ else
+ ret = ph->xops->do_xfer(ph, t);
+diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
+index 8a18930f3eb69..516f4f0069bd2 100644
+--- a/drivers/firmware/efi/libstub/secureboot.c
++++ b/drivers/firmware/efi/libstub/secureboot.c
+@@ -14,7 +14,7 @@
+
+ /* SHIM variables */
+ static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
+-static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
++static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT";
+
+ static efi_status_t get_var(efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
+ unsigned long *data_size, void *data)
+@@ -43,8 +43,8 @@ enum efi_secureboot_mode efi_get_secureboot(void)
+
+ /*
+ * See if a user has put the shim into insecure mode. If so, and if the
+- * variable doesn't have the runtime attribute set, we might as well
+- * honor that.
++ * variable doesn't have the non-volatile attribute set, we might as
++ * well honor that.
+ */
+ size = sizeof(moksbstate);
+ status = get_efi_var(shim_MokSBState_name, &shim_guid,
+@@ -53,7 +53,7 @@ enum efi_secureboot_mode efi_get_secureboot(void)
+ /* If it fails, we don't care why. Default to secure */
+ if (status != EFI_SUCCESS)
+ goto secure_boot_enabled;
+- if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1)
++ if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1)
+ return efi_secureboot_mode_disabled;
+
+ secure_boot_enabled:
+diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
+index f14c4ff5839f9..72162645b553e 100644
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -414,6 +414,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ hdr->ramdisk_image = 0;
+ hdr->ramdisk_size = 0;
+
++ /*
++ * Disregard any setup data that was provided by the bootloader:
++ * setup_data could be pointing anywhere, and we have no way of
++ * authenticating or validating the payload.
++ */
++ hdr->setup_data = 0;
++
+ efi_stub_entry(handle, sys_table_arg, boot_params);
+ /* not reached */
+
+diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
+index 369a832d96203..0bded5853c41b 100644
+--- a/drivers/gpio/gpio-mockup.c
++++ b/drivers/gpio/gpio-mockup.c
+@@ -554,8 +554,10 @@ static int __init gpio_mockup_register_chip(int idx)
+ }
+
+ fwnode = fwnode_create_software_node(properties, NULL);
+- if (IS_ERR(fwnode))
++ if (IS_ERR(fwnode)) {
++ kfree_strarray(line_names, ngpio);
+ return PTR_ERR(fwnode);
++ }
+
+ pdevinfo.name = "gpio-mockup";
+ pdevinfo.id = idx;
+@@ -618,9 +620,9 @@ static int __init gpio_mockup_init(void)
+
+ static void __exit gpio_mockup_exit(void)
+ {
++ gpio_mockup_unregister_pdevs();
+ debugfs_remove_recursive(gpio_mockup_dbg_dir);
+ platform_driver_unregister(&gpio_mockup_driver);
+- gpio_mockup_unregister_pdevs();
+ }
+
+ module_init(gpio_mockup_init);
+diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
+index ffa0256cad5a0..937e7a8dd8a96 100644
+--- a/drivers/gpio/gpiolib-cdev.c
++++ b/drivers/gpio/gpiolib-cdev.c
+@@ -1784,7 +1784,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
+ ret = -ENODEV;
+ goto out_free_le;
+ }
+- le->irq = irq;
+
+ if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
+ irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+@@ -1798,7 +1797,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
+ init_waitqueue_head(&le->wait);
+
+ /* Request a thread to read the events */
+- ret = request_threaded_irq(le->irq,
++ ret = request_threaded_irq(irq,
+ lineevent_irq_handler,
+ lineevent_irq_thread,
+ irqflags,
+@@ -1807,6 +1806,8 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
+ if (ret)
+ goto out_free_le;
+
++ le->irq = irq;
++
+ fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ ret = fd;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index d1af709cc7dca..0e3137fd5c353 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2388,8 +2388,20 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
+ }
+ adev->ip_blocks[i].status.sw = true;
+
+- /* need to do gmc hw init early so we can allocate gpu mem */
+- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
++ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
++ /* need to do common hw init early so everything is set up for gmc */
++ r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
++ if (r) {
++ DRM_ERROR("hw_init %d failed %d\n", i, r);
++ goto init_failed;
++ }
++ adev->ip_blocks[i].status.hw = true;
++ } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
++ /* need to do gmc hw init early so we can allocate gpu mem */
++ /* Try to reserve bad pages early */
++ if (amdgpu_sriov_vf(adev))
++ amdgpu_virt_exchange_data(adev);
++
+ r = amdgpu_device_vram_scratch_init(adev);
+ if (r) {
+ DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
+@@ -3033,8 +3045,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
+ int i, r;
+
+ static enum amd_ip_block_type ip_order[] = {
+- AMD_IP_BLOCK_TYPE_GMC,
+ AMD_IP_BLOCK_TYPE_COMMON,
++ AMD_IP_BLOCK_TYPE_GMC,
+ AMD_IP_BLOCK_TYPE_PSP,
+ AMD_IP_BLOCK_TYPE_IH,
+ };
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index 5c08047adb594..d3d2c214554e6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -35,6 +35,8 @@
+ #include <linux/pci.h>
+ #include <linux/pm_runtime.h>
+ #include <drm/drm_crtc_helper.h>
++#include <drm/drm_damage_helper.h>
++#include <drm/drm_drv.h>
+ #include <drm/drm_edid.h>
+ #include <drm/drm_gem_framebuffer_helper.h>
+ #include <drm/drm_fb_helper.h>
+@@ -492,6 +494,12 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
+ .create_handle = drm_gem_fb_create_handle,
+ };
+
++static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
++ .destroy = drm_gem_fb_destroy,
++ .create_handle = drm_gem_fb_create_handle,
++ .dirty = drm_atomic_helper_dirtyfb,
++};
++
+ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
+ uint64_t bo_flags)
+ {
+@@ -1109,7 +1117,10 @@ int amdgpu_display_gem_fb_verify_and_init(
+ if (ret)
+ goto err;
+
+- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
++ if (drm_drv_uses_atomic_modeset(dev))
++ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs_atomic);
++ else
++ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ if (ret)
+ goto err;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+index 16787c675f35e..a0803425b4566 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+@@ -614,16 +614,34 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
+
+ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
+ {
+- uint64_t bp_block_offset = 0;
+- uint32_t bp_block_size = 0;
+- struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
+-
+ adev->virt.fw_reserve.p_pf2vf = NULL;
+ adev->virt.fw_reserve.p_vf2pf = NULL;
+ adev->virt.vf2pf_update_interval_ms = 0;
+
+ if (adev->mman.fw_vram_usage_va != NULL) {
+- adev->virt.vf2pf_update_interval_ms = 2000;
++ /* go through this logic in ip_init and reset to init workqueue*/
++ amdgpu_virt_exchange_data(adev);
++
++ INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
++ schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
++ } else if (adev->bios != NULL) {
++ /* got through this logic in early init stage to get necessary flags, e.g. rlcg_acc related*/
++ adev->virt.fw_reserve.p_pf2vf =
++ (struct amd_sriov_msg_pf2vf_info_header *)
++ (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
++
++ amdgpu_virt_read_pf2vf_data(adev);
++ }
++}
++
++
++void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
++{
++ uint64_t bp_block_offset = 0;
++ uint32_t bp_block_size = 0;
++ struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
++
++ if (adev->mman.fw_vram_usage_va != NULL) {
+
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+@@ -649,22 +667,10 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
+ if (adev->virt.ras_init_done)
+ amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
+ }
+- } else if (adev->bios != NULL) {
+- adev->virt.fw_reserve.p_pf2vf =
+- (struct amd_sriov_msg_pf2vf_info_header *)
+- (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+-
+- amdgpu_virt_read_pf2vf_data(adev);
+-
+- return;
+- }
+-
+- if (adev->virt.vf2pf_update_interval_ms != 0) {
+- INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
+- schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
+ }
+ }
+
++
+ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
+ {
+ uint32_t reg;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+index 8d4c20bb71c59..9adfb8d63280a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+@@ -308,6 +308,7 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
+ void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
+ void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
+ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
++void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
+ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
+ void amdgpu_detect_virtualization(struct amdgpu_device *adev);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+index e3d9f1decdfc7..518672a2450f4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+@@ -6658,8 +6658,7 @@ static double CalculateUrgentLatency(
+ return ret;
+ }
+
+-
+-static void UseMinimumDCFCLK(
++static noinline_for_stack void UseMinimumDCFCLK(
+ struct display_mode_lib *mode_lib,
+ int MaxInterDCNTileRepeaters,
+ int MaxPrefetchMode,
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+index d58925cff420e..aa0507e017926 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+@@ -259,33 +259,13 @@ static void CalculateRowBandwidth(
+
+ static void CalculateFlipSchedule(
+ struct display_mode_lib *mode_lib,
++ unsigned int k,
+ double HostVMInefficiencyFactor,
+ double UrgentExtraLatency,
+ double UrgentLatency,
+- unsigned int GPUVMMaxPageTableLevels,
+- bool HostVMEnable,
+- unsigned int HostVMMaxNonCachedPageTableLevels,
+- bool GPUVMEnable,
+- double HostVMMinPageSize,
+ double PDEAndMetaPTEBytesPerFrame,
+ double MetaRowBytes,
+- double DPTEBytesPerRow,
+- double BandwidthAvailableForImmediateFlip,
+- unsigned int TotImmediateFlipBytes,
+- enum source_format_class SourcePixelFormat,
+- double LineTime,
+- double VRatio,
+- double VRatioChroma,
+- double Tno_bw,
+- bool DCCEnable,
+- unsigned int dpte_row_height,
+- unsigned int meta_row_height,
+- unsigned int dpte_row_height_chroma,
+- unsigned int meta_row_height_chroma,
+- double *DestinationLinesToRequestVMInImmediateFlip,
+- double *DestinationLinesToRequestRowInImmediateFlip,
+- double *final_flip_bw,
+- bool *ImmediateFlipSupportedForPipe);
++ double DPTEBytesPerRow);
+ static double CalculateWriteBackDelay(
+ enum source_format_class WritebackPixelFormat,
+ double WritebackHRatio,
+@@ -319,64 +299,28 @@ static void CalculateVupdateAndDynamicMetadataParameters(
+ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
+ struct display_mode_lib *mode_lib,
+ unsigned int PrefetchMode,
+- unsigned int NumberOfActivePlanes,
+- unsigned int MaxLineBufferLines,
+- unsigned int LineBufferSize,
+- unsigned int WritebackInterfaceBufferSize,
+ double DCFCLK,
+ double ReturnBW,
+- bool SynchronizedVBlank,
+- unsigned int dpte_group_bytes[],
+- unsigned int MetaChunkSize,
+ double UrgentLatency,
+ double ExtraLatency,
+- double WritebackLatency,
+- double WritebackChunkSize,
+ double SOCCLK,
+- double DRAMClockChangeLatency,
+- double SRExitTime,
+- double SREnterPlusExitTime,
+- double SRExitZ8Time,
+- double SREnterPlusExitZ8Time,
+ double DCFCLKDeepSleep,
+ unsigned int DETBufferSizeY[],
+ unsigned int DETBufferSizeC[],
+ unsigned int SwathHeightY[],
+ unsigned int SwathHeightC[],
+- unsigned int LBBitPerPixel[],
+ double SwathWidthY[],
+ double SwathWidthC[],
+- double HRatio[],
+- double HRatioChroma[],
+- unsigned int vtaps[],
+- unsigned int VTAPsChroma[],
+- double VRatio[],
+- double VRatioChroma[],
+- unsigned int HTotal[],
+- double PixelClock[],
+- unsigned int BlendingAndTiming[],
+ unsigned int DPPPerPlane[],
+ double BytePerPixelDETY[],
+ double BytePerPixelDETC[],
+- double DSTXAfterScaler[],
+- double DSTYAfterScaler[],
+- bool WritebackEnable[],
+- enum source_format_class WritebackPixelFormat[],
+- double WritebackDestinationWidth[],
+- double WritebackDestinationHeight[],
+- double WritebackSourceHeight[],
+ bool UnboundedRequestEnabled,
+ int unsigned CompressedBufferSizeInkByte,
+ enum clock_change_support *DRAMClockChangeSupport,
+- double *UrgentWatermark,
+- double *WritebackUrgentWatermark,
+- double *DRAMClockChangeWatermark,
+- double *WritebackDRAMClockChangeWatermark,
+ double *StutterExitWatermark,
+ double *StutterEnterPlusExitWatermark,
+ double *Z8StutterExitWatermark,
+- double *Z8StutterEnterPlusExitWatermark,
+- double *MinActiveDRAMClockChangeLatencySupported);
++ double *Z8StutterEnterPlusExitWatermark);
+
+ static void CalculateDCFCLKDeepSleep(
+ struct display_mode_lib *mode_lib,
+@@ -2959,33 +2903,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ CalculateFlipSchedule(
+ mode_lib,
++ k,
+ HostVMInefficiencyFactor,
+ v->UrgentExtraLatency,
+ v->UrgentLatency,
+- v->GPUVMMaxPageTableLevels,
+- v->HostVMEnable,
+- v->HostVMMaxNonCachedPageTableLevels,
+- v->GPUVMEnable,
+- v->HostVMMinPageSize,
+ v->PDEAndMetaPTEBytesFrame[k],
+ v->MetaRowByte[k],
+- v->PixelPTEBytesPerRow[k],
+- v->BandwidthAvailableForImmediateFlip,
+- v->TotImmediateFlipBytes,
+- v->SourcePixelFormat[k],
+- v->HTotal[k] / v->PixelClock[k],
+- v->VRatio[k],
+- v->VRatioChroma[k],
+- v->Tno_bw[k],
+- v->DCCEnable[k],
+- v->dpte_row_height[k],
+- v->meta_row_height[k],
+- v->dpte_row_height_chroma[k],
+- v->meta_row_height_chroma[k],
+- &v->DestinationLinesToRequestVMInImmediateFlip[k],
+- &v->DestinationLinesToRequestRowInImmediateFlip[k],
+- &v->final_flip_bw[k],
+- &v->ImmediateFlipSupportedForPipe[k]);
++ v->PixelPTEBytesPerRow[k]);
+ }
+
+ v->total_dcn_read_bw_with_flip = 0.0;
+@@ -3072,64 +2996,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
+ CalculateWatermarksAndDRAMSpeedChangeSupport(
+ mode_lib,
+ PrefetchMode,
+- v->NumberOfActivePlanes,
+- v->MaxLineBufferLines,
+- v->LineBufferSize,
+- v->WritebackInterfaceBufferSize,
+ v->DCFCLK,
+ v->ReturnBW,
+- v->SynchronizedVBlank,
+- v->dpte_group_bytes,
+- v->MetaChunkSize,
+ v->UrgentLatency,
+ v->UrgentExtraLatency,
+- v->WritebackLatency,
+- v->WritebackChunkSize,
+ v->SOCCLK,
+- v->DRAMClockChangeLatency,
+- v->SRExitTime,
+- v->SREnterPlusExitTime,
+- v->SRExitZ8Time,
+- v->SREnterPlusExitZ8Time,
+ v->DCFCLKDeepSleep,
+ v->DETBufferSizeY,
+ v->DETBufferSizeC,
+ v->SwathHeightY,
+ v->SwathHeightC,
+- v->LBBitPerPixel,
+ v->SwathWidthY,
+ v->SwathWidthC,
+- v->HRatio,
+- v->HRatioChroma,
+- v->vtaps,
+- v->VTAPsChroma,
+- v->VRatio,
+- v->VRatioChroma,
+- v->HTotal,
+- v->PixelClock,
+- v->BlendingAndTiming,
+ v->DPPPerPlane,
+ v->BytePerPixelDETY,
+ v->BytePerPixelDETC,
+- v->DSTXAfterScaler,
+- v->DSTYAfterScaler,
+- v->WritebackEnable,
+- v->WritebackPixelFormat,
+- v->WritebackDestinationWidth,
+- v->WritebackDestinationHeight,
+- v->WritebackSourceHeight,
+ v->UnboundedRequestEnabled,
+ v->CompressedBufferSizeInkByte,
+ &DRAMClockChangeSupport,
+- &v->UrgentWatermark,
+- &v->WritebackUrgentWatermark,
+- &v->DRAMClockChangeWatermark,
+- &v->WritebackDRAMClockChangeWatermark,
+ &v->StutterExitWatermark,
+ &v->StutterEnterPlusExitWatermark,
+ &v->Z8StutterExitWatermark,
+- &v->Z8StutterEnterPlusExitWatermark,
+- &v->MinActiveDRAMClockChangeLatencySupported);
++ &v->Z8StutterEnterPlusExitWatermark);
+
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->WritebackEnable[k] == true) {
+@@ -3741,61 +3629,43 @@ static void CalculateRowBandwidth(
+
+ static void CalculateFlipSchedule(
+ struct display_mode_lib *mode_lib,
++ unsigned int k,
+ double HostVMInefficiencyFactor,
+ double UrgentExtraLatency,
+ double UrgentLatency,
+- unsigned int GPUVMMaxPageTableLevels,
+- bool HostVMEnable,
+- unsigned int HostVMMaxNonCachedPageTableLevels,
+- bool GPUVMEnable,
+- double HostVMMinPageSize,
+ double PDEAndMetaPTEBytesPerFrame,
+ double MetaRowBytes,
+- double DPTEBytesPerRow,
+- double BandwidthAvailableForImmediateFlip,
+- unsigned int TotImmediateFlipBytes,
+- enum source_format_class SourcePixelFormat,
+- double LineTime,
+- double VRatio,
+- double VRatioChroma,
+- double Tno_bw,
+- bool DCCEnable,
+- unsigned int dpte_row_height,
+- unsigned int meta_row_height,
+- unsigned int dpte_row_height_chroma,
+- unsigned int meta_row_height_chroma,
+- double *DestinationLinesToRequestVMInImmediateFlip,
+- double *DestinationLinesToRequestRowInImmediateFlip,
+- double *final_flip_bw,
+- bool *ImmediateFlipSupportedForPipe)
++ double DPTEBytesPerRow)
+ {
++ struct vba_vars_st *v = &mode_lib->vba;
+ double min_row_time = 0.0;
+ unsigned int HostVMDynamicLevelsTrips;
+ double TimeForFetchingMetaPTEImmediateFlip;
+ double TimeForFetchingRowInVBlankImmediateFlip;
+ double ImmediateFlipBW;
++ double LineTime = v->HTotal[k] / v->PixelClock[k];
+
+- if (GPUVMEnable == true && HostVMEnable == true) {
+- HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
++ if (v->GPUVMEnable == true && v->HostVMEnable == true) {
++ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
+ } else {
+ HostVMDynamicLevelsTrips = 0;
+ }
+
+- if (GPUVMEnable == true || DCCEnable == true) {
+- ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
++ if (v->GPUVMEnable == true || v->DCCEnable[k] == true) {
++ ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes;
+ }
+
+- if (GPUVMEnable == true) {
++ if (v->GPUVMEnable == true) {
+ TimeForFetchingMetaPTEImmediateFlip = dml_max3(
+- Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
+- UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
++ v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
++ UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
+ LineTime / 4.0);
+ } else {
+ TimeForFetchingMetaPTEImmediateFlip = 0;
+ }
+
+- *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
+- if ((GPUVMEnable == true || DCCEnable == true)) {
++ v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
++ if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
+ TimeForFetchingRowInVBlankImmediateFlip = dml_max3(
+ (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
+ UrgentLatency * (HostVMDynamicLevelsTrips + 1),
+@@ -3804,54 +3674,54 @@ static void CalculateFlipSchedule(
+ TimeForFetchingRowInVBlankImmediateFlip = 0;
+ }
+
+- *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
++ v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
+
+- if (GPUVMEnable == true) {
+- *final_flip_bw = dml_max(
+- PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime),
+- (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
+- } else if ((GPUVMEnable == true || DCCEnable == true)) {
+- *final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime);
++ if (v->GPUVMEnable == true) {
++ v->final_flip_bw[k] = dml_max(
++ PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime),
++ (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime));
++ } else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
++ v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime);
+ } else {
+- *final_flip_bw = 0;
++ v->final_flip_bw[k] = 0;
+ }
+
+- if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) {
+- if (GPUVMEnable == true && DCCEnable != true) {
+- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma);
+- } else if (GPUVMEnable != true && DCCEnable == true) {
+- min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma);
++ if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
++ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
++ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
++ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
++ min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
+ } else {
+ min_row_time = dml_min4(
+- dpte_row_height * LineTime / VRatio,
+- meta_row_height * LineTime / VRatio,
+- dpte_row_height_chroma * LineTime / VRatioChroma,
+- meta_row_height_chroma * LineTime / VRatioChroma);
++ v->dpte_row_height[k] * LineTime / v->VRatio[k],
++ v->meta_row_height[k] * LineTime / v->VRatio[k],
++ v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k],
++ v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
+ }
+ } else {
+- if (GPUVMEnable == true && DCCEnable != true) {
+- min_row_time = dpte_row_height * LineTime / VRatio;
+- } else if (GPUVMEnable != true && DCCEnable == true) {
+- min_row_time = meta_row_height * LineTime / VRatio;
++ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
++ min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k];
++ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
++ min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k];
+ } else {
+- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio);
++ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]);
+ }
+ }
+
+- if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16
++ if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16
+ || TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
+- *ImmediateFlipSupportedForPipe = false;
++ v->ImmediateFlipSupportedForPipe[k] = false;
+ } else {
+- *ImmediateFlipSupportedForPipe = true;
++ v->ImmediateFlipSupportedForPipe[k] = true;
+ }
+
+ #ifdef __DML_VBA_DEBUG__
+- dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip);
+- dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip);
++ dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]);
++ dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]);
+ dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip);
+ dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip);
+ dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time);
+- dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe);
++ dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]);
+ #endif
+
+ }
+@@ -5477,33 +5347,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ for (k = 0; k < v->NumberOfActivePlanes; k++) {
+ CalculateFlipSchedule(
+ mode_lib,
++ k,
+ HostVMInefficiencyFactor,
+ v->ExtraLatency,
+ v->UrgLatency[i],
+- v->GPUVMMaxPageTableLevels,
+- v->HostVMEnable,
+- v->HostVMMaxNonCachedPageTableLevels,
+- v->GPUVMEnable,
+- v->HostVMMinPageSize,
+ v->PDEAndMetaPTEBytesPerFrame[i][j][k],
+ v->MetaRowBytes[i][j][k],
+- v->DPTEBytesPerRow[i][j][k],
+- v->BandwidthAvailableForImmediateFlip,
+- v->TotImmediateFlipBytes,
+- v->SourcePixelFormat[k],
+- v->HTotal[k] / v->PixelClock[k],
+- v->VRatio[k],
+- v->VRatioChroma[k],
+- v->Tno_bw[k],
+- v->DCCEnable[k],
+- v->dpte_row_height[k],
+- v->meta_row_height[k],
+- v->dpte_row_height_chroma[k],
+- v->meta_row_height_chroma[k],
+- &v->DestinationLinesToRequestVMInImmediateFlip[k],
+- &v->DestinationLinesToRequestRowInImmediateFlip[k],
+- &v->final_flip_bw[k],
+- &v->ImmediateFlipSupportedForPipe[k]);
++ v->DPTEBytesPerRow[i][j][k]);
+ }
+ v->total_dcn_read_bw_with_flip = 0.0;
+ for (k = 0; k < v->NumberOfActivePlanes; k++) {
+@@ -5561,64 +5411,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ CalculateWatermarksAndDRAMSpeedChangeSupport(
+ mode_lib,
+ v->PrefetchModePerState[i][j],
+- v->NumberOfActivePlanes,
+- v->MaxLineBufferLines,
+- v->LineBufferSize,
+- v->WritebackInterfaceBufferSize,
+ v->DCFCLKState[i][j],
+ v->ReturnBWPerState[i][j],
+- v->SynchronizedVBlank,
+- v->dpte_group_bytes,
+- v->MetaChunkSize,
+ v->UrgLatency[i],
+ v->ExtraLatency,
+- v->WritebackLatency,
+- v->WritebackChunkSize,
+ v->SOCCLKPerState[i],
+- v->DRAMClockChangeLatency,
+- v->SRExitTime,
+- v->SREnterPlusExitTime,
+- v->SRExitZ8Time,
+- v->SREnterPlusExitZ8Time,
+ v->ProjectedDCFCLKDeepSleep[i][j],
+ v->DETBufferSizeYThisState,
+ v->DETBufferSizeCThisState,
+ v->SwathHeightYThisState,
+ v->SwathHeightCThisState,
+- v->LBBitPerPixel,
+ v->SwathWidthYThisState,
+ v->SwathWidthCThisState,
+- v->HRatio,
+- v->HRatioChroma,
+- v->vtaps,
+- v->VTAPsChroma,
+- v->VRatio,
+- v->VRatioChroma,
+- v->HTotal,
+- v->PixelClock,
+- v->BlendingAndTiming,
+ v->NoOfDPPThisState,
+ v->BytePerPixelInDETY,
+ v->BytePerPixelInDETC,
+- v->DSTXAfterScaler,
+- v->DSTYAfterScaler,
+- v->WritebackEnable,
+- v->WritebackPixelFormat,
+- v->WritebackDestinationWidth,
+- v->WritebackDestinationHeight,
+- v->WritebackSourceHeight,
+ UnboundedRequestEnabledThisState,
+ CompressedBufferSizeInkByteThisState,
+ &v->DRAMClockChangeSupport[i][j],
+- &v->UrgentWatermark,
+- &v->WritebackUrgentWatermark,
+- &v->DRAMClockChangeWatermark,
+- &v->WritebackDRAMClockChangeWatermark,
+- &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+- &v->MinActiveDRAMClockChangeLatencySupported);
++ &dummy);
+ }
+ }
+
+@@ -5743,64 +5557,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
+ struct display_mode_lib *mode_lib,
+ unsigned int PrefetchMode,
+- unsigned int NumberOfActivePlanes,
+- unsigned int MaxLineBufferLines,
+- unsigned int LineBufferSize,
+- unsigned int WritebackInterfaceBufferSize,
+ double DCFCLK,
+ double ReturnBW,
+- bool SynchronizedVBlank,
+- unsigned int dpte_group_bytes[],
+- unsigned int MetaChunkSize,
+ double UrgentLatency,
+ double ExtraLatency,
+- double WritebackLatency,
+- double WritebackChunkSize,
+ double SOCCLK,
+- double DRAMClockChangeLatency,
+- double SRExitTime,
+- double SREnterPlusExitTime,
+- double SRExitZ8Time,
+- double SREnterPlusExitZ8Time,
+ double DCFCLKDeepSleep,
+ unsigned int DETBufferSizeY[],
+ unsigned int DETBufferSizeC[],
+ unsigned int SwathHeightY[],
+ unsigned int SwathHeightC[],
+- unsigned int LBBitPerPixel[],
+ double SwathWidthY[],
+ double SwathWidthC[],
+- double HRatio[],
+- double HRatioChroma[],
+- unsigned int vtaps[],
+- unsigned int VTAPsChroma[],
+- double VRatio[],
+- double VRatioChroma[],
+- unsigned int HTotal[],
+- double PixelClock[],
+- unsigned int BlendingAndTiming[],
+ unsigned int DPPPerPlane[],
+ double BytePerPixelDETY[],
+ double BytePerPixelDETC[],
+- double DSTXAfterScaler[],
+- double DSTYAfterScaler[],
+- bool WritebackEnable[],
+- enum source_format_class WritebackPixelFormat[],
+- double WritebackDestinationWidth[],
+- double WritebackDestinationHeight[],
+- double WritebackSourceHeight[],
+ bool UnboundedRequestEnabled,
+ int unsigned CompressedBufferSizeInkByte,
+ enum clock_change_support *DRAMClockChangeSupport,
+- double *UrgentWatermark,
+- double *WritebackUrgentWatermark,
+- double *DRAMClockChangeWatermark,
+- double *WritebackDRAMClockChangeWatermark,
+ double *StutterExitWatermark,
+ double *StutterEnterPlusExitWatermark,
+ double *Z8StutterExitWatermark,
+- double *Z8StutterEnterPlusExitWatermark,
+- double *MinActiveDRAMClockChangeLatencySupported)
++ double *Z8StutterEnterPlusExitWatermark)
+ {
+ struct vba_vars_st *v = &mode_lib->vba;
+ double EffectiveLBLatencyHidingY;
+@@ -5820,103 +5598,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
+ double TotalPixelBW = 0.0;
+ int k, j;
+
+- *UrgentWatermark = UrgentLatency + ExtraLatency;
++ v->UrgentWatermark = UrgentLatency + ExtraLatency;
+
+ #ifdef __DML_VBA_DEBUG__
+ dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
+ dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency);
+- dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark);
++ dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark);
+ #endif
+
+- *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
++ v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark;
+
+ #ifdef __DML_VBA_DEBUG__
+- dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency);
+- dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark);
++ dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency);
++ dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark);
+ #endif
+
+ v->TotalActiveWriteback = 0;
+- for (k = 0; k < NumberOfActivePlanes; ++k) {
+- if (WritebackEnable[k] == true) {
++ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
++ if (v->WritebackEnable[k] == true) {
+ v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
+ }
+ }
+
+ if (v->TotalActiveWriteback <= 1) {
+- *WritebackUrgentWatermark = WritebackLatency;
++ v->WritebackUrgentWatermark = v->WritebackLatency;
+ } else {
+- *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
++ v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ }
+
+ if (v->TotalActiveWriteback <= 1) {
+- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
++ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency;
+ } else {
+- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
++ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ }
+
+- for (k = 0; k < NumberOfActivePlanes; ++k) {
++ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ TotalPixelBW = TotalPixelBW
+- + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k])
+- / (HTotal[k] / PixelClock[k]);
++ + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k])
++ / (v->HTotal[k] / v->PixelClock[k]);
+ }
+
+- for (k = 0; k < NumberOfActivePlanes; ++k) {
++ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ double EffectiveDETBufferSizeY = DETBufferSizeY[k];
+
+ v->LBLatencyHidingSourceLinesY = dml_min(
+- (double) MaxLineBufferLines,
+- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
++ (double) v->MaxLineBufferLines,
++ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
+
+ v->LBLatencyHidingSourceLinesC = dml_min(
+- (double) MaxLineBufferLines,
+- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
++ (double) v->MaxLineBufferLines,
++ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
+
+- EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
++ EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
+
+- EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
++ EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
+
1303 + if (UnboundedRequestEnabled) {
1304 + EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
1305 +- + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
1306 ++ + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
1307 + }
1308 +
1309 + LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
1310 + LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
1311 +- FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
1312 ++ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
1313 + if (BytePerPixelDETC[k] > 0) {
1314 + LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
1315 + LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
1316 +- FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
1317 ++ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
1318 + } else {
1319 + LinesInDETC = 0;
1320 + FullDETBufferingTimeC = 999999;
1321 + }
1322 +
1323 + ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
1324 +- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
1325 ++ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
1326 +
1327 +- if (NumberOfActivePlanes > 1) {
1328 ++ if (v->NumberOfActivePlanes > 1) {
1329 + ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
1330 +- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
1331 ++ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
1332 + }
1333 +
1334 + if (BytePerPixelDETC[k] > 0) {
1335 + ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
1336 +- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
1337 ++ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
1338 +
1339 +- if (NumberOfActivePlanes > 1) {
1340 ++ if (v->NumberOfActivePlanes > 1) {
1341 + ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
1342 +- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
1343 ++ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
1344 + }
1345 + v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
1346 + } else {
1347 + v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
1348 + }
1349 +
1350 +- if (WritebackEnable[k] == true) {
1351 +- WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024
1352 +- / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
1353 +- if (WritebackPixelFormat[k] == dm_444_64) {
1354 ++ if (v->WritebackEnable[k] == true) {
1355 ++ WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024
1356 ++ / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
1357 ++ if (v->WritebackPixelFormat[k] == dm_444_64) {
1358 + WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
1359 + }
1360 + WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
1361 +@@ -5926,14 +5704,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
1362 +
1363 + v->MinActiveDRAMClockChangeMargin = 999999;
1364 + PlaneWithMinActiveDRAMClockChangeMargin = 0;
1365 +- for (k = 0; k < NumberOfActivePlanes; ++k) {
1366 ++ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
1367 + if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
1368 + v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
1369 +- if (BlendingAndTiming[k] == k) {
1370 ++ if (v->BlendingAndTiming[k] == k) {
1371 + PlaneWithMinActiveDRAMClockChangeMargin = k;
1372 + } else {
1373 +- for (j = 0; j < NumberOfActivePlanes; ++j) {
1374 +- if (BlendingAndTiming[k] == j) {
1375 ++ for (j = 0; j < v->NumberOfActivePlanes; ++j) {
1376 ++ if (v->BlendingAndTiming[k] == j) {
1377 + PlaneWithMinActiveDRAMClockChangeMargin = j;
1378 + }
1379 + }
1380 +@@ -5941,11 +5719,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
1381 + }
1382 + }
1383 +
1384 +- *MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
1385 ++ v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency;
1386 +
1387 + SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
1388 +- for (k = 0; k < NumberOfActivePlanes; ++k) {
1389 +- if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
1390 ++ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
1391 ++ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
1392 + && v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
1393 + SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
1394 + }
1395 +@@ -5953,25 +5731,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
1396 +
1397 + v->TotalNumberOfActiveOTG = 0;
1398 +
1399 +- for (k = 0; k < NumberOfActivePlanes; ++k) {
1400 +- if (BlendingAndTiming[k] == k) {
1401 ++ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
1402 ++ if (v->BlendingAndTiming[k] == k) {
1403 + v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
1404 + }
1405 + }
1406 +
1407 + if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
1408 + *DRAMClockChangeSupport = dm_dram_clock_change_vactive;
1409 +- } else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
1410 ++ } else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
1411 + || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) {
1412 + *DRAMClockChangeSupport = dm_dram_clock_change_vblank;
1413 + } else {
1414 + *DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
1415 + }
1416 +
1417 +- *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
1418 +- *StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
1419 +- *Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
1420 +- *Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
1421 ++ *StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
1422 ++ *StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
1423 ++ *Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
1424 ++ *Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
1425 +
1426 + #ifdef __DML_VBA_DEBUG__
1427 + dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark);
1428 +diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
1429 +index ef742d95ef057..c707c9bfed433 100644
1430 +--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
1431 ++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
1432 +@@ -1597,6 +1597,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
1433 + struct fixed31_32 lut2;
1434 + struct fixed31_32 delta_lut;
1435 + struct fixed31_32 delta_index;
1436 ++ const struct fixed31_32 one = dc_fixpt_from_int(1);
1437 +
1438 + i = 0;
1439 + /* fixed_pt library has problems handling too small values */
1440 +@@ -1625,6 +1626,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
1441 + } else
1442 + hw_x = coordinates_x[i].x;
1443 +
1444 ++ if (dc_fixpt_le(one, hw_x))
1445 ++ hw_x = one;
1446 ++
1447 + norm_x = dc_fixpt_mul(norm_factor, hw_x);
1448 + index = dc_fixpt_floor(norm_x);
1449 + if (index < 0 || index > 255)
1450 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
1451 +index 79976921dc46f..c71d50e821682 100644
1452 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
1453 ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
1454 +@@ -358,6 +358,17 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
1455 + smu_baco->platform_support =
1456 + (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
1457 + false;
1458 ++
1459 ++ /*
1460 ++ * Disable BACO entry/exit completely on the SKUs below to
1461 ++ * avoid intermittent hardware failures.
1462 ++ */
1463 ++ if (((adev->pdev->device == 0x73A1) &&
1464 ++ (adev->pdev->revision == 0x00)) ||
1465 ++ ((adev->pdev->device == 0x73BF) &&
1466 ++ (adev->pdev->revision == 0xCF)))
1467 ++ smu_baco->platform_support = false;
1468 ++
1469 + }
1470 + }
1471 +
1472 +diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
1473 +index b03f7b8241f2b..7162f4c946afe 100644
1474 +--- a/drivers/gpu/drm/gma500/gma_display.c
1475 ++++ b/drivers/gpu/drm/gma500/gma_display.c
1476 +@@ -529,15 +529,18 @@ int gma_crtc_page_flip(struct drm_crtc *crtc,
1477 + WARN_ON(drm_crtc_vblank_get(crtc) != 0);
1478 +
1479 + gma_crtc->page_flip_event = event;
1480 ++ spin_unlock_irqrestore(&dev->event_lock, flags);
1481 +
1482 + /* Call this locked if we want an event at vblank interrupt. */
1483 + ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
1484 + if (ret) {
1485 +- gma_crtc->page_flip_event = NULL;
1486 +- drm_crtc_vblank_put(crtc);
1487 ++ spin_lock_irqsave(&dev->event_lock, flags);
1488 ++ if (gma_crtc->page_flip_event) {
1489 ++ gma_crtc->page_flip_event = NULL;
1490 ++ drm_crtc_vblank_put(crtc);
1491 ++ }
1492 ++ spin_unlock_irqrestore(&dev->event_lock, flags);
1493 + }
1494 +-
1495 +- spin_unlock_irqrestore(&dev->event_lock, flags);
1496 + } else {
1497 + ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
1498 + }
1499 +diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
1500 +index 43943e9802036..4e41c144a2902 100644
1501 +--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
1502 ++++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
1503 +@@ -1,7 +1,8 @@
1504 + # SPDX-License-Identifier: GPL-2.0-only
1505 + config DRM_HISI_HIBMC
1506 + tristate "DRM Support for Hisilicon Hibmc"
1507 +- depends on DRM && PCI && ARM64
1508 ++ depends on DRM && PCI && (ARM64 || COMPILE_TEST)
1509 ++ depends on MMU
1510 + select DRM_KMS_HELPER
1511 + select DRM_VRAM_HELPER
1512 + select DRM_TTM
1513 +diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
1514 +index ac14e598a14f7..a6d28533f1b12 100644
1515 +--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
1516 ++++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
1517 +@@ -673,6 +673,16 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
1518 + if (--dsi->refcount != 0)
1519 + return;
1520 +
1521 ++ /*
1522 ++ * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
1523 ++ * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
1524 ++ * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
1525 ++ * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
1526 ++ * after dsi is fully set.
1527 ++ */
1528 ++ mtk_dsi_stop(dsi);
1529 ++
1530 ++ mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
1531 + mtk_dsi_reset_engine(dsi);
1532 + mtk_dsi_lane0_ulp_mode_enter(dsi);
1533 + mtk_dsi_clk_ulp_mode_enter(dsi);
1534 +@@ -723,17 +733,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
1535 + if (!dsi->enabled)
1536 + return;
1537 +
1538 +- /*
1539 +- * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
1540 +- * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
1541 +- * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
1542 +- * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
1543 +- * after dsi is fully set.
1544 +- */
1545 +- mtk_dsi_stop(dsi);
1546 +-
1547 +- mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
1548 +-
1549 + dsi->enabled = false;
1550 + }
1551 +
1552 +@@ -796,10 +795,13 @@ static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
1553 +
1554 + static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
1555 + .attach = mtk_dsi_bridge_attach,
1556 ++ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
1557 + .atomic_disable = mtk_dsi_bridge_atomic_disable,
1558 ++ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
1559 + .atomic_enable = mtk_dsi_bridge_atomic_enable,
1560 + .atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable,
1561 + .atomic_post_disable = mtk_dsi_bridge_atomic_post_disable,
1562 ++ .atomic_reset = drm_atomic_helper_bridge_reset,
1563 + .mode_set = mtk_dsi_bridge_mode_set,
1564 + };
1565 +
1566 +diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
1567 +index 8dd7013c75f20..1a9685eb80026 100644
1568 +--- a/drivers/gpu/drm/panel/panel-simple.c
1569 ++++ b/drivers/gpu/drm/panel/panel-simple.c
1570 +@@ -2579,7 +2579,7 @@ static const struct panel_desc innolux_g121i1_l01 = {
1571 + .enable = 200,
1572 + .disable = 20,
1573 + },
1574 +- .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
1575 ++ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
1576 + .connector_type = DRM_MODE_CONNECTOR_LVDS,
1577 + };
1578 +
1579 +diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
1580 +index 13c6b857158fc..6b5d0722afa6c 100644
1581 +--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
1582 ++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
1583 +@@ -277,8 +277,9 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
1584 + return ret;
1585 + }
1586 +
1587 +-static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
1588 +- struct drm_display_mode *mode)
1589 ++static enum drm_mode_status
1590 ++cdn_dp_connector_mode_valid(struct drm_connector *connector,
1591 ++ struct drm_display_mode *mode)
1592 + {
1593 + struct cdn_dp_device *dp = connector_to_dp(connector);
1594 + struct drm_display_info *display_info = &dp->connector.display_info;
1595 +diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
1596 +index 50d9113f54025..ecfc299834e15 100644
1597 +--- a/drivers/hv/vmbus_drv.c
1598 ++++ b/drivers/hv/vmbus_drv.c
1599 +@@ -2340,7 +2340,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
1600 + bool fb_overlap_ok)
1601 + {
1602 + struct resource *iter, *shadow;
1603 +- resource_size_t range_min, range_max, start;
1604 ++ resource_size_t range_min, range_max, start, end;
1605 + const char *dev_n = dev_name(&device_obj->device);
1606 + int retval;
1607 +
1608 +@@ -2375,6 +2375,14 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
1609 + range_max = iter->end;
1610 + start = (range_min + align - 1) & ~(align - 1);
1611 + for (; start + size - 1 <= range_max; start += align) {
1612 ++ end = start + size - 1;
1613 ++
1614 ++ /* Skip the whole fb_mmio region if not fb_overlap_ok */
1615 ++ if (!fb_overlap_ok && fb_mmio &&
1616 ++ (((start >= fb_mmio->start) && (start <= fb_mmio->end)) ||
1617 ++ ((end >= fb_mmio->start) && (end <= fb_mmio->end))))
1618 ++ continue;
1619 ++
1620 + shadow = __request_region(iter, start, size, NULL,
1621 + IORESOURCE_BUSY);
1622 + if (!shadow)
1623 +diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
1624 +index 3f40995c0ca9a..2e4d05040e50e 100644
1625 +--- a/drivers/i2c/busses/i2c-imx.c
1626 ++++ b/drivers/i2c/busses/i2c-imx.c
1627 +@@ -1496,7 +1496,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
1628 + if (i2c_imx->dma)
1629 + i2c_imx_dma_free(i2c_imx);
1630 +
1631 +- if (ret == 0) {
1632 ++ if (ret >= 0) {
1633 + /* setup chip registers to defaults */
1634 + imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
1635 + imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
1636 +diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
1637 +index 8716032f030a0..ad5efd7497d1c 100644
1638 +--- a/drivers/i2c/busses/i2c-mlxbf.c
1639 ++++ b/drivers/i2c/busses/i2c-mlxbf.c
1640 +@@ -6,6 +6,7 @@
1641 + */
1642 +
1643 + #include <linux/acpi.h>
1644 ++#include <linux/bitfield.h>
1645 + #include <linux/delay.h>
1646 + #include <linux/err.h>
1647 + #include <linux/interrupt.h>
1648 +@@ -63,13 +64,14 @@
1649 + */
1650 + #define MLXBF_I2C_TYU_PLL_OUT_FREQ (400 * 1000 * 1000)
1651 + /* Reference clock for Bluefield - 156 MHz. */
1652 +-#define MLXBF_I2C_PLL_IN_FREQ (156 * 1000 * 1000)
1653 ++#define MLXBF_I2C_PLL_IN_FREQ 156250000ULL
1654 +
1655 + /* Constant used to determine the PLL frequency. */
1656 +-#define MLNXBF_I2C_COREPLL_CONST 16384
1657 ++#define MLNXBF_I2C_COREPLL_CONST 16384ULL
1658 ++
1659 ++#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000ULL
1660 +
1661 + /* PLL registers. */
1662 +-#define MLXBF_I2C_CORE_PLL_REG0 0x0
1663 + #define MLXBF_I2C_CORE_PLL_REG1 0x4
1664 + #define MLXBF_I2C_CORE_PLL_REG2 0x8
1665 +
1666 +@@ -181,22 +183,15 @@
1667 + #define MLXBF_I2C_COREPLL_FREQ MLXBF_I2C_TYU_PLL_OUT_FREQ
1668 +
1669 + /* Core PLL TYU configuration. */
1670 +-#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(12, 0)
1671 +-#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(3, 0)
1672 +-#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(5, 0)
1673 +-
1674 +-#define MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT 3
1675 +-#define MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT 16
1676 +-#define MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT 20
1677 ++#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(15, 3)
1678 ++#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(19, 16)
1679 ++#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(25, 20)
1680 +
1681 + /* Core PLL YU configuration. */
1682 + #define MLXBF_I2C_COREPLL_CORE_F_YU_MASK GENMASK(25, 0)
1683 + #define MLXBF_I2C_COREPLL_CORE_OD_YU_MASK GENMASK(3, 0)
1684 +-#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(5, 0)
1685 ++#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(31, 26)
1686 +
1687 +-#define MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT 0
1688 +-#define MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT 1
1689 +-#define MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT 26
1690 +
1691 + /* Core PLL frequency. */
1692 + static u64 mlxbf_i2c_corepll_frequency;
1693 +@@ -479,8 +474,6 @@ static struct mutex mlxbf_i2c_bus_lock;
1694 + #define MLXBF_I2C_MASK_8 GENMASK(7, 0)
1695 + #define MLXBF_I2C_MASK_16 GENMASK(15, 0)
1696 +
1697 +-#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000
1698 +-
1699 + /*
1700 + * Function to poll a set of bits at a specific address; it checks whether
1701 + * the bits are equal to zero when eq_zero is set to 'true', and not equal
1702 +@@ -669,7 +662,7 @@ static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave,
1703 + /* Clear status bits. */
1704 + writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_STATUS);
1705 + /* Set the cause data. */
1706 +- writel(~0x0, priv->smbus->io + MLXBF_I2C_CAUSE_OR_CLEAR);
1707 ++ writel(~0x0, priv->mst_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR);
1708 + /* Zero PEC byte. */
1709 + writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_PEC);
1710 + /* Zero byte count. */
1711 +@@ -738,6 +731,9 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
1712 + if (flags & MLXBF_I2C_F_WRITE) {
1713 + write_en = 1;
1714 + write_len += operation->length;
1715 ++ if (data_idx + operation->length >
1716 ++ MLXBF_I2C_MASTER_DATA_DESC_SIZE)
1717 ++ return -ENOBUFS;
1718 + memcpy(data_desc + data_idx,
1719 + operation->buffer, operation->length);
1720 + data_idx += operation->length;
1721 +@@ -1407,24 +1403,19 @@ static int mlxbf_i2c_init_master(struct platform_device *pdev,
1722 + return 0;
1723 + }
1724 +
1725 +-static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
1726 ++static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
1727 + {
1728 +- u64 core_frequency, pad_frequency;
1729 ++ u64 core_frequency;
1730 + u8 core_od, core_r;
1731 + u32 corepll_val;
1732 + u16 core_f;
1733 +
1734 +- pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
1735 +-
1736 + corepll_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
1737 +
1738 + /* Get Core PLL configuration bits. */
1739 +- core_f = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT) &
1740 +- MLXBF_I2C_COREPLL_CORE_F_TYU_MASK;
1741 +- core_od = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT) &
1742 +- MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK;
1743 +- core_r = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT) &
1744 +- MLXBF_I2C_COREPLL_CORE_R_TYU_MASK;
1745 ++ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_TYU_MASK, corepll_val);
1746 ++ core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK, corepll_val);
1747 ++ core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_TYU_MASK, corepll_val);
1748 +
1749 + /*
1750 + * Compute PLL output frequency as follow:
1751 +@@ -1436,31 +1427,26 @@ static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
1752 + * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
1753 + * and PadFrequency, respectively.
1754 + */
1755 +- core_frequency = pad_frequency * (++core_f);
1756 ++ core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f);
1757 + core_frequency /= (++core_r) * (++core_od);
1758 +
1759 + return core_frequency;
1760 + }
1761 +
1762 +-static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
1763 ++static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
1764 + {
1765 + u32 corepll_reg1_val, corepll_reg2_val;
1766 +- u64 corepll_frequency, pad_frequency;
1767 ++ u64 corepll_frequency;
1768 + u8 core_od, core_r;
1769 + u32 core_f;
1770 +
1771 +- pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
1772 +-
1773 + corepll_reg1_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
1774 + corepll_reg2_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG2);
1775 +
1776 + /* Get Core PLL configuration bits */
1777 +- core_f = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT) &
1778 +- MLXBF_I2C_COREPLL_CORE_F_YU_MASK;
1779 +- core_r = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT) &
1780 +- MLXBF_I2C_COREPLL_CORE_R_YU_MASK;
1781 +- core_od = rol32(corepll_reg2_val, MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT) &
1782 +- MLXBF_I2C_COREPLL_CORE_OD_YU_MASK;
1783 ++ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_YU_MASK, corepll_reg1_val);
1784 ++ core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_YU_MASK, corepll_reg1_val);
1785 ++ core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_YU_MASK, corepll_reg2_val);
1786 +
1787 + /*
1788 + * Compute PLL output frequency as follow:
1789 +@@ -1472,7 +1458,7 @@ static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
1790 + * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
1791 + * and PadFrequency, respectively.
1792 + */
1793 +- corepll_frequency = (pad_frequency * core_f) / MLNXBF_I2C_COREPLL_CONST;
1794 ++ corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST;
1795 + corepll_frequency /= (++core_r) * (++core_od);
1796 +
1797 + return corepll_frequency;
1798 +@@ -2180,14 +2166,14 @@ static struct mlxbf_i2c_chip_info mlxbf_i2c_chip[] = {
1799 + [1] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_1],
1800 + [2] = &mlxbf_i2c_gpio_res[MLXBF_I2C_CHIP_TYPE_1]
1801 + },
1802 +- .calculate_freq = mlxbf_calculate_freq_from_tyu
1803 ++ .calculate_freq = mlxbf_i2c_calculate_freq_from_tyu
1804 + },
1805 + [MLXBF_I2C_CHIP_TYPE_2] = {
1806 + .type = MLXBF_I2C_CHIP_TYPE_2,
1807 + .shared_res = {
1808 + [0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_2]
1809 + },
1810 +- .calculate_freq = mlxbf_calculate_freq_from_yu
1811 ++ .calculate_freq = mlxbf_i2c_calculate_freq_from_yu
1812 + }
1813 + };
1814 +
1815 +diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
1816 +index 2affdccb58e47..71a9320177727 100644
1817 +--- a/drivers/iommu/intel/iommu.c
1818 ++++ b/drivers/iommu/intel/iommu.c
1819 +@@ -539,7 +539,7 @@ static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
1820 + {
1821 + unsigned long fl_sagaw, sl_sagaw;
1822 +
1823 +- fl_sagaw = BIT(2) | (cap_fl1gp_support(iommu->cap) ? BIT(3) : 0);
1824 ++ fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0);
1825 + sl_sagaw = cap_sagaw(iommu->cap);
1826 +
1827 + /* Second level only. */
1828 +diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
1829 +index bfeb92d93de39..8ab1be03e7319 100644
1830 +--- a/drivers/media/usb/b2c2/flexcop-usb.c
1831 ++++ b/drivers/media/usb/b2c2/flexcop-usb.c
1832 +@@ -511,7 +511,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
1833 +
1834 + if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1)
1835 + return -ENODEV;
1836 +- if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[1].desc))
1837 ++ if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[0].desc))
1838 + return -ENODEV;
1839 +
1840 + switch (fc_usb->udev->speed) {
1841 +diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
1842 +index 1f0120cbe9e80..8ad095c19f271 100644
1843 +--- a/drivers/net/bonding/bond_3ad.c
1844 ++++ b/drivers/net/bonding/bond_3ad.c
1845 +@@ -87,8 +87,9 @@ static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
1846 + static u16 ad_ticks_per_sec;
1847 + static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
1848 +
1849 +-static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
1850 +- MULTICAST_LACPDU_ADDR;
1851 ++const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = {
1852 ++ 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02
1853 ++};
1854 +
1855 + /* ================= main 802.3ad protocol functions ================== */
1856 + static int ad_lacpdu_send(struct port *port);
1857 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1858 +index cd0d7b24f0140..402dffc508efb 100644
1859 +--- a/drivers/net/bonding/bond_main.c
1860 ++++ b/drivers/net/bonding/bond_main.c
1861 +@@ -862,12 +862,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
1862 + dev_uc_unsync(slave_dev, bond_dev);
1863 + dev_mc_unsync(slave_dev, bond_dev);
1864 +
1865 +- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1866 +- /* del lacpdu mc addr from mc list */
1867 +- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
1868 +-
1869 +- dev_mc_del(slave_dev, lacpdu_multicast);
1870 +- }
1871 ++ if (BOND_MODE(bond) == BOND_MODE_8023AD)
1872 ++ dev_mc_del(slave_dev, lacpdu_mcast_addr);
1873 + }
1874 +
1875 + /*--------------------------- Active slave change ---------------------------*/
1876 +@@ -887,7 +883,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
1877 + if (bond->dev->flags & IFF_ALLMULTI)
1878 + dev_set_allmulti(old_active->dev, -1);
1879 +
1880 +- bond_hw_addr_flush(bond->dev, old_active->dev);
1881 ++ if (bond->dev->flags & IFF_UP)
1882 ++ bond_hw_addr_flush(bond->dev, old_active->dev);
1883 + }
1884 +
1885 + if (new_active) {
1886 +@@ -898,10 +895,12 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
1887 + if (bond->dev->flags & IFF_ALLMULTI)
1888 + dev_set_allmulti(new_active->dev, 1);
1889 +
1890 +- netif_addr_lock_bh(bond->dev);
1891 +- dev_uc_sync(new_active->dev, bond->dev);
1892 +- dev_mc_sync(new_active->dev, bond->dev);
1893 +- netif_addr_unlock_bh(bond->dev);
1894 ++ if (bond->dev->flags & IFF_UP) {
1895 ++ netif_addr_lock_bh(bond->dev);
1896 ++ dev_uc_sync(new_active->dev, bond->dev);
1897 ++ dev_mc_sync(new_active->dev, bond->dev);
1898 ++ netif_addr_unlock_bh(bond->dev);
1899 ++ }
1900 + }
1901 + }
1902 +
1903 +@@ -2134,16 +2133,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
1904 + }
1905 + }
1906 +
1907 +- netif_addr_lock_bh(bond_dev);
1908 +- dev_mc_sync_multiple(slave_dev, bond_dev);
1909 +- dev_uc_sync_multiple(slave_dev, bond_dev);
1910 +- netif_addr_unlock_bh(bond_dev);
1911 +-
1912 +- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1913 +- /* add lacpdu mc addr to mc list */
1914 +- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
1915 ++ if (bond_dev->flags & IFF_UP) {
1916 ++ netif_addr_lock_bh(bond_dev);
1917 ++ dev_mc_sync_multiple(slave_dev, bond_dev);
1918 ++ dev_uc_sync_multiple(slave_dev, bond_dev);
1919 ++ netif_addr_unlock_bh(bond_dev);
1920 +
1921 +- dev_mc_add(slave_dev, lacpdu_multicast);
1922 ++ if (BOND_MODE(bond) == BOND_MODE_8023AD)
1923 ++ dev_mc_add(slave_dev, lacpdu_mcast_addr);
1924 + }
1925 + }
1926 +
1927 +@@ -2415,7 +2412,8 @@ static int __bond_release_one(struct net_device *bond_dev,
1928 + if (old_flags & IFF_ALLMULTI)
1929 + dev_set_allmulti(slave_dev, -1);
1930 +
1931 +- bond_hw_addr_flush(bond_dev, slave_dev);
1932 ++ if (old_flags & IFF_UP)
1933 ++ bond_hw_addr_flush(bond_dev, slave_dev);
1934 + }
1935 +
1936 + slave_disable_netpoll(slave);
1937 +@@ -3932,6 +3930,12 @@ static int bond_open(struct net_device *bond_dev)
1938 + struct list_head *iter;
1939 + struct slave *slave;
1940 +
1941 ++ if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) {
1942 ++ bond->rr_tx_counter = alloc_percpu(u32);
1943 ++ if (!bond->rr_tx_counter)
1944 ++ return -ENOMEM;
1945 ++ }
1946 ++
1947 + /* reset slave->backup and slave->inactive */
1948 + if (bond_has_slaves(bond)) {
1949 + bond_for_each_slave(bond, slave, iter) {
1950 +@@ -3969,6 +3973,9 @@ static int bond_open(struct net_device *bond_dev)
1951 + /* register to receive LACPDUs */
1952 + bond->recv_probe = bond_3ad_lacpdu_recv;
1953 + bond_3ad_initiate_agg_selection(bond, 1);
1954 ++
1955 ++ bond_for_each_slave(bond, slave, iter)
1956 ++ dev_mc_add(slave->dev, lacpdu_mcast_addr);
1957 + }
1958 +
1959 + if (bond_mode_can_use_xmit_hash(bond))
1960 +@@ -3980,6 +3987,7 @@ static int bond_open(struct net_device *bond_dev)
1961 + static int bond_close(struct net_device *bond_dev)
1962 + {
1963 + struct bonding *bond = netdev_priv(bond_dev);
1964 ++ struct slave *slave;
1965 +
1966 + bond_work_cancel_all(bond);
1967 + bond->send_peer_notif = 0;
1968 +@@ -3987,6 +3995,19 @@ static int bond_close(struct net_device *bond_dev)
1969 + bond_alb_deinitialize(bond);
1970 + bond->recv_probe = NULL;
1971 +
1972 ++ if (bond_uses_primary(bond)) {
1973 ++ rcu_read_lock();
1974 ++ slave = rcu_dereference(bond->curr_active_slave);
1975 ++ if (slave)
1976 ++ bond_hw_addr_flush(bond_dev, slave->dev);
1977 ++ rcu_read_unlock();
1978 ++ } else {
1979 ++ struct list_head *iter;
1980 ++
1981 ++ bond_for_each_slave(bond, slave, iter)
1982 ++ bond_hw_addr_flush(bond_dev, slave->dev);
1983 ++ }
1984 ++
1985 + return 0;
1986 + }
1987 +
1988 +@@ -5892,15 +5913,6 @@ static int bond_init(struct net_device *bond_dev)
1989 + if (!bond->wq)
1990 + return -ENOMEM;
1991 +
1992 +- if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) {
1993 +- bond->rr_tx_counter = alloc_percpu(u32);
1994 +- if (!bond->rr_tx_counter) {
1995 +- destroy_workqueue(bond->wq);
1996 +- bond->wq = NULL;
1997 +- return -ENOMEM;
1998 +- }
1999 +- }
2000 +-
2001 + spin_lock_init(&bond->stats_lock);
2002 + netdev_lockdep_set_classes(bond_dev);
2003 +
2004 +diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
2005 +index 18d7bb99ec1bd..837bca7347594 100644
2006 +--- a/drivers/net/can/flexcan.c
2007 ++++ b/drivers/net/can/flexcan.c
2008 +@@ -1036,11 +1036,6 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
2009 + u32 reg_ctrl, reg_id, reg_iflag1;
2010 + int i;
2011 +
2012 +- if (unlikely(drop)) {
2013 +- skb = ERR_PTR(-ENOBUFS);
2014 +- goto mark_as_read;
2015 +- }
2016 +-
2017 + mb = flexcan_get_mb(priv, n);
2018 +
2019 + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
2020 +@@ -1069,6 +1064,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
2021 + reg_ctrl = priv->read(&mb->can_ctrl);
2022 + }
2023 +
2024 ++ if (unlikely(drop)) {
2025 ++ skb = ERR_PTR(-ENOBUFS);
2026 ++ goto mark_as_read;
2027 ++ }
2028 ++
2029 + if (reg_ctrl & FLEXCAN_MB_CNT_EDL)
2030 + skb = alloc_canfd_skb(offload->dev, &cfd);
2031 + else
2032 +diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
2033 +index e26b3d6f5b482..5a43e542b302e 100644
2034 +--- a/drivers/net/can/usb/gs_usb.c
2035 ++++ b/drivers/net/can/usb/gs_usb.c
2036 +@@ -680,6 +680,7 @@ static int gs_can_open(struct net_device *netdev)
2037 + flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
2038 +
2039 + /* finally start device */
2040 ++ dev->can.state = CAN_STATE_ERROR_ACTIVE;
2041 + dm->mode = cpu_to_le32(GS_CAN_MODE_START);
2042 + dm->flags = cpu_to_le32(flags);
2043 + rc = usb_control_msg(interface_to_usbdev(dev->iface),
2044 +@@ -696,13 +697,12 @@ static int gs_can_open(struct net_device *netdev)
2045 + if (rc < 0) {
2046 + netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
2047 + kfree(dm);
2048 ++ dev->can.state = CAN_STATE_STOPPED;
2049 + return rc;
2050 + }
2051 +
2052 + kfree(dm);
2053 +
2054 +- dev->can.state = CAN_STATE_ERROR_ACTIVE;
2055 +-
2056 + parent->active_channels++;
2057 + if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
2058 + netif_start_queue(netdev);
2059 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2060 +index 6962abe2358b9..a6ca7ba5276c4 100644
2061 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2062 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2063 +@@ -709,7 +709,6 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
2064 +
2065 + for (i = 0; i < nr_pkts; i++) {
2066 + struct bnxt_sw_tx_bd *tx_buf;
2067 +- bool compl_deferred = false;
2068 + struct sk_buff *skb;
2069 + int j, last;
2070 +
2071 +@@ -718,6 +717,8 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
2072 + skb = tx_buf->skb;
2073 + tx_buf->skb = NULL;
2074 +
2075 ++ tx_bytes += skb->len;
2076 ++
2077 + if (tx_buf->is_push) {
2078 + tx_buf->is_push = 0;
2079 + goto next_tx_int;
2080 +@@ -738,8 +739,9 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
2081 + }
2082 + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2083 + if (bp->flags & BNXT_FLAG_CHIP_P5) {
2084 ++ /* PTP worker takes ownership of the skb */
2085 + if (!bnxt_get_tx_ts_p5(bp, skb))
2086 +- compl_deferred = true;
2087 ++ skb = NULL;
2088 + else
2089 + atomic_inc(&bp->ptp_cfg->tx_avail);
2090 + }
2091 +@@ -748,9 +750,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
2092 + next_tx_int:
2093 + cons = NEXT_TX(cons);
2094 +
2095 +- tx_bytes += skb->len;
2096 +- if (!compl_deferred)
2097 +- dev_kfree_skb_any(skb);
2098 ++ dev_kfree_skb_any(skb);
2099 + }
2100 +
2101 + netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
2102 +diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
2103 +index a139f2e9d59f0..e0e8dfd137930 100644
2104 +--- a/drivers/net/ethernet/freescale/enetc/Makefile
2105 ++++ b/drivers/net/ethernet/freescale/enetc/Makefile
2106 +@@ -9,7 +9,6 @@ fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
2107 +
2108 + obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
2109 + fsl-enetc-vf-y := enetc_vf.o $(common-objs)
2110 +-fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
2111 +
2112 + obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o
2113 + fsl-enetc-ierb-y := enetc_ierb.o
2114 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
2115 +index 042327b9981fa..c0265a6f10c00 100644
2116 +--- a/drivers/net/ethernet/freescale/enetc/enetc.c
2117 ++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
2118 +@@ -2142,7 +2142,7 @@ int enetc_close(struct net_device *ndev)
2119 + return 0;
2120 + }
2121 +
2122 +-static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
2123 ++int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
2124 + {
2125 + struct enetc_ndev_priv *priv = netdev_priv(ndev);
2126 + struct tc_mqprio_qopt *mqprio = type_data;
2127 +@@ -2196,25 +2196,6 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
2128 + return 0;
2129 + }
2130 +
2131 +-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
2132 +- void *type_data)
2133 +-{
2134 +- switch (type) {
2135 +- case TC_SETUP_QDISC_MQPRIO:
2136 +- return enetc_setup_tc_mqprio(ndev, type_data);
2137 +- case TC_SETUP_QDISC_TAPRIO:
2138 +- return enetc_setup_tc_taprio(ndev, type_data);
2139 +- case TC_SETUP_QDISC_CBS:
2140 +- return enetc_setup_tc_cbs(ndev, type_data);
2141 +- case TC_SETUP_QDISC_ETF:
2142 +- return enetc_setup_tc_txtime(ndev, type_data);
2143 +- case TC_SETUP_BLOCK:
2144 +- return enetc_setup_tc_psfp(ndev, type_data);
2145 +- default:
2146 +- return -EOPNOTSUPP;
2147 +- }
2148 +-}
2149 +-
2150 + static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
2151 + struct netlink_ext_ack *extack)
2152 + {
2153 +@@ -2307,29 +2288,6 @@ static int enetc_set_rss(struct net_device *ndev, int en)
2154 + return 0;
2155 + }
2156 +
2157 +-static int enetc_set_psfp(struct net_device *ndev, int en)
2158 +-{
2159 +- struct enetc_ndev_priv *priv = netdev_priv(ndev);
2160 +- int err;
2161 +-
2162 +- if (en) {
2163 +- err = enetc_psfp_enable(priv);
2164 +- if (err)
2165 +- return err;
2166 +-
2167 +- priv->active_offloads |= ENETC_F_QCI;
2168 +- return 0;
2169 +- }
2170 +-
2171 +- err = enetc_psfp_disable(priv);
2172 +- if (err)
2173 +- return err;
2174 +-
2175 +- priv->active_offloads &= ~ENETC_F_QCI;
2176 +-
2177 +- return 0;
2178 +-}
2179 +-
2180 + static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
2181 + {
2182 + struct enetc_ndev_priv *priv = netdev_priv(ndev);
2183 +@@ -2348,11 +2306,9 @@ static void enetc_enable_txvlan(struct net_device *ndev, bool en)
2184 + enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
2185 + }
2186 +
2187 +-int enetc_set_features(struct net_device *ndev,
2188 +- netdev_features_t features)
2189 ++void enetc_set_features(struct net_device *ndev, netdev_features_t features)
2190 + {
2191 + netdev_features_t changed = ndev->features ^ features;
2192 +- int err = 0;
2193 +
2194 + if (changed & NETIF_F_RXHASH)
2195 + enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
2196 +@@ -2364,11 +2320,6 @@ int enetc_set_features(struct net_device *ndev,
2197 + if (changed & NETIF_F_HW_VLAN_CTAG_TX)
2198 + enetc_enable_txvlan(ndev,
2199 + !!(features & NETIF_F_HW_VLAN_CTAG_TX));
2200 +-
2201 +- if (changed & NETIF_F_HW_TC)
2202 +- err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
2203 +-
2204 +- return err;
2205 + }
2206 +
2207 + #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
2208 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
2209 +index 08b283347d9ce..f304cdb854ec4 100644
2210 +--- a/drivers/net/ethernet/freescale/enetc/enetc.h
2211 ++++ b/drivers/net/ethernet/freescale/enetc/enetc.h
2212 +@@ -385,11 +385,9 @@ void enetc_start(struct net_device *ndev);
2213 + void enetc_stop(struct net_device *ndev);
2214 + netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
2215 + struct net_device_stats *enetc_get_stats(struct net_device *ndev);
2216 +-int enetc_set_features(struct net_device *ndev,
2217 +- netdev_features_t features);
2218 ++void enetc_set_features(struct net_device *ndev, netdev_features_t features);
2219 + int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
2220 +-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
2221 +- void *type_data);
2222 ++int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data);
2223 + int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp);
2224 + int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
2225 + struct xdp_frame **frames, u32 flags);
2226 +@@ -421,6 +419,7 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2227 + int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
2228 + int enetc_psfp_init(struct enetc_ndev_priv *priv);
2229 + int enetc_psfp_clean(struct enetc_ndev_priv *priv);
2230 ++int enetc_set_psfp(struct net_device *ndev, bool en);
2231 +
2232 + static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
2233 + {
2234 +@@ -496,4 +495,9 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
2235 + {
2236 + return 0;
2237 + }
2238 ++
2239 ++static inline int enetc_set_psfp(struct net_device *ndev, bool en)
2240 ++{
2241 ++ return 0;
2242 ++}
2243 + #endif
2244 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
2245 +index d522bd5c90b49..3615357cc60fb 100644
2246 +--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
2247 ++++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
2248 +@@ -708,6 +708,13 @@ static int enetc_pf_set_features(struct net_device *ndev,
2249 + {
2250 + netdev_features_t changed = ndev->features ^ features;
2251 + struct enetc_ndev_priv *priv = netdev_priv(ndev);
2252 ++ int err;
2253 ++
2254 ++ if (changed & NETIF_F_HW_TC) {
2255 ++ err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
2256 ++ if (err)
2257 ++ return err;
2258 ++ }
2259 +
2260 + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
2261 + struct enetc_pf *pf = enetc_si_priv(priv->si);
2262 +@@ -721,7 +728,28 @@ static int enetc_pf_set_features(struct net_device *ndev,
2263 + if (changed & NETIF_F_LOOPBACK)
2264 + enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
2265 +
2266 +- return enetc_set_features(ndev, features);
2267 ++ enetc_set_features(ndev, features);
2268 ++
2269 ++ return 0;
2270 ++}
2271 ++
2272 ++static int enetc_pf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
2273 ++ void *type_data)
2274 ++{
2275 ++ switch (type) {
2276 ++ case TC_SETUP_QDISC_MQPRIO:
2277 ++ return enetc_setup_tc_mqprio(ndev, type_data);
2278 ++ case TC_SETUP_QDISC_TAPRIO:
2279 ++ return enetc_setup_tc_taprio(ndev, type_data);
2280 ++ case TC_SETUP_QDISC_CBS:
2281 ++ return enetc_setup_tc_cbs(ndev, type_data);
2282 ++ case TC_SETUP_QDISC_ETF:
2283 ++ return enetc_setup_tc_txtime(ndev, type_data);
2284 ++ case TC_SETUP_BLOCK:
2285 ++ return enetc_setup_tc_psfp(ndev, type_data);
2286 ++ default:
2287 ++ return -EOPNOTSUPP;
2288 ++ }
2289 + }
2290 +
2291 + static const struct net_device_ops enetc_ndev_ops = {
2292 +@@ -738,7 +766,7 @@ static const struct net_device_ops enetc_ndev_ops = {
2293 + .ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk,
2294 + .ndo_set_features = enetc_pf_set_features,
2295 + .ndo_eth_ioctl = enetc_ioctl,
2296 +- .ndo_setup_tc = enetc_setup_tc,
2297 ++ .ndo_setup_tc = enetc_pf_setup_tc,
2298 + .ndo_bpf = enetc_setup_bpf,
2299 + .ndo_xdp_xmit = enetc_xdp_xmit,
2300 + };
2301 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
2302 +index d779dde522c86..6b236e0fd806b 100644
2303 +--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
2304 ++++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
2305 +@@ -1529,6 +1529,29 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2306 + }
2307 + }
2308 +
2309 ++int enetc_set_psfp(struct net_device *ndev, bool en)
2310 ++{
2311 ++ struct enetc_ndev_priv *priv = netdev_priv(ndev);
2312 ++ int err;
2313 ++
2314 ++ if (en) {
2315 ++ err = enetc_psfp_enable(priv);
2316 ++ if (err)
2317 ++ return err;
2318 ++
2319 ++ priv->active_offloads |= ENETC_F_QCI;
2320 ++ return 0;
2321 ++ }
2322 ++
2323 ++ err = enetc_psfp_disable(priv);
2324 ++ if (err)
2325 ++ return err;
2326 ++
2327 ++ priv->active_offloads &= ~ENETC_F_QCI;
2328 ++
2329 ++ return 0;
2330 ++}
2331 ++
2332 + int enetc_psfp_init(struct enetc_ndev_priv *priv)
2333 + {
2334 + if (epsfp.psfp_sfi_bitmap)
2335 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
2336 +index 1a9d1e8b772ce..acd4a3167ed6a 100644
2337 +--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
2338 ++++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
2339 +@@ -88,7 +88,20 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
2340 + static int enetc_vf_set_features(struct net_device *ndev,
2341 + netdev_features_t features)
2342 + {
2343 +- return enetc_set_features(ndev, features);
2344 ++ enetc_set_features(ndev, features);
2345 ++
2346 ++ return 0;
2347 ++}
2348 ++
2349 ++static int enetc_vf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
2350 ++ void *type_data)
2351 ++{
2352 ++ switch (type) {
2353 ++ case TC_SETUP_QDISC_MQPRIO:
2354 ++ return enetc_setup_tc_mqprio(ndev, type_data);
2355 ++ default:
2356 ++ return -EOPNOTSUPP;
2357 ++ }
2358 + }
2359 +
2360 + /* Probing/ Init */
2361 +@@ -100,7 +113,7 @@ static const struct net_device_ops enetc_ndev_ops = {
2362 + .ndo_set_mac_address = enetc_vf_set_mac_addr,
2363 + .ndo_set_features = enetc_vf_set_features,
2364 + .ndo_eth_ioctl = enetc_ioctl,
2365 +- .ndo_setup_tc = enetc_setup_tc,
2366 ++ .ndo_setup_tc = enetc_vf_setup_tc,
2367 + };
2368 +
2369 + static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
2370 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
2371 +index ce6eea7a60027..5922520fdb01d 100644
2372 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
2373 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
2374 +@@ -5766,6 +5766,26 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi)
2375 + }
2376 + }
2377 +
2378 ++/**
2379 ++ * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
2380 ++ * @vsi: Pointer to vsi structure
2381 ++ * @max_tx_rate: max TX rate in bytes to be converted into Mbits
2382 ++ *
2383 ++ * Helper function to convert units before sending them to set the BW limit
2384 ++ **/
2385 ++static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
2386 ++{
2387 ++ if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
2388 ++ dev_warn(&vsi->back->pdev->dev,
2389 ++ "Setting max tx rate to minimum usable value of 50Mbps.\n");
2390 ++ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
2391 ++ } else {
2392 ++ do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
2393 ++ }
2394 ++
2395 ++ return max_tx_rate;
2396 ++}
2397 ++
2398 + /**
2399 + * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
2400 + * @vsi: VSI to be configured
2401 +@@ -5788,10 +5808,10 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
2402 + max_tx_rate, seid);
2403 + return -EINVAL;
2404 + }
2405 +- if (max_tx_rate && max_tx_rate < 50) {
2406 ++ if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
2407 + dev_warn(&pf->pdev->dev,
2408 + "Setting max tx rate to minimum usable value of 50Mbps.\n");
2409 +- max_tx_rate = 50;
2410 ++ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
2411 + }
2412 +
2413 + /* Tx rate credits are in values of 50Mbps, 0 is disabled */
2414 +@@ -8082,9 +8102,9 @@ config_tc:
2415 +
2416 + if (i40e_is_tc_mqprio_enabled(pf)) {
2417 + if (vsi->mqprio_qopt.max_rate[0]) {
2418 +- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
2419 ++ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
2420 ++ vsi->mqprio_qopt.max_rate[0]);
2421 +
2422 +- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
2423 + ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
2424 + if (!ret) {
2425 + u64 credits = max_tx_rate;
2426 +@@ -10829,10 +10849,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
2427 + }
2428 +
2429 + if (vsi->mqprio_qopt.max_rate[0]) {
2430 +- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
2431 ++ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
2432 ++ vsi->mqprio_qopt.max_rate[0]);
2433 + u64 credits = 0;
2434 +
2435 +- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
2436 + ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
2437 + if (ret)
2438 + goto end_unlock;
2439 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2440 +index d78ac5e7f658f..c078fbaf19fd4 100644
2441 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2442 ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2443 +@@ -2038,6 +2038,25 @@ static void i40e_del_qch(struct i40e_vf *vf)
2444 + }
2445 + }
2446 +
2447 ++/**
2448 ++ * i40e_vc_get_max_frame_size
2449 ++ * @vf: pointer to the VF
2450 ++ *
2451 ++ * Max frame size is determined based on the current port's max frame size and
2452 ++ * whether a port VLAN is configured on this VF. The VF is not aware whether
2453 ++ * it's in a port VLAN, so the PF needs to account for this in max frame size
2454 ++ * checks and when sending the max frame size to the VF.
2455 ++ **/
2456 ++static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
2457 ++{
2458 ++ u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
2459 ++
2460 ++ if (vf->port_vlan_id)
2461 ++ max_frame_size -= VLAN_HLEN;
2462 ++
2463 ++ return max_frame_size;
2464 ++}
2465 ++
2466 + /**
2467 + * i40e_vc_get_vf_resources_msg
2468 + * @vf: pointer to the VF info
2469 +@@ -2139,6 +2158,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
2470 + vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
2471 + vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
2472 + vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
2473 ++ vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
2474 +
2475 + if (vf->lan_vsi_idx) {
2476 + vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
2477 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
2478 +index 5448ed0e0357f..e76e3df3e2d9e 100644
2479 +--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
2480 ++++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
2481 +@@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
2482 + {
2483 + u32 head, tail;
2484 +
2485 ++ /* underlying hardware might not allow access and/or always return
2486 ++ * 0 for the head/tail registers, so just use the cached values
2487 ++ */
2488 + head = ring->next_to_clean;
2489 +- tail = readl(ring->tail);
2490 ++ tail = ring->next_to_use;
2491 +
2492 + if (head != tail)
2493 + return (head < tail) ?
2494 +@@ -1355,7 +1358,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
2495 + #endif
2496 + struct sk_buff *skb;
2497 +
2498 +- if (!rx_buffer)
2499 ++ if (!rx_buffer || !size)
2500 + return NULL;
2501 + /* prefetch first cache line of first page */
2502 + va = page_address(rx_buffer->page) + rx_buffer->page_offset;
2503 +@@ -1513,7 +1516,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
2504 + /* exit if we failed to retrieve a buffer */
2505 + if (!skb) {
2506 + rx_ring->rx_stats.alloc_buff_failed++;
2507 +- if (rx_buffer)
2508 ++ if (rx_buffer && size)
2509 + rx_buffer->pagecnt_bias++;
2510 + break;
2511 + }
2512 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
2513 +index 7013769fc0389..c6eb0d0748ea9 100644
2514 +--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
2515 ++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
2516 +@@ -244,11 +244,14 @@ out:
2517 + void iavf_configure_queues(struct iavf_adapter *adapter)
2518 + {
2519 + struct virtchnl_vsi_queue_config_info *vqci;
2520 +- struct virtchnl_queue_pair_info *vqpi;
2521 ++ int i, max_frame = adapter->vf_res->max_mtu;
2522 + int pairs = adapter->num_active_queues;
2523 +- int i, max_frame = IAVF_MAX_RXBUFFER;
2524 ++ struct virtchnl_queue_pair_info *vqpi;
2525 + size_t len;
2526 +
2527 ++ if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
2528 ++ max_frame = IAVF_MAX_RXBUFFER;
2529 ++
2530 + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
2531 + /* bail because we already have a command pending */
2532 + dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
2533 +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
2534 +index 63ae4674d2000..ffbba5f6b7a5f 100644
2535 +--- a/drivers/net/ethernet/intel/ice/ice_main.c
2536 ++++ b/drivers/net/ethernet/intel/ice/ice_main.c
2537 +@@ -2255,8 +2255,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2538 + return -EBUSY;
2539 + }
2540 +
2541 +- ice_unplug_aux_dev(pf);
2542 +-
2543 + switch (reset) {
2544 + case ICE_RESET_PFR:
2545 + set_bit(ICE_PFR_REQ, pf->state);
2546 +diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
2547 +index f979ba7e5effc..caa4380ada138 100644
2548 +--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
2549 ++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
2550 +@@ -178,6 +178,9 @@ static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg)
2551 + /* Only return ad bits of the gw register */
2552 + ret &= MLXBF_GIGE_MDIO_GW_AD_MASK;
2553 +
2554 ++ /* The MDIO lock is set on read. To release it, clear gw register */
2555 ++ writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
2556 ++
2557 + return ret;
2558 + }
2559 +
2560 +@@ -201,6 +204,9 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
2561 + ret = readl_poll_timeout_atomic(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET,
2562 + temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK), 100, 1000000);
2563 +
2564 ++ /* The MDIO lock is set on read. To release it, clear gw register */
2565 ++ writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
2566 ++
2567 + return ret;
2568 + }
2569 +
2570 +diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
2571 +index cee75b561f59d..f577507f522b7 100644
2572 +--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
2573 ++++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
2574 +@@ -368,6 +368,11 @@ static void mana_gd_process_eq_events(void *arg)
2575 + break;
2576 + }
2577 +
2578 ++ /* Per GDMA spec, rmb is necessary after checking owner_bits, before
2579 ++ * reading eqe.
2580 ++ */
2581 ++ rmb();
2582 ++
2583 + mana_gd_process_eqe(eq);
2584 +
2585 + eq->head++;
2586 +@@ -1096,6 +1101,11 @@ static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
2587 + if (WARN_ON_ONCE(owner_bits != new_bits))
2588 + return -1;
2589 +
2590 ++ /* Per GDMA spec, rmb is necessary after checking owner_bits, before
2591 ++ * reading completion info
2592 ++ */
2593 ++ rmb();
2594 ++
2595 + comp->wq_num = cqe->cqe_info.wq_num;
2596 + comp->is_sq = cqe->cqe_info.is_sq;
2597 + memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);
2598 +diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
2599 +index 4e08b7219403c..12420239c8ca2 100644
2600 +--- a/drivers/net/ethernet/renesas/ravb_main.c
2601 ++++ b/drivers/net/ethernet/renesas/ravb_main.c
2602 +@@ -1115,6 +1115,8 @@ static int ravb_phy_init(struct net_device *ndev)
2603 + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2604 + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
2605 +
2606 ++ /* Indicate that the MAC is responsible for managing PHY PM */
2607 ++ phydev->mac_managed_pm = true;
2608 + phy_attached_info(phydev);
2609 +
2610 + return 0;
2611 +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
2612 +index 1374faa229a27..4e190f5e32c3d 100644
2613 +--- a/drivers/net/ethernet/renesas/sh_eth.c
2614 ++++ b/drivers/net/ethernet/renesas/sh_eth.c
2615 +@@ -2033,6 +2033,8 @@ static int sh_eth_phy_init(struct net_device *ndev)
2616 + }
2617 + }
2618 +
2619 ++ /* Indicate that the MAC is responsible for managing PHY PM */
2620 ++ phydev->mac_managed_pm = true;
2621 + phy_attached_info(phydev);
2622 +
2623 + return 0;
2624 +diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
2625 +index b1657e03a74f5..450fcedb7042a 100644
2626 +--- a/drivers/net/ethernet/sfc/efx_channels.c
2627 ++++ b/drivers/net/ethernet/sfc/efx_channels.c
2628 +@@ -329,7 +329,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
2629 + efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
2630 + efx->n_rx_channels = 1;
2631 + efx->n_tx_channels = 1;
2632 +- efx->tx_channel_offset = 1;
2633 ++ efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0;
2634 + efx->n_xdp_channels = 0;
2635 + efx->xdp_channel_offset = efx->n_channels;
2636 + efx->legacy_irq = efx->pci_dev->irq;
2637 +diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
2638 +index 6983799e1c05d..e0bc2c1dc81a5 100644
2639 +--- a/drivers/net/ethernet/sfc/tx.c
2640 ++++ b/drivers/net/ethernet/sfc/tx.c
2641 +@@ -548,7 +548,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
2642 + * previous packets out.
2643 + */
2644 + if (!netdev_xmit_more())
2645 +- efx_tx_send_pending(tx_queue->channel);
2646 ++ efx_tx_send_pending(efx_get_tx_channel(efx, index));
2647 + return NETDEV_TX_OK;
2648 + }
2649 +
2650 +diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
2651 +index b05ee2e0e3051..735f24a70626b 100644
2652 +--- a/drivers/net/ethernet/sun/sunhme.c
2653 ++++ b/drivers/net/ethernet/sun/sunhme.c
2654 +@@ -2039,9 +2039,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
2655 +
2656 + skb_reserve(copy_skb, 2);
2657 + skb_put(copy_skb, len);
2658 +- dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
2659 ++ dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
2660 + skb_copy_from_linear_data(skb, copy_skb->data, len);
2661 +- dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
2662 ++ dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
2663 + /* Reuse original ring buffer. */
2664 + hme_write_rxd(hp, this,
2665 + (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
2666 +diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
2667 +index 90f3aec55b365..b84baedda5f69 100644
2668 +--- a/drivers/net/ipa/ipa_qmi.c
2669 ++++ b/drivers/net/ipa/ipa_qmi.c
2670 +@@ -308,12 +308,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
2671 + mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE);
2672 + req.v4_route_tbl_info_valid = 1;
2673 + req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
2674 +- req.v4_route_tbl_info.count = mem->size / sizeof(__le64);
2675 ++ req.v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
2676 +
2677 + mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE);
2678 + req.v6_route_tbl_info_valid = 1;
2679 + req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
2680 +- req.v6_route_tbl_info.count = mem->size / sizeof(__le64);
2681 ++ req.v6_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
2682 +
2683 + mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER);
2684 + req.v4_filter_tbl_start_valid = 1;
2685 +@@ -352,7 +352,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
2686 + req.v4_hash_route_tbl_info_valid = 1;
2687 + req.v4_hash_route_tbl_info.start =
2688 + ipa->mem_offset + mem->offset;
2689 +- req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64);
2690 ++ req.v4_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
2691 + }
2692 +
2693 + mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED);
2694 +@@ -360,7 +360,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
2695 + req.v6_hash_route_tbl_info_valid = 1;
2696 + req.v6_hash_route_tbl_info.start =
2697 + ipa->mem_offset + mem->offset;
2698 +- req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64);
2699 ++ req.v6_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
2700 + }
2701 +
2702 + mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED);
2703 +diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
2704 +index 6838e8065072b..75d3fc0092e92 100644
2705 +--- a/drivers/net/ipa/ipa_qmi_msg.c
2706 ++++ b/drivers/net/ipa/ipa_qmi_msg.c
2707 +@@ -311,7 +311,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
2708 + .tlv_type = 0x12,
2709 + .offset = offsetof(struct ipa_init_modem_driver_req,
2710 + v4_route_tbl_info),
2711 +- .ei_array = ipa_mem_array_ei,
2712 ++ .ei_array = ipa_mem_bounds_ei,
2713 + },
2714 + {
2715 + .data_type = QMI_OPT_FLAG,
2716 +@@ -332,7 +332,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
2717 + .tlv_type = 0x13,
2718 + .offset = offsetof(struct ipa_init_modem_driver_req,
2719 + v6_route_tbl_info),
2720 +- .ei_array = ipa_mem_array_ei,
2721 ++ .ei_array = ipa_mem_bounds_ei,
2722 + },
2723 + {
2724 + .data_type = QMI_OPT_FLAG,
2725 +@@ -496,7 +496,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
2726 + .tlv_type = 0x1b,
2727 + .offset = offsetof(struct ipa_init_modem_driver_req,
2728 + v4_hash_route_tbl_info),
2729 +- .ei_array = ipa_mem_array_ei,
2730 ++ .ei_array = ipa_mem_bounds_ei,
2731 + },
2732 + {
2733 + .data_type = QMI_OPT_FLAG,
2734 +@@ -517,7 +517,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
2735 + .tlv_type = 0x1c,
2736 + .offset = offsetof(struct ipa_init_modem_driver_req,
2737 + v6_hash_route_tbl_info),
2738 +- .ei_array = ipa_mem_array_ei,
2739 ++ .ei_array = ipa_mem_bounds_ei,
2740 + },
2741 + {
2742 + .data_type = QMI_OPT_FLAG,
2743 +diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
2744 +index 3233d145fd87c..51b39ffe020ed 100644
2745 +--- a/drivers/net/ipa/ipa_qmi_msg.h
2746 ++++ b/drivers/net/ipa/ipa_qmi_msg.h
2747 +@@ -86,9 +86,11 @@ enum ipa_platform_type {
2748 + IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 0x5, /* QNX MSM */
2749 + };
2750 +
2751 +-/* This defines the start and end offset of a range of memory. Both
2752 +- * fields are offsets relative to the start of IPA shared memory.
2753 +- * The end value is the last addressable byte *within* the range.
2754 ++/* This defines the start and end offset of a range of memory. The start
2755 ++ * value is a byte offset relative to the start of IPA shared memory. The
2756 ++ * end value is the last addressable unit *within* the range. Typically
2757 ++ * the end value is in units of bytes, but it can also be a maximum
2758 ++ * array index value.
2759 + */
2760 + struct ipa_mem_bounds {
2761 + u32 start;
2762 +@@ -129,18 +131,19 @@ struct ipa_init_modem_driver_req {
2763 + u8 hdr_tbl_info_valid;
2764 + struct ipa_mem_bounds hdr_tbl_info;
2765 +
2766 +- /* Routing table information. These define the location and size of
2767 +- * non-hashable IPv4 and IPv6 filter tables. The start values are
2768 +- * offsets relative to the start of IPA shared memory.
2769 ++ /* Routing table information. These define the location and maximum
2770 ++ * *index* (not byte) for the modem portion of non-hashable IPv4 and
2771 ++ * IPv6 routing tables. The start values are byte offsets relative
2772 ++ * to the start of IPA shared memory.
2773 + */
2774 + u8 v4_route_tbl_info_valid;
2775 +- struct ipa_mem_array v4_route_tbl_info;
2776 ++ struct ipa_mem_bounds v4_route_tbl_info;
2777 + u8 v6_route_tbl_info_valid;
2778 +- struct ipa_mem_array v6_route_tbl_info;
2779 ++ struct ipa_mem_bounds v6_route_tbl_info;
2780 +
2781 + /* Filter table information. These define the location of the
2782 + * non-hashable IPv4 and IPv6 filter tables. The start values are
2783 +- * offsets relative to the start of IPA shared memory.
2784 ++ * byte offsets relative to the start of IPA shared memory.
2785 + */
2786 + u8 v4_filter_tbl_start_valid;
2787 + u32 v4_filter_tbl_start;
2788 +@@ -181,18 +184,20 @@ struct ipa_init_modem_driver_req {
2789 + u8 zip_tbl_info_valid;
2790 + struct ipa_mem_bounds zip_tbl_info;
2791 +
2792 +- /* Routing table information. These define the location and size
2793 +- * of hashable IPv4 and IPv6 filter tables. The start values are
2794 +- * offsets relative to the start of IPA shared memory.
2795 ++ /* Routing table information. These define the location and maximum
2796 ++ * *index* (not byte) for the modem portion of hashable IPv4 and IPv6
2797 ++ * routing tables (if supported by hardware). The start values are
2798 ++ * byte offsets relative to the start of IPA shared memory.
2799 + */
2800 + u8 v4_hash_route_tbl_info_valid;
2801 +- struct ipa_mem_array v4_hash_route_tbl_info;
2802 ++ struct ipa_mem_bounds v4_hash_route_tbl_info;
2803 + u8 v6_hash_route_tbl_info_valid;
2804 +- struct ipa_mem_array v6_hash_route_tbl_info;
2805 ++ struct ipa_mem_bounds v6_hash_route_tbl_info;
2806 +
2807 + /* Filter table information. These define the location and size
2808 +- * of hashable IPv4 and IPv6 filter tables. The start values are
2809 +- * offsets relative to the start of IPA shared memory.
2810 ++ * of hashable IPv4 and IPv6 filter tables (if supported by hardware).
2811 ++ * The start values are byte offsets relative to the start of IPA
2812 ++ * shared memory.
2813 + */
2814 + u8 v4_hash_filter_tbl_start_valid;
2815 + u32 v4_hash_filter_tbl_start;
2816 +diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
2817 +index 1da334f54944a..6bf486d2b6799 100644
2818 +--- a/drivers/net/ipa/ipa_table.c
2819 ++++ b/drivers/net/ipa/ipa_table.c
2820 +@@ -108,8 +108,6 @@
2821 +
2822 + /* Assignment of route table entries to the modem and AP */
2823 + #define IPA_ROUTE_MODEM_MIN 0
2824 +-#define IPA_ROUTE_MODEM_COUNT 8
2825 +-
2826 + #define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT
2827 + #define IPA_ROUTE_AP_COUNT \
2828 + (IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
2829 +diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
2830 +index b6a9a0d79d68e..1538e2e1732fe 100644
2831 +--- a/drivers/net/ipa/ipa_table.h
2832 ++++ b/drivers/net/ipa/ipa_table.h
2833 +@@ -13,6 +13,9 @@ struct ipa;
2834 + /* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
2835 + #define IPA_FILTER_COUNT_MAX 14
2836 +
2837 ++/* The number of route table entries allotted to the modem */
2838 ++#define IPA_ROUTE_MODEM_COUNT 8
2839 ++
2840 + /* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
2841 + #define IPA_ROUTE_COUNT_MAX 15
2842 +
2843 +diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
2844 +index 6cd50106e6112..d7fb6302d699b 100644
2845 +--- a/drivers/net/ipvlan/ipvlan_core.c
2846 ++++ b/drivers/net/ipvlan/ipvlan_core.c
2847 +@@ -496,7 +496,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
2848 +
2849 + static int ipvlan_process_outbound(struct sk_buff *skb)
2850 + {
2851 +- struct ethhdr *ethh = eth_hdr(skb);
2852 + int ret = NET_XMIT_DROP;
2853 +
2854 + /* The ipvlan is a pseudo-L2 device, so the packets that we receive
2855 +@@ -506,6 +505,8 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
2856 + if (skb_mac_header_was_set(skb)) {
2857 + 	/* In this mode we don't care about
2858 + * multicast and broadcast traffic */
2859 ++ struct ethhdr *ethh = eth_hdr(skb);
2860 ++
2861 + if (is_multicast_ether_addr(ethh->h_dest)) {
2862 + pr_debug_ratelimited(
2863 + "Dropped {multi|broad}cast of type=[%x]\n",
2864 +@@ -590,7 +591,7 @@ out:
2865 + static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
2866 + {
2867 + const struct ipvl_dev *ipvlan = netdev_priv(dev);
2868 +- struct ethhdr *eth = eth_hdr(skb);
2869 ++ struct ethhdr *eth = skb_eth_hdr(skb);
2870 + struct ipvl_addr *addr;
2871 + void *lyr3h;
2872 + int addr_type;
2873 +@@ -620,6 +621,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
2874 + return dev_forward_skb(ipvlan->phy_dev, skb);
2875 +
2876 + } else if (is_multicast_ether_addr(eth->h_dest)) {
2877 ++ skb_reset_mac_header(skb);
2878 + ipvlan_skb_crossing_ns(skb, NULL);
2879 + ipvlan_multicast_enqueue(ipvlan->port, skb, true);
2880 + return NET_XMIT_SUCCESS;
2881 +diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
2882 +index 9e3c815a070f1..796e9c7857d09 100644
2883 +--- a/drivers/net/mdio/of_mdio.c
2884 ++++ b/drivers/net/mdio/of_mdio.c
2885 +@@ -231,6 +231,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
2886 + return 0;
2887 +
2888 + unregister:
2889 ++ of_node_put(child);
2890 + mdiobus_unregister(mdio);
2891 + return rc;
2892 + }
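The of_mdio fix matters because for_each_available_child_of_node() takes a reference on the current child at each iteration, so any early exit from the loop must drop that reference with of_node_put() or the node refcount leaks. A minimal sketch of the rule, where do_child() is a hypothetical callback:

#include <linux/of.h>

static int scan_children(struct device_node *np,
                         int (*do_child)(struct device_node *child))
{
        struct device_node *child;
        int rc;

        for_each_available_child_of_node(np, child) {
                rc = do_child(child);
                if (rc) {
                        /* drop the reference the iterator took on @child */
                        of_node_put(child);
                        return rc;
                }
        }
        return 0;
}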
2893 +diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
2894 +index 3221224525ac9..2f2765d7f84c8 100644
2895 +--- a/drivers/net/phy/aquantia_main.c
2896 ++++ b/drivers/net/phy/aquantia_main.c
2897 +@@ -90,6 +90,9 @@
2898 + #define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8)
2899 + #define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0)
2900 +
2901 ++#define VEND1_GLOBAL_GEN_STAT2 0xc831
2902 ++#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15)
2903 ++
2904 + #define VEND1_GLOBAL_RSVD_STAT1 0xc885
2905 + #define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4)
2906 + #define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0)
2907 +@@ -124,6 +127,12 @@
2908 + #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
2909 + #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
2910 +
2911 ++/* Sleep and timeout for checking if the Processor-Intensive
2912 ++ * MDIO operation is finished
2913 ++ */
2914 ++#define AQR107_OP_IN_PROG_SLEEP 1000
2915 ++#define AQR107_OP_IN_PROG_TIMEOUT 100000
2916 ++
2917 + struct aqr107_hw_stat {
2918 + const char *name;
2919 + int reg;
2920 +@@ -598,16 +607,52 @@ static void aqr107_link_change_notify(struct phy_device *phydev)
2921 + phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
2922 + }
2923 +
2924 ++static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
2925 ++{
2926 ++ int val, err;
2927 ++
2928 ++	/* The datasheet says to wait at least 1ms after issuing a
2929 ++ * processor intensive operation before checking.
2930 ++ * We cannot use the 'sleep_before_read' parameter of read_poll_timeout
2931 ++ * because that just determines the maximum time slept, not the minimum.
2932 ++ */
2933 ++ usleep_range(1000, 5000);
2934 ++
2935 ++ err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
2936 ++ VEND1_GLOBAL_GEN_STAT2, val,
2937 ++ !(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG),
2938 ++ AQR107_OP_IN_PROG_SLEEP,
2939 ++ AQR107_OP_IN_PROG_TIMEOUT, false);
2940 ++ if (err) {
2941 ++ phydev_err(phydev, "timeout: processor-intensive MDIO operation\n");
2942 ++ return err;
2943 ++ }
2944 ++
2945 ++ return 0;
2946 ++}
2947 ++
2948 + static int aqr107_suspend(struct phy_device *phydev)
2949 + {
2950 +- return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
2951 +- MDIO_CTRL1_LPOWER);
2952 ++ int err;
2953 ++
2954 ++ err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
2955 ++ MDIO_CTRL1_LPOWER);
2956 ++ if (err)
2957 ++ return err;
2958 ++
2959 ++ return aqr107_wait_processor_intensive_op(phydev);
2960 + }
2961 +
2962 + static int aqr107_resume(struct phy_device *phydev)
2963 + {
2964 +- return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
2965 +- MDIO_CTRL1_LPOWER);
2966 ++ int err;
2967 ++
2968 ++ err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
2969 ++ MDIO_CTRL1_LPOWER);
2970 ++ if (err)
2971 ++ return err;
2972 ++
2973 ++ return aqr107_wait_processor_intensive_op(phydev);
2974 + }
2975 +
2976 + static int aqr107_probe(struct phy_device *phydev)
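The new aqr107_wait_processor_intensive_op() above is an instance of the kernel's generic poll-with-timeout helpers. A minimal sketch of the same shape against a memory-mapped status register, assuming an illustrative busy bit; the MDIO variant used in the patch differs only in how the register is read:

#include <linux/bits.h>
#include <linux/iopoll.h>

#define MY_STAT_BUSY    BIT(15) /* illustrative busy flag, not a real register bit */

static int wait_not_busy(void __iomem *stat_reg)
{
        u32 val;

        /* re-read roughly every 1ms, fail with -ETIMEDOUT after 100ms */
        return readl_poll_timeout(stat_reg, val, !(val & MY_STAT_BUSY),
                                  1000, 100 * 1000);
}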
2977 +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
2978 +index dd7917cab2b12..ab8f5097d3b00 100644
2979 +--- a/drivers/net/team/team.c
2980 ++++ b/drivers/net/team/team.c
2981 +@@ -1270,10 +1270,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
2982 + }
2983 + }
2984 +
2985 +- netif_addr_lock_bh(dev);
2986 +- dev_uc_sync_multiple(port_dev, dev);
2987 +- dev_mc_sync_multiple(port_dev, dev);
2988 +- netif_addr_unlock_bh(dev);
2989 ++ if (dev->flags & IFF_UP) {
2990 ++ netif_addr_lock_bh(dev);
2991 ++ dev_uc_sync_multiple(port_dev, dev);
2992 ++ dev_mc_sync_multiple(port_dev, dev);
2993 ++ netif_addr_unlock_bh(dev);
2994 ++ }
2995 +
2996 + port->index = -1;
2997 + list_add_tail_rcu(&port->list, &team->port_list);
2998 +@@ -1344,8 +1346,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
2999 + netdev_rx_handler_unregister(port_dev);
3000 + team_port_disable_netpoll(port);
3001 + vlan_vids_del_by_dev(port_dev, dev);
3002 +- dev_uc_unsync(port_dev, dev);
3003 +- dev_mc_unsync(port_dev, dev);
3004 ++ if (dev->flags & IFF_UP) {
3005 ++ dev_uc_unsync(port_dev, dev);
3006 ++ dev_mc_unsync(port_dev, dev);
3007 ++ }
3008 + dev_close(port_dev);
3009 + team_port_leave(team, port);
3010 +
3011 +@@ -1695,6 +1699,14 @@ static int team_open(struct net_device *dev)
3012 +
3013 + static int team_close(struct net_device *dev)
3014 + {
3015 ++ struct team *team = netdev_priv(dev);
3016 ++ struct team_port *port;
3017 ++
3018 ++ list_for_each_entry(port, &team->port_list, list) {
3019 ++ dev_uc_unsync(port->dev, dev);
3020 ++ dev_mc_unsync(port->dev, dev);
3021 ++ }
3022 ++
3023 + return 0;
3024 + }
3025 +
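The team changes keep the address-list sync calls balanced: dev_uc_sync_multiple()/dev_mc_sync_multiple() run only while the master is IFF_UP, and the new team_close() unsyncs every port. A sketch of that invariant, with master/port as illustrative names outside the driver:

#include <linux/netdevice.h>

static void port_attach_addrs(struct net_device *master,
                              struct net_device *port)
{
        if (master->flags & IFF_UP) {   /* only sync while the master is up */
                netif_addr_lock_bh(master);
                dev_uc_sync_multiple(port, master);
                dev_mc_sync_multiple(port, master);
                netif_addr_unlock_bh(master);
        }
}

static int master_stop(struct net_device *master, struct net_device *port)
{
        /* undo the sync done while the device was up */
        dev_uc_unsync(port, master);
        dev_mc_unsync(port, master);
        return 0;
}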
3026 +diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
3027 +index d0f3b6d7f4089..5c804bcabfe6b 100644
3028 +--- a/drivers/net/wireguard/netlink.c
3029 ++++ b/drivers/net/wireguard/netlink.c
3030 +@@ -436,14 +436,13 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs)
3031 + if (attrs[WGPEER_A_ENDPOINT]) {
3032 + struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]);
3033 + size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]);
3034 ++ struct endpoint endpoint = { { { 0 } } };
3035 +
3036 +- if ((len == sizeof(struct sockaddr_in) &&
3037 +- addr->sa_family == AF_INET) ||
3038 +- (len == sizeof(struct sockaddr_in6) &&
3039 +- addr->sa_family == AF_INET6)) {
3040 +- struct endpoint endpoint = { { { 0 } } };
3041 +-
3042 +- memcpy(&endpoint.addr, addr, len);
3043 ++ if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) {
3044 ++ endpoint.addr4 = *(struct sockaddr_in *)addr;
3045 ++ wg_socket_set_peer_endpoint(peer, &endpoint);
3046 ++ } else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) {
3047 ++ endpoint.addr6 = *(struct sockaddr_in6 *)addr;
3048 + wg_socket_set_peer_endpoint(peer, &endpoint);
3049 + }
3050 + }
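The wireguard change replaces a memcpy() driven by a caller-supplied length with typed struct assignments, so only a fully validated sockaddr_in or sockaddr_in6 ever reaches the endpoint union. A standalone sketch of the same validation, using an illustrative union type:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

union endpoint_addr {                   /* illustrative stand-in for the driver's union */
        struct sockaddr_in in4;
        struct sockaddr_in6 in6;
};

static int set_endpoint(union endpoint_addr *dst,
                        const struct sockaddr *src, size_t len)
{
        memset(dst, 0, sizeof(*dst));
        if (len == sizeof(struct sockaddr_in) && src->sa_family == AF_INET)
                dst->in4 = *(const struct sockaddr_in *)src;
        else if (len == sizeof(struct sockaddr_in6) && src->sa_family == AF_INET6)
                dst->in6 = *(const struct sockaddr_in6 *)src;
        else
                return -1;      /* reject short or family-mismatched buffers */
        return 0;
}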
3051 +diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c
3052 +index ba87d294604fe..d4bb40a695ab6 100644
3053 +--- a/drivers/net/wireguard/selftest/ratelimiter.c
3054 ++++ b/drivers/net/wireguard/selftest/ratelimiter.c
3055 +@@ -6,29 +6,28 @@
3056 + #ifdef DEBUG
3057 +
3058 + #include <linux/jiffies.h>
3059 +-#include <linux/hrtimer.h>
3060 +
3061 + static const struct {
3062 + bool result;
3063 +- u64 nsec_to_sleep_before;
3064 ++ unsigned int msec_to_sleep_before;
3065 + } expected_results[] __initconst = {
3066 + [0 ... PACKETS_BURSTABLE - 1] = { true, 0 },
3067 + [PACKETS_BURSTABLE] = { false, 0 },
3068 +- [PACKETS_BURSTABLE + 1] = { true, NSEC_PER_SEC / PACKETS_PER_SECOND },
3069 ++ [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND },
3070 + [PACKETS_BURSTABLE + 2] = { false, 0 },
3071 +- [PACKETS_BURSTABLE + 3] = { true, (NSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
3072 ++ [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
3073 + [PACKETS_BURSTABLE + 4] = { true, 0 },
3074 + [PACKETS_BURSTABLE + 5] = { false, 0 }
3075 + };
3076 +
3077 + static __init unsigned int maximum_jiffies_at_index(int index)
3078 + {
3079 +- u64 total_nsecs = 2 * NSEC_PER_SEC / PACKETS_PER_SECOND / 3;
3080 ++ unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3;
3081 + int i;
3082 +
3083 + for (i = 0; i <= index; ++i)
3084 +- total_nsecs += expected_results[i].nsec_to_sleep_before;
3085 +- return nsecs_to_jiffies(total_nsecs);
3086 ++ total_msecs += expected_results[i].msec_to_sleep_before;
3087 ++ return msecs_to_jiffies(total_msecs);
3088 + }
3089 +
3090 + static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
3091 +@@ -43,12 +42,8 @@ static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
3092 + loop_start_time = jiffies;
3093 +
3094 + for (i = 0; i < ARRAY_SIZE(expected_results); ++i) {
3095 +- if (expected_results[i].nsec_to_sleep_before) {
3096 +- ktime_t timeout = ktime_add(ktime_add_ns(ktime_get_coarse_boottime(), TICK_NSEC * 4 / 3),
3097 +- ns_to_ktime(expected_results[i].nsec_to_sleep_before));
3098 +- set_current_state(TASK_UNINTERRUPTIBLE);
3099 +- schedule_hrtimeout_range_clock(&timeout, 0, HRTIMER_MODE_ABS, CLOCK_BOOTTIME);
3100 +- }
3101 ++ if (expected_results[i].msec_to_sleep_before)
3102 ++ msleep(expected_results[i].msec_to_sleep_before);
3103 +
3104 + if (time_is_before_jiffies(loop_start_time +
3105 + maximum_jiffies_at_index(i)))
3106 +@@ -132,7 +127,7 @@ bool __init wg_ratelimiter_selftest(void)
3107 + if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN))
3108 + return true;
3109 +
3110 +- BUILD_BUG_ON(NSEC_PER_SEC % PACKETS_PER_SECOND != 0);
3111 ++ BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0);
3112 +
3113 + if (wg_ratelimiter_init())
3114 + goto out;
3115 +@@ -172,7 +167,7 @@ bool __init wg_ratelimiter_selftest(void)
3116 + ++test;
3117 + #endif
3118 +
3119 +- for (trials = TRIALS_BEFORE_GIVING_UP;;) {
3120 ++ for (trials = TRIALS_BEFORE_GIVING_UP; IS_ENABLED(DEBUG_RATELIMITER_TIMINGS);) {
3121 + int test_count = 0, ret;
3122 +
3123 + ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count);
3124 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
3125 +index 8f4a5d4929e09..9ba7963a89f65 100644
3126 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
3127 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
3128 +@@ -1038,7 +1038,7 @@ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid)
3129 + offset %= 32;
3130 +
3131 + val = mt76_rr(dev, addr);
3132 +- val >>= (tid % 32);
3133 ++ val >>= offset;
3134 +
3135 + if (offset > 20) {
3136 + addr += 4;
3137 +diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
3138 +index dc78a523a69f2..b6b938aa66158 100644
3139 +--- a/drivers/s390/block/dasd_alias.c
3140 ++++ b/drivers/s390/block/dasd_alias.c
3141 +@@ -675,12 +675,12 @@ int dasd_alias_remove_device(struct dasd_device *device)
3142 + struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
3143 + {
3144 + struct dasd_eckd_private *alias_priv, *private = base_device->private;
3145 +- struct alias_pav_group *group = private->pavgroup;
3146 + struct alias_lcu *lcu = private->lcu;
3147 + struct dasd_device *alias_device;
3148 ++ struct alias_pav_group *group;
3149 + unsigned long flags;
3150 +
3151 +- if (!group || !lcu)
3152 ++ if (!lcu)
3153 + return NULL;
3154 + if (lcu->pav == NO_PAV ||
3155 + lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
3156 +@@ -697,6 +697,11 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
3157 + }
3158 +
3159 + spin_lock_irqsave(&lcu->lock, flags);
3160 ++ group = private->pavgroup;
3161 ++ if (!group) {
3162 ++ spin_unlock_irqrestore(&lcu->lock, flags);
3163 ++ return NULL;
3164 ++ }
3165 + alias_device = group->next;
3166 + if (!alias_device) {
3167 + if (list_empty(&group->aliaslist)) {
3168 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
3169 +index fafa9fbf3b107..be024b2b6bd43 100644
3170 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
3171 ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
3172 +@@ -3005,7 +3005,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
3173 +
3174 + if (ioc->is_mcpu_endpoint ||
3175 + sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
3176 +- dma_get_required_mask(&pdev->dev) <= 32)
3177 ++ dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32))
3178 + ioc->dma_mask = 32;
3179 + /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
3180 + else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
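The mpt3sas fix corrects a units mix-up: dma_get_required_mask() returns a mask such as 0xffffffff, not a width in bits, so it must be compared against DMA_BIT_MASK(32) rather than the literal 32 (against 32 the test can never be true in practice). The fixed comparison in isolation:

#include <linux/dma-mapping.h>

static bool fits_in_32bit_dma(struct device *dev)
{
        /* mask-to-mask comparison; comparing against 32 mixes bits with masks */
        return dma_get_required_mask(dev) <= DMA_BIT_MASK(32);
}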
3181 +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
3182 +index b86f6e1f21b5c..4b4ca2a9524d9 100644
3183 +--- a/drivers/scsi/qla2xxx/qla_target.c
3184 ++++ b/drivers/scsi/qla2xxx/qla_target.c
3185 +@@ -2166,8 +2166,10 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
3186 +
3187 + abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
3188 + le32_to_cpu(abts->exchange_addr_to_abort));
3189 +- if (!abort_cmd)
3190 ++ if (!abort_cmd) {
3191 ++ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
3192 + return -EIO;
3193 ++ }
3194 + mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun;
3195 +
3196 + if (abort_cmd->qpair) {
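The qla2xxx fix plugs a leak: mcmd comes from a mempool, and the early -EIO return skipped mempool_free(). The general rule, sketched with an illustrative pool and lookup callback:

#include <linux/errno.h>
#include <linux/mempool.h>

static int start_abort(mempool_t *pool, bool (*find_cmd)(void))
{
        void *mcmd = mempool_alloc(pool, GFP_ATOMIC);

        if (!mcmd)
                return -ENOMEM;
        if (!find_cmd()) {
                mempool_free(mcmd, pool);       /* every error path frees */
                return -EIO;
        }
        /* ... mcmd is consumed by the normal completion path ... */
        return 0;
}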
3197 +diff --git a/drivers/staging/r8188eu/os_dep/usb_intf.c b/drivers/staging/r8188eu/os_dep/usb_intf.c
3198 +index bb85ab77fd261..640f1ca2d9855 100644
3199 +--- a/drivers/staging/r8188eu/os_dep/usb_intf.c
3200 ++++ b/drivers/staging/r8188eu/os_dep/usb_intf.c
3201 +@@ -30,7 +30,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
3202 + /*=== Realtek demoboard ===*/
3203 + {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8179)}, /* 8188EUS */
3204 + {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
3205 +- {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xf179)}, /* 8188FU */
3206 ++ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill USB-N150 Nano */
3207 + /*=== Customer ID ===*/
3208 + /****** 8188EUS ********/
3209 + {USB_DEVICE(0x07B8, 0x8179)}, /* Abocom - Abocom */
3210 +diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
3211 +index 6255f1ef95994..69eead8a6015c 100644
3212 +--- a/drivers/thunderbolt/icm.c
3213 ++++ b/drivers/thunderbolt/icm.c
3214 +@@ -2522,6 +2522,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
3215 + tb->cm_ops = &icm_icl_ops;
3216 + break;
3217 +
3218 ++ case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI:
3219 + case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI:
3220 + icm->is_supported = icm_tgl_is_supported;
3221 + icm->get_mode = icm_ar_get_mode;
3222 +diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
3223 +index 69083aab2736c..5091677b3f4ba 100644
3224 +--- a/drivers/thunderbolt/nhi.h
3225 ++++ b/drivers/thunderbolt/nhi.h
3226 +@@ -55,6 +55,7 @@ extern const struct tb_nhi_ops icl_nhi_ops;
3227 + * need for the PCI quirk anymore as we will use ICM also on Apple
3228 + * hardware.
3229 + */
3230 ++#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI 0x1134
3231 + #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI 0x1137
3232 + #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI 0x157d
3233 + #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE 0x157e
3234 +diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
3235 +index bf11ffafcad53..b6548b910d946 100644
3236 +--- a/drivers/tty/serial/fsl_lpuart.c
3237 ++++ b/drivers/tty/serial/fsl_lpuart.c
3238 +@@ -2726,14 +2726,15 @@ static int lpuart_probe(struct platform_device *pdev)
3239 + lpuart_reg.cons = LPUART_CONSOLE;
3240 + handler = lpuart_int;
3241 + }
3242 +- ret = uart_add_one_port(&lpuart_reg, &sport->port);
3243 +- if (ret)
3244 +- goto failed_attach_port;
3245 +
3246 + ret = lpuart_global_reset(sport);
3247 + if (ret)
3248 + goto failed_reset;
3249 +
3250 ++ ret = uart_add_one_port(&lpuart_reg, &sport->port);
3251 ++ if (ret)
3252 ++ goto failed_attach_port;
3253 ++
3254 + ret = uart_get_rs485_mode(&sport->port);
3255 + if (ret)
3256 + goto failed_get_rs485;
3257 +@@ -2756,9 +2757,9 @@ static int lpuart_probe(struct platform_device *pdev)
3258 +
3259 + failed_irq_request:
3260 + failed_get_rs485:
3261 +-failed_reset:
3262 + uart_remove_one_port(&lpuart_reg, &sport->port);
3263 + failed_attach_port:
3264 ++failed_reset:
3265 + lpuart_disable_clks(sport);
3266 + return ret;
3267 + }
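The lpuart reorder also swaps the corresponding error labels, preserving the rule that a goto unwind chain must undo work in exactly the reverse order it was performed, so moving a step also moves its label. A skeleton of the pattern with placeholder step/undo functions:

static int probe_like(int (*step_a)(void), void (*undo_a)(void),
                      int (*step_b)(void))
{
        int ret;

        ret = step_a();
        if (ret)
                return ret;
        ret = step_b();
        if (ret)
                goto err_undo_a;        /* b failed: unwind a only */
        return 0;

err_undo_a:
        undo_a();
        return ret;
}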
3268 +diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
3269 +index b6223fab0687d..d4dba298de7af 100644
3270 +--- a/drivers/tty/serial/serial-tegra.c
3271 ++++ b/drivers/tty/serial/serial-tegra.c
3272 +@@ -525,7 +525,7 @@ static void tegra_uart_tx_dma_complete(void *args)
3273 + count = tup->tx_bytes_requested - state.residue;
3274 + async_tx_ack(tup->tx_dma_desc);
3275 + spin_lock_irqsave(&tup->uport.lock, flags);
3276 +- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
3277 ++ uart_xmit_advance(&tup->uport, count);
3278 + tup->tx_in_progress = 0;
3279 + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
3280 + uart_write_wakeup(&tup->uport);
3281 +@@ -613,7 +613,6 @@ static unsigned int tegra_uart_tx_empty(struct uart_port *u)
3282 + static void tegra_uart_stop_tx(struct uart_port *u)
3283 + {
3284 + struct tegra_uart_port *tup = to_tegra_uport(u);
3285 +- struct circ_buf *xmit = &tup->uport.state->xmit;
3286 + struct dma_tx_state state;
3287 + unsigned int count;
3288 +
3289 +@@ -624,7 +623,7 @@ static void tegra_uart_stop_tx(struct uart_port *u)
3290 + dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
3291 + count = tup->tx_bytes_requested - state.residue;
3292 + async_tx_ack(tup->tx_dma_desc);
3293 +- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
3294 ++ uart_xmit_advance(&tup->uport, count);
3295 + tup->tx_in_progress = 0;
3296 + }
3297 +
3298 +diff --git a/drivers/tty/serial/tegra-tcu.c b/drivers/tty/serial/tegra-tcu.c
3299 +index 4877c54c613d1..889b701ba7c62 100644
3300 +--- a/drivers/tty/serial/tegra-tcu.c
3301 ++++ b/drivers/tty/serial/tegra-tcu.c
3302 +@@ -101,7 +101,7 @@ static void tegra_tcu_uart_start_tx(struct uart_port *port)
3303 + break;
3304 +
3305 + tegra_tcu_write(tcu, &xmit->buf[xmit->tail], count);
3306 +- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
3307 ++ uart_xmit_advance(port, count);
3308 + }
3309 +
3310 + uart_write_wakeup(port);
3311 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
3312 +index 23896c8e018a3..98bdae4ac314e 100644
3313 +--- a/drivers/usb/core/hub.c
3314 ++++ b/drivers/usb/core/hub.c
3315 +@@ -6044,7 +6044,7 @@ re_enumerate_no_bos:
3316 + *
3317 + * Return: The same as for usb_reset_and_verify_device().
3318 + * However, if a reset is already in progress (for instance, if a
3319 +- * driver doesn't have pre_ or post_reset() callbacks, and while
3320 ++ * driver doesn't have pre_reset() or post_reset() callbacks, and while
3321 + * being unbound or re-bound during the ongoing reset its disconnect()
3322 + * or probe() routine tries to perform a second, nested reset), the
3323 + * routine returns -EINPROGRESS.
3324 +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
3325 +index 9c24cf46b9a08..c32ca691bcc78 100644
3326 +--- a/drivers/usb/dwc3/core.c
3327 ++++ b/drivers/usb/dwc3/core.c
3328 +@@ -114,8 +114,6 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
3329 + dwc->current_dr_role = mode;
3330 + }
3331 +
3332 +-static int dwc3_core_soft_reset(struct dwc3 *dwc);
3333 +-
3334 + static void __dwc3_set_mode(struct work_struct *work)
3335 + {
3336 + struct dwc3 *dwc = work_to_dwc(work);
3337 +@@ -265,7 +263,7 @@ u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
3338 + * dwc3_core_soft_reset - Issues core soft reset and PHY reset
3339 + * @dwc: pointer to our context structure
3340 + */
3341 +-static int dwc3_core_soft_reset(struct dwc3 *dwc)
3342 ++int dwc3_core_soft_reset(struct dwc3 *dwc)
3343 + {
3344 + u32 reg;
3345 + int retries = 1000;
3346 +@@ -1572,12 +1570,6 @@ static int dwc3_probe(struct platform_device *pdev)
3347 +
3348 + dwc3_get_properties(dwc);
3349 +
3350 +- if (!dwc->sysdev_is_parent) {
3351 +- ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
3352 +- if (ret)
3353 +- return ret;
3354 +- }
3355 +-
3356 + dwc->reset = devm_reset_control_array_get_optional_shared(dev);
3357 + if (IS_ERR(dwc->reset))
3358 + return PTR_ERR(dwc->reset);
3359 +@@ -1614,6 +1606,13 @@ static int dwc3_probe(struct platform_device *pdev)
3360 + platform_set_drvdata(pdev, dwc);
3361 + dwc3_cache_hwparams(dwc);
3362 +
3363 ++ if (!dwc->sysdev_is_parent &&
3364 ++ DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) {
3365 ++ ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
3366 ++ if (ret)
3367 ++ goto disable_clks;
3368 ++ }
3369 ++
3370 + spin_lock_init(&dwc->lock);
3371 + mutex_init(&dwc->mutex);
3372 +
3373 +diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
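The dwc3 core change defers the 64-bit DMA mask setup until after dwc3_cache_hwparams(), because the controller's bus address width (GHWPARAMS0.AWIDTH) is only known at that point. The shape of the conditional, sketched with the width passed in as a plain integer:

#include <linux/dma-mapping.h>

static int apply_dma_mask(struct device *dev, unsigned int bus_addr_width)
{
        /* only request a 64-bit mask when the hardware can drive it */
        if (bus_addr_width == 64)
                return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        return 0;       /* keep the platform's 32-bit default */
}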
3374 +index fd5d42ec53501..077d03a33388e 100644
3375 +--- a/drivers/usb/dwc3/core.h
3376 ++++ b/drivers/usb/dwc3/core.h
3377 +@@ -1028,6 +1028,7 @@ struct dwc3_scratchpad_array {
3378 + * @tx_fifo_resize_max_num: max number of fifos allocated during txfifo resize
3379 + * @hsphy_interface: "utmi" or "ulpi"
3380 + * @connected: true when we're connected to a host, false otherwise
3381 ++ * @softconnect: true when gadget connect is called, false when disconnect runs
3382 + * @delayed_status: true when gadget driver asks for delayed status
3383 + * @ep0_bounced: true when we used bounce buffer
3384 + * @ep0_expect_in: true when we expect a DATA IN transfer
3385 +@@ -1247,6 +1248,7 @@ struct dwc3 {
3386 + const char *hsphy_interface;
3387 +
3388 + unsigned connected:1;
3389 ++ unsigned softconnect:1;
3390 + unsigned delayed_status:1;
3391 + unsigned ep0_bounced:1;
3392 + unsigned ep0_expect_in:1;
3393 +@@ -1508,6 +1510,8 @@ bool dwc3_has_imod(struct dwc3 *dwc);
3394 + int dwc3_event_buffers_setup(struct dwc3 *dwc);
3395 + void dwc3_event_buffers_cleanup(struct dwc3 *dwc);
3396 +
3397 ++int dwc3_core_soft_reset(struct dwc3 *dwc);
3398 ++
3399 + #if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
3400 + int dwc3_host_init(struct dwc3 *dwc);
3401 + void dwc3_host_exit(struct dwc3 *dwc);
3402 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
3403 +index 322754a7f91ca..14dcdb923f405 100644
3404 +--- a/drivers/usb/dwc3/gadget.c
3405 ++++ b/drivers/usb/dwc3/gadget.c
3406 +@@ -2435,14 +2435,42 @@ static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
3407 + static void __dwc3_gadget_stop(struct dwc3 *dwc);
3408 + static int __dwc3_gadget_start(struct dwc3 *dwc);
3409 +
3410 ++static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
3411 ++{
3412 ++ unsigned long flags;
3413 ++
3414 ++ spin_lock_irqsave(&dwc->lock, flags);
3415 ++ dwc->connected = false;
3416 ++
3417 ++ /*
3418 ++ * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
3419 ++ * Section 4.1.8 Table 4-7, it states that for a device-initiated
3420 ++ * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
3421 ++ * command for any active transfers" before clearing the RunStop
3422 ++ * bit.
3423 ++ */
3424 ++ dwc3_stop_active_transfers(dwc);
3425 ++ __dwc3_gadget_stop(dwc);
3426 ++ spin_unlock_irqrestore(&dwc->lock, flags);
3427 ++
3428 ++ /*
3429 ++ * Note: if the GEVNTCOUNT indicates events in the event buffer, the
3430 ++ * driver needs to acknowledge them before the controller can halt.
3431 ++	 * Simply let the interrupt handler acknowledge and handle the
3432 ++	 * remaining events generated by the controller while polling for
3433 ++ * DSTS.DEVCTLHLT.
3434 ++ */
3435 ++ return dwc3_gadget_run_stop(dwc, false, false);
3436 ++}
3437 ++
3438 + static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
3439 + {
3440 + struct dwc3 *dwc = gadget_to_dwc(g);
3441 +- unsigned long flags;
3442 + int ret;
3443 +
3444 + is_on = !!is_on;
3445 +
3446 ++ dwc->softconnect = is_on;
3447 + /*
3448 + * Per databook, when we want to stop the gadget, if a control transfer
3449 + * is still in process, complete it and get the core into setup phase.
3450 +@@ -2478,50 +2506,27 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
3451 + return 0;
3452 + }
3453 +
3454 +- /*
3455 +- * Synchronize and disable any further event handling while controller
3456 +- * is being enabled/disabled.
3457 +- */
3458 +- disable_irq(dwc->irq_gadget);
3459 +-
3460 +- spin_lock_irqsave(&dwc->lock, flags);
3461 ++ if (dwc->pullups_connected == is_on) {
3462 ++ pm_runtime_put(dwc->dev);
3463 ++ return 0;
3464 ++ }
3465 +
3466 + if (!is_on) {
3467 +- u32 count;
3468 +-
3469 +- dwc->connected = false;
3470 ++ ret = dwc3_gadget_soft_disconnect(dwc);
3471 ++ } else {
3472 + /*
3473 +- * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
3474 +- * Section 4.1.8 Table 4-7, it states that for a device-initiated
3475 +- * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
3476 +- * command for any active transfers" before clearing the RunStop
3477 +- * bit.
3478 ++ * In the Synopsys DWC_usb31 1.90a programming guide section
3479 ++		 * 4.1.9, it specifies that a reconnect after a
3480 ++ * device-initiated disconnect requires a core soft reset
3481 ++ * (DCTL.CSftRst) before enabling the run/stop bit.
3482 + */
3483 +- dwc3_stop_active_transfers(dwc);
3484 +- __dwc3_gadget_stop(dwc);
3485 ++ dwc3_core_soft_reset(dwc);
3486 +
3487 +- /*
3488 +- * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
3489 +- * Section 1.3.4, it mentions that for the DEVCTRLHLT bit, the
3490 +- * "software needs to acknowledge the events that are generated
3491 +- * (by writing to GEVNTCOUNTn) while it is waiting for this bit
3492 +- * to be set to '1'."
3493 +- */
3494 +- count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
3495 +- count &= DWC3_GEVNTCOUNT_MASK;
3496 +- if (count > 0) {
3497 +- dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
3498 +- dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
3499 +- dwc->ev_buf->length;
3500 +- }
3501 +- } else {
3502 ++ dwc3_event_buffers_setup(dwc);
3503 + __dwc3_gadget_start(dwc);
3504 ++ ret = dwc3_gadget_run_stop(dwc, true, false);
3505 + }
3506 +
3507 +- ret = dwc3_gadget_run_stop(dwc, is_on, false);
3508 +- spin_unlock_irqrestore(&dwc->lock, flags);
3509 +- enable_irq(dwc->irq_gadget);
3510 +-
3511 + pm_runtime_put(dwc->dev);
3512 +
3513 + return ret;
3514 +@@ -4421,7 +4426,7 @@ int dwc3_gadget_resume(struct dwc3 *dwc)
3515 + {
3516 + int ret;
3517 +
3518 +- if (!dwc->gadget_driver)
3519 ++ if (!dwc->gadget_driver || !dwc->softconnect)
3520 + return 0;
3521 +
3522 + ret = __dwc3_gadget_start(dwc);
3523 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3524 +index a5e8374a8d710..697683e3fbffa 100644
3525 +--- a/drivers/usb/serial/option.c
3526 ++++ b/drivers/usb/serial/option.c
3527 +@@ -256,6 +256,7 @@ static void option_instat_callback(struct urb *urb);
3528 + #define QUECTEL_PRODUCT_EM060K 0x030b
3529 + #define QUECTEL_PRODUCT_EM12 0x0512
3530 + #define QUECTEL_PRODUCT_RM500Q 0x0800
3531 ++#define QUECTEL_PRODUCT_RM520N 0x0801
3532 + #define QUECTEL_PRODUCT_EC200S_CN 0x6002
3533 + #define QUECTEL_PRODUCT_EC200T 0x6026
3534 + #define QUECTEL_PRODUCT_RM500K 0x7001
3535 +@@ -1138,6 +1139,8 @@ static const struct usb_device_id option_ids[] = {
3536 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
3537 + .driver_info = NUMEP2 },
3538 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
3539 ++ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0203, 0xff), /* BG95-M3 */
3540 ++ .driver_info = ZLP },
3541 + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
3542 + .driver_info = RSVD(4) },
3543 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
3544 +@@ -1159,6 +1162,9 @@ static const struct usb_device_id option_ids[] = {
3545 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
3546 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
3547 + .driver_info = ZLP },
3548 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
3549 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
3550 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
3551 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
3552 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
3553 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
3554 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
3555 +index c76c360bece59..f4015556cafad 100644
3556 +--- a/fs/btrfs/disk-io.c
3557 ++++ b/fs/btrfs/disk-io.c
3558 +@@ -4297,6 +4297,17 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
3559 +
3560 + set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
3561 +
3562 ++ /*
3563 ++ * If we had UNFINISHED_DROPS we could still be processing them, so
3564 ++ * clear that bit and wake up relocation so it can stop.
3565 ++ * We must do this before stopping the block group reclaim task, because
3566 ++ * at btrfs_relocate_block_group() we wait for this bit, and after the
3567 ++ * wait we stop with -EINTR if btrfs_fs_closing() returns non-zero - we
3568 ++ * have just set BTRFS_FS_CLOSING_START, so btrfs_fs_closing() will
3569 ++ * return 1.
3570 ++ */
3571 ++ btrfs_wake_unfinished_drop(fs_info);
3572 ++
3573 + /*
3574 + * We may have the reclaim task running and relocating a data block group,
3575 + * in which case it may create delayed iputs. So stop it before we park
3576 +@@ -4315,12 +4326,6 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
3577 + */
3578 + kthread_park(fs_info->cleaner_kthread);
3579 +
3580 +- /*
3581 +- * If we had UNFINISHED_DROPS we could still be processing them, so
3582 +- * clear that bit and wake up relocation so it can stop.
3583 +- */
3584 +- btrfs_wake_unfinished_drop(fs_info);
3585 +-
3586 + /* wait for the qgroup rescan worker to stop */
3587 + btrfs_qgroup_wait_for_completion(fs_info, false);
3588 +
3589 +@@ -4343,6 +4348,31 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
3590 + /* clear out the rbtree of defraggable inodes */
3591 + btrfs_cleanup_defrag_inodes(fs_info);
3592 +
3593 ++ /*
3594 ++ * After we parked the cleaner kthread, ordered extents may have
3595 ++ * completed and created new delayed iputs. If one of the async reclaim
3596 ++ * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we
3597 ++ * can hang forever trying to stop it, because if a delayed iput is
3598 ++ * added after it ran btrfs_run_delayed_iputs() and before it called
3599 ++ * btrfs_wait_on_delayed_iputs(), it will hang forever since there is
3600 ++ * no one else to run iputs.
3601 ++ *
3602 ++ * So wait for all ongoing ordered extents to complete and then run
3603 ++ * delayed iputs. This works because once we reach this point no one
3604 ++	 * can neither create new ordered extents nor create delayed iputs
3605 ++ * through some other means.
3606 ++ *
3607 ++ * Also note that btrfs_wait_ordered_roots() is not safe here, because
3608 ++ * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent,
3609 ++ * but the delayed iput for the respective inode is made only when doing
3610 ++ * the final btrfs_put_ordered_extent() (which must happen at
3611 ++ * btrfs_finish_ordered_io() when we are unmounting).
3612 ++ */
3613 ++ btrfs_flush_workqueue(fs_info->endio_write_workers);
3614 ++ /* Ordered extents for free space inodes. */
3615 ++ btrfs_flush_workqueue(fs_info->endio_freespace_worker);
3616 ++ btrfs_run_delayed_iputs(fs_info);
3617 ++
3618 + cancel_work_sync(&fs_info->async_reclaim_work);
3619 + cancel_work_sync(&fs_info->async_data_reclaim_work);
3620 + cancel_work_sync(&fs_info->preempt_reclaim_work);
3621 +diff --git a/fs/dax.c b/fs/dax.c
3622 +index 1d0658cf9dcf4..4ab1c493c73f1 100644
3623 +--- a/fs/dax.c
3624 ++++ b/fs/dax.c
3625 +@@ -1279,6 +1279,9 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
3626 + loff_t done = 0;
3627 + int ret;
3628 +
3629 ++ if (!iomi.len)
3630 ++ return 0;
3631 ++
3632 + if (iov_iter_rw(iter) == WRITE) {
3633 + lockdep_assert_held_write(&iomi.inode->i_rwsem);
3634 + iomi.flags |= IOMAP_WRITE;
3635 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
3636 +index 29be8783b9a68..725607520e84c 100644
3637 +--- a/fs/ext4/extents.c
3638 ++++ b/fs/ext4/extents.c
3639 +@@ -460,6 +460,10 @@ static int __ext4_ext_check(const char *function, unsigned int line,
3640 + error_msg = "invalid eh_entries";
3641 + goto corrupted;
3642 + }
3643 ++ if (unlikely((eh->eh_entries == 0) && (depth > 0))) {
3644 ++ error_msg = "eh_entries is 0 but eh_depth is > 0";
3645 ++ goto corrupted;
3646 ++ }
3647 + if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
3648 + error_msg = "invalid extent entries";
3649 + goto corrupted;
3650 +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
3651 +index f73e5eb43eae1..208b87ce88588 100644
3652 +--- a/fs/ext4/ialloc.c
3653 ++++ b/fs/ext4/ialloc.c
3654 +@@ -510,7 +510,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
3655 + goto fallback;
3656 + }
3657 +
3658 +- max_dirs = ndirs / ngroups + inodes_per_group / 16;
3659 ++ max_dirs = ndirs / ngroups + inodes_per_group*flex_size / 16;
3660 + min_inodes = avefreei - inodes_per_group*flex_size / 4;
3661 + if (min_inodes < 1)
3662 + min_inodes = 1;
3663 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
3664 +index ad78bddfb6377..0c7498a599430 100644
3665 +--- a/fs/ext4/mballoc.c
3666 ++++ b/fs/ext4/mballoc.c
3667 +@@ -1052,8 +1052,10 @@ static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
3668 + {
3669 + *new_cr = ac->ac_criteria;
3670 +
3671 +- if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining)
3672 ++ if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
3673 ++ *group = next_linear_group(ac, *group, ngroups);
3674 + return;
3675 ++ }
3676 +
3677 + if (*new_cr == 0) {
3678 + ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
3679 +@@ -1078,23 +1080,25 @@ mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
3680 + struct ext4_sb_info *sbi = EXT4_SB(sb);
3681 + int i;
3682 +
3683 +- if (test_opt2(sb, MB_OPTIMIZE_SCAN) && grp->bb_largest_free_order >= 0) {
3684 ++ for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
3685 ++ if (grp->bb_counters[i] > 0)
3686 ++ break;
3687 ++ /* No need to move between order lists? */
3688 ++ if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
3689 ++ i == grp->bb_largest_free_order) {
3690 ++ grp->bb_largest_free_order = i;
3691 ++ return;
3692 ++ }
3693 ++
3694 ++ if (grp->bb_largest_free_order >= 0) {
3695 + write_lock(&sbi->s_mb_largest_free_orders_locks[
3696 + grp->bb_largest_free_order]);
3697 + list_del_init(&grp->bb_largest_free_order_node);
3698 + write_unlock(&sbi->s_mb_largest_free_orders_locks[
3699 + grp->bb_largest_free_order]);
3700 + }
3701 +- grp->bb_largest_free_order = -1; /* uninit */
3702 +-
3703 +- for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) {
3704 +- if (grp->bb_counters[i] > 0) {
3705 +- grp->bb_largest_free_order = i;
3706 +- break;
3707 +- }
3708 +- }
3709 +- if (test_opt2(sb, MB_OPTIMIZE_SCAN) &&
3710 +- grp->bb_largest_free_order >= 0 && grp->bb_free) {
3711 ++ grp->bb_largest_free_order = i;
3712 ++ if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
3713 + write_lock(&sbi->s_mb_largest_free_orders_locks[
3714 + grp->bb_largest_free_order]);
3715 + list_add_tail(&grp->bb_largest_free_order_node,
3716 +@@ -2633,7 +2637,7 @@ static noinline_for_stack int
3717 + ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
3718 + {
3719 + ext4_group_t prefetch_grp = 0, ngroups, group, i;
3720 +- int cr = -1;
3721 ++ int cr = -1, new_cr;
3722 + int err = 0, first_err = 0;
3723 + unsigned int nr = 0, prefetch_ios = 0;
3724 + struct ext4_sb_info *sbi;
3725 +@@ -2708,13 +2712,11 @@ repeat:
3726 + ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
3727 + prefetch_grp = group;
3728 +
3729 +- for (i = 0; i < ngroups; group = next_linear_group(ac, group, ngroups),
3730 +- i++) {
3731 +- int ret = 0, new_cr;
3732 ++ for (i = 0, new_cr = cr; i < ngroups; i++,
3733 ++ ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
3734 ++ int ret = 0;
3735 +
3736 + cond_resched();
3737 +-
3738 +- ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups);
3739 + if (new_cr != cr) {
3740 + cr = new_cr;
3741 + goto repeat;
3742 +@@ -5167,6 +5169,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
3743 + struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3744 + int bsbits = ac->ac_sb->s_blocksize_bits;
3745 + loff_t size, isize;
3746 ++ bool inode_pa_eligible, group_pa_eligible;
3747 +
3748 + if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3749 + return;
3750 +@@ -5174,25 +5177,27 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
3751 + if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3752 + return;
3753 +
3754 ++ group_pa_eligible = sbi->s_mb_group_prealloc > 0;
3755 ++ inode_pa_eligible = true;
3756 + size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
3757 + isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
3758 + >> bsbits;
3759 +
3760 ++ /* No point in using inode preallocation for closed files */
3761 + if ((size == isize) && !ext4_fs_is_busy(sbi) &&
3762 +- !inode_is_open_for_write(ac->ac_inode)) {
3763 +- ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
3764 +- return;
3765 +- }
3766 +-
3767 +- if (sbi->s_mb_group_prealloc <= 0) {
3768 +- ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
3769 +- return;
3770 +- }
3771 ++ !inode_is_open_for_write(ac->ac_inode))
3772 ++ inode_pa_eligible = false;
3773 +
3774 +- /* don't use group allocation for large files */
3775 + size = max(size, isize);
3776 +- if (size > sbi->s_mb_stream_request) {
3777 +- ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
3778 ++ /* Don't use group allocation for large files */
3779 ++ if (size > sbi->s_mb_stream_request)
3780 ++ group_pa_eligible = false;
3781 ++
3782 ++ if (!group_pa_eligible) {
3783 ++ if (inode_pa_eligible)
3784 ++ ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
3785 ++ else
3786 ++ ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
3787 + return;
3788 + }
3789 +
3790 +@@ -5539,6 +5544,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
3791 + ext4_fsblk_t block = 0;
3792 + unsigned int inquota = 0;
3793 + unsigned int reserv_clstrs = 0;
3794 ++ int retries = 0;
3795 + u64 seq;
3796 +
3797 + might_sleep();
3798 +@@ -5641,7 +5647,8 @@ repeat:
3799 + ar->len = ac->ac_b_ex.fe_len;
3800 + }
3801 + } else {
3802 +- if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
3803 ++ if (++retries < 3 &&
3804 ++ ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
3805 + goto repeat;
3806 + /*
3807 + * If block allocation fails then the pa allocated above
3808 +diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
3809 +index 11118398f495c..7c9eb679dbdbf 100644
3810 +--- a/fs/nfs/delegation.c
3811 ++++ b/fs/nfs/delegation.c
3812 +@@ -755,11 +755,13 @@ int nfs4_inode_return_delegation(struct inode *inode)
3813 + struct nfs_delegation *delegation;
3814 +
3815 + delegation = nfs_start_delegation_return(nfsi);
3816 +- /* Synchronous recall of any application leases */
3817 +- break_lease(inode, O_WRONLY | O_RDWR);
3818 +- nfs_wb_all(inode);
3819 +- if (delegation != NULL)
3820 ++ if (delegation != NULL) {
3821 ++ /* Synchronous recall of any application leases */
3822 ++ break_lease(inode, O_WRONLY | O_RDWR);
3823 ++ if (S_ISREG(inode->i_mode))
3824 ++ nfs_wb_all(inode);
3825 + return nfs_end_delegation_return(inode, delegation, 1);
3826 ++ }
3827 + return 0;
3828 + }
3829 +
3830 +diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
3831 +index 3932b4ebf9037..f84d3fbb9d3da 100644
3832 +--- a/fs/xfs/libxfs/xfs_inode_buf.c
3833 ++++ b/fs/xfs/libxfs/xfs_inode_buf.c
3834 +@@ -337,19 +337,36 @@ xfs_dinode_verify_fork(
3835 + int whichfork)
3836 + {
3837 + uint32_t di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork);
3838 ++ mode_t mode = be16_to_cpu(dip->di_mode);
3839 ++ uint32_t fork_size = XFS_DFORK_SIZE(dip, mp, whichfork);
3840 ++ uint32_t fork_format = XFS_DFORK_FORMAT(dip, whichfork);
3841 +
3842 +- switch (XFS_DFORK_FORMAT(dip, whichfork)) {
3843 ++ /*
3844 ++ * For fork types that can contain local data, check that the fork
3845 ++ * format matches the size of local data contained within the fork.
3846 ++ *
3847 ++	 * For all types, check that when the size says the fork should be in extent
3848 ++ * or btree format, the inode isn't claiming it is in local format.
3849 ++ */
3850 ++ if (whichfork == XFS_DATA_FORK) {
3851 ++ if (S_ISDIR(mode) || S_ISLNK(mode)) {
3852 ++ if (be64_to_cpu(dip->di_size) <= fork_size &&
3853 ++ fork_format != XFS_DINODE_FMT_LOCAL)
3854 ++ return __this_address;
3855 ++ }
3856 ++
3857 ++ if (be64_to_cpu(dip->di_size) > fork_size &&
3858 ++ fork_format == XFS_DINODE_FMT_LOCAL)
3859 ++ return __this_address;
3860 ++ }
3861 ++
3862 ++ switch (fork_format) {
3863 + case XFS_DINODE_FMT_LOCAL:
3864 + /*
3865 +- * no local regular files yet
3866 ++ * No local regular files yet.
3867 + */
3868 +- if (whichfork == XFS_DATA_FORK) {
3869 +- if (S_ISREG(be16_to_cpu(dip->di_mode)))
3870 +- return __this_address;
3871 +- if (be64_to_cpu(dip->di_size) >
3872 +- XFS_DFORK_SIZE(dip, mp, whichfork))
3873 +- return __this_address;
3874 +- }
3875 ++ if (S_ISREG(mode) && whichfork == XFS_DATA_FORK)
3876 ++ return __this_address;
3877 + if (di_nextents)
3878 + return __this_address;
3879 + break;
3880 +diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
3881 +index fb7a97cdf99f1..b2ea853182141 100644
3882 +--- a/fs/xfs/xfs_inode.c
3883 ++++ b/fs/xfs/xfs_inode.c
3884 +@@ -2599,14 +2599,13 @@ xfs_ifree_cluster(
3885 + }
3886 +
3887 + /*
3888 +- * This is called to return an inode to the inode free list.
3889 +- * The inode should already be truncated to 0 length and have
3890 +- * no pages associated with it. This routine also assumes that
3891 +- * the inode is already a part of the transaction.
3892 ++ * This is called to return an inode to the inode free list. The inode should
3893 ++ * already be truncated to 0 length and have no pages associated with it. This
3894 ++ * routine also assumes that the inode is already a part of the transaction.
3895 + *
3896 +- * The on-disk copy of the inode will have been added to the list
3897 +- * of unlinked inodes in the AGI. We need to remove the inode from
3898 +- * that list atomically with respect to freeing it here.
3899 ++ * The on-disk copy of the inode will have been added to the list of unlinked
3900 ++ * inodes in the AGI. We need to remove the inode from that list atomically with
3901 ++ * respect to freeing it here.
3902 + */
3903 + int
3904 + xfs_ifree(
3905 +@@ -2628,13 +2627,16 @@ xfs_ifree(
3906 + pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
3907 +
3908 + /*
3909 +- * Pull the on-disk inode from the AGI unlinked list.
3910 ++ * Free the inode first so that we guarantee that the AGI lock is going
3911 ++ * to be taken before we remove the inode from the unlinked list. This
3912 ++ * makes the AGI lock -> unlinked list modification order the same as
3913 ++ * used in O_TMPFILE creation.
3914 + */
3915 +- error = xfs_iunlink_remove(tp, pag, ip);
3916 ++ error = xfs_difree(tp, pag, ip->i_ino, &xic);
3917 + if (error)
3918 + goto out;
3919 +
3920 +- error = xfs_difree(tp, pag, ip->i_ino, &xic);
3921 ++ error = xfs_iunlink_remove(tp, pag, ip);
3922 + if (error)
3923 + goto out;
3924 +
3925 +diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
3926 +index f2984af2b85bd..9eac202fbcfdf 100644
3927 +--- a/include/asm-generic/vmlinux.lds.h
3928 ++++ b/include/asm-generic/vmlinux.lds.h
3929 +@@ -549,10 +549,9 @@
3930 + */
3931 + #ifdef CONFIG_CFI_CLANG
3932 + #define TEXT_CFI_JT \
3933 +- . = ALIGN(PMD_SIZE); \
3934 ++ ALIGN_FUNCTION(); \
3935 + __cfi_jt_start = .; \
3936 + *(.text..L.cfi.jumptable .text..L.cfi.jumptable.*) \
3937 +- . = ALIGN(PMD_SIZE); \
3938 + __cfi_jt_end = .;
3939 + #else
3940 + #define TEXT_CFI_JT
3941 +diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
3942 +index 054e654f06def..b3c230dea0713 100644
3943 +--- a/include/linux/cpumask.h
3944 ++++ b/include/linux/cpumask.h
3945 +@@ -1057,9 +1057,10 @@ cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
3946 + * cover a worst-case of every other cpu being on one of two nodes for a
3947 + * very large NR_CPUS.
3948 + *
3949 +- * Use PAGE_SIZE as a minimum for smaller configurations.
3950 ++ * Use PAGE_SIZE as a minimum for smaller configurations while avoiding
3951 ++ * unsigned comparison to -1.
3952 + */
3953 +-#define CPUMAP_FILE_MAX_BYTES ((((NR_CPUS * 9)/32 - 1) > PAGE_SIZE) \
3954 ++#define CPUMAP_FILE_MAX_BYTES (((NR_CPUS * 9)/32 > PAGE_SIZE) \
3955 + ? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
3956 + #define CPULIST_FILE_MAX_BYTES (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)
3957 +
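The CPUMAP_FILE_MAX_BYTES fix sidesteps an unsigned-promotion trap: with a small NR_CPUS, (NR_CPUS * 9)/32 - 1 evaluates to -1, which is promoted to a huge value next to the unsigned PAGE_SIZE, so the old comparison was always true and the macro picked the wrong branch. A plain C demonstration of both forms:

#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096; /* PAGE_SIZE is unsigned long */
        int nr_cpus = 1;

        /* old form: 0 - 1 == -1 is promoted to ULONG_MAX, so this prints 1 */
        printf("%d\n", (nr_cpus * 9) / 32 - 1 > page_size);
        /* fixed form: compare before subtracting; 0 > 4096, prints 0 */
        printf("%d\n", (nr_cpus * 9) / 32 > page_size);
        return 0;
}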
3958 +diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
3959 +index 6d07b5f9e3b81..8757b4a6be319 100644
3960 +--- a/include/linux/serial_core.h
3961 ++++ b/include/linux/serial_core.h
3962 +@@ -300,6 +300,23 @@ struct uart_state {
3963 + /* number of characters left in xmit buffer before we ask for more */
3964 + #define WAKEUP_CHARS 256
3965 +
3966 ++/**
3967 ++ * uart_xmit_advance - Advance xmit buffer and account Tx'ed chars
3968 ++ * @up: uart_port structure describing the port
3969 ++ * @chars: number of characters sent
3970 ++ *
3971 ++ * This function advances the tail of circular xmit buffer by the number of
3972 ++ * @chars transmitted and handles accounting of transmitted bytes (into
3973 ++ * @up's icount.tx).
3974 ++ */
3975 ++static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars)
3976 ++{
3977 ++ struct circ_buf *xmit = &up->state->xmit;
3978 ++
3979 ++ xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1);
3980 ++ up->icount.tx += chars;
3981 ++}
3982 ++
3983 + struct module;
3984 + struct tty_driver;
3985 +
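The new uart_xmit_advance() helper replaces the open-coded tail arithmetic seen in the serial-tegra and tegra-tcu hunks above and, unlike those copies, also accounts the transmitted bytes in icount.tx. A sketch of a driver TX-completion path using it, with tx_done() as an illustrative name:

#include <linux/serial_core.h>

static void tx_done(struct uart_port *port, unsigned int sent)
{
        /* advance the circular buffer tail and account the sent bytes */
        uart_xmit_advance(port, sent);

        if (uart_circ_chars_pending(&port->state->xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);
}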
3986 +diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
3987 +index 184105d682942..f2273bd5a4c58 100644
3988 +--- a/include/net/bond_3ad.h
3989 ++++ b/include/net/bond_3ad.h
3990 +@@ -15,8 +15,6 @@
3991 + #define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW)
3992 + #define AD_TIMER_INTERVAL 100 /*msec*/
3993 +
3994 +-#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02}
3995 +-
3996 + #define AD_LACP_SLOW 0
3997 + #define AD_LACP_FAST 1
3998 +
3999 +diff --git a/include/net/bonding.h b/include/net/bonding.h
4000 +index 15e083e18f75f..8c18c6b01634c 100644
4001 +--- a/include/net/bonding.h
4002 ++++ b/include/net/bonding.h
4003 +@@ -757,6 +757,9 @@ extern struct rtnl_link_ops bond_link_ops;
4004 + /* exported from bond_sysfs_slave.c */
4005 + extern const struct sysfs_ops slave_sysfs_ops;
4006 +
4007 ++/* exported from bond_3ad.c */
4008 ++extern const u8 lacpdu_mcast_addr[];
4009 ++
4010 + static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
4011 + {
4012 + atomic_long_inc(&dev->tx_dropped);
4013 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
4014 +index 3f4d276685768..f5fa7be8d17ea 100644
4015 +--- a/kernel/workqueue.c
4016 ++++ b/kernel/workqueue.c
4017 +@@ -3083,10 +3083,8 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
4018 + if (WARN_ON(!work->func))
4019 + return false;
4020 +
4021 +- if (!from_cancel) {
4022 +- lock_map_acquire(&work->lockdep_map);
4023 +- lock_map_release(&work->lockdep_map);
4024 +- }
4025 ++ lock_map_acquire(&work->lockdep_map);
4026 ++ lock_map_release(&work->lockdep_map);
4027 +
4028 + if (start_flush_work(work, &barr, from_cancel)) {
4029 + wait_for_completion(&barr.done);
4030 +diff --git a/mm/slub.c b/mm/slub.c
4031 +index 519bbbad7b2f6..f95ae136a0698 100644
4032 +--- a/mm/slub.c
4033 ++++ b/mm/slub.c
4034 +@@ -308,6 +308,11 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
4035 + */
4036 + static nodemask_t slab_nodes;
4037 +
4038 ++/*
4039 ++ * Workqueue used for flush_cpu_slab().
4040 ++ */
4041 ++static struct workqueue_struct *flushwq;
4042 ++
4043 + /********************************************************************
4044 + * Core slab cache functions
4045 + *******************************************************************/
4046 +@@ -2688,7 +2693,7 @@ static void flush_all_cpus_locked(struct kmem_cache *s)
4047 + INIT_WORK(&sfw->work, flush_cpu_slab);
4048 + sfw->skip = false;
4049 + sfw->s = s;
4050 +- schedule_work_on(cpu, &sfw->work);
4051 ++ queue_work_on(cpu, flushwq, &sfw->work);
4052 + }
4053 +
4054 + for_each_online_cpu(cpu) {
4055 +@@ -4850,6 +4855,8 @@ void __init kmem_cache_init(void)
4056 +
4057 + void __init kmem_cache_init_late(void)
4058 + {
4059 ++ flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0);
4060 ++ WARN_ON(!flushwq);
4061 + }
4062 +
4063 + struct kmem_cache *
4064 +@@ -4920,6 +4927,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
4065 + /* Honor the call site pointer we received. */
4066 + trace_kmalloc(caller, ret, size, s->size, gfpflags);
4067 +
4068 ++ ret = kasan_kmalloc(s, ret, size, gfpflags);
4069 ++
4070 + return ret;
4071 + }
4072 + EXPORT_SYMBOL(__kmalloc_track_caller);
4073 +@@ -4951,6 +4960,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
4074 + /* Honor the call site pointer we received. */
4075 + trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
4076 +
4077 ++ ret = kasan_kmalloc(s, ret, size, gfpflags);
4078 ++
4079 + return ret;
4080 + }
4081 + EXPORT_SYMBOL(__kmalloc_node_track_caller);
4082 +@@ -5865,7 +5876,8 @@ static char *create_unique_id(struct kmem_cache *s)
4083 + char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
4084 + char *p = name;
4085 +
4086 +- BUG_ON(!name);
4087 ++ if (!name)
4088 ++ return ERR_PTR(-ENOMEM);
4089 +
4090 + *p++ = ':';
4091 + /*
4092 +@@ -5923,6 +5935,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
4093 + * for the symlinks.
4094 + */
4095 + name = create_unique_id(s);
4096 ++ if (IS_ERR(name))
4097 ++ return PTR_ERR(name);
4098 + }
4099 +
4100 + s->kobj.kset = kset;
4101 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
4102 +index 8905fe2fe023d..16774559c52cb 100644
4103 +--- a/net/bridge/netfilter/ebtables.c
4104 ++++ b/net/bridge/netfilter/ebtables.c
4105 +@@ -1040,8 +1040,10 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
4106 + goto free_iterate;
4107 + }
4108 +
4109 +- if (repl->valid_hooks != t->valid_hooks)
4110 ++ if (repl->valid_hooks != t->valid_hooks) {
4111 ++ ret = -EINVAL;
4112 + goto free_unlock;
4113 ++ }
4114 +
4115 + if (repl->num_counters && repl->num_counters != t->private->nentries) {
4116 + ret = -EINVAL;
4117 +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
4118 +index bc50bd331d5b6..1c34e22665781 100644
4119 +--- a/net/core/flow_dissector.c
4120 ++++ b/net/core/flow_dissector.c
4121 +@@ -1519,9 +1519,8 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
4122 +
4123 + switch (keys->control.addr_type) {
4124 + case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
4125 +- addr_diff = (__force u32)keys->addrs.v4addrs.dst -
4126 +- (__force u32)keys->addrs.v4addrs.src;
4127 +- if (addr_diff < 0)
4128 ++ if ((__force u32)keys->addrs.v4addrs.dst <
4129 ++ (__force u32)keys->addrs.v4addrs.src)
4130 + swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
4131 +
4132 + if ((__force u16)keys->ports.dst <
4133 +diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
4134 +index 18b90e334b5bd..159e1e4441a43 100644
4135 +--- a/net/netfilter/nf_conntrack_irc.c
4136 ++++ b/net/netfilter/nf_conntrack_irc.c
4137 +@@ -151,15 +151,37 @@ static int help(struct sk_buff *skb, unsigned int protoff,
4138 + data = ib_ptr;
4139 + data_limit = ib_ptr + skb->len - dataoff;
4140 +
4141 +- /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
4142 +- * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */
4143 +- while (data < data_limit - (19 + MINMATCHLEN)) {
4144 +- if (memcmp(data, "\1DCC ", 5)) {
4145 ++ /* Skip any whitespace */
4146 ++ while (data < data_limit - 10) {
4147 ++ if (*data == ' ' || *data == '\r' || *data == '\n')
4148 ++ data++;
4149 ++ else
4150 ++ break;
4151 ++ }
4152 ++
4153 ++ /* strlen("PRIVMSG x ")=10 */
4154 ++ if (data < data_limit - 10) {
4155 ++ if (strncasecmp("PRIVMSG ", data, 8))
4156 ++ goto out;
4157 ++ data += 8;
4158 ++ }
4159 ++
4160 ++ /* strlen(" :\1DCC SENT t AAAAAAAA P\1\n")=26
4161 ++ * 7+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=26
4162 ++ */
4163 ++ while (data < data_limit - (21 + MINMATCHLEN)) {
4164 ++ /* Find first " :", the start of message */
4165 ++ if (memcmp(data, " :", 2)) {
4166 + data++;
4167 + continue;
4168 + }
4169 ++ data += 2;
4170 ++
4171 ++ /* then check that place only for the DCC command */
4172 ++ if (memcmp(data, "\1DCC ", 5))
4173 ++ goto out;
4174 + data += 5;
4175 +- /* we have at least (19+MINMATCHLEN)-5 bytes valid data left */
4176 ++ /* we have at least (21+MINMATCHLEN)-(2+5) bytes valid data left */
4177 +
4178 + iph = ip_hdr(skb);
4179 + pr_debug("DCC found in master %pI4:%u %pI4:%u\n",
4180 +@@ -175,7 +197,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
4181 + pr_debug("DCC %s detected\n", dccprotos[i]);
4182 +
4183 + /* we have at least
4184 +- * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
4185 ++ * (21+MINMATCHLEN)-7-dccprotos[i].matchlen bytes valid
4186 + * data left (== 14/13 bytes) */
4187 + if (parse_dcc(data, data_limit, &dcc_ip,
4188 + &dcc_port, &addr_beg_p, &addr_end_p)) {
4189 +diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
4190 +index b83dc9bf0a5dd..78fd9122b70c7 100644
4191 +--- a/net/netfilter/nf_conntrack_sip.c
4192 ++++ b/net/netfilter/nf_conntrack_sip.c
4193 +@@ -477,7 +477,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr,
4194 + return ret;
4195 + if (ret == 0)
4196 + break;
4197 +- dataoff += *matchoff;
4198 ++ dataoff = *matchoff;
4199 + }
4200 + *in_header = 0;
4201 + }
4202 +@@ -489,7 +489,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr,
4203 + break;
4204 + if (ret == 0)
4205 + return ret;
4206 +- dataoff += *matchoff;
4207 ++ dataoff = *matchoff;
4208 + }
4209 +
4210 + if (in_header)
4211 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
4212 +index d35d09df83fee..460ad341d160c 100644
4213 +--- a/net/netfilter/nf_tables_api.c
4214 ++++ b/net/netfilter/nf_tables_api.c
4215 +@@ -2103,7 +2103,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
4216 + struct netlink_ext_ack *extack)
4217 + {
4218 + const struct nlattr * const *nla = ctx->nla;
4219 +- struct nft_stats __percpu *stats = NULL;
4220 + struct nft_table *table = ctx->table;
4221 + struct nft_base_chain *basechain;
4222 + struct net *net = ctx->net;
4223 +@@ -2117,6 +2116,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
4224 + return -EOVERFLOW;
4225 +
4226 + if (nla[NFTA_CHAIN_HOOK]) {
4227 ++ struct nft_stats __percpu *stats = NULL;
4228 + struct nft_chain_hook hook;
4229 +
4230 + if (flags & NFT_CHAIN_BINDING)
4231 +@@ -2148,8 +2148,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
4232 + if (err < 0) {
4233 + nft_chain_release_hook(&hook);
4234 + kfree(basechain);
4235 ++ free_percpu(stats);
4236 + return err;
4237 + }
4238 ++ if (stats)
4239 ++ static_branch_inc(&nft_counters_enabled);
4240 + } else {
4241 + if (flags & NFT_CHAIN_BASE)
4242 + return -EINVAL;
4243 +@@ -2224,9 +2227,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
4244 + goto err_unregister_hook;
4245 + }
4246 +
4247 +- if (stats)
4248 +- static_branch_inc(&nft_counters_enabled);
4249 +-
4250 + table->use++;
4251 +
4252 + return 0;
4253 +diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
4254 +index 0fa2e20304272..ee6840bd59337 100644
4255 +--- a/net/netfilter/nfnetlink_osf.c
4256 ++++ b/net/netfilter/nfnetlink_osf.c
4257 +@@ -269,6 +269,7 @@ bool nf_osf_find(const struct sk_buff *skb,
4258 + struct nf_osf_hdr_ctx ctx;
4259 + const struct tcphdr *tcp;
4260 + struct tcphdr _tcph;
4261 ++ bool found = false;
4262 +
4263 + memset(&ctx, 0, sizeof(ctx));
4264 +
4265 +@@ -283,10 +284,11 @@ bool nf_osf_find(const struct sk_buff *skb,
4266 +
4267 + data->genre = f->genre;
4268 + data->version = f->version;
4269 ++ found = true;
4270 + break;
4271 + }
4272 +
4273 +- return true;
4274 ++ return found;
4275 + }
4276 + EXPORT_SYMBOL_GPL(nf_osf_find);
4277 +
4278 +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
4279 +index 4b552c10e7b98..62ce6981942b7 100644
4280 +--- a/net/sched/cls_api.c
4281 ++++ b/net/sched/cls_api.c
4282 +@@ -2117,6 +2117,7 @@ replay:
4283 + }
4284 +
4285 + if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
4286 ++ tfilter_put(tp, fh);
4287 + NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
4288 + err = -EINVAL;
4289 + goto errout;
4290 +diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
4291 +index 474ba4db5de2d..ae7ca68f2cf9b 100644
4292 +--- a/net/sched/sch_taprio.c
4293 ++++ b/net/sched/sch_taprio.c
4294 +@@ -66,6 +66,7 @@ struct taprio_sched {
4295 + u32 flags;
4296 + enum tk_offsets tk_offset;
4297 + int clockid;
4298 ++ bool offloaded;
4299 + atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
4300 + * speeds it's sub-nanoseconds per byte
4301 + */
4302 +@@ -1278,6 +1279,8 @@ static int taprio_enable_offload(struct net_device *dev,
4303 + goto done;
4304 + }
4305 +
4306 ++ q->offloaded = true;
4307 ++
4308 + done:
4309 + taprio_offload_free(offload);
4310 +
4311 +@@ -1292,12 +1295,9 @@ static int taprio_disable_offload(struct net_device *dev,
4312 + struct tc_taprio_qopt_offload *offload;
4313 + int err;
4314 +
4315 +- if (!FULL_OFFLOAD_IS_ENABLED(q->flags))
4316 ++ if (!q->offloaded)
4317 + return 0;
4318 +
4319 +- if (!ops->ndo_setup_tc)
4320 +- return -EOPNOTSUPP;
4321 +-
4322 + offload = taprio_offload_alloc(0);
4323 + if (!offload) {
4324 + NL_SET_ERR_MSG(extack,
4325 +@@ -1313,6 +1313,8 @@ static int taprio_disable_offload(struct net_device *dev,
4326 + goto out;
4327 + }
4328 +
4329 ++ q->offloaded = false;
4330 ++
4331 + out:
4332 + taprio_offload_free(offload);
4333 +
4334 +@@ -1948,12 +1950,14 @@ start_error:
4335 +
4336 + static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
4337 + {
4338 +- struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
4339 ++ struct taprio_sched *q = qdisc_priv(sch);
4340 ++ struct net_device *dev = qdisc_dev(sch);
4341 ++ unsigned int ntx = cl - 1;
4342 +
4343 +- if (!dev_queue)
4344 ++ if (ntx >= dev->num_tx_queues)
4345 + return NULL;
4346 +
4347 +- return dev_queue->qdisc_sleeping;
4348 ++ return q->qdiscs[ntx];
4349 + }
4350 +
4351 + static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
4352 +diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
4353 +index 7401ec67ebcf9..2eafefa15a1ae 100644
4354 +--- a/net/smc/smc_core.c
4355 ++++ b/net/smc/smc_core.c
4356 +@@ -1980,7 +1980,7 @@ static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
4357 + static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
4358 + struct smc_buf_desc *buf_desc, bool is_rmb)
4359 + {
4360 +- int i, rc = 0;
4361 ++ int i, rc = 0, cnt = 0;
4362 +
4363 + /* protect against parallel link reconfiguration */
4364 + mutex_lock(&lgr->llc_conf_mutex);
4365 +@@ -1993,9 +1993,12 @@ static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
4366 + rc = -ENOMEM;
4367 + goto out;
4368 + }
4369 ++ cnt++;
4370 + }
4371 + out:
4372 + mutex_unlock(&lgr->llc_conf_mutex);
4373 ++ if (!rc && !cnt)
4374 ++ rc = -EINVAL;
4375 + return rc;
4376 + }
4377 +
4378 +diff --git a/sound/core/init.c b/sound/core/init.c
4379 +index 362588e3a275b..7b3618997d347 100644
4380 +--- a/sound/core/init.c
4381 ++++ b/sound/core/init.c
4382 +@@ -178,10 +178,8 @@ int snd_card_new(struct device *parent, int idx, const char *xid,
4383 + return -ENOMEM;
4384 +
4385 + err = snd_card_init(card, parent, idx, xid, module, extra_size);
4386 +- if (err < 0) {
4387 +- kfree(card);
4388 +- return err;
4389 +- }
4390 ++ if (err < 0)
4391 ++ return err; /* card is freed by error handler */
4392 +
4393 + *card_ret = card;
4394 + return 0;
4395 +@@ -231,7 +229,7 @@ int snd_devm_card_new(struct device *parent, int idx, const char *xid,
4396 + card->managed = true;
4397 + err = snd_card_init(card, parent, idx, xid, module, extra_size);
4398 + if (err < 0) {
4399 +- devres_free(card);
4400 ++ devres_free(card); /* in managed mode, we need to free manually */
4401 + return err;
4402 + }
4403 +
4404 +@@ -293,6 +291,8 @@ static int snd_card_init(struct snd_card *card, struct device *parent,
4405 + mutex_unlock(&snd_card_mutex);
4406 + dev_err(parent, "cannot find the slot for index %d (range 0-%i), error: %d\n",
4407 + idx, snd_ecards_limit - 1, err);
4408 ++ if (!card->managed)
4409 ++ kfree(card); /* manually free here, as no destructor called */
4410 + return err;
4411 + }
4412 + set_bit(idx, snd_cards_lock); /* lock it */
4413 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4414 +index 9e36f992605ab..cc94da9151c3b 100644
4415 +--- a/sound/pci/hda/hda_intel.c
4416 ++++ b/sound/pci/hda/hda_intel.c
4417 +@@ -2519,6 +2519,8 @@ static const struct pci_device_id azx_ids[] = {
4418 + /* 5 Series/3400 */
4419 + { PCI_DEVICE(0x8086, 0x3b56),
4420 + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
4421 ++ { PCI_DEVICE(0x8086, 0x3b57),
4422 ++ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
4423 + /* Poulsbo */
4424 + { PCI_DEVICE(0x8086, 0x811b),
4425 + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_BASE },
4426 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
4427 +index 24da843f39a11..d19bc2b9f778e 100644
4428 +--- a/sound/pci/hda/patch_hdmi.c
4429 ++++ b/sound/pci/hda/patch_hdmi.c
4430 +@@ -3868,6 +3868,7 @@ static int patch_tegra_hdmi(struct hda_codec *codec)
4431 + if (err)
4432 + return err;
4433 +
4434 ++ codec->depop_delay = 10;
4435 + codec->patch_ops.build_pcms = tegra_hdmi_build_pcms;
4436 + spec = codec->spec;
4437 + spec->chmap.ops.chmap_cea_alloc_validate_get_type =
4438 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4439 +index 45b8ebda284d9..c4b3f2d3c7e34 100644
4440 +--- a/sound/pci/hda/patch_realtek.c
4441 ++++ b/sound/pci/hda/patch_realtek.c
4442 +@@ -6879,6 +6879,8 @@ enum {
4443 + ALC294_FIXUP_ASUS_GU502_HP,
4444 + ALC294_FIXUP_ASUS_GU502_PINS,
4445 + ALC294_FIXUP_ASUS_GU502_VERBS,
4446 ++ ALC294_FIXUP_ASUS_G513_PINS,
4447 ++ ALC285_FIXUP_ASUS_G533Z_PINS,
4448 + ALC285_FIXUP_HP_GPIO_LED,
4449 + ALC285_FIXUP_HP_MUTE_LED,
4450 + ALC236_FIXUP_HP_GPIO_LED,
4451 +@@ -8205,6 +8207,24 @@ static const struct hda_fixup alc269_fixups[] = {
4452 + [ALC294_FIXUP_ASUS_GU502_HP] = {
4453 + .type = HDA_FIXUP_FUNC,
4454 + .v.func = alc294_fixup_gu502_hp,
4455 ++ },
4456 ++ [ALC294_FIXUP_ASUS_G513_PINS] = {
4457 ++ .type = HDA_FIXUP_PINS,
4458 ++ .v.pins = (const struct hda_pintbl[]) {
4459 ++ { 0x19, 0x03a11050 }, /* front HP mic */
4460 ++ { 0x1a, 0x03a11c30 }, /* rear external mic */
4461 ++ { 0x21, 0x03211420 }, /* front HP out */
4462 ++ { }
4463 ++ },
4464 ++ },
4465 ++ [ALC285_FIXUP_ASUS_G533Z_PINS] = {
4466 ++ .type = HDA_FIXUP_PINS,
4467 ++ .v.pins = (const struct hda_pintbl[]) {
4468 ++ { 0x14, 0x90170120 },
4469 ++ { }
4470 ++ },
4471 ++ .chained = true,
4472 ++ .chain_id = ALC294_FIXUP_ASUS_G513_PINS,
4473 + },
4474 + [ALC294_FIXUP_ASUS_COEF_1B] = {
4475 + .type = HDA_FIXUP_VERBS,
4476 +@@ -8816,6 +8836,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4477 + SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
4478 + SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
4479 + SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
4480 ++ SND_PCI_QUIRK(0x1028, 0x087d, "Dell Precision 5530", ALC289_FIXUP_DUAL_SPK),
4481 + SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
4482 + SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
4483 + SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
4484 +@@ -8831,6 +8852,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4485 + SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
4486 + SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
4487 + SND_PCI_QUIRK(0x1028, 0x0b19, "Dell XPS 15 9520", ALC289_FIXUP_DUAL_SPK),
4488 ++ SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK),
4489 + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4490 + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4491 + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
4492 +@@ -8983,10 +9005,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4493 + SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
4494 + SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
4495 + SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
4496 ++ SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
4497 ++ SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
4498 + SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
4499 + SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
4500 + SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
4501 +- SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
4502 + SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
4503 + SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
4504 + SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
4505 +@@ -9001,14 +9024,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4506 + SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
4507 + SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
4508 + SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
4509 ++ SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
4510 + SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
4511 ++ SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
4512 + SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
4513 + SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
4514 + SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
4515 ++ SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
4516 + SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
4517 ++ SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401),
4518 + SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
4519 +- SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
4520 +- SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
4521 + SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
4522 + SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
4523 + SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
4524 +@@ -9205,6 +9230,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4525 + SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
4526 + SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK),
4527 + SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
4528 ++ SND_PCI_QUIRK(0x19e5, 0x320f, "Huawei WRT-WX9 ", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
4529 + SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
4530 + SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
4531 + SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
4532 +diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
4533 +index 11fa7745c017e..743b8287cfcdd 100644
4534 +--- a/sound/usb/endpoint.c
4535 ++++ b/sound/usb/endpoint.c
4536 +@@ -731,8 +731,7 @@ bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip,
4537 + * The endpoint needs to be closed via snd_usb_endpoint_close() later.
4538 + *
4539 + * Note that this function doesn't configure the endpoint. The substream
4540 +- * needs to set it up later via snd_usb_endpoint_set_params() and
4541 +- * snd_usb_endpoint_prepare().
4542 ++ * needs to set it up later via snd_usb_endpoint_configure().
4543 + */
4544 + struct snd_usb_endpoint *
4545 + snd_usb_endpoint_open(struct snd_usb_audio *chip,
4546 +@@ -1255,13 +1254,12 @@ out_of_memory:
4547 + /*
4548 + * snd_usb_endpoint_set_params: configure an snd_usb_endpoint
4549 + *
4550 +- * It's called either from hw_params callback.
4551 + * Determine the number of URBs to be used on this endpoint.
4552 + * An endpoint must be configured before it can be started.
4553 + * An endpoint that is already running can not be reconfigured.
4554 + */
4555 +-int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
4556 +- struct snd_usb_endpoint *ep)
4557 ++static int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
4558 ++ struct snd_usb_endpoint *ep)
4559 + {
4560 + const struct audioformat *fmt = ep->cur_audiofmt;
4561 + int err;
4562 +@@ -1317,18 +1315,18 @@ int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
4563 + }
4564 +
4565 + /*
4566 +- * snd_usb_endpoint_prepare: Prepare the endpoint
4567 ++ * snd_usb_endpoint_configure: Configure the endpoint
4568 + *
4569 + * This function sets up the EP to be fully usable state.
4570 +- * It's called either from prepare callback.
4571 ++ * It's called either from hw_params or prepare callback.
4572 + * The function checks need_setup flag, and performs nothing unless needed,
4573 + * so it's safe to call this multiple times.
4574 + *
4575 + * This returns zero if unchanged, 1 if the configuration has changed,
4576 + * or a negative error code.
4577 + */
4578 +-int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
4579 +- struct snd_usb_endpoint *ep)
4580 ++int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
4581 ++ struct snd_usb_endpoint *ep)
4582 + {
4583 + bool iface_first;
4584 + int err = 0;
4585 +@@ -1350,6 +1348,9 @@ int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
4586 + if (err < 0)
4587 + goto unlock;
4588 + }
4589 ++ err = snd_usb_endpoint_set_params(chip, ep);
4590 ++ if (err < 0)
4591 ++ goto unlock;
4592 + goto done;
4593 + }
4594 +
4595 +@@ -1377,6 +1378,10 @@ int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
4596 + if (err < 0)
4597 + goto unlock;
4598 +
4599 ++ err = snd_usb_endpoint_set_params(chip, ep);
4600 ++ if (err < 0)
4601 ++ goto unlock;
4602 ++
4603 + err = snd_usb_select_mode_quirk(chip, ep->cur_audiofmt);
4604 + if (err < 0)
4605 + goto unlock;
4606 +diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
4607 +index e67ea28faa54f..6a9af04cf175a 100644
4608 +--- a/sound/usb/endpoint.h
4609 ++++ b/sound/usb/endpoint.h
4610 +@@ -17,10 +17,8 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip,
4611 + bool is_sync_ep);
4612 + void snd_usb_endpoint_close(struct snd_usb_audio *chip,
4613 + struct snd_usb_endpoint *ep);
4614 +-int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
4615 +- struct snd_usb_endpoint *ep);
4616 +-int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
4617 +- struct snd_usb_endpoint *ep);
4618 ++int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
4619 ++ struct snd_usb_endpoint *ep);
4620 + int snd_usb_endpoint_get_clock_rate(struct snd_usb_audio *chip, int clock);
4621 +
4622 + bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip,
4623 +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
4624 +index 2d60e6d1f8dff..b6cd43c5ea3e6 100644
4625 +--- a/sound/usb/pcm.c
4626 ++++ b/sound/usb/pcm.c
4627 +@@ -443,17 +443,17 @@ static int configure_endpoints(struct snd_usb_audio *chip,
4628 + if (stop_endpoints(subs, false))
4629 + sync_pending_stops(subs);
4630 + if (subs->sync_endpoint) {
4631 +- err = snd_usb_endpoint_prepare(chip, subs->sync_endpoint);
4632 ++ err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
4633 + if (err < 0)
4634 + return err;
4635 + }
4636 +- err = snd_usb_endpoint_prepare(chip, subs->data_endpoint);
4637 ++ err = snd_usb_endpoint_configure(chip, subs->data_endpoint);
4638 + if (err < 0)
4639 + return err;
4640 + snd_usb_set_format_quirk(subs, subs->cur_audiofmt);
4641 + } else {
4642 + if (subs->sync_endpoint) {
4643 +- err = snd_usb_endpoint_prepare(chip, subs->sync_endpoint);
4644 ++ err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
4645 + if (err < 0)
4646 + return err;
4647 + }
4648 +@@ -551,13 +551,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
4649 + subs->cur_audiofmt = fmt;
4650 + mutex_unlock(&chip->mutex);
4651 +
4652 +- if (subs->sync_endpoint) {
4653 +- ret = snd_usb_endpoint_set_params(chip, subs->sync_endpoint);
4654 +- if (ret < 0)
4655 +- goto unlock;
4656 +- }
4657 +-
4658 +- ret = snd_usb_endpoint_set_params(chip, subs->data_endpoint);
4659 ++ ret = configure_endpoints(chip, subs);
4660 +
4661 + unlock:
4662 + if (ret < 0)
4663 +diff --git a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
4664 +index 292c430768b52..c72f8ad96f751 100644
4665 +--- a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
4666 ++++ b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
4667 +@@ -176,7 +176,7 @@ static int bperf_cgroup_count(void)
4668 + }
4669 +
4670 + // This will be attached to cgroup-switches event for each cpu
4671 +-SEC("perf_events")
4672 ++SEC("perf_event")
4673 + int BPF_PROG(on_cgrp_switch)
4674 + {
4675 + return bperf_cgroup_count();
4676 +diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
4677 +index 953338b9e887e..02cd9f75e3d2f 100644
4678 +--- a/tools/perf/util/genelf.c
4679 ++++ b/tools/perf/util/genelf.c
4680 +@@ -251,6 +251,7 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
4681 + Elf_Data *d;
4682 + Elf_Scn *scn;
4683 + Elf_Ehdr *ehdr;
4684 ++ Elf_Phdr *phdr;
4685 + Elf_Shdr *shdr;
4686 + uint64_t eh_frame_base_offset;
4687 + char *strsym = NULL;
4688 +@@ -285,6 +286,19 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
4689 + ehdr->e_version = EV_CURRENT;
4690 + ehdr->e_shstrndx= unwinding ? 4 : 2; /* shdr index for section name */
4691 +
4692 ++ /*
4693 ++ * setup program header
4694 ++ */
4695 ++ phdr = elf_newphdr(e, 1);
4696 ++ phdr[0].p_type = PT_LOAD;
4697 ++ phdr[0].p_offset = 0;
4698 ++ phdr[0].p_vaddr = 0;
4699 ++ phdr[0].p_paddr = 0;
4700 ++ phdr[0].p_filesz = csize;
4701 ++ phdr[0].p_memsz = csize;
4702 ++ phdr[0].p_flags = PF_X | PF_R;
4703 ++ phdr[0].p_align = 8;
4704 ++
4705 + /*
4706 + * setup text section
4707 + */
4708 +diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
4709 +index d4137559be053..ac638945b4cb0 100644
4710 +--- a/tools/perf/util/genelf.h
4711 ++++ b/tools/perf/util/genelf.h
4712 +@@ -50,8 +50,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
4713 +
4714 + #if GEN_ELF_CLASS == ELFCLASS64
4715 + #define elf_newehdr elf64_newehdr
4716 ++#define elf_newphdr elf64_newphdr
4717 + #define elf_getshdr elf64_getshdr
4718 + #define Elf_Ehdr Elf64_Ehdr
4719 ++#define Elf_Phdr Elf64_Phdr
4720 + #define Elf_Shdr Elf64_Shdr
4721 + #define Elf_Sym Elf64_Sym
4722 + #define ELF_ST_TYPE(a) ELF64_ST_TYPE(a)
4723 +@@ -59,8 +61,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
4724 + #define ELF_ST_VIS(a) ELF64_ST_VISIBILITY(a)
4725 + #else
4726 + #define elf_newehdr elf32_newehdr
4727 ++#define elf_newphdr elf32_newphdr
4728 + #define elf_getshdr elf32_getshdr
4729 + #define Elf_Ehdr Elf32_Ehdr
4730 ++#define Elf_Phdr Elf32_Phdr
4731 + #define Elf_Shdr Elf32_Shdr
4732 + #define Elf_Sym Elf32_Sym
4733 + #define ELF_ST_TYPE(a) ELF32_ST_TYPE(a)
4734 +diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
4735 +index cb7b244937826..6c183df191aaa 100644
4736 +--- a/tools/perf/util/symbol-elf.c
4737 ++++ b/tools/perf/util/symbol-elf.c
4738 +@@ -2091,8 +2091,8 @@ static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
4739 + * unusual. One significant peculiarity is that the mapping (start -> pgoff)
4740 + * is not the same for the kernel map and the modules map. That happens because
4741 + * the data is copied adjacently whereas the original kcore has gaps. Finally,
4742 +- * kallsyms and modules files are compared with their copies to check that
4743 +- * modules have not been loaded or unloaded while the copies were taking place.
4744 ++ * kallsyms file is compared with its copy to check that modules have not been
4745 ++ * loaded or unloaded while the copies were taking place.
4746 + *
4747 + * Return: %0 on success, %-1 on failure.
4748 + */
4749 +@@ -2155,9 +2155,6 @@ int kcore_copy(const char *from_dir, const char *to_dir)
4750 + goto out_extract_close;
4751 + }
4752 +
4753 +- if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
4754 +- goto out_extract_close;
4755 +-
4756 + if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
4757 + goto out_extract_close;
4758 +
4759 +diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
4760 +index a7e981b2d7dec..c69ad7a1a6a78 100644
4761 +--- a/tools/perf/util/synthetic-events.c
4762 ++++ b/tools/perf/util/synthetic-events.c
4763 +@@ -367,13 +367,24 @@ static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
4764 + bool is_kernel)
4765 + {
4766 + struct build_id bid;
4767 ++ struct nsinfo *nsi;
4768 ++ struct nscookie nc;
4769 + int rc;
4770 +
4771 +- if (is_kernel)
4772 ++ if (is_kernel) {
4773 + rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
4774 +- else
4775 +- rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
4776 ++ goto out;
4777 ++ }
4778 ++
4779 ++ nsi = nsinfo__new(event->pid);
4780 ++ nsinfo__mountns_enter(nsi, &nc);
4781 +
4782 ++ rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
4783 ++
4784 ++ nsinfo__mountns_exit(&nc);
4785 ++ nsinfo__put(nsi);
4786 ++
4787 ++out:
4788 + if (rc == 0) {
4789 + memcpy(event->build_id, bid.data, sizeof(bid.data));
4790 + event->build_id_size = (u8) bid.size;
4791 +diff --git a/tools/testing/selftests/net/forwarding/sch_red.sh b/tools/testing/selftests/net/forwarding/sch_red.sh
4792 +index e714bae473fb4..81f31179ac887 100755
4793 +--- a/tools/testing/selftests/net/forwarding/sch_red.sh
4794 ++++ b/tools/testing/selftests/net/forwarding/sch_red.sh
4795 +@@ -1,3 +1,4 @@
4796 ++#!/bin/bash
4797 + # SPDX-License-Identifier: GPL-2.0
4798 +
4799 + # This test sends one stream of traffic from H1 through a TBF shaper, to a RED