Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Wed, 28 Sep 2022 09:30:33
Message-Id: 1664357412.909e1c9f3afbe76ba852d8efe3eec49b98577a7b.mpagano@gentoo
1 commit: 909e1c9f3afbe76ba852d8efe3eec49b98577a7b
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Sep 28 09:30:12 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Sep 28 09:30:12 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=909e1c9f
7
8 Linux patch 5.10.146
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1145_linux-5.10.146.patch | 4791 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 4795 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 0670d018..ef3cbd20 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -623,6 +623,10 @@ Patch: 1144_linux-5.10.145.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.10.145
23
24 +Patch: 1145_linux-5.10.146.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.10.146
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1145_linux-5.10.146.patch b/1145_linux-5.10.146.patch
33 new file mode 100644
34 index 00000000..51366bac
35 --- /dev/null
36 +++ b/1145_linux-5.10.146.patch
37 @@ -0,0 +1,4791 @@
38 +diff --git a/Makefile b/Makefile
39 +index 76c85e40beea3..26a871eebe924 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 10
46 +-SUBLEVEL = 145
47 ++SUBLEVEL = 146
48 + EXTRAVERSION =
49 + NAME = Dare mighty things
50 +
51 +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
52 +index 1116a8d092c01..af65ab83e63d4 100644
53 +--- a/arch/arm64/Kconfig
54 ++++ b/arch/arm64/Kconfig
55 +@@ -1654,7 +1654,10 @@ config ARM64_BTI_KERNEL
56 + depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI
57 + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697
58 + depends on !CC_IS_GCC || GCC_VERSION >= 100100
59 +- depends on !(CC_IS_CLANG && GCOV_KERNEL)
60 ++ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106671
61 ++ depends on !CC_IS_GCC
62 ++ # https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9
63 ++ depends on !CC_IS_CLANG || CLANG_VERSION >= 120000
64 + depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
65 + help
66 + Build the kernel with Branch Target Identification annotations
67 +diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
68 +index e6c1c94c8d69c..07737b65d7a3d 100644
69 +--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
70 ++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
71 +@@ -87,3 +87,8 @@
72 + };
73 + };
74 + };
75 ++
76 ++&wlan_host_wake_l {
77 ++ /* Kevin has an external pull up, but Bob does not. */
78 ++ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
79 ++};
80 +diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
81 +index 1384dabbdf406..739937f70f8d0 100644
82 +--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
83 ++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
84 +@@ -237,6 +237,14 @@
85 + &edp {
86 + status = "okay";
87 +
88 ++ /*
89 ++ * eDP PHY/clk don't sync reliably at anything other than 24 MHz. Only
90 ++ * set this here, because rk3399-gru.dtsi ensures we can generate this
91 ++ * off GPLL=600MHz, whereas some other RK3399 boards may not.
92 ++ */
93 ++ assigned-clocks = <&cru PCLK_EDP>;
94 ++ assigned-clock-rates = <24000000>;
95 ++
96 + ports {
97 + edp_out: port@1 {
98 + reg = <1>;
99 +@@ -395,6 +403,7 @@ ap_i2c_tp: &i2c5 {
100 + };
101 +
102 + wlan_host_wake_l: wlan-host-wake-l {
103 ++ /* Kevin has an external pull up, but Bob does not */
104 + rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
105 + };
106 + };
107 +diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
108 +index 544110aaffc56..95bc7a5f61dd5 100644
109 +--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
110 ++++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
111 +@@ -102,7 +102,6 @@
112 + vcc5v0_host: vcc5v0-host-regulator {
113 + compatible = "regulator-fixed";
114 + gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>;
115 +- enable-active-low;
116 + pinctrl-names = "default";
117 + pinctrl-0 = <&vcc5v0_host_en>;
118 + regulator-name = "vcc5v0_host";
119 +diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
120 +index 7a623684d9b5e..2d5a0bcb0cec1 100644
121 +--- a/arch/mips/lantiq/clk.c
122 ++++ b/arch/mips/lantiq/clk.c
123 +@@ -50,6 +50,7 @@ struct clk *clk_get_io(void)
124 + {
125 + return &cpu_clk_generic[2];
126 + }
127 ++EXPORT_SYMBOL_GPL(clk_get_io);
128 +
129 + struct clk *clk_get_ppe(void)
130 + {
131 +diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
132 +index 794c96c2a4cdd..311dc1580bbde 100644
133 +--- a/arch/mips/loongson32/common/platform.c
134 ++++ b/arch/mips/loongson32/common/platform.c
135 +@@ -98,7 +98,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
136 + if (plat_dat->bus_id) {
137 + __raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 |
138 + GMAC1_USE_UART0, LS1X_MUX_CTRL0);
139 +- switch (plat_dat->interface) {
140 ++ switch (plat_dat->phy_interface) {
141 + case PHY_INTERFACE_MODE_RGMII:
142 + val &= ~(GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
143 + break;
144 +@@ -107,12 +107,12 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
145 + break;
146 + default:
147 + pr_err("unsupported mii mode %d\n",
148 +- plat_dat->interface);
149 ++ plat_dat->phy_interface);
150 + return -ENOTSUPP;
151 + }
152 + val &= ~GMAC1_SHUT;
153 + } else {
154 +- switch (plat_dat->interface) {
155 ++ switch (plat_dat->phy_interface) {
156 + case PHY_INTERFACE_MODE_RGMII:
157 + val &= ~(GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
158 + break;
159 +@@ -121,7 +121,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
160 + break;
161 + default:
162 + pr_err("unsupported mii mode %d\n",
163 +- plat_dat->interface);
164 ++ plat_dat->phy_interface);
165 + return -ENOTSUPP;
166 + }
167 + val &= ~GMAC0_SHUT;
168 +@@ -131,7 +131,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
169 + plat_dat = dev_get_platdata(&pdev->dev);
170 +
171 + val &= ~PHY_INTF_SELI;
172 +- if (plat_dat->interface == PHY_INTERFACE_MODE_RMII)
173 ++ if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII)
174 + val |= 0x4 << PHY_INTF_SELI_SHIFT;
175 + __raw_writel(val, LS1X_MUX_CTRL1);
176 +
177 +@@ -146,9 +146,9 @@ static struct plat_stmmacenet_data ls1x_eth0_pdata = {
178 + .bus_id = 0,
179 + .phy_addr = -1,
180 + #if defined(CONFIG_LOONGSON1_LS1B)
181 +- .interface = PHY_INTERFACE_MODE_MII,
182 ++ .phy_interface = PHY_INTERFACE_MODE_MII,
183 + #elif defined(CONFIG_LOONGSON1_LS1C)
184 +- .interface = PHY_INTERFACE_MODE_RMII,
185 ++ .phy_interface = PHY_INTERFACE_MODE_RMII,
186 + #endif
187 + .mdio_bus_data = &ls1x_mdio_bus_data,
188 + .dma_cfg = &ls1x_eth_dma_cfg,
189 +@@ -186,7 +186,7 @@ struct platform_device ls1x_eth0_pdev = {
190 + static struct plat_stmmacenet_data ls1x_eth1_pdata = {
191 + .bus_id = 1,
192 + .phy_addr = -1,
193 +- .interface = PHY_INTERFACE_MODE_MII,
194 ++ .phy_interface = PHY_INTERFACE_MODE_MII,
195 + .mdio_bus_data = &ls1x_mdio_bus_data,
196 + .dma_cfg = &ls1x_eth_dma_cfg,
197 + .has_gmac = 1,
198 +diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
199 +index bc6841867b512..529c123cf0a47 100644
200 +--- a/arch/riscv/kernel/signal.c
201 ++++ b/arch/riscv/kernel/signal.c
202 +@@ -121,6 +121,8 @@ SYSCALL_DEFINE0(rt_sigreturn)
203 + if (restore_altstack(&frame->uc.uc_stack))
204 + goto badframe;
205 +
206 ++ regs->cause = -1UL;
207 ++
208 + return regs->a0;
209 +
210 + badframe:
211 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
212 +index 38c63a78aba6f..660012ab7bfa5 100644
213 +--- a/arch/x86/include/asm/kvm_host.h
214 ++++ b/arch/x86/include/asm/kvm_host.h
215 +@@ -1275,6 +1275,7 @@ struct kvm_x86_ops {
216 + int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
217 + int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
218 + int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
219 ++ void (*guest_memory_reclaimed)(struct kvm *kvm);
220 +
221 + int (*get_msr_feature)(struct kvm_msr_entry *entry);
222 +
223 +diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
224 +index 7397cc449e2fc..c2b34998c27df 100644
225 +--- a/arch/x86/kvm/svm/sev.c
226 ++++ b/arch/x86/kvm/svm/sev.c
227 +@@ -1177,6 +1177,14 @@ void sev_hardware_teardown(void)
228 + sev_flush_asids();
229 + }
230 +
231 ++void sev_guest_memory_reclaimed(struct kvm *kvm)
232 ++{
233 ++ if (!sev_guest(kvm))
234 ++ return;
235 ++
236 ++ wbinvd_on_all_cpus();
237 ++}
238 ++
239 + void pre_sev_run(struct vcpu_svm *svm, int cpu)
240 + {
241 + struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
242 +diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
243 +index 442705517caf4..a0512a91760d2 100644
244 +--- a/arch/x86/kvm/svm/svm.c
245 ++++ b/arch/x86/kvm/svm/svm.c
246 +@@ -4325,6 +4325,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
247 + .mem_enc_op = svm_mem_enc_op,
248 + .mem_enc_reg_region = svm_register_enc_region,
249 + .mem_enc_unreg_region = svm_unregister_enc_region,
250 ++ .guest_memory_reclaimed = sev_guest_memory_reclaimed,
251 +
252 + .can_emulate_instruction = svm_can_emulate_instruction,
253 +
254 +diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
255 +index 10aba1dd264ed..f62d13fc6e01f 100644
256 +--- a/arch/x86/kvm/svm/svm.h
257 ++++ b/arch/x86/kvm/svm/svm.h
258 +@@ -491,6 +491,8 @@ int svm_register_enc_region(struct kvm *kvm,
259 + struct kvm_enc_region *range);
260 + int svm_unregister_enc_region(struct kvm *kvm,
261 + struct kvm_enc_region *range);
262 ++void sev_guest_memory_reclaimed(struct kvm *kvm);
263 ++
264 + void pre_sev_run(struct vcpu_svm *svm, int cpu);
265 + int __init sev_hardware_setup(void);
266 + void sev_hardware_teardown(void);
267 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
268 +index c5a08ec348e6f..f3473418dcd5d 100644
269 +--- a/arch/x86/kvm/x86.c
270 ++++ b/arch/x86/kvm/x86.c
271 +@@ -8875,6 +8875,12 @@ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
272 + kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
273 + }
274 +
275 ++void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
276 ++{
277 ++ if (kvm_x86_ops.guest_memory_reclaimed)
278 ++ kvm_x86_ops.guest_memory_reclaimed(kvm);
279 ++}
280 ++
281 + void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
282 + {
283 + if (!lapic_in_kernel(vcpu))
284 +diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c
285 +index cb6401c9e9a4f..acf31cc1dbcca 100644
286 +--- a/drivers/dax/hmem/device.c
287 ++++ b/drivers/dax/hmem/device.c
288 +@@ -15,6 +15,7 @@ void hmem_register_device(int target_nid, struct resource *r)
289 + .start = r->start,
290 + .end = r->end,
291 + .flags = IORESOURCE_MEM,
292 ++ .desc = IORES_DESC_SOFT_RESERVED,
293 + };
294 + struct platform_device *pdev;
295 + struct memregion_info info;
296 +diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
297 +index 8563a392f30bf..dadab2feca080 100644
298 +--- a/drivers/dma/ti/k3-udma-private.c
299 ++++ b/drivers/dma/ti/k3-udma-private.c
300 +@@ -31,14 +31,14 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
301 + }
302 +
303 + pdev = of_find_device_by_node(udma_node);
304 ++ if (np != udma_node)
305 ++ of_node_put(udma_node);
306 ++
307 + if (!pdev) {
308 + pr_debug("UDMA device not found\n");
309 + return ERR_PTR(-EPROBE_DEFER);
310 + }
311 +
312 +- if (np != udma_node)
313 +- of_node_put(udma_node);
314 +-
315 + ud = platform_get_drvdata(pdev);
316 + if (!ud) {
317 + pr_debug("UDMA has not been probed\n");
318 +diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
319 +index 5efc524b14bef..a2be3a71bcf8e 100644
320 +--- a/drivers/firmware/efi/libstub/secureboot.c
321 ++++ b/drivers/firmware/efi/libstub/secureboot.c
322 +@@ -19,7 +19,7 @@ static const efi_char16_t efi_SetupMode_name[] = L"SetupMode";
323 +
324 + /* SHIM variables */
325 + static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
326 +-static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
327 ++static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT";
328 +
329 + /*
330 + * Determine whether we're in secure boot mode.
331 +@@ -53,8 +53,8 @@ enum efi_secureboot_mode efi_get_secureboot(void)
332 +
333 + /*
334 + * See if a user has put the shim into insecure mode. If so, and if the
335 +- * variable doesn't have the runtime attribute set, we might as well
336 +- * honor that.
337 ++ * variable doesn't have the non-volatile attribute set, we might as
338 ++ * well honor that.
339 + */
340 + size = sizeof(moksbstate);
341 + status = get_efi_var(shim_MokSBState_name, &shim_guid,
342 +@@ -63,7 +63,7 @@ enum efi_secureboot_mode efi_get_secureboot(void)
343 + /* If it fails, we don't care why. Default to secure */
344 + if (status != EFI_SUCCESS)
345 + goto secure_boot_enabled;
346 +- if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1)
347 ++ if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1)
348 + return efi_secureboot_mode_disabled;
349 +
350 + secure_boot_enabled:
351 +diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
352 +index 3672539cb96eb..5d0f1b1966fc6 100644
353 +--- a/drivers/firmware/efi/libstub/x86-stub.c
354 ++++ b/drivers/firmware/efi/libstub/x86-stub.c
355 +@@ -414,6 +414,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
356 + hdr->ramdisk_image = 0;
357 + hdr->ramdisk_size = 0;
358 +
359 ++ /*
360 ++ * Disregard any setup data that was provided by the bootloader:
361 ++ * setup_data could be pointing anywhere, and we have no way of
362 ++ * authenticating or validating the payload.
363 ++ */
364 ++ hdr->setup_data = 0;
365 ++
366 + efi_stub_entry(handle, sys_table_arg, boot_params);
367 + /* not reached */
368 +
369 +diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
370 +index 780cba4e30d0e..876027fdefc95 100644
371 +--- a/drivers/gpio/gpio-mockup.c
372 ++++ b/drivers/gpio/gpio-mockup.c
373 +@@ -604,9 +604,9 @@ static int __init gpio_mockup_init(void)
374 +
375 + static void __exit gpio_mockup_exit(void)
376 + {
377 ++ gpio_mockup_unregister_pdevs();
378 + debugfs_remove_recursive(gpio_mockup_dbg_dir);
379 + platform_driver_unregister(&gpio_mockup_driver);
380 +- gpio_mockup_unregister_pdevs();
381 + }
382 +
383 + module_init(gpio_mockup_init);
384 +diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
385 +index 2613881a66e66..381cfa26a4a1a 100644
386 +--- a/drivers/gpio/gpiolib-cdev.c
387 ++++ b/drivers/gpio/gpiolib-cdev.c
388 +@@ -1769,7 +1769,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
389 + ret = -ENODEV;
390 + goto out_free_le;
391 + }
392 +- le->irq = irq;
393 +
394 + if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
395 + irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
396 +@@ -1783,7 +1782,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
397 + init_waitqueue_head(&le->wait);
398 +
399 + /* Request a thread to read the events */
400 +- ret = request_threaded_irq(le->irq,
401 ++ ret = request_threaded_irq(irq,
402 + lineevent_irq_handler,
403 + lineevent_irq_thread,
404 + irqflags,
405 +@@ -1792,6 +1791,8 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
406 + if (ret)
407 + goto out_free_le;
408 +
409 ++ le->irq = irq;
410 ++
411 + fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
412 + if (fd < 0) {
413 + ret = fd;
414 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
415 +index f262c4e7a48a2..881045e600af2 100644
416 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
417 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
418 +@@ -2047,6 +2047,11 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
419 + amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
420 + return r;
421 + }
422 ++
423 ++ /*get pf2vf msg info at it's earliest time*/
424 ++ if (amdgpu_sriov_vf(adev))
425 ++ amdgpu_virt_init_data_exchange(adev);
426 ++
427 + }
428 + }
429 +
430 +@@ -2174,8 +2179,20 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
431 + }
432 + adev->ip_blocks[i].status.sw = true;
433 +
434 +- /* need to do gmc hw init early so we can allocate gpu mem */
435 +- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
436 ++ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
437 ++ /* need to do common hw init early so everything is set up for gmc */
438 ++ r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
439 ++ if (r) {
440 ++ DRM_ERROR("hw_init %d failed %d\n", i, r);
441 ++ goto init_failed;
442 ++ }
443 ++ adev->ip_blocks[i].status.hw = true;
444 ++ } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
445 ++ /* need to do gmc hw init early so we can allocate gpu mem */
446 ++ /* Try to reserve bad pages early */
447 ++ if (amdgpu_sriov_vf(adev))
448 ++ amdgpu_virt_exchange_data(adev);
449 ++
450 + r = amdgpu_device_vram_scratch_init(adev);
451 + if (r) {
452 + DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
453 +@@ -2753,8 +2770,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
454 + int i, r;
455 +
456 + static enum amd_ip_block_type ip_order[] = {
457 +- AMD_IP_BLOCK_TYPE_GMC,
458 + AMD_IP_BLOCK_TYPE_COMMON,
459 ++ AMD_IP_BLOCK_TYPE_GMC,
460 + AMD_IP_BLOCK_TYPE_PSP,
461 + AMD_IP_BLOCK_TYPE_IH,
462 + };
463 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
464 +index 7cc7af2a6822e..947f50e402ba0 100644
465 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
466 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
467 +@@ -35,6 +35,7 @@
468 + #include <linux/pci.h>
469 + #include <linux/pm_runtime.h>
470 + #include <drm/drm_crtc_helper.h>
471 ++#include <drm/drm_damage_helper.h>
472 + #include <drm/drm_edid.h>
473 + #include <drm/drm_gem_framebuffer_helper.h>
474 + #include <drm/drm_fb_helper.h>
475 +@@ -498,6 +499,7 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
476 + static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
477 + .destroy = drm_gem_fb_destroy,
478 + .create_handle = drm_gem_fb_create_handle,
479 ++ .dirty = drm_atomic_helper_dirtyfb,
480 + };
481 +
482 + uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
483 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
484 +index eb22a190c2423..3638f0e12a2b8 100644
485 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
486 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
487 +@@ -1979,15 +1979,12 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
488 + return 0;
489 + }
490 +
491 +-static int amdgpu_ras_check_asic_type(struct amdgpu_device *adev)
492 ++static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
493 + {
494 +- if (adev->asic_type != CHIP_VEGA10 &&
495 +- adev->asic_type != CHIP_VEGA20 &&
496 +- adev->asic_type != CHIP_ARCTURUS &&
497 +- adev->asic_type != CHIP_SIENNA_CICHLID)
498 +- return 1;
499 +- else
500 +- return 0;
501 ++ return adev->asic_type == CHIP_VEGA10 ||
502 ++ adev->asic_type == CHIP_VEGA20 ||
503 ++ adev->asic_type == CHIP_ARCTURUS ||
504 ++ adev->asic_type == CHIP_SIENNA_CICHLID;
505 + }
506 +
507 + /*
508 +@@ -2006,7 +2003,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
509 + *supported = 0;
510 +
511 + if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
512 +- amdgpu_ras_check_asic_type(adev))
513 ++ !amdgpu_ras_asic_supported(adev))
514 + return;
515 +
516 + if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
517 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
518 +index e7678ba8fdcf8..16bfb36c27e41 100644
519 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
520 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
521 +@@ -580,16 +580,34 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
522 +
523 + void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
524 + {
525 +- uint64_t bp_block_offset = 0;
526 +- uint32_t bp_block_size = 0;
527 +- struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
528 +-
529 + adev->virt.fw_reserve.p_pf2vf = NULL;
530 + adev->virt.fw_reserve.p_vf2pf = NULL;
531 + adev->virt.vf2pf_update_interval_ms = 0;
532 +
533 + if (adev->mman.fw_vram_usage_va != NULL) {
534 +- adev->virt.vf2pf_update_interval_ms = 2000;
535 ++ /* go through this logic in ip_init and reset to init workqueue*/
536 ++ amdgpu_virt_exchange_data(adev);
537 ++
538 ++ INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
539 ++ schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
540 ++ } else if (adev->bios != NULL) {
541 ++ /* got through this logic in early init stage to get necessary flags, e.g. rlcg_acc related*/
542 ++ adev->virt.fw_reserve.p_pf2vf =
543 ++ (struct amd_sriov_msg_pf2vf_info_header *)
544 ++ (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
545 ++
546 ++ amdgpu_virt_read_pf2vf_data(adev);
547 ++ }
548 ++}
549 ++
550 ++
551 ++void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
552 ++{
553 ++ uint64_t bp_block_offset = 0;
554 ++ uint32_t bp_block_size = 0;
555 ++ struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
556 ++
557 ++ if (adev->mman.fw_vram_usage_va != NULL) {
558 +
559 + adev->virt.fw_reserve.p_pf2vf =
560 + (struct amd_sriov_msg_pf2vf_info_header *)
561 +@@ -616,13 +634,9 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
562 + amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
563 + }
564 + }
565 +-
566 +- if (adev->virt.vf2pf_update_interval_ms != 0) {
567 +- INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
568 +- schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
569 +- }
570 + }
571 +
572 ++
573 + void amdgpu_detect_virtualization(struct amdgpu_device *adev)
574 + {
575 + uint32_t reg;
576 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
577 +index 8dd624c20f895..77b9d37bfa1b2 100644
578 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
579 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
580 +@@ -271,6 +271,7 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
581 + void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
582 + void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
583 + void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
584 ++void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
585 + void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
586 + void amdgpu_detect_virtualization(struct amdgpu_device *adev);
587 +
588 +diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
589 +index 1f2e2460e121e..a1a8e026b9fa6 100644
590 +--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
591 ++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
592 +@@ -1475,6 +1475,11 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
593 + WREG32_SDMA(i, mmSDMA0_CNTL, temp);
594 +
595 + if (!amdgpu_sriov_vf(adev)) {
596 ++ ring = &adev->sdma.instance[i].ring;
597 ++ adev->nbio.funcs->sdma_doorbell_range(adev, i,
598 ++ ring->use_doorbell, ring->doorbell_index,
599 ++ adev->doorbell_index.sdma_doorbell_range);
600 ++
601 + /* unhalt engine */
602 + temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
603 + temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
604 +diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
605 +index 7212b9900e0ab..abd649285a22d 100644
606 +--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
607 ++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
608 +@@ -1332,25 +1332,6 @@ static int soc15_common_sw_fini(void *handle)
609 + return 0;
610 + }
611 +
612 +-static void soc15_doorbell_range_init(struct amdgpu_device *adev)
613 +-{
614 +- int i;
615 +- struct amdgpu_ring *ring;
616 +-
617 +- /* sdma/ih doorbell range are programed by hypervisor */
618 +- if (!amdgpu_sriov_vf(adev)) {
619 +- for (i = 0; i < adev->sdma.num_instances; i++) {
620 +- ring = &adev->sdma.instance[i].ring;
621 +- adev->nbio.funcs->sdma_doorbell_range(adev, i,
622 +- ring->use_doorbell, ring->doorbell_index,
623 +- adev->doorbell_index.sdma_doorbell_range);
624 +- }
625 +-
626 +- adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
627 +- adev->irq.ih.doorbell_index);
628 +- }
629 +-}
630 +-
631 + static int soc15_common_hw_init(void *handle)
632 + {
633 + struct amdgpu_device *adev = (struct amdgpu_device *)handle;
634 +@@ -1370,12 +1351,6 @@ static int soc15_common_hw_init(void *handle)
635 +
636 + /* enable the doorbell aperture */
637 + soc15_enable_doorbell_aperture(adev, true);
638 +- /* HW doorbell routing policy: doorbell writing not
639 +- * in SDMA/IH/MM/ACV range will be routed to CP. So
640 +- * we need to init SDMA/IH/MM/ACV doorbell range prior
641 +- * to CP ip block init and ring test.
642 +- */
643 +- soc15_doorbell_range_init(adev);
644 +
645 + return 0;
646 + }
647 +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
648 +index 2663f1b318420..e427f4ffa0807 100644
649 +--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
650 ++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
651 +@@ -6653,8 +6653,7 @@ static double CalculateUrgentLatency(
652 + return ret;
653 + }
654 +
655 +-
656 +-static void UseMinimumDCFCLK(
657 ++static noinline_for_stack void UseMinimumDCFCLK(
658 + struct display_mode_lib *mode_lib,
659 + int MaxInterDCNTileRepeaters,
660 + int MaxPrefetchMode,
661 +diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
662 +index 09bc2c249e1af..3c4390d71a827 100644
663 +--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
664 ++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
665 +@@ -1524,6 +1524,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
666 + struct fixed31_32 lut2;
667 + struct fixed31_32 delta_lut;
668 + struct fixed31_32 delta_index;
669 ++ const struct fixed31_32 one = dc_fixpt_from_int(1);
670 +
671 + i = 0;
672 + /* fixed_pt library has problems handling too small values */
673 +@@ -1552,6 +1553,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
674 + } else
675 + hw_x = coordinates_x[i].x;
676 +
677 ++ if (dc_fixpt_le(one, hw_x))
678 ++ hw_x = one;
679 ++
680 + norm_x = dc_fixpt_mul(norm_factor, hw_x);
681 + index = dc_fixpt_floor(norm_x);
682 + if (index < 0 || index > 255)
683 +diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
684 +index 3df6d6e850f52..70148ae16f146 100644
685 +--- a/drivers/gpu/drm/gma500/gma_display.c
686 ++++ b/drivers/gpu/drm/gma500/gma_display.c
687 +@@ -529,15 +529,18 @@ int gma_crtc_page_flip(struct drm_crtc *crtc,
688 + WARN_ON(drm_crtc_vblank_get(crtc) != 0);
689 +
690 + gma_crtc->page_flip_event = event;
691 ++ spin_unlock_irqrestore(&dev->event_lock, flags);
692 +
693 + /* Call this locked if we want an event at vblank interrupt. */
694 + ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
695 + if (ret) {
696 +- gma_crtc->page_flip_event = NULL;
697 +- drm_crtc_vblank_put(crtc);
698 ++ spin_lock_irqsave(&dev->event_lock, flags);
699 ++ if (gma_crtc->page_flip_event) {
700 ++ gma_crtc->page_flip_event = NULL;
701 ++ drm_crtc_vblank_put(crtc);
702 ++ }
703 ++ spin_unlock_irqrestore(&dev->event_lock, flags);
704 + }
705 +-
706 +- spin_unlock_irqrestore(&dev->event_lock, flags);
707 + } else {
708 + ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
709 + }
710 +diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
711 +index 43943e9802036..4e41c144a2902 100644
712 +--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
713 ++++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
714 +@@ -1,7 +1,8 @@
715 + # SPDX-License-Identifier: GPL-2.0-only
716 + config DRM_HISI_HIBMC
717 + tristate "DRM Support for Hisilicon Hibmc"
718 +- depends on DRM && PCI && ARM64
719 ++ depends on DRM && PCI && (ARM64 || COMPILE_TEST)
720 ++ depends on MMU
721 + select DRM_KMS_HELPER
722 + select DRM_VRAM_HELPER
723 + select DRM_TTM
724 +diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
725 +index 7d37d2a01e3cf..146c4d04f572d 100644
726 +--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
727 ++++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
728 +@@ -668,6 +668,16 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
729 + if (--dsi->refcount != 0)
730 + return;
731 +
732 ++ /*
733 ++ * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
734 ++ * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
735 ++ * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
736 ++ * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
737 ++ * after dsi is fully set.
738 ++ */
739 ++ mtk_dsi_stop(dsi);
740 ++
741 ++ mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
742 + mtk_dsi_reset_engine(dsi);
743 + mtk_dsi_lane0_ulp_mode_enter(dsi);
744 + mtk_dsi_clk_ulp_mode_enter(dsi);
745 +@@ -718,17 +728,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
746 + if (!dsi->enabled)
747 + return;
748 +
749 +- /*
750 +- * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
751 +- * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
752 +- * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
753 +- * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
754 +- * after dsi is fully set.
755 +- */
756 +- mtk_dsi_stop(dsi);
757 +-
758 +- mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
759 +-
760 + dsi->enabled = false;
761 + }
762 +
763 +@@ -791,10 +790,13 @@ static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
764 +
765 + static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
766 + .attach = mtk_dsi_bridge_attach,
767 ++ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
768 + .atomic_disable = mtk_dsi_bridge_atomic_disable,
769 ++ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
770 + .atomic_enable = mtk_dsi_bridge_atomic_enable,
771 + .atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable,
772 + .atomic_post_disable = mtk_dsi_bridge_atomic_post_disable,
773 ++ .atomic_reset = drm_atomic_helper_bridge_reset,
774 + .mode_set = mtk_dsi_bridge_mode_set,
775 + };
776 +
777 +diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
778 +index bf2c845ef3a20..b7b37082a9d72 100644
779 +--- a/drivers/gpu/drm/panel/panel-simple.c
780 ++++ b/drivers/gpu/drm/panel/panel-simple.c
781 +@@ -2201,7 +2201,7 @@ static const struct panel_desc innolux_g121i1_l01 = {
782 + .enable = 200,
783 + .disable = 20,
784 + },
785 +- .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
786 ++ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
787 + .connector_type = DRM_MODE_CONNECTOR_LVDS,
788 + };
789 +
790 +diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
791 +index dec54c70e0082..857c47c69ef15 100644
792 +--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
793 ++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
794 +@@ -276,8 +276,9 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
795 + return ret;
796 + }
797 +
798 +-static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
799 +- struct drm_display_mode *mode)
800 ++static enum drm_mode_status
801 ++cdn_dp_connector_mode_valid(struct drm_connector *connector,
802 ++ struct drm_display_mode *mode)
803 + {
804 + struct cdn_dp_device *dp = connector_to_dp(connector);
805 + struct drm_display_info *display_info = &dp->connector.display_info;
806 +diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
807 +index 5d820037e2918..514279dac7cb5 100644
808 +--- a/drivers/hv/vmbus_drv.c
809 ++++ b/drivers/hv/vmbus_drv.c
810 +@@ -2251,7 +2251,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
811 + bool fb_overlap_ok)
812 + {
813 + struct resource *iter, *shadow;
814 +- resource_size_t range_min, range_max, start;
815 ++ resource_size_t range_min, range_max, start, end;
816 + const char *dev_n = dev_name(&device_obj->device);
817 + int retval;
818 +
819 +@@ -2286,6 +2286,14 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
820 + range_max = iter->end;
821 + start = (range_min + align - 1) & ~(align - 1);
822 + for (; start + size - 1 <= range_max; start += align) {
823 ++ end = start + size - 1;
824 ++
825 ++ /* Skip the whole fb_mmio region if not fb_overlap_ok */
826 ++ if (!fb_overlap_ok && fb_mmio &&
827 ++ (((start >= fb_mmio->start) && (start <= fb_mmio->end)) ||
828 ++ ((end >= fb_mmio->start) && (end <= fb_mmio->end))))
829 ++ continue;
830 ++
831 + shadow = __request_region(iter, start, size, NULL,
832 + IORESOURCE_BUSY);
833 + if (!shadow)
834 +diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
835 +index d3719df1c40dc..be4ad516293b0 100644
836 +--- a/drivers/i2c/busses/i2c-imx.c
837 ++++ b/drivers/i2c/busses/i2c-imx.c
838 +@@ -1289,7 +1289,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
839 + if (i2c_imx->dma)
840 + i2c_imx_dma_free(i2c_imx);
841 +
842 +- if (ret == 0) {
843 ++ if (ret >= 0) {
844 + /* setup chip registers to defaults */
845 + imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
846 + imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
847 +diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
848 +index ab261d762dea3..bea82a787b4f3 100644
849 +--- a/drivers/i2c/busses/i2c-mlxbf.c
850 ++++ b/drivers/i2c/busses/i2c-mlxbf.c
851 +@@ -6,6 +6,7 @@
852 + */
853 +
854 + #include <linux/acpi.h>
855 ++#include <linux/bitfield.h>
856 + #include <linux/delay.h>
857 + #include <linux/err.h>
858 + #include <linux/interrupt.h>
859 +@@ -63,13 +64,14 @@
860 + */
861 + #define MLXBF_I2C_TYU_PLL_OUT_FREQ (400 * 1000 * 1000)
862 + /* Reference clock for Bluefield - 156 MHz. */
863 +-#define MLXBF_I2C_PLL_IN_FREQ (156 * 1000 * 1000)
864 ++#define MLXBF_I2C_PLL_IN_FREQ 156250000ULL
865 +
866 + /* Constant used to determine the PLL frequency. */
867 +-#define MLNXBF_I2C_COREPLL_CONST 16384
868 ++#define MLNXBF_I2C_COREPLL_CONST 16384ULL
869 ++
870 ++#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000ULL
871 +
872 + /* PLL registers. */
873 +-#define MLXBF_I2C_CORE_PLL_REG0 0x0
874 + #define MLXBF_I2C_CORE_PLL_REG1 0x4
875 + #define MLXBF_I2C_CORE_PLL_REG2 0x8
876 +
877 +@@ -187,22 +189,15 @@ enum {
878 + #define MLXBF_I2C_COREPLL_FREQ MLXBF_I2C_TYU_PLL_OUT_FREQ
879 +
880 + /* Core PLL TYU configuration. */
881 +-#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(12, 0)
882 +-#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(3, 0)
883 +-#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(5, 0)
884 +-
885 +-#define MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT 3
886 +-#define MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT 16
887 +-#define MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT 20
888 ++#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(15, 3)
889 ++#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(19, 16)
890 ++#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(25, 20)
891 +
892 + /* Core PLL YU configuration. */
893 + #define MLXBF_I2C_COREPLL_CORE_F_YU_MASK GENMASK(25, 0)
894 + #define MLXBF_I2C_COREPLL_CORE_OD_YU_MASK GENMASK(3, 0)
895 +-#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(5, 0)
896 ++#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(31, 26)
897 +
898 +-#define MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT 0
899 +-#define MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT 1
900 +-#define MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT 26
901 +
902 + /* Core PLL frequency. */
903 + static u64 mlxbf_i2c_corepll_frequency;
904 +@@ -485,8 +480,6 @@ static struct mutex mlxbf_i2c_bus_lock;
905 + #define MLXBF_I2C_MASK_8 GENMASK(7, 0)
906 + #define MLXBF_I2C_MASK_16 GENMASK(15, 0)
907 +
908 +-#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000
909 +-
910 + /*
911 + * Function to poll a set of bits at a specific address; it checks whether
912 + * the bits are equal to zero when eq_zero is set to 'true', and not equal
913 +@@ -675,7 +668,7 @@ static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave,
914 + /* Clear status bits. */
915 + writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_STATUS);
916 + /* Set the cause data. */
917 +- writel(~0x0, priv->smbus->io + MLXBF_I2C_CAUSE_OR_CLEAR);
918 ++ writel(~0x0, priv->mst_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR);
919 + /* Zero PEC byte. */
920 + writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_PEC);
921 + /* Zero byte count. */
922 +@@ -744,6 +737,9 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
923 + if (flags & MLXBF_I2C_F_WRITE) {
924 + write_en = 1;
925 + write_len += operation->length;
926 ++ if (data_idx + operation->length >
927 ++ MLXBF_I2C_MASTER_DATA_DESC_SIZE)
928 ++ return -ENOBUFS;
929 + memcpy(data_desc + data_idx,
930 + operation->buffer, operation->length);
931 + data_idx += operation->length;
932 +@@ -1413,24 +1409,19 @@ static int mlxbf_i2c_init_master(struct platform_device *pdev,
933 + return 0;
934 + }
935 +
936 +-static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
937 ++static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
938 + {
939 +- u64 core_frequency, pad_frequency;
940 ++ u64 core_frequency;
941 + u8 core_od, core_r;
942 + u32 corepll_val;
943 + u16 core_f;
944 +
945 +- pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
946 +-
947 + corepll_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
948 +
949 + /* Get Core PLL configuration bits. */
950 +- core_f = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT) &
951 +- MLXBF_I2C_COREPLL_CORE_F_TYU_MASK;
952 +- core_od = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT) &
953 +- MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK;
954 +- core_r = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT) &
955 +- MLXBF_I2C_COREPLL_CORE_R_TYU_MASK;
956 ++ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_TYU_MASK, corepll_val);
957 ++ core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK, corepll_val);
958 ++ core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_TYU_MASK, corepll_val);
959 +
960 + /*
961 + * Compute PLL output frequency as follow:
962 +@@ -1442,31 +1433,26 @@ static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
963 + * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
964 + * and PadFrequency, respectively.
965 + */
966 +- core_frequency = pad_frequency * (++core_f);
967 ++ core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f);
968 + core_frequency /= (++core_r) * (++core_od);
969 +
970 + return core_frequency;
971 + }
972 +
973 +-static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
974 ++static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
975 + {
976 + u32 corepll_reg1_val, corepll_reg2_val;
977 +- u64 corepll_frequency, pad_frequency;
978 ++ u64 corepll_frequency;
979 + u8 core_od, core_r;
980 + u32 core_f;
981 +
982 +- pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
983 +-
984 + corepll_reg1_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
985 + corepll_reg2_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG2);
986 +
987 + /* Get Core PLL configuration bits */
988 +- core_f = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT) &
989 +- MLXBF_I2C_COREPLL_CORE_F_YU_MASK;
990 +- core_r = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT) &
991 +- MLXBF_I2C_COREPLL_CORE_R_YU_MASK;
992 +- core_od = rol32(corepll_reg2_val, MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT) &
993 +- MLXBF_I2C_COREPLL_CORE_OD_YU_MASK;
994 ++ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_YU_MASK, corepll_reg1_val);
995 ++ core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_YU_MASK, corepll_reg1_val);
996 ++ core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_YU_MASK, corepll_reg2_val);
997 +
998 + /*
999 + * Compute PLL output frequency as follow:
1000 +@@ -1478,7 +1464,7 @@ static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
1001 + * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
1002 + * and PadFrequency, respectively.
1003 + */
1004 +- corepll_frequency = (pad_frequency * core_f) / MLNXBF_I2C_COREPLL_CONST;
1005 ++ corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST;
1006 + corepll_frequency /= (++core_r) * (++core_od);
1007 +
1008 + return corepll_frequency;
1009 +@@ -2186,14 +2172,14 @@ static struct mlxbf_i2c_chip_info mlxbf_i2c_chip[] = {
1010 + [1] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_1],
1011 + [2] = &mlxbf_i2c_gpio_res[MLXBF_I2C_CHIP_TYPE_1]
1012 + },
1013 +- .calculate_freq = mlxbf_calculate_freq_from_tyu
1014 ++ .calculate_freq = mlxbf_i2c_calculate_freq_from_tyu
1015 + },
1016 + [MLXBF_I2C_CHIP_TYPE_2] = {
1017 + .type = MLXBF_I2C_CHIP_TYPE_2,
1018 + .shared_res = {
1019 + [0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_2]
1020 + },
1021 +- .calculate_freq = mlxbf_calculate_freq_from_yu
1022 ++ .calculate_freq = mlxbf_i2c_calculate_freq_from_yu
1023 + }
1024 + };
1025 +
1026 +diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
1027 +index f6fae64861ce8..27cc5f03611cb 100644
1028 +--- a/drivers/interconnect/qcom/icc-rpmh.c
1029 ++++ b/drivers/interconnect/qcom/icc-rpmh.c
1030 +@@ -20,13 +20,18 @@ void qcom_icc_pre_aggregate(struct icc_node *node)
1031 + {
1032 + size_t i;
1033 + struct qcom_icc_node *qn;
1034 ++ struct qcom_icc_provider *qp;
1035 +
1036 + qn = node->data;
1037 ++ qp = to_qcom_provider(node->provider);
1038 +
1039 + for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
1040 + qn->sum_avg[i] = 0;
1041 + qn->max_peak[i] = 0;
1042 + }
1043 ++
1044 ++ for (i = 0; i < qn->num_bcms; i++)
1045 ++ qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
1046 + }
1047 + EXPORT_SYMBOL_GPL(qcom_icc_pre_aggregate);
1048 +
1049 +@@ -44,10 +49,8 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
1050 + {
1051 + size_t i;
1052 + struct qcom_icc_node *qn;
1053 +- struct qcom_icc_provider *qp;
1054 +
1055 + qn = node->data;
1056 +- qp = to_qcom_provider(node->provider);
1057 +
1058 + if (!tag)
1059 + tag = QCOM_ICC_TAG_ALWAYS;
1060 +@@ -67,9 +70,6 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
1061 + *agg_avg += avg_bw;
1062 + *agg_peak = max_t(u32, *agg_peak, peak_bw);
1063 +
1064 +- for (i = 0; i < qn->num_bcms; i++)
1065 +- qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
1066 +-
1067 + return 0;
1068 + }
1069 + EXPORT_SYMBOL_GPL(qcom_icc_aggregate);
1070 +diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c
1071 +index c76b2c7f9b106..b936196c229c8 100644
1072 +--- a/drivers/interconnect/qcom/sm8150.c
1073 ++++ b/drivers/interconnect/qcom/sm8150.c
1074 +@@ -627,7 +627,6 @@ static struct platform_driver qnoc_driver = {
1075 + .driver = {
1076 + .name = "qnoc-sm8150",
1077 + .of_match_table = qnoc_of_match,
1078 +- .sync_state = icc_sync_state,
1079 + },
1080 + };
1081 + module_platform_driver(qnoc_driver);
1082 +diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
1083 +index cc558fec74e38..40820043c8d36 100644
1084 +--- a/drivers/interconnect/qcom/sm8250.c
1085 ++++ b/drivers/interconnect/qcom/sm8250.c
1086 +@@ -643,7 +643,6 @@ static struct platform_driver qnoc_driver = {
1087 + .driver = {
1088 + .name = "qnoc-sm8250",
1089 + .of_match_table = qnoc_of_match,
1090 +- .sync_state = icc_sync_state,
1091 + },
1092 + };
1093 + module_platform_driver(qnoc_driver);
1094 +diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
1095 +index 93c60712a948e..c48cf737b521d 100644
1096 +--- a/drivers/iommu/intel/iommu.c
1097 ++++ b/drivers/iommu/intel/iommu.c
1098 +@@ -569,7 +569,7 @@ static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
1099 + {
1100 + unsigned long fl_sagaw, sl_sagaw;
1101 +
1102 +- fl_sagaw = BIT(2) | (cap_fl1gp_support(iommu->cap) ? BIT(3) : 0);
1103 ++ fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0);
1104 + sl_sagaw = cap_sagaw(iommu->cap);
1105 +
1106 + /* Second level only. */
1107 +diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
1108 +index a2563c2540808..2299d5cca8ffb 100644
1109 +--- a/drivers/media/usb/b2c2/flexcop-usb.c
1110 ++++ b/drivers/media/usb/b2c2/flexcop-usb.c
1111 +@@ -512,7 +512,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
1112 +
1113 + if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1)
1114 + return -ENODEV;
1115 +- if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[1].desc))
1116 ++ if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[0].desc))
1117 + return -ENODEV;
1118 +
1119 + switch (fc_usb->udev->speed) {
1120 +diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
1121 +index 0b09cdaaeb6c1..899768ed1688d 100644
1122 +--- a/drivers/mmc/core/sd.c
1123 ++++ b/drivers/mmc/core/sd.c
1124 +@@ -932,15 +932,16 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
1125 +
1126 + /* Erase init depends on CSD and SSR */
1127 + mmc_init_erase(card);
1128 +-
1129 +- /*
1130 +- * Fetch switch information from card.
1131 +- */
1132 +- err = mmc_read_switch(card);
1133 +- if (err)
1134 +- return err;
1135 + }
1136 +
1137 ++ /*
1138 ++ * Fetch switch information from card. Note, sd3_bus_mode can change if
1139 ++ * voltage switch outcome changes, so do this always.
1140 ++ */
1141 ++ err = mmc_read_switch(card);
1142 ++ if (err)
1143 ++ return err;
1144 ++
1145 + /*
1146 + * For SPI, enable CRC as appropriate.
1147 + * This CRC enable is located AFTER the reading of the
1148 +@@ -1089,26 +1090,15 @@ retry:
1149 + if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) &&
1150 + mmc_sd_card_using_v18(card) &&
1151 + host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
1152 +- /*
1153 +- * Re-read switch information in case it has changed since
1154 +- * oldcard was initialized.
1155 +- */
1156 +- if (oldcard) {
1157 +- err = mmc_read_switch(card);
1158 +- if (err)
1159 +- goto free_card;
1160 +- }
1161 +- if (mmc_sd_card_using_v18(card)) {
1162 +- if (mmc_host_set_uhs_voltage(host) ||
1163 +- mmc_sd_init_uhs_card(card)) {
1164 +- v18_fixup_failed = true;
1165 +- mmc_power_cycle(host, ocr);
1166 +- if (!oldcard)
1167 +- mmc_remove_card(card);
1168 +- goto retry;
1169 +- }
1170 +- goto cont;
1171 ++ if (mmc_host_set_uhs_voltage(host) ||
1172 ++ mmc_sd_init_uhs_card(card)) {
1173 ++ v18_fixup_failed = true;
1174 ++ mmc_power_cycle(host, ocr);
1175 ++ if (!oldcard)
1176 ++ mmc_remove_card(card);
1177 ++ goto retry;
1178 + }
1179 ++ goto cont;
1180 + }
1181 +
1182 + /* Initialization sequence for UHS-I cards */
1183 +diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
1184 +index b0f8d551b61db..acb6ff0be5fff 100644
1185 +--- a/drivers/net/bonding/bond_3ad.c
1186 ++++ b/drivers/net/bonding/bond_3ad.c
1187 +@@ -85,8 +85,9 @@ static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
1188 + static u16 ad_ticks_per_sec;
1189 + static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
1190 +
1191 +-static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
1192 +- MULTICAST_LACPDU_ADDR;
1193 ++const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = {
1194 ++ 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02
1195 ++};
1196 +
1197 + /* ================= main 802.3ad protocol functions ================== */
1198 + static int ad_lacpdu_send(struct port *port);
1199 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1200 +index 9c4b45341fd28..f38a6ce5749bb 100644
1201 +--- a/drivers/net/bonding/bond_main.c
1202 ++++ b/drivers/net/bonding/bond_main.c
1203 +@@ -827,12 +827,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
1204 + dev_uc_unsync(slave_dev, bond_dev);
1205 + dev_mc_unsync(slave_dev, bond_dev);
1206 +
1207 +- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1208 +- /* del lacpdu mc addr from mc list */
1209 +- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
1210 +-
1211 +- dev_mc_del(slave_dev, lacpdu_multicast);
1212 +- }
1213 ++ if (BOND_MODE(bond) == BOND_MODE_8023AD)
1214 ++ dev_mc_del(slave_dev, lacpdu_mcast_addr);
1215 + }
1216 +
1217 + /*--------------------------- Active slave change ---------------------------*/
1218 +@@ -852,7 +848,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
1219 + if (bond->dev->flags & IFF_ALLMULTI)
1220 + dev_set_allmulti(old_active->dev, -1);
1221 +
1222 +- bond_hw_addr_flush(bond->dev, old_active->dev);
1223 ++ if (bond->dev->flags & IFF_UP)
1224 ++ bond_hw_addr_flush(bond->dev, old_active->dev);
1225 + }
1226 +
1227 + if (new_active) {
1228 +@@ -863,10 +860,12 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
1229 + if (bond->dev->flags & IFF_ALLMULTI)
1230 + dev_set_allmulti(new_active->dev, 1);
1231 +
1232 +- netif_addr_lock_bh(bond->dev);
1233 +- dev_uc_sync(new_active->dev, bond->dev);
1234 +- dev_mc_sync(new_active->dev, bond->dev);
1235 +- netif_addr_unlock_bh(bond->dev);
1236 ++ if (bond->dev->flags & IFF_UP) {
1237 ++ netif_addr_lock_bh(bond->dev);
1238 ++ dev_uc_sync(new_active->dev, bond->dev);
1239 ++ dev_mc_sync(new_active->dev, bond->dev);
1240 ++ netif_addr_unlock_bh(bond->dev);
1241 ++ }
1242 + }
1243 + }
1244 +
1245 +@@ -2073,16 +2072,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
1246 + }
1247 + }
1248 +
1249 +- netif_addr_lock_bh(bond_dev);
1250 +- dev_mc_sync_multiple(slave_dev, bond_dev);
1251 +- dev_uc_sync_multiple(slave_dev, bond_dev);
1252 +- netif_addr_unlock_bh(bond_dev);
1253 ++ if (bond_dev->flags & IFF_UP) {
1254 ++ netif_addr_lock_bh(bond_dev);
1255 ++ dev_mc_sync_multiple(slave_dev, bond_dev);
1256 ++ dev_uc_sync_multiple(slave_dev, bond_dev);
1257 ++ netif_addr_unlock_bh(bond_dev);
1258 +
1259 +- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1260 +- /* add lacpdu mc addr to mc list */
1261 +- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
1262 +-
1263 +- dev_mc_add(slave_dev, lacpdu_multicast);
1264 ++ if (BOND_MODE(bond) == BOND_MODE_8023AD)
1265 ++ dev_mc_add(slave_dev, lacpdu_mcast_addr);
1266 + }
1267 + }
1268 +
1269 +@@ -2310,7 +2307,8 @@ static int __bond_release_one(struct net_device *bond_dev,
1270 + if (old_flags & IFF_ALLMULTI)
1271 + dev_set_allmulti(slave_dev, -1);
1272 +
1273 +- bond_hw_addr_flush(bond_dev, slave_dev);
1274 ++ if (old_flags & IFF_UP)
1275 ++ bond_hw_addr_flush(bond_dev, slave_dev);
1276 + }
1277 +
1278 + slave_disable_netpoll(slave);
1279 +@@ -3772,6 +3770,9 @@ static int bond_open(struct net_device *bond_dev)
1280 + /* register to receive LACPDUs */
1281 + bond->recv_probe = bond_3ad_lacpdu_recv;
1282 + bond_3ad_initiate_agg_selection(bond, 1);
1283 ++
1284 ++ bond_for_each_slave(bond, slave, iter)
1285 ++ dev_mc_add(slave->dev, lacpdu_mcast_addr);
1286 + }
1287 +
1288 + if (bond_mode_can_use_xmit_hash(bond))
1289 +@@ -3783,6 +3784,7 @@ static int bond_open(struct net_device *bond_dev)
1290 + static int bond_close(struct net_device *bond_dev)
1291 + {
1292 + struct bonding *bond = netdev_priv(bond_dev);
1293 ++ struct slave *slave;
1294 +
1295 + bond_work_cancel_all(bond);
1296 + bond->send_peer_notif = 0;
1297 +@@ -3790,6 +3792,19 @@ static int bond_close(struct net_device *bond_dev)
1298 + bond_alb_deinitialize(bond);
1299 + bond->recv_probe = NULL;
1300 +
1301 ++ if (bond_uses_primary(bond)) {
1302 ++ rcu_read_lock();
1303 ++ slave = rcu_dereference(bond->curr_active_slave);
1304 ++ if (slave)
1305 ++ bond_hw_addr_flush(bond_dev, slave->dev);
1306 ++ rcu_read_unlock();
1307 ++ } else {
1308 ++ struct list_head *iter;
1309 ++
1310 ++ bond_for_each_slave(bond, slave, iter)
1311 ++ bond_hw_addr_flush(bond_dev, slave->dev);
1312 ++ }
1313 ++
1314 + return 0;
1315 + }
1316 +
1317 +diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
1318 +index 7cbaac238ff62..429950241de32 100644
1319 +--- a/drivers/net/can/flexcan.c
1320 ++++ b/drivers/net/can/flexcan.c
1321 +@@ -954,11 +954,6 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
1322 + u32 reg_ctrl, reg_id, reg_iflag1;
1323 + int i;
1324 +
1325 +- if (unlikely(drop)) {
1326 +- skb = ERR_PTR(-ENOBUFS);
1327 +- goto mark_as_read;
1328 +- }
1329 +-
1330 + mb = flexcan_get_mb(priv, n);
1331 +
1332 + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
1333 +@@ -987,6 +982,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
1334 + reg_ctrl = priv->read(&mb->can_ctrl);
1335 + }
1336 +
1337 ++ if (unlikely(drop)) {
1338 ++ skb = ERR_PTR(-ENOBUFS);
1339 ++ goto mark_as_read;
1340 ++ }
1341 ++
1342 + if (reg_ctrl & FLEXCAN_MB_CNT_EDL)
1343 + skb = alloc_canfd_skb(offload->dev, &cfd);
1344 + else
1345 +diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
1346 +index 1bfc497da9ac8..a879200eaab02 100644
1347 +--- a/drivers/net/can/usb/gs_usb.c
1348 ++++ b/drivers/net/can/usb/gs_usb.c
1349 +@@ -678,6 +678,7 @@ static int gs_can_open(struct net_device *netdev)
1350 + flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
1351 +
1352 + /* finally start device */
1353 ++ dev->can.state = CAN_STATE_ERROR_ACTIVE;
1354 + dm->mode = cpu_to_le32(GS_CAN_MODE_START);
1355 + dm->flags = cpu_to_le32(flags);
1356 + rc = usb_control_msg(interface_to_usbdev(dev->iface),
1357 +@@ -694,13 +695,12 @@ static int gs_can_open(struct net_device *netdev)
1358 + if (rc < 0) {
1359 + netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
1360 + kfree(dm);
1361 ++ dev->can.state = CAN_STATE_STOPPED;
1362 + return rc;
1363 + }
1364 +
1365 + kfree(dm);
1366 +
1367 +- dev->can.state = CAN_STATE_ERROR_ACTIVE;
1368 +-
1369 + parent->active_channels++;
1370 + if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
1371 + netif_start_queue(netdev);
1372 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
1373 +index 15aa3b3c0089f..4af2538259576 100644
1374 +--- a/drivers/net/ethernet/freescale/enetc/enetc.c
1375 ++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
1376 +@@ -1671,29 +1671,6 @@ static int enetc_set_rss(struct net_device *ndev, int en)
1377 + return 0;
1378 + }
1379 +
1380 +-static int enetc_set_psfp(struct net_device *ndev, int en)
1381 +-{
1382 +- struct enetc_ndev_priv *priv = netdev_priv(ndev);
1383 +- int err;
1384 +-
1385 +- if (en) {
1386 +- err = enetc_psfp_enable(priv);
1387 +- if (err)
1388 +- return err;
1389 +-
1390 +- priv->active_offloads |= ENETC_F_QCI;
1391 +- return 0;
1392 +- }
1393 +-
1394 +- err = enetc_psfp_disable(priv);
1395 +- if (err)
1396 +- return err;
1397 +-
1398 +- priv->active_offloads &= ~ENETC_F_QCI;
1399 +-
1400 +- return 0;
1401 +-}
1402 +-
1403 + static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
1404 + {
1405 + struct enetc_ndev_priv *priv = netdev_priv(ndev);
1406 +@@ -1712,11 +1689,9 @@ static void enetc_enable_txvlan(struct net_device *ndev, bool en)
1407 + enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
1408 + }
1409 +
1410 +-int enetc_set_features(struct net_device *ndev,
1411 +- netdev_features_t features)
1412 ++void enetc_set_features(struct net_device *ndev, netdev_features_t features)
1413 + {
1414 + netdev_features_t changed = ndev->features ^ features;
1415 +- int err = 0;
1416 +
1417 + if (changed & NETIF_F_RXHASH)
1418 + enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
1419 +@@ -1728,11 +1703,6 @@ int enetc_set_features(struct net_device *ndev,
1420 + if (changed & NETIF_F_HW_VLAN_CTAG_TX)
1421 + enetc_enable_txvlan(ndev,
1422 + !!(features & NETIF_F_HW_VLAN_CTAG_TX));
1423 +-
1424 +- if (changed & NETIF_F_HW_TC)
1425 +- err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
1426 +-
1427 +- return err;
1428 + }
1429 +
1430 + #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
1431 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
1432 +index 15d19cbd5a954..00386c5d3cde9 100644
1433 +--- a/drivers/net/ethernet/freescale/enetc/enetc.h
1434 ++++ b/drivers/net/ethernet/freescale/enetc/enetc.h
1435 +@@ -301,8 +301,7 @@ void enetc_start(struct net_device *ndev);
1436 + void enetc_stop(struct net_device *ndev);
1437 + netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
1438 + struct net_device_stats *enetc_get_stats(struct net_device *ndev);
1439 +-int enetc_set_features(struct net_device *ndev,
1440 +- netdev_features_t features);
1441 ++void enetc_set_features(struct net_device *ndev, netdev_features_t features);
1442 + int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
1443 + int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
1444 + void *type_data);
1445 +@@ -335,6 +334,7 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1446 + int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
1447 + int enetc_psfp_init(struct enetc_ndev_priv *priv);
1448 + int enetc_psfp_clean(struct enetc_ndev_priv *priv);
1449 ++int enetc_set_psfp(struct net_device *ndev, bool en);
1450 +
1451 + static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
1452 + {
1453 +@@ -410,4 +410,9 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
1454 + {
1455 + return 0;
1456 + }
1457 ++
1458 ++static inline int enetc_set_psfp(struct net_device *ndev, bool en)
1459 ++{
1460 ++ return 0;
1461 ++}
1462 + #endif
1463 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
1464 +index 716b396bf0947..6904e10dd46b3 100644
1465 +--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
1466 ++++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
1467 +@@ -671,6 +671,13 @@ static int enetc_pf_set_features(struct net_device *ndev,
1468 + {
1469 + netdev_features_t changed = ndev->features ^ features;
1470 + struct enetc_ndev_priv *priv = netdev_priv(ndev);
1471 ++ int err;
1472 ++
1473 ++ if (changed & NETIF_F_HW_TC) {
1474 ++ err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
1475 ++ if (err)
1476 ++ return err;
1477 ++ }
1478 +
1479 + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
1480 + struct enetc_pf *pf = enetc_si_priv(priv->si);
1481 +@@ -684,7 +691,9 @@ static int enetc_pf_set_features(struct net_device *ndev,
1482 + if (changed & NETIF_F_LOOPBACK)
1483 + enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
1484 +
1485 +- return enetc_set_features(ndev, features);
1486 ++ enetc_set_features(ndev, features);
1487 ++
1488 ++ return 0;
1489 + }
1490 +
1491 + static const struct net_device_ops enetc_ndev_ops = {
1492 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
1493 +index 9e6988fd3787a..62efe1aebf86a 100644
1494 +--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
1495 ++++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
1496 +@@ -1525,6 +1525,29 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1497 + }
1498 + }
1499 +
1500 ++int enetc_set_psfp(struct net_device *ndev, bool en)
1501 ++{
1502 ++ struct enetc_ndev_priv *priv = netdev_priv(ndev);
1503 ++ int err;
1504 ++
1505 ++ if (en) {
1506 ++ err = enetc_psfp_enable(priv);
1507 ++ if (err)
1508 ++ return err;
1509 ++
1510 ++ priv->active_offloads |= ENETC_F_QCI;
1511 ++ return 0;
1512 ++ }
1513 ++
1514 ++ err = enetc_psfp_disable(priv);
1515 ++ if (err)
1516 ++ return err;
1517 ++
1518 ++ priv->active_offloads &= ~ENETC_F_QCI;
1519 ++
1520 ++ return 0;
1521 ++}
1522 ++
1523 + int enetc_psfp_init(struct enetc_ndev_priv *priv)
1524 + {
1525 + if (epsfp.psfp_sfi_bitmap)
1526 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
1527 +index 33c125735db7e..5ce3e2593bdde 100644
1528 +--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
1529 ++++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
1530 +@@ -88,7 +88,9 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
1531 + static int enetc_vf_set_features(struct net_device *ndev,
1532 + netdev_features_t features)
1533 + {
1534 +- return enetc_set_features(ndev, features);
1535 ++ enetc_set_features(ndev, features);
1536 ++
1537 ++ return 0;
1538 + }
1539 +
1540 + /* Probing/ Init */
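Taken together, the enetc hunks above move PSFP (Per-Stream Filtering and Policing) handling out of the shared feature helper: enetc_set_features() becomes void and only toggles RSS and VLAN offloads, while the NETIF_F_HW_TC change, which can fail, is handled in the PF's own ndo_set_features callback; the VF path never toggles PSFP. A rough sketch of the resulting call flow (an illustrative summary, not code from the patch):

	/*
	 * PF: enetc_pf_set_features()
	 *       -> enetc_set_psfp()     on NETIF_F_HW_TC changes, may fail
	 *       -> enetc_set_features() for RSS/VLAN offloads, cannot fail
	 *
	 * VF: enetc_vf_set_features()
	 *       -> enetc_set_features() only
	 */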
1541 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
1542 +index 97009cbea7793..c7f243ddbcf72 100644
1543 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
1544 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
1545 +@@ -5733,6 +5733,26 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi)
1546 + }
1547 + }
1548 +
1549 ++/**
1550 ++ * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
1551 ++ * @vsi: Pointer to vsi structure
1552 ++ * @max_tx_rate: max TX rate in bytes to be converted into Mbits
1553 ++ *
1554 ++ * Helper function to convert units before send to set BW limit
1555 ++ **/
1556 ++static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
1557 ++{
1558 ++ if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
1559 ++ dev_warn(&vsi->back->pdev->dev,
1560 ++ "Setting max tx rate to minimum usable value of 50Mbps.\n");
1561 ++ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
1562 ++ } else {
1563 ++ do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
1564 ++ }
1565 ++
1566 ++ return max_tx_rate;
1567 ++}
1568 ++
1569 + /**
1570 + * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
1571 + * @vsi: VSI to be configured
1572 +@@ -5755,10 +5775,10 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
1573 + max_tx_rate, seid);
1574 + return -EINVAL;
1575 + }
1576 +- if (max_tx_rate && max_tx_rate < 50) {
1577 ++ if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
1578 + dev_warn(&pf->pdev->dev,
1579 + "Setting max tx rate to minimum usable value of 50Mbps.\n");
1580 +- max_tx_rate = 50;
1581 ++ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
1582 + }
1583 +
1584 + /* Tx rate credits are in values of 50Mbps, 0 is disabled */
1585 +@@ -7719,9 +7739,9 @@ config_tc:
1586 +
1587 + if (pf->flags & I40E_FLAG_TC_MQPRIO) {
1588 + if (vsi->mqprio_qopt.max_rate[0]) {
1589 +- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
1590 ++ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
1591 ++ vsi->mqprio_qopt.max_rate[0]);
1592 +
1593 +- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
1594 + ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
1595 + if (!ret) {
1596 + u64 credits = max_tx_rate;
1597 +@@ -10366,10 +10386,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
1598 + }
1599 +
1600 + if (vsi->mqprio_qopt.max_rate[0]) {
1601 +- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
1602 ++ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
1603 ++ vsi->mqprio_qopt.max_rate[0]);
1604 + u64 credits = 0;
1605 +
1606 +- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
1607 + ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
1608 + if (ret)
1609 + goto end_unlock;
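The mqprio max_rate above is carried in bytes per second while the firmware takes credits of 50 Mbit/s; assuming I40E_BW_MBPS_DIVISOR is 125000 (bytes per second per Mbit/s) and I40E_BW_CREDIT_DIVISOR is 50, as in the i40e headers, the new helper turns e.g. 375000000 bytes/s into 3000 Mbit/s and ultimately 60 credits, and clamps anything below 1 Mbit/s to the 50 Mbit/s floor instead of letting do_div() round it down to zero. A standalone sketch of the arithmetic, with those constants assumed:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t bytes_per_sec = 375000000ULL;   /* offered rate */
		uint64_t mbits;

		if (bytes_per_sec < 125000)              /* would round down to 0 Mbit/s */
			mbits = 50;                      /* minimum usable value */
		else
			mbits = bytes_per_sec / 125000;  /* 3000 Mbit/s here */

		printf("%llu Mbit/s -> %llu credits of 50 Mbit/s\n",
		       (unsigned long long)mbits,
		       (unsigned long long)(mbits / 50));
		return 0;
	}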
1610 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
1611 +index 1947c5a775505..ffff7de801af7 100644
1612 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
1613 ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
1614 +@@ -1985,6 +1985,25 @@ static void i40e_del_qch(struct i40e_vf *vf)
1615 + }
1616 + }
1617 +
1618 ++/**
1619 ++ * i40e_vc_get_max_frame_size
1620 ++ * @vf: pointer to the VF
1621 ++ *
1622 ++ * Max frame size is determined based on the current port's max frame size and
1623 ++ * whether a port VLAN is configured on this VF. The VF is not aware whether
1624 ++ * it's in a port VLAN so the PF needs to account for this in max frame size
1625 ++ * checks and sending the max frame size to the VF.
1626 ++ **/
1627 ++static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
1628 ++{
1629 ++ u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
1630 ++
1631 ++ if (vf->port_vlan_id)
1632 ++ max_frame_size -= VLAN_HLEN;
1633 ++
1634 ++ return max_frame_size;
1635 ++}
1636 ++
1637 + /**
1638 + * i40e_vc_get_vf_resources_msg
1639 + * @vf: pointer to the VF info
1640 +@@ -2085,6 +2104,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
1641 + vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
1642 + vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
1643 + vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
1644 ++ vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
1645 +
1646 + if (vf->lan_vsi_idx) {
1647 + vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
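The helper added above shrinks the frame size advertised to a VF by VLAN_HLEN (4 bytes) whenever the PF has placed that VF in a port VLAN, since the tag is inserted outside the VF's view: with a port max frame of, say, 9728 bytes (the figure is only an example), such a VF is told max_mtu = 9728 - 4 = 9724, while a VF without a port VLAN still sees the full 9728.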
1648 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
1649 +index 99983f7a0ce0b..d481a922f0184 100644
1650 +--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
1651 ++++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
1652 +@@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
1653 + {
1654 + u32 head, tail;
1655 +
1656 ++ /* underlying hardware might not allow access and/or always return
1657 ++ * 0 for the head/tail registers so just use the cached values
1658 ++ */
1659 + head = ring->next_to_clean;
1660 +- tail = readl(ring->tail);
1661 ++ tail = ring->next_to_use;
1662 +
1663 + if (head != tail)
1664 + return (head < tail) ?
1665 +@@ -1368,7 +1371,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
1666 + #endif
1667 + struct sk_buff *skb;
1668 +
1669 +- if (!rx_buffer)
1670 ++ if (!rx_buffer || !size)
1671 + return NULL;
1672 + /* prefetch first cache line of first page */
1673 + va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1674 +@@ -1526,7 +1529,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
1675 + /* exit if we failed to retrieve a buffer */
1676 + if (!skb) {
1677 + rx_ring->rx_stats.alloc_buff_failed++;
1678 +- if (rx_buffer)
1679 ++ if (rx_buffer && size)
1680 + rx_buffer->pagecnt_bias++;
1681 + break;
1682 + }
1683 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
1684 +index ff479bf721443..5deee75bc4360 100644
1685 +--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
1686 ++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
1687 +@@ -241,11 +241,14 @@ out:
1688 + void iavf_configure_queues(struct iavf_adapter *adapter)
1689 + {
1690 + struct virtchnl_vsi_queue_config_info *vqci;
1691 +- struct virtchnl_queue_pair_info *vqpi;
1692 ++ int i, max_frame = adapter->vf_res->max_mtu;
1693 + int pairs = adapter->num_active_queues;
1694 +- int i, max_frame = IAVF_MAX_RXBUFFER;
1695 ++ struct virtchnl_queue_pair_info *vqpi;
1696 + size_t len;
1697 +
1698 ++ if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
1699 ++ max_frame = IAVF_MAX_RXBUFFER;
1700 ++
1701 + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1702 + /* bail because we already have a command pending */
1703 + dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
1704 +diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
1705 +index d0f1b2dc7dff0..c49168ba7a4d6 100644
1706 +--- a/drivers/net/ethernet/sfc/efx_channels.c
1707 ++++ b/drivers/net/ethernet/sfc/efx_channels.c
1708 +@@ -308,7 +308,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
1709 + efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
1710 + efx->n_rx_channels = 1;
1711 + efx->n_tx_channels = 1;
1712 +- efx->tx_channel_offset = 1;
1713 ++ efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0;
1714 + efx->n_xdp_channels = 0;
1715 + efx->xdp_channel_offset = efx->n_channels;
1716 + efx->legacy_irq = efx->pci_dev->irq;
1717 +diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
1718 +index 1665529a72717..fcc7de8ae2bfa 100644
1719 +--- a/drivers/net/ethernet/sfc/tx.c
1720 ++++ b/drivers/net/ethernet/sfc/tx.c
1721 +@@ -545,7 +545,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
1722 + * previous packets out.
1723 + */
1724 + if (!netdev_xmit_more())
1725 +- efx_tx_send_pending(tx_queue->channel);
1726 ++ efx_tx_send_pending(efx_get_tx_channel(efx, index));
1727 + return NETDEV_TX_OK;
1728 + }
1729 +
1730 +diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
1731 +index 69fc47089e625..940db4ec57142 100644
1732 +--- a/drivers/net/ethernet/sun/sunhme.c
1733 ++++ b/drivers/net/ethernet/sun/sunhme.c
1734 +@@ -2063,9 +2063,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
1735 +
1736 + skb_reserve(copy_skb, 2);
1737 + skb_put(copy_skb, len);
1738 +- dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
1739 ++ dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
1740 + skb_copy_from_linear_data(skb, copy_skb->data, len);
1741 +- dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
1742 ++ dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
1743 + /* Reuse original ring buffer. */
1744 + hme_write_rxd(hp, this,
1745 + (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
1746 +diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
1747 +index fe91b72eca36c..64b12e462765e 100644
1748 +--- a/drivers/net/ipa/gsi.c
1749 ++++ b/drivers/net/ipa/gsi.c
1750 +@@ -1251,20 +1251,18 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
1751 + /* Initialize a ring, including allocating DMA memory for its entries */
1752 + static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
1753 + {
1754 +- size_t size = count * GSI_RING_ELEMENT_SIZE;
1755 ++ u32 size = count * GSI_RING_ELEMENT_SIZE;
1756 + struct device *dev = gsi->dev;
1757 + dma_addr_t addr;
1758 +
1759 +- /* Hardware requires a 2^n ring size, with alignment equal to size */
1760 ++ /* Hardware requires a 2^n ring size, with alignment equal to size.
1761 ++ * The DMA address returned by dma_alloc_coherent() is guaranteed to
1762 ++ * be a power-of-2 number of pages, which satisfies the requirement.
1763 ++ */
1764 + ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
1765 +- if (ring->virt && addr % size) {
1766 +- dma_free_coherent(dev, size, ring->virt, addr);
1767 +- dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
1768 +- size);
1769 +- return -EINVAL; /* Not a good error value, but distinct */
1770 +- } else if (!ring->virt) {
1771 ++ if (!ring->virt)
1772 + return -ENOMEM;
1773 +- }
1774 ++
1775 + ring->addr = addr;
1776 + ring->count = count;
1777 +
1778 +diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h
1779 +index 1785c9d3344d1..d58dce46e061a 100644
1780 +--- a/drivers/net/ipa/gsi_private.h
1781 ++++ b/drivers/net/ipa/gsi_private.h
1782 +@@ -14,7 +14,7 @@ struct gsi_trans;
1783 + struct gsi_ring;
1784 + struct gsi_channel;
1785 +
1786 +-#define GSI_RING_ELEMENT_SIZE 16 /* bytes */
1787 ++#define GSI_RING_ELEMENT_SIZE 16 /* bytes; must be a power of 2 */
1788 +
1789 + /* Return the entry that follows one provided in a transaction pool */
1790 + void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element);
1791 +diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
1792 +index 6c3ed5b17b80c..70c2b585f98d6 100644
1793 +--- a/drivers/net/ipa/gsi_trans.c
1794 ++++ b/drivers/net/ipa/gsi_trans.c
1795 +@@ -153,11 +153,10 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
1796 + size = __roundup_pow_of_two(size);
1797 + total_size = (count + max_alloc - 1) * size;
1798 +
1799 +- /* The allocator will give us a power-of-2 number of pages. But we
1800 +- * can't guarantee that, so request it. That way we won't waste any
1801 +- * memory that would be available beyond the required space.
1802 +- *
1803 +- * Note that gsi_trans_pool_exit_dma() assumes the total allocated
1804 ++ /* The allocator will give us a power-of-2 number of pages
1805 ++ * sufficient to satisfy our request. Round up our requested
1806 ++ * size to avoid any unused space in the allocation. This way
1807 ++ * gsi_trans_pool_exit_dma() can assume the total allocated
1808 + * size is exactly (count * size).
1809 + */
1810 + total_size = get_order(total_size) << PAGE_SHIFT;
1811 +diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
1812 +index a47378b7d9b2f..dc94ce0356556 100644
1813 +--- a/drivers/net/ipa/ipa_cmd.c
1814 ++++ b/drivers/net/ipa/ipa_cmd.c
1815 +@@ -154,7 +154,7 @@ static void ipa_cmd_validate_build(void)
1816 + * of entries, as and IPv4 and IPv6 route tables have the same number
1817 + * of entries.
1818 + */
1819 +-#define TABLE_SIZE (TABLE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE)
1820 ++#define TABLE_SIZE (TABLE_COUNT_MAX * sizeof(__le64))
1821 + #define TABLE_COUNT_MAX max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX)
1822 + BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK));
1823 + BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
1824 +diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
1825 +index 7fc1058a5ca93..ba05e26c3c60e 100644
1826 +--- a/drivers/net/ipa/ipa_data.h
1827 ++++ b/drivers/net/ipa/ipa_data.h
1828 +@@ -72,8 +72,8 @@
1829 + * that can be included in a single transaction.
1830 + */
1831 + struct gsi_channel_data {
1832 +- u16 tre_count;
1833 +- u16 event_count;
1834 ++ u16 tre_count; /* must be a power of 2 */
1835 ++ u16 event_count; /* must be a power of 2 */
1836 + u8 tlv_count;
1837 + };
1838 +
1839 +diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
1840 +index 1a87a49538c50..880ec353f958f 100644
1841 +--- a/drivers/net/ipa/ipa_qmi.c
1842 ++++ b/drivers/net/ipa/ipa_qmi.c
1843 +@@ -308,12 +308,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
1844 + mem = &ipa->mem[IPA_MEM_V4_ROUTE];
1845 + req.v4_route_tbl_info_valid = 1;
1846 + req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
1847 +- req.v4_route_tbl_info.count = mem->size / IPA_TABLE_ENTRY_SIZE;
1848 ++ req.v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
1849 +
1850 + mem = &ipa->mem[IPA_MEM_V6_ROUTE];
1851 + req.v6_route_tbl_info_valid = 1;
1852 + req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
1853 +- req.v6_route_tbl_info.count = mem->size / IPA_TABLE_ENTRY_SIZE;
1854 ++ req.v6_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
1855 +
1856 + mem = &ipa->mem[IPA_MEM_V4_FILTER];
1857 + req.v4_filter_tbl_start_valid = 1;
1858 +@@ -352,8 +352,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
1859 + req.v4_hash_route_tbl_info_valid = 1;
1860 + req.v4_hash_route_tbl_info.start =
1861 + ipa->mem_offset + mem->offset;
1862 +- req.v4_hash_route_tbl_info.count =
1863 +- mem->size / IPA_TABLE_ENTRY_SIZE;
1864 ++ req.v4_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
1865 + }
1866 +
1867 + mem = &ipa->mem[IPA_MEM_V6_ROUTE_HASHED];
1868 +@@ -361,8 +360,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
1869 + req.v6_hash_route_tbl_info_valid = 1;
1870 + req.v6_hash_route_tbl_info.start =
1871 + ipa->mem_offset + mem->offset;
1872 +- req.v6_hash_route_tbl_info.count =
1873 +- mem->size / IPA_TABLE_ENTRY_SIZE;
1874 ++ req.v6_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
1875 + }
1876 +
1877 + mem = &ipa->mem[IPA_MEM_V4_FILTER_HASHED];
1878 +diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
1879 +index 73413371e3d3e..ecf9f863c842b 100644
1880 +--- a/drivers/net/ipa/ipa_qmi_msg.c
1881 ++++ b/drivers/net/ipa/ipa_qmi_msg.c
1882 +@@ -271,7 +271,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
1883 + .tlv_type = 0x12,
1884 + .offset = offsetof(struct ipa_init_modem_driver_req,
1885 + v4_route_tbl_info),
1886 +- .ei_array = ipa_mem_array_ei,
1887 ++ .ei_array = ipa_mem_bounds_ei,
1888 + },
1889 + {
1890 + .data_type = QMI_OPT_FLAG,
1891 +@@ -292,7 +292,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
1892 + .tlv_type = 0x13,
1893 + .offset = offsetof(struct ipa_init_modem_driver_req,
1894 + v6_route_tbl_info),
1895 +- .ei_array = ipa_mem_array_ei,
1896 ++ .ei_array = ipa_mem_bounds_ei,
1897 + },
1898 + {
1899 + .data_type = QMI_OPT_FLAG,
1900 +@@ -456,7 +456,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
1901 + .tlv_type = 0x1b,
1902 + .offset = offsetof(struct ipa_init_modem_driver_req,
1903 + v4_hash_route_tbl_info),
1904 +- .ei_array = ipa_mem_array_ei,
1905 ++ .ei_array = ipa_mem_bounds_ei,
1906 + },
1907 + {
1908 + .data_type = QMI_OPT_FLAG,
1909 +@@ -477,7 +477,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
1910 + .tlv_type = 0x1c,
1911 + .offset = offsetof(struct ipa_init_modem_driver_req,
1912 + v6_hash_route_tbl_info),
1913 +- .ei_array = ipa_mem_array_ei,
1914 ++ .ei_array = ipa_mem_bounds_ei,
1915 + },
1916 + {
1917 + .data_type = QMI_OPT_FLAG,
1918 +diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
1919 +index cfac456cea0ca..58de425bb8e61 100644
1920 +--- a/drivers/net/ipa/ipa_qmi_msg.h
1921 ++++ b/drivers/net/ipa/ipa_qmi_msg.h
1922 +@@ -82,9 +82,11 @@ enum ipa_platform_type {
1923 + IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 5, /* QNX MSM */
1924 + };
1925 +
1926 +-/* This defines the start and end offset of a range of memory. Both
1927 +- * fields are offsets relative to the start of IPA shared memory.
1928 +- * The end value is the last addressable byte *within* the range.
1929 ++/* This defines the start and end offset of a range of memory. The start
1930 ++ * value is a byte offset relative to the start of IPA shared memory. The
1931 ++ * end value is the last addressable unit *within* the range. Typically
1932 ++ * the end value is in units of bytes, however it can also be a maximum
1933 ++ * array index value.
1934 + */
1935 + struct ipa_mem_bounds {
1936 + u32 start;
1937 +@@ -125,18 +127,19 @@ struct ipa_init_modem_driver_req {
1938 + u8 hdr_tbl_info_valid;
1939 + struct ipa_mem_bounds hdr_tbl_info;
1940 +
1941 +- /* Routing table information. These define the location and size of
1942 +- * non-hashable IPv4 and IPv6 filter tables. The start values are
1943 +- * offsets relative to the start of IPA shared memory.
1944 ++ /* Routing table information. These define the location and maximum
1945 ++ * *index* (not byte) for the modem portion of non-hashable IPv4 and
1946 ++ * IPv6 routing tables. The start values are byte offsets relative
1947 ++ * to the start of IPA shared memory.
1948 + */
1949 + u8 v4_route_tbl_info_valid;
1950 +- struct ipa_mem_array v4_route_tbl_info;
1951 ++ struct ipa_mem_bounds v4_route_tbl_info;
1952 + u8 v6_route_tbl_info_valid;
1953 +- struct ipa_mem_array v6_route_tbl_info;
1954 ++ struct ipa_mem_bounds v6_route_tbl_info;
1955 +
1956 + /* Filter table information. These define the location of the
1957 + * non-hashable IPv4 and IPv6 filter tables. The start values are
1958 +- * offsets relative to the start of IPA shared memory.
1959 ++ * byte offsets relative to the start of IPA shared memory.
1960 + */
1961 + u8 v4_filter_tbl_start_valid;
1962 + u32 v4_filter_tbl_start;
1963 +@@ -177,18 +180,20 @@ struct ipa_init_modem_driver_req {
1964 + u8 zip_tbl_info_valid;
1965 + struct ipa_mem_bounds zip_tbl_info;
1966 +
1967 +- /* Routing table information. These define the location and size
1968 +- * of hashable IPv4 and IPv6 filter tables. The start values are
1969 +- * offsets relative to the start of IPA shared memory.
1970 ++ /* Routing table information. These define the location and maximum
1971 ++ * *index* (not byte) for the modem portion of hashable IPv4 and IPv6
1972 ++ * routing tables (if supported by hardware). The start values are
1973 ++ * byte offsets relative to the start of IPA shared memory.
1974 + */
1975 + u8 v4_hash_route_tbl_info_valid;
1976 +- struct ipa_mem_array v4_hash_route_tbl_info;
1977 ++ struct ipa_mem_bounds v4_hash_route_tbl_info;
1978 + u8 v6_hash_route_tbl_info_valid;
1979 +- struct ipa_mem_array v6_hash_route_tbl_info;
1980 ++ struct ipa_mem_bounds v6_hash_route_tbl_info;
1981 +
1982 + /* Filter table information. These define the location and size
1983 +- * of hashable IPv4 and IPv6 filter tables. The start values are
1984 +- * offsets relative to the start of IPA shared memory.
1985 ++ * of hashable IPv4 and IPv6 filter tables (if supported by hardware).
1986 ++ * The start values are byte offsets relative to the start of IPA
1987 ++ * shared memory.
1988 + */
1989 + u8 v4_hash_filter_tbl_start_valid;
1990 + u32 v4_hash_filter_tbl_start;
1991 +diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
1992 +index 0747866d60abc..02c1928374144 100644
1993 +--- a/drivers/net/ipa/ipa_table.c
1994 ++++ b/drivers/net/ipa/ipa_table.c
1995 +@@ -27,28 +27,38 @@
1996 + /**
1997 + * DOC: IPA Filter and Route Tables
1998 + *
1999 +- * The IPA has tables defined in its local shared memory that define filter
2000 +- * and routing rules. Each entry in these tables contains a 64-bit DMA
2001 +- * address that refers to DRAM (system memory) containing a rule definition.
2002 ++ * The IPA has tables defined in its local (IPA-resident) memory that define
2003 ++ * filter and routing rules. An entry in either of these tables is a little
2004 ++ * endian 64-bit "slot" that holds the address of a rule definition. (The
2005 ++ * size of these slots is 64 bits regardless of the host DMA address size.)
2006 ++ *
2007 ++ * Separate tables (both filter and route) are used for IPv4 and IPv6. There
2008 ++ * is normally another set of "hashed" filter and route tables, which are
2009 ++ * used with a hash of message metadata. Hashed operation is not supported
2010 ++ * by all IPA hardware (IPA v4.2 doesn't support hashed tables).
2011 ++ *
2012 ++ * Rules can be in local memory or in DRAM (system memory). The offset of
2013 ++ * an object (such as a route or filter table) in IPA-resident memory must
2014 ++ * 128-byte aligned. An object in system memory (such as a route or filter
2015 ++ * rule) must be at an 8-byte aligned address. We currently only place
2016 ++ * route or filter rules in system memory.
2017 ++ *
2018 + * A rule consists of a contiguous block of 32-bit values terminated with
2019 + * 32 zero bits. A special "zero entry" rule consisting of 64 zero bits
2020 + * represents "no filtering" or "no routing," and is the reset value for
2021 +- * filter or route table rules. Separate tables (both filter and route)
2022 +- * used for IPv4 and IPv6. Additionally, there can be hashed filter or
2023 +- * route tables, which are used when a hash of message metadata matches.
2024 +- * Hashed operation is not supported by all IPA hardware.
2025 ++ * filter or route table rules.
2026 + *
2027 + * Each filter rule is associated with an AP or modem TX endpoint, though
2028 +- * not all TX endpoints support filtering. The first 64-bit entry in a
2029 ++ * not all TX endpoints support filtering. The first 64-bit slot in a
2030 + * filter table is a bitmap indicating which endpoints have entries in
2031 + * the table. The low-order bit (bit 0) in this bitmap represents a
2032 + * special global filter, which applies to all traffic. This is not
2033 + * used in the current code. Bit 1, if set, indicates that there is an
2034 +- * entry (i.e. a DMA address referring to a rule) for endpoint 0 in the
2035 +- * table. Bit 2, if set, indicates there is an entry for endpoint 1,
2036 +- * and so on. Space is set aside in IPA local memory to hold as many
2037 +- * filter table entries as might be required, but typically they are not
2038 +- * all used.
2039 ++ * entry (i.e. slot containing a system address referring to a rule) for
2040 ++ * endpoint 0 in the table. Bit 3, if set, indicates there is an entry
2041 ++ * for endpoint 2, and so on. Space is set aside in IPA local memory to
2042 ++ * hold as many filter table entries as might be required, but typically
2043 ++ * they are not all used.
2044 + *
2045 + * The AP initializes all entries in a filter table to refer to a "zero"
2046 + * entry. Once initialized the modem and AP update the entries for
2047 +@@ -96,13 +106,8 @@
2048 + * ----------------------
2049 + */
2050 +
2051 +-/* IPA hardware constrains filter and route tables alignment */
2052 +-#define IPA_TABLE_ALIGN 128 /* Minimum table alignment */
2053 +-
2054 + /* Assignment of route table entries to the modem and AP */
2055 + #define IPA_ROUTE_MODEM_MIN 0
2056 +-#define IPA_ROUTE_MODEM_COUNT 8
2057 +-
2058 + #define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT
2059 + #define IPA_ROUTE_AP_COUNT \
2060 + (IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
2061 +@@ -118,21 +123,14 @@
2062 + /* Check things that can be validated at build time. */
2063 + static void ipa_table_validate_build(void)
2064 + {
2065 +- /* IPA hardware accesses memory 128 bytes at a time. Addresses
2066 +- * referred to by entries in filter and route tables must be
2067 +- * aligned on 128-byte byte boundaries. The only rule address
2068 +- * ever use is the "zero rule", and it's aligned at the base
2069 +- * of a coherent DMA allocation.
2070 +- */
2071 +- BUILD_BUG_ON(ARCH_DMA_MINALIGN % IPA_TABLE_ALIGN);
2072 +-
2073 +- /* Filter and route tables contain DMA addresses that refer to
2074 +- * filter or route rules. We use a fixed constant to represent
2075 +- * the size of either type of table entry. Code in ipa_table_init()
2076 +- * uses a pointer to __le64 to initialize table entriews.
2077 ++ /* Filter and route tables contain DMA addresses that refer
2078 ++ * to filter or route rules. But the size of a table entry
2079 ++ * is 64 bits regardless of what the size of an AP DMA address
2080 ++ * is. A fixed constant defines the size of an entry, and
2081 ++ * code in ipa_table_init() uses a pointer to __le64 to
2082 ++ * initialize tables.
2083 + */
2084 +- BUILD_BUG_ON(IPA_TABLE_ENTRY_SIZE != sizeof(dma_addr_t));
2085 +- BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(__le64));
2086 ++ BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(__le64));
2087 +
2088 + /* A "zero rule" is used to represent no filtering or no routing.
2089 + * It is a 64-bit block of zeroed memory. Code in ipa_table_init()
2090 +@@ -163,7 +161,7 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
2091 + else
2092 + mem = hashed ? &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]
2093 + : &ipa->mem[IPA_MEM_V4_ROUTE];
2094 +- size = IPA_ROUTE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE;
2095 ++ size = IPA_ROUTE_COUNT_MAX * sizeof(__le64);
2096 + } else {
2097 + if (ipv6)
2098 + mem = hashed ? &ipa->mem[IPA_MEM_V6_FILTER_HASHED]
2099 +@@ -171,7 +169,7 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
2100 + else
2101 + mem = hashed ? &ipa->mem[IPA_MEM_V4_FILTER_HASHED]
2102 + : &ipa->mem[IPA_MEM_V4_FILTER];
2103 +- size = (1 + IPA_FILTER_COUNT_MAX) * IPA_TABLE_ENTRY_SIZE;
2104 ++ size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64);
2105 + }
2106 +
2107 + if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed))
2108 +@@ -270,8 +268,8 @@ static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
2109 + if (filter)
2110 + first++; /* skip over bitmap */
2111 +
2112 +- offset = mem->offset + first * IPA_TABLE_ENTRY_SIZE;
2113 +- size = count * IPA_TABLE_ENTRY_SIZE;
2114 ++ offset = mem->offset + first * sizeof(__le64);
2115 ++ size = count * sizeof(__le64);
2116 + addr = ipa_table_addr(ipa, false, count);
2117 +
2118 + ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true);
2119 +@@ -455,11 +453,11 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
2120 + count = 1 + hweight32(ipa->filter_map);
2121 + hash_count = hash_mem->size ? count : 0;
2122 + } else {
2123 +- count = mem->size / IPA_TABLE_ENTRY_SIZE;
2124 +- hash_count = hash_mem->size / IPA_TABLE_ENTRY_SIZE;
2125 ++ count = mem->size / sizeof(__le64);
2126 ++ hash_count = hash_mem->size / sizeof(__le64);
2127 + }
2128 +- size = count * IPA_TABLE_ENTRY_SIZE;
2129 +- hash_size = hash_count * IPA_TABLE_ENTRY_SIZE;
2130 ++ size = count * sizeof(__le64);
2131 ++ hash_size = hash_count * sizeof(__le64);
2132 +
2133 + addr = ipa_table_addr(ipa, filter, count);
2134 + hash_addr = ipa_table_addr(ipa, filter, hash_count);
2135 +@@ -662,7 +660,13 @@ int ipa_table_init(struct ipa *ipa)
2136 +
2137 + ipa_table_validate_build();
2138 +
2139 +- size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE;
2140 ++ /* The IPA hardware requires route and filter table rules to be
2141 ++ * aligned on a 128-byte boundary. We put the "zero rule" at the
2142 ++ * base of the table area allocated here. The DMA address returned
2143 ++ * by dma_alloc_coherent() is guaranteed to be a power-of-2 number
2144 ++ * of pages, which satisfies the rule alignment requirement.
2145 ++ */
2146 ++ size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
2147 + virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
2148 + if (!virt)
2149 + return -ENOMEM;
2150 +@@ -694,7 +698,7 @@ void ipa_table_exit(struct ipa *ipa)
2151 + struct device *dev = &ipa->pdev->dev;
2152 + size_t size;
2153 +
2154 +- size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE;
2155 ++ size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
2156 +
2157 + dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr);
2158 + ipa->table_addr = 0;
2159 +diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
2160 +index 78038d14fcea9..35e519cef25da 100644
2161 +--- a/drivers/net/ipa/ipa_table.h
2162 ++++ b/drivers/net/ipa/ipa_table.h
2163 +@@ -10,12 +10,12 @@
2164 +
2165 + struct ipa;
2166 +
2167 +-/* The size of a filter or route table entry */
2168 +-#define IPA_TABLE_ENTRY_SIZE sizeof(__le64) /* Holds a physical address */
2169 +-
2170 + /* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
2171 + #define IPA_FILTER_COUNT_MAX 14
2172 +
2173 ++/* The number of route table entries allotted to the modem */
2174 ++#define IPA_ROUTE_MODEM_COUNT 8
2175 ++
2176 + /* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
2177 + #define IPA_ROUTE_COUNT_MAX 15
2178 +
2179 +diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
2180 +index 8801d093135c3..a33149ee0ddcf 100644
2181 +--- a/drivers/net/ipvlan/ipvlan_core.c
2182 ++++ b/drivers/net/ipvlan/ipvlan_core.c
2183 +@@ -496,7 +496,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
2184 +
2185 + static int ipvlan_process_outbound(struct sk_buff *skb)
2186 + {
2187 +- struct ethhdr *ethh = eth_hdr(skb);
2188 + int ret = NET_XMIT_DROP;
2189 +
2190 + /* The ipvlan is a pseudo-L2 device, so the packets that we receive
2191 +@@ -506,6 +505,8 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
2192 + if (skb_mac_header_was_set(skb)) {
2193 + /* In this mode we dont care about
2194 + * multicast and broadcast traffic */
2195 ++ struct ethhdr *ethh = eth_hdr(skb);
2196 ++
2197 + if (is_multicast_ether_addr(ethh->h_dest)) {
2198 + pr_debug_ratelimited(
2199 + "Dropped {multi|broad}cast of type=[%x]\n",
2200 +@@ -590,7 +591,7 @@ out:
2201 + static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
2202 + {
2203 + const struct ipvl_dev *ipvlan = netdev_priv(dev);
2204 +- struct ethhdr *eth = eth_hdr(skb);
2205 ++ struct ethhdr *eth = skb_eth_hdr(skb);
2206 + struct ipvl_addr *addr;
2207 + void *lyr3h;
2208 + int addr_type;
2209 +@@ -620,6 +621,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
2210 + return dev_forward_skb(ipvlan->phy_dev, skb);
2211 +
2212 + } else if (is_multicast_ether_addr(eth->h_dest)) {
2213 ++ skb_reset_mac_header(skb);
2214 + ipvlan_skb_crossing_ns(skb, NULL);
2215 + ipvlan_multicast_enqueue(ipvlan->port, skb, true);
2216 + return NET_XMIT_SUCCESS;
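The ipvlan hunks above matter because the two accessors are not interchangeable: eth_hdr() reads the header through skb->mac_header, which is only valid once the mac header has been set, while skb_eth_hdr() reads it at skb->data, which is what the xmit path actually has; the added skb_reset_mac_header() then makes the multicast enqueue path consistent again. A simplified sketch of the two accessors (close to, but not quoted from, the kernel headers):

	static inline struct ethhdr *eth_hdr_sketch(const struct sk_buff *skb)
	{
		return (struct ethhdr *)skb_mac_header(skb);  /* needs mac_header set */
	}

	static inline struct ethhdr *skb_eth_hdr_sketch(const struct sk_buff *skb)
	{
		return (struct ethhdr *)skb->data;            /* header at skb->data */
	}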
2217 +diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
2218 +index ea0bf13e8ac3f..5bae47f3da405 100644
2219 +--- a/drivers/net/mdio/of_mdio.c
2220 ++++ b/drivers/net/mdio/of_mdio.c
2221 +@@ -332,6 +332,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
2222 + return 0;
2223 +
2224 + unregister:
2225 ++ of_node_put(child);
2226 + mdiobus_unregister(mdio);
2227 + return rc;
2228 + }
2229 +diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
2230 +index 75a62d1cc7375..7045595f8d7d1 100644
2231 +--- a/drivers/net/phy/aquantia_main.c
2232 ++++ b/drivers/net/phy/aquantia_main.c
2233 +@@ -89,6 +89,9 @@
2234 + #define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8)
2235 + #define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0)
2236 +
2237 ++#define VEND1_GLOBAL_GEN_STAT2 0xc831
2238 ++#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15)
2239 ++
2240 + #define VEND1_GLOBAL_RSVD_STAT1 0xc885
2241 + #define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4)
2242 + #define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0)
2243 +@@ -123,6 +126,12 @@
2244 + #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
2245 + #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
2246 +
2247 ++/* Sleep and timeout for checking if the Processor-Intensive
2248 ++ * MDIO operation is finished
2249 ++ */
2250 ++#define AQR107_OP_IN_PROG_SLEEP 1000
2251 ++#define AQR107_OP_IN_PROG_TIMEOUT 100000
2252 ++
2253 + struct aqr107_hw_stat {
2254 + const char *name;
2255 + int reg;
2256 +@@ -569,16 +578,52 @@ static void aqr107_link_change_notify(struct phy_device *phydev)
2257 + phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
2258 + }
2259 +
2260 ++static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
2261 ++{
2262 ++ int val, err;
2263 ++
2264 ++ /* The datasheet notes to wait at least 1ms after issuing a
2265 ++ * processor intensive operation before checking.
2266 ++ * We cannot use the 'sleep_before_read' parameter of read_poll_timeout
2267 ++ * because that just determines the maximum time slept, not the minimum.
2268 ++ */
2269 ++ usleep_range(1000, 5000);
2270 ++
2271 ++ err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
2272 ++ VEND1_GLOBAL_GEN_STAT2, val,
2273 ++ !(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG),
2274 ++ AQR107_OP_IN_PROG_SLEEP,
2275 ++ AQR107_OP_IN_PROG_TIMEOUT, false);
2276 ++ if (err) {
2277 ++ phydev_err(phydev, "timeout: processor-intensive MDIO operation\n");
2278 ++ return err;
2279 ++ }
2280 ++
2281 ++ return 0;
2282 ++}
2283 ++
2284 + static int aqr107_suspend(struct phy_device *phydev)
2285 + {
2286 +- return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
2287 +- MDIO_CTRL1_LPOWER);
2288 ++ int err;
2289 ++
2290 ++ err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
2291 ++ MDIO_CTRL1_LPOWER);
2292 ++ if (err)
2293 ++ return err;
2294 ++
2295 ++ return aqr107_wait_processor_intensive_op(phydev);
2296 + }
2297 +
2298 + static int aqr107_resume(struct phy_device *phydev)
2299 + {
2300 +- return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
2301 +- MDIO_CTRL1_LPOWER);
2302 ++ int err;
2303 ++
2304 ++ err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
2305 ++ MDIO_CTRL1_LPOWER);
2306 ++ if (err)
2307 ++ return err;
2308 ++
2309 ++ return aqr107_wait_processor_intensive_op(phydev);
2310 + }
2311 +
2312 + static int aqr107_probe(struct phy_device *phydev)
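The new aqr107_wait_processor_intensive_op() above sleeps at least 1 ms and then polls VEND1_GLOBAL_GEN_STAT2 until the OP_IN_PROG bit clears, waiting roughly 1 ms between reads with a budget of about 100 ms. A generic poll-until-clear sketch of that shape (illustrative; in the driver the loop is provided by phy_read_mmd_poll_timeout()):

	static int wait_op_done_sketch(int (*read_stat)(void *ctx), void *ctx)
	{
		unsigned int waited_us = 0;

		usleep_range(1000, 5000);                /* minimum settle time */
		for (;;) {
			int val = read_stat(ctx);

			if (val < 0)
				return val;
			if (!(val & BIT(15)))            /* OP_IN_PROG cleared */
				return 0;
			if (waited_us >= 100000)         /* ~100 ms budget */
				return -ETIMEDOUT;
			usleep_range(1000, 2000);
			waited_us += 1000;
		}
	}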
2313 +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
2314 +index 615f3776b4bee..7117d559a32e4 100644
2315 +--- a/drivers/net/team/team.c
2316 ++++ b/drivers/net/team/team.c
2317 +@@ -1270,10 +1270,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
2318 + }
2319 + }
2320 +
2321 +- netif_addr_lock_bh(dev);
2322 +- dev_uc_sync_multiple(port_dev, dev);
2323 +- dev_mc_sync_multiple(port_dev, dev);
2324 +- netif_addr_unlock_bh(dev);
2325 ++ if (dev->flags & IFF_UP) {
2326 ++ netif_addr_lock_bh(dev);
2327 ++ dev_uc_sync_multiple(port_dev, dev);
2328 ++ dev_mc_sync_multiple(port_dev, dev);
2329 ++ netif_addr_unlock_bh(dev);
2330 ++ }
2331 +
2332 + port->index = -1;
2333 + list_add_tail_rcu(&port->list, &team->port_list);
2334 +@@ -1344,8 +1346,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
2335 + netdev_rx_handler_unregister(port_dev);
2336 + team_port_disable_netpoll(port);
2337 + vlan_vids_del_by_dev(port_dev, dev);
2338 +- dev_uc_unsync(port_dev, dev);
2339 +- dev_mc_unsync(port_dev, dev);
2340 ++ if (dev->flags & IFF_UP) {
2341 ++ dev_uc_unsync(port_dev, dev);
2342 ++ dev_mc_unsync(port_dev, dev);
2343 ++ }
2344 + dev_close(port_dev);
2345 + team_port_leave(team, port);
2346 +
2347 +@@ -1695,6 +1699,14 @@ static int team_open(struct net_device *dev)
2348 +
2349 + static int team_close(struct net_device *dev)
2350 + {
2351 ++ struct team *team = netdev_priv(dev);
2352 ++ struct team_port *port;
2353 ++
2354 ++ list_for_each_entry(port, &team->port_list, list) {
2355 ++ dev_uc_unsync(port->dev, dev);
2356 ++ dev_mc_unsync(port->dev, dev);
2357 ++ }
2358 ++
2359 + return 0;
2360 + }
2361 +
2362 +diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
2363 +index d0f3b6d7f4089..5c804bcabfe6b 100644
2364 +--- a/drivers/net/wireguard/netlink.c
2365 ++++ b/drivers/net/wireguard/netlink.c
2366 +@@ -436,14 +436,13 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs)
2367 + if (attrs[WGPEER_A_ENDPOINT]) {
2368 + struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]);
2369 + size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]);
2370 ++ struct endpoint endpoint = { { { 0 } } };
2371 +
2372 +- if ((len == sizeof(struct sockaddr_in) &&
2373 +- addr->sa_family == AF_INET) ||
2374 +- (len == sizeof(struct sockaddr_in6) &&
2375 +- addr->sa_family == AF_INET6)) {
2376 +- struct endpoint endpoint = { { { 0 } } };
2377 +-
2378 +- memcpy(&endpoint.addr, addr, len);
2379 ++ if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) {
2380 ++ endpoint.addr4 = *(struct sockaddr_in *)addr;
2381 ++ wg_socket_set_peer_endpoint(peer, &endpoint);
2382 ++ } else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) {
2383 ++ endpoint.addr6 = *(struct sockaddr_in6 *)addr;
2384 + wg_socket_set_peer_endpoint(peer, &endpoint);
2385 + }
2386 + }
2387 +diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c
2388 +index ba87d294604fe..d4bb40a695ab6 100644
2389 +--- a/drivers/net/wireguard/selftest/ratelimiter.c
2390 ++++ b/drivers/net/wireguard/selftest/ratelimiter.c
2391 +@@ -6,29 +6,28 @@
2392 + #ifdef DEBUG
2393 +
2394 + #include <linux/jiffies.h>
2395 +-#include <linux/hrtimer.h>
2396 +
2397 + static const struct {
2398 + bool result;
2399 +- u64 nsec_to_sleep_before;
2400 ++ unsigned int msec_to_sleep_before;
2401 + } expected_results[] __initconst = {
2402 + [0 ... PACKETS_BURSTABLE - 1] = { true, 0 },
2403 + [PACKETS_BURSTABLE] = { false, 0 },
2404 +- [PACKETS_BURSTABLE + 1] = { true, NSEC_PER_SEC / PACKETS_PER_SECOND },
2405 ++ [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND },
2406 + [PACKETS_BURSTABLE + 2] = { false, 0 },
2407 +- [PACKETS_BURSTABLE + 3] = { true, (NSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
2408 ++ [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
2409 + [PACKETS_BURSTABLE + 4] = { true, 0 },
2410 + [PACKETS_BURSTABLE + 5] = { false, 0 }
2411 + };
2412 +
2413 + static __init unsigned int maximum_jiffies_at_index(int index)
2414 + {
2415 +- u64 total_nsecs = 2 * NSEC_PER_SEC / PACKETS_PER_SECOND / 3;
2416 ++ unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3;
2417 + int i;
2418 +
2419 + for (i = 0; i <= index; ++i)
2420 +- total_nsecs += expected_results[i].nsec_to_sleep_before;
2421 +- return nsecs_to_jiffies(total_nsecs);
2422 ++ total_msecs += expected_results[i].msec_to_sleep_before;
2423 ++ return msecs_to_jiffies(total_msecs);
2424 + }
2425 +
2426 + static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
2427 +@@ -43,12 +42,8 @@ static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
2428 + loop_start_time = jiffies;
2429 +
2430 + for (i = 0; i < ARRAY_SIZE(expected_results); ++i) {
2431 +- if (expected_results[i].nsec_to_sleep_before) {
2432 +- ktime_t timeout = ktime_add(ktime_add_ns(ktime_get_coarse_boottime(), TICK_NSEC * 4 / 3),
2433 +- ns_to_ktime(expected_results[i].nsec_to_sleep_before));
2434 +- set_current_state(TASK_UNINTERRUPTIBLE);
2435 +- schedule_hrtimeout_range_clock(&timeout, 0, HRTIMER_MODE_ABS, CLOCK_BOOTTIME);
2436 +- }
2437 ++ if (expected_results[i].msec_to_sleep_before)
2438 ++ msleep(expected_results[i].msec_to_sleep_before);
2439 +
2440 + if (time_is_before_jiffies(loop_start_time +
2441 + maximum_jiffies_at_index(i)))
2442 +@@ -132,7 +127,7 @@ bool __init wg_ratelimiter_selftest(void)
2443 + if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN))
2444 + return true;
2445 +
2446 +- BUILD_BUG_ON(NSEC_PER_SEC % PACKETS_PER_SECOND != 0);
2447 ++ BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0);
2448 +
2449 + if (wg_ratelimiter_init())
2450 + goto out;
2451 +@@ -172,7 +167,7 @@ bool __init wg_ratelimiter_selftest(void)
2452 + ++test;
2453 + #endif
2454 +
2455 +- for (trials = TRIALS_BEFORE_GIVING_UP;;) {
2456 ++ for (trials = TRIALS_BEFORE_GIVING_UP; IS_ENABLED(DEBUG_RATELIMITER_TIMINGS);) {
2457 + int test_count = 0, ret;
2458 +
2459 + ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count);
2460 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
2461 +index 1465a92ea3fc9..b26617026e831 100644
2462 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
2463 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
2464 +@@ -950,7 +950,7 @@ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid)
2465 + offset %= 32;
2466 +
2467 + val = mt76_rr(dev, addr);
2468 +- val >>= (tid % 32);
2469 ++ val >>= offset;
2470 +
2471 + if (offset > 20) {
2472 + addr += 4;
2473 +diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
2474 +index dc78a523a69f2..b6b938aa66158 100644
2475 +--- a/drivers/s390/block/dasd_alias.c
2476 ++++ b/drivers/s390/block/dasd_alias.c
2477 +@@ -675,12 +675,12 @@ int dasd_alias_remove_device(struct dasd_device *device)
2478 + struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
2479 + {
2480 + struct dasd_eckd_private *alias_priv, *private = base_device->private;
2481 +- struct alias_pav_group *group = private->pavgroup;
2482 + struct alias_lcu *lcu = private->lcu;
2483 + struct dasd_device *alias_device;
2484 ++ struct alias_pav_group *group;
2485 + unsigned long flags;
2486 +
2487 +- if (!group || !lcu)
2488 ++ if (!lcu)
2489 + return NULL;
2490 + if (lcu->pav == NO_PAV ||
2491 + lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
2492 +@@ -697,6 +697,11 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
2493 + }
2494 +
2495 + spin_lock_irqsave(&lcu->lock, flags);
2496 ++ group = private->pavgroup;
2497 ++ if (!group) {
2498 ++ spin_unlock_irqrestore(&lcu->lock, flags);
2499 ++ return NULL;
2500 ++ }
2501 + alias_device = group->next;
2502 + if (!alias_device) {
2503 + if (list_empty(&group->aliaslist)) {
2504 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
2505 +index 3153f164554aa..c1b76cda60dbc 100644
2506 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
2507 ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
2508 +@@ -2822,23 +2822,22 @@ static int
2509 + _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
2510 + {
2511 + struct sysinfo s;
2512 +- int dma_mask;
2513 +
2514 + if (ioc->is_mcpu_endpoint ||
2515 + sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
2516 +- dma_get_required_mask(&pdev->dev) <= 32)
2517 +- dma_mask = 32;
2518 ++ dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32))
2519 ++ ioc->dma_mask = 32;
2520 + /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
2521 + else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
2522 +- dma_mask = 63;
2523 ++ ioc->dma_mask = 63;
2524 + else
2525 +- dma_mask = 64;
2526 ++ ioc->dma_mask = 64;
2527 +
2528 +- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
2529 +- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)))
2530 ++ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)) ||
2531 ++ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)))
2532 + return -ENODEV;
2533 +
2534 +- if (dma_mask > 32) {
2535 ++ if (ioc->dma_mask > 32) {
2536 + ioc->base_add_sg_single = &_base_add_sg_single_64;
2537 + ioc->sge_size = sizeof(Mpi2SGESimple64_t);
2538 + } else {
2539 +@@ -2848,7 +2847,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
2540 +
2541 + si_meminfo(&s);
2542 + ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
2543 +- dma_mask, convert_to_kb(s.totalram));
2544 ++ ioc->dma_mask, convert_to_kb(s.totalram));
2545 +
2546 + return 0;
2547 + }
2548 +@@ -4902,10 +4901,10 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
2549 + dma_pool_free(ioc->pcie_sgl_dma_pool,
2550 + ioc->pcie_sg_lookup[i].pcie_sgl,
2551 + ioc->pcie_sg_lookup[i].pcie_sgl_dma);
2552 ++ ioc->pcie_sg_lookup[i].pcie_sgl = NULL;
2553 + }
2554 + dma_pool_destroy(ioc->pcie_sgl_dma_pool);
2555 + }
2556 +-
2557 + if (ioc->config_page) {
2558 + dexitprintk(ioc,
2559 + ioc_info(ioc, "config_page(0x%p): free\n",
2560 +@@ -4960,6 +4959,89 @@ mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz)
2561 + return 0;
2562 + }
2563 +
2564 ++/**
2565 ++ * _base_reduce_hba_queue_depth- Retry with reduced queue depth
2566 ++ * @ioc: Adapter object
2567 ++ *
2568 ++ * Return: 0 for success, non-zero for failure.
2569 ++ **/
2570 ++static inline int
2571 ++_base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc)
2572 ++{
2573 ++ int reduce_sz = 64;
2574 ++
2575 ++ if ((ioc->hba_queue_depth - reduce_sz) >
2576 ++ (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
2577 ++ ioc->hba_queue_depth -= reduce_sz;
2578 ++ return 0;
2579 ++ } else
2580 ++ return -ENOMEM;
2581 ++}
2582 ++
2583 ++/**
2584 ++ * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory
2585 ++ * for pcie sgl pools.
2586 ++ * @ioc: Adapter object
2587 ++ * @sz: DMA Pool size
2588 ++ * @ct: Chain tracker
2589 ++ * Return: 0 for success, non-zero for failure.
2590 ++ */
2591 ++
2592 ++static int
2593 ++_base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
2594 ++{
2595 ++ int i = 0, j = 0;
2596 ++ struct chain_tracker *ct;
2597 ++
2598 ++ ioc->pcie_sgl_dma_pool =
2599 ++ dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz,
2600 ++ ioc->page_size, 0);
2601 ++ if (!ioc->pcie_sgl_dma_pool) {
2602 ++ ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
2603 ++ return -ENOMEM;
2604 ++ }
2605 ++
2606 ++ ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
2607 ++ ioc->chains_per_prp_buffer =
2608 ++ min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io);
2609 ++ for (i = 0; i < ioc->scsiio_depth; i++) {
2610 ++ ioc->pcie_sg_lookup[i].pcie_sgl =
2611 ++ dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL,
2612 ++ &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
2613 ++ if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
2614 ++ ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
2615 ++ return -EAGAIN;
2616 ++ }
2617 ++
2618 ++ if (!mpt3sas_check_same_4gb_region(
2619 ++ (long)ioc->pcie_sg_lookup[i].pcie_sgl, sz)) {
2620 ++ ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
2621 ++ ioc->pcie_sg_lookup[i].pcie_sgl,
2622 ++ (unsigned long long)
2623 ++ ioc->pcie_sg_lookup[i].pcie_sgl_dma);
2624 ++ ioc->use_32bit_dma = true;
2625 ++ return -EAGAIN;
2626 ++ }
2627 ++
2628 ++ for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
2629 ++ ct = &ioc->chain_lookup[i].chains_per_smid[j];
2630 ++ ct->chain_buffer =
2631 ++ ioc->pcie_sg_lookup[i].pcie_sgl +
2632 ++ (j * ioc->chain_segment_sz);
2633 ++ ct->chain_buffer_dma =
2634 ++ ioc->pcie_sg_lookup[i].pcie_sgl_dma +
2635 ++ (j * ioc->chain_segment_sz);
2636 ++ }
2637 ++ }
2638 ++ dinitprintk(ioc, ioc_info(ioc,
2639 ++ "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
2640 ++ ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
2641 ++ dinitprintk(ioc, ioc_info(ioc,
2642 ++ "Number of chains can fit in a PRP page(%d)\n",
2643 ++ ioc->chains_per_prp_buffer));
2644 ++ return 0;
2645 ++}
2646 ++
2647 + /**
2648 + * base_alloc_rdpq_dma_pool - Allocating DMA'able memory
2649 + * for reply queues.
2650 +@@ -5058,7 +5140,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
2651 + unsigned short sg_tablesize;
2652 + u16 sge_size;
2653 + int i, j;
2654 +- int ret = 0;
2655 ++ int ret = 0, rc = 0;
2656 + struct chain_tracker *ct;
2657 +
2658 + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
2659 +@@ -5357,6 +5439,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
2660 + * be required for NVMe PRP's, only each set of NVMe blocks will be
2661 + * contiguous, so a new set is allocated for each possible I/O.
2662 + */
2663 ++
2664 + ioc->chains_per_prp_buffer = 0;
2665 + if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
2666 + nvme_blocks_needed =
2667 +@@ -5371,43 +5454,11 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
2668 + goto out;
2669 + }
2670 + sz = nvme_blocks_needed * ioc->page_size;
2671 +- ioc->pcie_sgl_dma_pool =
2672 +- dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
2673 +- if (!ioc->pcie_sgl_dma_pool) {
2674 +- ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
2675 +- goto out;
2676 +- }
2677 +-
2678 +- ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
2679 +- ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
2680 +- ioc->chains_needed_per_io);
2681 +-
2682 +- for (i = 0; i < ioc->scsiio_depth; i++) {
2683 +- ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
2684 +- ioc->pcie_sgl_dma_pool, GFP_KERNEL,
2685 +- &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
2686 +- if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
2687 +- ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
2688 +- goto out;
2689 +- }
2690 +- for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
2691 +- ct = &ioc->chain_lookup[i].chains_per_smid[j];
2692 +- ct->chain_buffer =
2693 +- ioc->pcie_sg_lookup[i].pcie_sgl +
2694 +- (j * ioc->chain_segment_sz);
2695 +- ct->chain_buffer_dma =
2696 +- ioc->pcie_sg_lookup[i].pcie_sgl_dma +
2697 +- (j * ioc->chain_segment_sz);
2698 +- }
2699 +- }
2700 +-
2701 +- dinitprintk(ioc,
2702 +- ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
2703 +- ioc->scsiio_depth, sz,
2704 +- (sz * ioc->scsiio_depth) / 1024));
2705 +- dinitprintk(ioc,
2706 +- ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
2707 +- ioc->chains_per_prp_buffer));
2708 ++ rc = _base_allocate_pcie_sgl_pool(ioc, sz);
2709 ++ if (rc == -ENOMEM)
2710 ++ return -ENOMEM;
2711 ++ else if (rc == -EAGAIN)
2712 ++ goto try_32bit_dma;
2713 + total_sz += sz * ioc->scsiio_depth;
2714 + }
2715 +
2716 +@@ -5577,6 +5628,19 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
2717 + ioc->shost->sg_tablesize);
2718 + return 0;
2719 +
2720 ++try_32bit_dma:
2721 ++ _base_release_memory_pools(ioc);
2722 ++ if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) {
2723 ++ /* Change dma coherent mask to 32 bit and reallocate */
2724 ++ if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
2725 ++ pr_err("Setting 32 bit coherent DMA mask Failed %s\n",
2726 ++ pci_name(ioc->pdev));
2727 ++ return -ENODEV;
2728 ++ }
2729 ++ } else if (_base_reduce_hba_queue_depth(ioc) != 0)
2730 ++ return -ENOMEM;
2731 ++ goto retry_allocation;
2732 ++
2733 + out:
2734 + return -ENOMEM;
2735 + }
2736 +@@ -7239,6 +7303,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
2737 +
2738 + ioc->rdpq_array_enable_assigned = 0;
2739 + ioc->use_32bit_dma = false;
2740 ++ ioc->dma_mask = 64;
2741 + if (ioc->is_aero_ioc)
2742 + ioc->base_readl = &_base_readl_aero;
2743 + else
2744 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
2745 +index bc8beb10f3fc3..823bbe64a477f 100644
2746 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
2747 ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
2748 +@@ -1257,6 +1257,7 @@ struct MPT3SAS_ADAPTER {
2749 + u16 thresh_hold;
2750 + u8 high_iops_queues;
2751 + u32 drv_support_bitmap;
2752 ++ u32 dma_mask;
2753 + bool enable_sdev_max_qd;
2754 + bool use_32bit_dma;
2755 +
2756 +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
2757 +index 602065bfc9bb8..b7872ad3e7622 100644
2758 +--- a/drivers/tty/serial/atmel_serial.c
2759 ++++ b/drivers/tty/serial/atmel_serial.c
2760 +@@ -295,20 +295,16 @@ static int atmel_config_rs485(struct uart_port *port,
2761 +
2762 + mode = atmel_uart_readl(port, ATMEL_US_MR);
2763 +
2764 +- /* Resetting serial mode to RS232 (0x0) */
2765 +- mode &= ~ATMEL_US_USMODE;
2766 +-
2767 +- port->rs485 = *rs485conf;
2768 +-
2769 + if (rs485conf->flags & SER_RS485_ENABLED) {
2770 + dev_dbg(port->dev, "Setting UART to RS485\n");
2771 +- if (port->rs485.flags & SER_RS485_RX_DURING_TX)
2772 ++ if (rs485conf->flags & SER_RS485_RX_DURING_TX)
2773 + atmel_port->tx_done_mask = ATMEL_US_TXRDY;
2774 + else
2775 + atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
2776 +
2777 + atmel_uart_writel(port, ATMEL_US_TTGR,
2778 + rs485conf->delay_rts_after_send);
2779 ++ mode &= ~ATMEL_US_USMODE;
2780 + mode |= ATMEL_US_USMODE_RS485;
2781 + } else {
2782 + dev_dbg(port->dev, "Setting UART to RS232\n");
2783 +diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
2784 +index c2be22c3b7d1b..cda71802b6982 100644
2785 +--- a/drivers/tty/serial/serial-tegra.c
2786 ++++ b/drivers/tty/serial/serial-tegra.c
2787 +@@ -520,7 +520,7 @@ static void tegra_uart_tx_dma_complete(void *args)
2788 + count = tup->tx_bytes_requested - state.residue;
2789 + async_tx_ack(tup->tx_dma_desc);
2790 + spin_lock_irqsave(&tup->uport.lock, flags);
2791 +- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
2792 ++ uart_xmit_advance(&tup->uport, count);
2793 + tup->tx_in_progress = 0;
2794 + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
2795 + uart_write_wakeup(&tup->uport);
2796 +@@ -608,7 +608,6 @@ static unsigned int tegra_uart_tx_empty(struct uart_port *u)
2797 + static void tegra_uart_stop_tx(struct uart_port *u)
2798 + {
2799 + struct tegra_uart_port *tup = to_tegra_uport(u);
2800 +- struct circ_buf *xmit = &tup->uport.state->xmit;
2801 + struct dma_tx_state state;
2802 + unsigned int count;
2803 +
2804 +@@ -619,7 +618,7 @@ static void tegra_uart_stop_tx(struct uart_port *u)
2805 + dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
2806 + count = tup->tx_bytes_requested - state.residue;
2807 + async_tx_ack(tup->tx_dma_desc);
2808 +- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
2809 ++ uart_xmit_advance(&tup->uport, count);
2810 + tup->tx_in_progress = 0;
2811 + }
2812 +
2813 +diff --git a/drivers/tty/serial/tegra-tcu.c b/drivers/tty/serial/tegra-tcu.c
2814 +index aaf8748a61479..31ae705aa38b7 100644
2815 +--- a/drivers/tty/serial/tegra-tcu.c
2816 ++++ b/drivers/tty/serial/tegra-tcu.c
2817 +@@ -101,7 +101,7 @@ static void tegra_tcu_uart_start_tx(struct uart_port *port)
2818 + break;
2819 +
2820 + tegra_tcu_write(tcu, &xmit->buf[xmit->tail], count);
2821 +- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
2822 ++ uart_xmit_advance(port, count);
2823 + }
2824 +
2825 + uart_write_wakeup(port);
2826 +diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
2827 +index c6fc14b169dac..e3a8b6c71aa1d 100644
2828 +--- a/drivers/usb/cdns3/gadget.c
2829 ++++ b/drivers/usb/cdns3/gadget.c
2830 +@@ -1531,7 +1531,8 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
2831 + TRB_LEN(le32_to_cpu(trb->length));
2832 +
2833 + if (priv_req->num_of_trb > 1 &&
2834 +- le32_to_cpu(trb->control) & TRB_SMM)
2835 ++ le32_to_cpu(trb->control) & TRB_SMM &&
2836 ++ le32_to_cpu(trb->control) & TRB_CHAIN)
2837 + transfer_end = true;
2838 +
2839 + cdns3_ep_inc_deq(priv_ep);
2840 +@@ -1691,6 +1692,7 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
2841 + ep_cfg &= ~EP_CFG_ENABLE;
2842 + writel(ep_cfg, &priv_dev->regs->ep_cfg);
2843 + priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
2844 ++ priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
2845 + }
2846 + cdns3_transfer_completed(priv_dev, priv_ep);
2847 + } else if (!(priv_ep->flags & EP_STALLED) &&
2848 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2849 +index 53b3d77fba6a2..f2a3c0b5b535d 100644
2850 +--- a/drivers/usb/core/hub.c
2851 ++++ b/drivers/usb/core/hub.c
2852 +@@ -5968,7 +5968,7 @@ re_enumerate_no_bos:
2853 + *
2854 + * Return: The same as for usb_reset_and_verify_device().
2855 + * However, if a reset is already in progress (for instance, if a
2856 +- * driver doesn't have pre_ or post_reset() callbacks, and while
2857 ++ * driver doesn't have pre_reset() or post_reset() callbacks, and while
2858 + * being unbound or re-bound during the ongoing reset its disconnect()
2859 + * or probe() routine tries to perform a second, nested reset), the
2860 + * routine returns -EINPROGRESS.
2861 +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
2862 +index 5aae7504f78a1..4a0eec1765118 100644
2863 +--- a/drivers/usb/dwc3/core.c
2864 ++++ b/drivers/usb/dwc3/core.c
2865 +@@ -114,8 +114,6 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
2866 + dwc->current_dr_role = mode;
2867 + }
2868 +
2869 +-static int dwc3_core_soft_reset(struct dwc3 *dwc);
2870 +-
2871 + static void __dwc3_set_mode(struct work_struct *work)
2872 + {
2873 + struct dwc3 *dwc = work_to_dwc(work);
2874 +@@ -265,7 +263,7 @@ u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
2875 + * dwc3_core_soft_reset - Issues core soft reset and PHY reset
2876 + * @dwc: pointer to our context structure
2877 + */
2878 +-static int dwc3_core_soft_reset(struct dwc3 *dwc)
2879 ++int dwc3_core_soft_reset(struct dwc3 *dwc)
2880 + {
2881 + u32 reg;
2882 + int retries = 1000;
2883 +diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
2884 +index 79e1b82e5e057..cbebe541f7e8f 100644
2885 +--- a/drivers/usb/dwc3/core.h
2886 ++++ b/drivers/usb/dwc3/core.h
2887 +@@ -1010,6 +1010,7 @@ struct dwc3_scratchpad_array {
2888 + * @tx_max_burst_prd: max periodic ESS transmit burst size
2889 + * @hsphy_interface: "utmi" or "ulpi"
2890 + * @connected: true when we're connected to a host, false otherwise
2891 ++ * @softconnect: true when gadget connect is called, false when disconnect runs
2892 + * @delayed_status: true when gadget driver asks for delayed status
2893 + * @ep0_bounced: true when we used bounce buffer
2894 + * @ep0_expect_in: true when we expect a DATA IN transfer
2895 +@@ -1218,6 +1219,7 @@ struct dwc3 {
2896 + const char *hsphy_interface;
2897 +
2898 + unsigned connected:1;
2899 ++ unsigned softconnect:1;
2900 + unsigned delayed_status:1;
2901 + unsigned ep0_bounced:1;
2902 + unsigned ep0_expect_in:1;
2903 +@@ -1456,6 +1458,8 @@ bool dwc3_has_imod(struct dwc3 *dwc);
2904 + int dwc3_event_buffers_setup(struct dwc3 *dwc);
2905 + void dwc3_event_buffers_cleanup(struct dwc3 *dwc);
2906 +
2907 ++int dwc3_core_soft_reset(struct dwc3 *dwc);
2908 ++
2909 + #if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
2910 + int dwc3_host_init(struct dwc3 *dwc);
2911 + void dwc3_host_exit(struct dwc3 *dwc);
2912 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
2913 +index a2a10c05ef3fb..41ed2f6f8a8d0 100644
2914 +--- a/drivers/usb/dwc3/gadget.c
2915 ++++ b/drivers/usb/dwc3/gadget.c
2916 +@@ -2120,14 +2120,42 @@ static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
2917 + static void __dwc3_gadget_stop(struct dwc3 *dwc);
2918 + static int __dwc3_gadget_start(struct dwc3 *dwc);
2919 +
2920 ++static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
2921 ++{
2922 ++ unsigned long flags;
2923 ++
2924 ++ spin_lock_irqsave(&dwc->lock, flags);
2925 ++ dwc->connected = false;
2926 ++
2927 ++ /*
2928 ++ * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
2929 ++ * Section 4.1.8 Table 4-7, it states that for a device-initiated
2930 ++ * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
2931 ++ * command for any active transfers" before clearing the RunStop
2932 ++ * bit.
2933 ++ */
2934 ++ dwc3_stop_active_transfers(dwc);
2935 ++ __dwc3_gadget_stop(dwc);
2936 ++ spin_unlock_irqrestore(&dwc->lock, flags);
2937 ++
2938 ++ /*
2939 ++ * Note: if the GEVNTCOUNT indicates events in the event buffer, the
2940 ++ * driver needs to acknowledge them before the controller can halt.
2941 ++ * Simply let the interrupt handler acknowledge and handle the
2942 ++ * remaining events generated by the controller while polling for
2943 ++ * DSTS.DEVCTLHLT.
2944 ++ */
2945 ++ return dwc3_gadget_run_stop(dwc, false, false);
2946 ++}
2947 ++
2948 + static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
2949 + {
2950 + struct dwc3 *dwc = gadget_to_dwc(g);
2951 +- unsigned long flags;
2952 + int ret;
2953 +
2954 + is_on = !!is_on;
2955 +
2956 ++ dwc->softconnect = is_on;
2957 + /*
2958 + * Per databook, when we want to stop the gadget, if a control transfer
2959 + * is still in process, complete it and get the core into setup phase.
2960 +@@ -2163,50 +2191,27 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
2961 + return 0;
2962 + }
2963 +
2964 +- /*
2965 +- * Synchronize and disable any further event handling while controller
2966 +- * is being enabled/disabled.
2967 +- */
2968 +- disable_irq(dwc->irq_gadget);
2969 +-
2970 +- spin_lock_irqsave(&dwc->lock, flags);
2971 ++ if (dwc->pullups_connected == is_on) {
2972 ++ pm_runtime_put(dwc->dev);
2973 ++ return 0;
2974 ++ }
2975 +
2976 + if (!is_on) {
2977 +- u32 count;
2978 +-
2979 +- dwc->connected = false;
2980 ++ ret = dwc3_gadget_soft_disconnect(dwc);
2981 ++ } else {
2982 + /*
2983 +- * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
2984 +- * Section 4.1.8 Table 4-7, it states that for a device-initiated
2985 +- * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
2986 +- * command for any active transfers" before clearing the RunStop
2987 +- * bit.
2988 ++ * In the Synopsys DWC_usb31 1.90a programming guide section
2989 ++ * 4.1.9, it specifies that a reconnect after a
2990 ++ * device-initiated disconnect requires a core soft reset
2991 ++ * (DCTL.CSftRst) before enabling the run/stop bit.
2992 + */
2993 +- dwc3_stop_active_transfers(dwc);
2994 +- __dwc3_gadget_stop(dwc);
2995 ++ dwc3_core_soft_reset(dwc);
2996 +
2997 +- /*
2998 +- * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
2999 +- * Section 1.3.4, it mentions that for the DEVCTRLHLT bit, the
3000 +- * "software needs to acknowledge the events that are generated
3001 +- * (by writing to GEVNTCOUNTn) while it is waiting for this bit
3002 +- * to be set to '1'."
3003 +- */
3004 +- count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
3005 +- count &= DWC3_GEVNTCOUNT_MASK;
3006 +- if (count > 0) {
3007 +- dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
3008 +- dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
3009 +- dwc->ev_buf->length;
3010 +- }
3011 +- } else {
3012 ++ dwc3_event_buffers_setup(dwc);
3013 + __dwc3_gadget_start(dwc);
3014 ++ ret = dwc3_gadget_run_stop(dwc, true, false);
3015 + }
3016 +
3017 +- ret = dwc3_gadget_run_stop(dwc, is_on, false);
3018 +- spin_unlock_irqrestore(&dwc->lock, flags);
3019 +- enable_irq(dwc->irq_gadget);
3020 +-
3021 + pm_runtime_put(dwc->dev);
3022 +
3023 + return ret;
3024 +@@ -4048,7 +4053,7 @@ int dwc3_gadget_resume(struct dwc3 *dwc)
3025 + {
3026 + int ret;
3027 +
3028 +- if (!dwc->gadget_driver)
3029 ++ if (!dwc->gadget_driver || !dwc->softconnect)
3030 + return 0;
3031 +
3032 + ret = __dwc3_gadget_start(dwc);
3033 +diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
3034 +index 8950d1f10a7fb..86c4bc9df3b80 100644
3035 +--- a/drivers/usb/host/xhci-mtk-sch.c
3036 ++++ b/drivers/usb/host/xhci-mtk-sch.c
3037 +@@ -25,6 +25,13 @@
3038 + */
3039 + #define TT_MICROFRAMES_MAX 9
3040 +
3041 ++/* schedule error type */
3042 ++#define ESCH_SS_Y6 1001
3043 ++#define ESCH_SS_OVERLAP 1002
3044 ++#define ESCH_CS_OVERFLOW 1003
3045 ++#define ESCH_BW_OVERFLOW 1004
3046 ++#define ESCH_FIXME 1005
3047 ++
3048 + /* mtk scheduler bitmasks */
3049 + #define EP_BPKTS(p) ((p) & 0x7f)
3050 + #define EP_BCSCOUNT(p) (((p) & 0x7) << 8)
3051 +@@ -32,6 +39,24 @@
3052 + #define EP_BOFFSET(p) ((p) & 0x3fff)
3053 + #define EP_BREPEAT(p) (((p) & 0x7fff) << 16)
3054 +
3055 ++static char *sch_error_string(int err_num)
3056 ++{
3057 ++ switch (err_num) {
3058 ++ case ESCH_SS_Y6:
3059 ++ return "Can't schedule Start-Split in Y6";
3060 ++ case ESCH_SS_OVERLAP:
3061 ++ return "Can't find a suitable Start-Split location";
3062 ++ case ESCH_CS_OVERFLOW:
3063 ++ return "The last Complete-Split is greater than 7";
3064 ++ case ESCH_BW_OVERFLOW:
3065 ++ return "Bandwidth exceeds the maximum limit";
3066 ++ case ESCH_FIXME:
3067 ++ return "FIXME, to be resolved";
3068 ++ default:
3069 ++ return "Unknown";
3070 ++ }
3071 ++}
3072 ++
3073 + static int is_fs_or_ls(enum usb_device_speed speed)
3074 + {
3075 + return speed == USB_SPEED_FULL || speed == USB_SPEED_LOW;
3076 +@@ -375,7 +400,6 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
3077 + sch_ep->bw_budget_table[j];
3078 + }
3079 + }
3080 +- sch_ep->allocated = used;
3081 + }
3082 +
3083 + static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
3084 +@@ -384,19 +408,20 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
3085 + u32 num_esit, tmp;
3086 + int base;
3087 + int i, j;
3088 ++ u8 uframes = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX);
3089 +
3090 + num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
3091 ++
3092 ++ if (sch_ep->ep_type == INT_IN_EP || sch_ep->ep_type == ISOC_IN_EP)
3093 ++ offset++;
3094 ++
3095 + for (i = 0; i < num_esit; i++) {
3096 + base = offset + i * sch_ep->esit;
3097 +
3098 +- /*
3099 +- * Compared with hs bus, no matter what ep type,
3100 +- * the hub will always delay one uframe to send data
3101 +- */
3102 +- for (j = 0; j < sch_ep->cs_count; j++) {
3103 ++ for (j = 0; j < uframes; j++) {
3104 + tmp = tt->fs_bus_bw[base + j] + sch_ep->bw_cost_per_microframe;
3105 + if (tmp > FS_PAYLOAD_MAX)
3106 +- return -ERANGE;
3107 ++ return -ESCH_BW_OVERFLOW;
3108 + }
3109 + }
3110 +
3111 +@@ -406,15 +431,11 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
3112 + static int check_sch_tt(struct usb_device *udev,
3113 + struct mu3h_sch_ep_info *sch_ep, u32 offset)
3114 + {
3115 +- struct mu3h_sch_tt *tt = sch_ep->sch_tt;
3116 + u32 extra_cs_count;
3117 +- u32 fs_budget_start;
3118 + u32 start_ss, last_ss;
3119 + u32 start_cs, last_cs;
3120 +- int i;
3121 +
3122 + start_ss = offset % 8;
3123 +- fs_budget_start = (start_ss + 1) % 8;
3124 +
3125 + if (sch_ep->ep_type == ISOC_OUT_EP) {
3126 + last_ss = start_ss + sch_ep->cs_count - 1;
3127 +@@ -424,11 +445,7 @@ static int check_sch_tt(struct usb_device *udev,
3128 + * must never schedule Start-Split in Y6
3129 + */
3130 + if (!(start_ss == 7 || last_ss < 6))
3131 +- return -ERANGE;
3132 +-
3133 +- for (i = 0; i < sch_ep->cs_count; i++)
3134 +- if (test_bit(offset + i, tt->ss_bit_map))
3135 +- return -ERANGE;
3136 ++ return -ESCH_SS_Y6;
3137 +
3138 + } else {
3139 + u32 cs_count = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX);
3140 +@@ -438,29 +455,24 @@ static int check_sch_tt(struct usb_device *udev,
3141 + * must never schedule Start-Split in Y6
3142 + */
3143 + if (start_ss == 6)
3144 +- return -ERANGE;
3145 ++ return -ESCH_SS_Y6;
3146 +
3147 + /* one uframe for ss + one uframe for idle */
3148 + start_cs = (start_ss + 2) % 8;
3149 + last_cs = start_cs + cs_count - 1;
3150 +
3151 + if (last_cs > 7)
3152 +- return -ERANGE;
3153 ++ return -ESCH_CS_OVERFLOW;
3154 +
3155 + if (sch_ep->ep_type == ISOC_IN_EP)
3156 + extra_cs_count = (last_cs == 7) ? 1 : 2;
3157 + else /* ep_type : INTR IN / INTR OUT */
3158 +- extra_cs_count = (fs_budget_start == 6) ? 1 : 2;
3159 ++ extra_cs_count = 1;
3160 +
3161 + cs_count += extra_cs_count;
3162 + if (cs_count > 7)
3163 + cs_count = 7; /* HW limit */
3164 +
3165 +- for (i = 0; i < cs_count + 2; i++) {
3166 +- if (test_bit(offset + i, tt->ss_bit_map))
3167 +- return -ERANGE;
3168 +- }
3169 +-
3170 + sch_ep->cs_count = cs_count;
3171 + /* one for ss, the other for idle */
3172 + sch_ep->num_budget_microframes = cs_count + 2;
3173 +@@ -482,28 +494,24 @@ static void update_sch_tt(struct usb_device *udev,
3174 + struct mu3h_sch_tt *tt = sch_ep->sch_tt;
3175 + u32 base, num_esit;
3176 + int bw_updated;
3177 +- int bits;
3178 + int i, j;
3179 ++ int offset = sch_ep->offset;
3180 ++ u8 uframes = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX);
3181 +
3182 + num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
3183 +- bits = (sch_ep->ep_type == ISOC_OUT_EP) ? sch_ep->cs_count : 1;
3184 +
3185 + if (used)
3186 + bw_updated = sch_ep->bw_cost_per_microframe;
3187 + else
3188 + bw_updated = -sch_ep->bw_cost_per_microframe;
3189 +
3190 +- for (i = 0; i < num_esit; i++) {
3191 +- base = sch_ep->offset + i * sch_ep->esit;
3192 ++ if (sch_ep->ep_type == INT_IN_EP || sch_ep->ep_type == ISOC_IN_EP)
3193 ++ offset++;
3194 +
3195 +- for (j = 0; j < bits; j++) {
3196 +- if (used)
3197 +- set_bit(base + j, tt->ss_bit_map);
3198 +- else
3199 +- clear_bit(base + j, tt->ss_bit_map);
3200 +- }
3201 ++ for (i = 0; i < num_esit; i++) {
3202 ++ base = offset + i * sch_ep->esit;
3203 +
3204 +- for (j = 0; j < sch_ep->cs_count; j++)
3205 ++ for (j = 0; j < uframes; j++)
3206 + tt->fs_bus_bw[base + j] += bw_updated;
3207 + }
3208 +
3209 +@@ -513,21 +521,48 @@ static void update_sch_tt(struct usb_device *udev,
3210 + list_del(&sch_ep->tt_endpoint);
3211 + }
3212 +
3213 ++static int load_ep_bw(struct usb_device *udev, struct mu3h_sch_bw_info *sch_bw,
3214 ++ struct mu3h_sch_ep_info *sch_ep, bool loaded)
3215 ++{
3216 ++ if (sch_ep->sch_tt)
3217 ++ update_sch_tt(udev, sch_ep, loaded);
3218 ++
3219 ++ /* update bus bandwidth info */
3220 ++ update_bus_bw(sch_bw, sch_ep, loaded);
3221 ++ sch_ep->allocated = loaded;
3222 ++
3223 ++ return 0;
3224 ++}
3225 ++
3226 ++static u32 get_esit_boundary(struct mu3h_sch_ep_info *sch_ep)
3227 ++{
3228 ++ u32 boundary = sch_ep->esit;
3229 ++
3230 ++ if (sch_ep->sch_tt) { /* LS/FS with TT */
3231 ++ /*
3232 ++ * tune for CS, normally esit >= 8 for FS/LS,
3233 ++ * not add one for other types to avoid access array
3234 ++ * out of boundary
3235 ++ */
3236 ++ if (sch_ep->ep_type == ISOC_OUT_EP && boundary > 1)
3237 ++ boundary--;
3238 ++ }
3239 ++
3240 ++ return boundary;
3241 ++}
3242 ++
3243 + static int check_sch_bw(struct usb_device *udev,
3244 + struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
3245 + {
3246 + u32 offset;
3247 +- u32 esit;
3248 + u32 min_bw;
3249 + u32 min_index;
3250 + u32 worst_bw;
3251 + u32 bw_boundary;
3252 ++ u32 esit_boundary;
3253 + u32 min_num_budget;
3254 + u32 min_cs_count;
3255 +- bool tt_offset_ok = false;
3256 +- int ret;
3257 +-
3258 +- esit = sch_ep->esit;
3259 ++ int ret = 0;
3260 +
3261 + /*
3262 + * Search through all possible schedule microframes.
3263 +@@ -537,16 +572,15 @@ static int check_sch_bw(struct usb_device *udev,
3264 + min_index = 0;
3265 + min_cs_count = sch_ep->cs_count;
3266 + min_num_budget = sch_ep->num_budget_microframes;
3267 +- for (offset = 0; offset < esit; offset++) {
3268 +- if (is_fs_or_ls(udev->speed)) {
3269 ++ esit_boundary = get_esit_boundary(sch_ep);
3270 ++ for (offset = 0; offset < sch_ep->esit; offset++) {
3271 ++ if (sch_ep->sch_tt) {
3272 + ret = check_sch_tt(udev, sch_ep, offset);
3273 + if (ret)
3274 + continue;
3275 +- else
3276 +- tt_offset_ok = true;
3277 + }
3278 +
3279 +- if ((offset + sch_ep->num_budget_microframes) > sch_ep->esit)
3280 ++ if ((offset + sch_ep->num_budget_microframes) > esit_boundary)
3281 + break;
3282 +
3283 + worst_bw = get_max_bw(sch_bw, sch_ep, offset);
3284 +@@ -569,35 +603,21 @@ static int check_sch_bw(struct usb_device *udev,
3285 +
3286 + /* check bandwidth */
3287 + if (min_bw > bw_boundary)
3288 +- return -ERANGE;
3289 ++ return ret ? ret : -ESCH_BW_OVERFLOW;
3290 +
3291 + sch_ep->offset = min_index;
3292 + sch_ep->cs_count = min_cs_count;
3293 + sch_ep->num_budget_microframes = min_num_budget;
3294 +
3295 +- if (is_fs_or_ls(udev->speed)) {
3296 +- /* all offset for tt is not ok*/
3297 +- if (!tt_offset_ok)
3298 +- return -ERANGE;
3299 +-
3300 +- update_sch_tt(udev, sch_ep, 1);
3301 +- }
3302 +-
3303 +- /* update bus bandwidth info */
3304 +- update_bus_bw(sch_bw, sch_ep, 1);
3305 +-
3306 +- return 0;
3307 ++ return load_ep_bw(udev, sch_bw, sch_ep, true);
3308 + }
3309 +
3310 + static void destroy_sch_ep(struct usb_device *udev,
3311 + struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
3312 + {
3313 + /* only release ep bw check passed by check_sch_bw() */
3314 +- if (sch_ep->allocated) {
3315 +- update_bus_bw(sch_bw, sch_ep, 0);
3316 +- if (sch_ep->sch_tt)
3317 +- update_sch_tt(udev, sch_ep, 0);
3318 +- }
3319 ++ if (sch_ep->allocated)
3320 ++ load_ep_bw(udev, sch_bw, sch_ep, false);
3321 +
3322 + if (sch_ep->sch_tt)
3323 + drop_tt(udev);
3324 +@@ -760,7 +780,8 @@ int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
3325 +
3326 + ret = check_sch_bw(udev, sch_bw, sch_ep);
3327 + if (ret) {
3328 +- xhci_err(xhci, "Not enough bandwidth!\n");
3329 ++ xhci_err(xhci, "Not enough bandwidth! (%s)\n",
3330 ++ sch_error_string(-ret));
3331 + return -ENOSPC;
3332 + }
3333 + }
3334 +diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
3335 +index 2fc0568ba054e..3e2c607b5d64c 100644
3336 +--- a/drivers/usb/host/xhci-mtk.h
3337 ++++ b/drivers/usb/host/xhci-mtk.h
3338 +@@ -20,14 +20,12 @@
3339 + #define XHCI_MTK_MAX_ESIT 64
3340 +
3341 + /**
3342 +- * @ss_bit_map: used to avoid start split microframes overlay
3343 + * @fs_bus_bw: array to keep track of bandwidth already used for FS
3344 + * @ep_list: Endpoints using this TT
3345 + * @usb_tt: usb TT related
3346 + * @tt_port: TT port number
3347 + */
3348 + struct mu3h_sch_tt {
3349 +- DECLARE_BITMAP(ss_bit_map, XHCI_MTK_MAX_ESIT);
3350 + u32 fs_bus_bw[XHCI_MTK_MAX_ESIT];
3351 + struct list_head ep_list;
3352 + struct usb_tt *usb_tt;
3353 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3354 +index 211e03a204072..eea3dd18a044c 100644
3355 +--- a/drivers/usb/serial/option.c
3356 ++++ b/drivers/usb/serial/option.c
3357 +@@ -256,6 +256,7 @@ static void option_instat_callback(struct urb *urb);
3358 + #define QUECTEL_PRODUCT_EM060K 0x030b
3359 + #define QUECTEL_PRODUCT_EM12 0x0512
3360 + #define QUECTEL_PRODUCT_RM500Q 0x0800
3361 ++#define QUECTEL_PRODUCT_RM520N 0x0801
3362 + #define QUECTEL_PRODUCT_EC200S_CN 0x6002
3363 + #define QUECTEL_PRODUCT_EC200T 0x6026
3364 + #define QUECTEL_PRODUCT_RM500K 0x7001
3365 +@@ -1138,6 +1139,8 @@ static const struct usb_device_id option_ids[] = {
3366 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
3367 + .driver_info = NUMEP2 },
3368 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
3369 ++ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0203, 0xff), /* BG95-M3 */
3370 ++ .driver_info = ZLP },
3371 + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
3372 + .driver_info = RSVD(4) },
3373 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
3374 +@@ -1159,6 +1162,9 @@ static const struct usb_device_id option_ids[] = {
3375 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
3376 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
3377 + .driver_info = ZLP },
3378 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
3379 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
3380 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
3381 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
3382 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
3383 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
3384 +diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
3385 +index acdef6fbb85e0..80daa70e288b0 100644
3386 +--- a/drivers/usb/typec/mux/intel_pmc_mux.c
3387 ++++ b/drivers/usb/typec/mux/intel_pmc_mux.c
3388 +@@ -83,8 +83,6 @@ enum {
3389 + /*
3390 + * Input Output Manager (IOM) PORT STATUS
3391 + */
3392 +-#define IOM_PORT_STATUS_OFFSET 0x560
3393 +-
3394 + #define IOM_PORT_STATUS_ACTIVITY_TYPE_MASK GENMASK(9, 6)
3395 + #define IOM_PORT_STATUS_ACTIVITY_TYPE_SHIFT 6
3396 + #define IOM_PORT_STATUS_ACTIVITY_TYPE_USB 0x03
3397 +@@ -144,6 +142,7 @@ struct pmc_usb {
3398 + struct pmc_usb_port *port;
3399 + struct acpi_device *iom_adev;
3400 + void __iomem *iom_base;
3401 ++ u32 iom_port_status_offset;
3402 + };
3403 +
3404 + static void update_port_status(struct pmc_usb_port *port)
3405 +@@ -153,7 +152,8 @@ static void update_port_status(struct pmc_usb_port *port)
3406 + /* SoC expects the USB Type-C port numbers to start with 0 */
3407 + port_num = port->usb3_port - 1;
3408 +
3409 +- port->iom_status = readl(port->pmc->iom_base + IOM_PORT_STATUS_OFFSET +
3410 ++ port->iom_status = readl(port->pmc->iom_base +
3411 ++ port->pmc->iom_port_status_offset +
3412 + port_num * sizeof(u32));
3413 + }
3414 +
3415 +@@ -541,19 +541,42 @@ err_unregister_switch:
3416 +
3417 + static int is_memory(struct acpi_resource *res, void *data)
3418 + {
3419 +- struct resource r;
3420 ++ struct resource_win win = {};
3421 ++ struct resource *r = &win.res;
3422 +
3423 +- return !acpi_dev_resource_memory(res, &r);
3424 ++ return !(acpi_dev_resource_memory(res, r) ||
3425 ++ acpi_dev_resource_address_space(res, &win));
3426 + }
3427 +
3428 ++/* IOM ACPI IDs and IOM_PORT_STATUS_OFFSET */
3429 ++static const struct acpi_device_id iom_acpi_ids[] = {
3430 ++ /* TigerLake */
3431 ++ { "INTC1072", 0x560, },
3432 ++
3433 ++ /* AlderLake */
3434 ++ { "INTC1079", 0x160, },
3435 ++
3436 ++ /* Meteor Lake */
3437 ++ { "INTC107A", 0x160, },
3438 ++ {}
3439 ++};
3440 ++
3441 + static int pmc_usb_probe_iom(struct pmc_usb *pmc)
3442 + {
3443 + struct list_head resource_list;
3444 + struct resource_entry *rentry;
3445 +- struct acpi_device *adev;
3446 ++ static const struct acpi_device_id *dev_id;
3447 ++ struct acpi_device *adev = NULL;
3448 + int ret;
3449 +
3450 +- adev = acpi_dev_get_first_match_dev("INTC1072", NULL, -1);
3451 ++ for (dev_id = &iom_acpi_ids[0]; dev_id->id[0]; dev_id++) {
3452 ++ if (acpi_dev_present(dev_id->id, NULL, -1)) {
3453 ++ pmc->iom_port_status_offset = (u32)dev_id->driver_data;
3454 ++ adev = acpi_dev_get_first_match_dev(dev_id->id, NULL, -1);
3455 ++ break;
3456 ++ }
3457 ++ }
3458 ++
3459 + if (!adev)
3460 + return -ENODEV;
3461 +
3462 +diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
3463 +index fbd438e9b9b03..ce50ca9a320c7 100644
3464 +--- a/drivers/vfio/vfio_iommu_type1.c
3465 ++++ b/drivers/vfio/vfio_iommu_type1.c
3466 +@@ -98,6 +98,12 @@ struct vfio_dma {
3467 + unsigned long *bitmap;
3468 + };
3469 +
3470 ++struct vfio_batch {
3471 ++ struct page **pages; /* for pin_user_pages_remote */
3472 ++ struct page *fallback_page; /* if pages alloc fails */
3473 ++ int capacity; /* length of pages array */
3474 ++};
3475 ++
3476 + struct vfio_group {
3477 + struct iommu_group *iommu_group;
3478 + struct list_head next;
3479 +@@ -428,6 +434,31 @@ static int put_pfn(unsigned long pfn, int prot)
3480 + return 0;
3481 + }
3482 +
3483 ++#define VFIO_BATCH_MAX_CAPACITY (PAGE_SIZE / sizeof(struct page *))
3484 ++
3485 ++static void vfio_batch_init(struct vfio_batch *batch)
3486 ++{
3487 ++ if (unlikely(disable_hugepages))
3488 ++ goto fallback;
3489 ++
3490 ++ batch->pages = (struct page **) __get_free_page(GFP_KERNEL);
3491 ++ if (!batch->pages)
3492 ++ goto fallback;
3493 ++
3494 ++ batch->capacity = VFIO_BATCH_MAX_CAPACITY;
3495 ++ return;
3496 ++
3497 ++fallback:
3498 ++ batch->pages = &batch->fallback_page;
3499 ++ batch->capacity = 1;
3500 ++}
3501 ++
3502 ++static void vfio_batch_fini(struct vfio_batch *batch)
3503 ++{
3504 ++ if (batch->capacity == VFIO_BATCH_MAX_CAPACITY)
3505 ++ free_page((unsigned long)batch->pages);
3506 ++}
3507 ++
3508 + static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
3509 + unsigned long vaddr, unsigned long *pfn,
3510 + bool write_fault)
3511 +@@ -464,10 +495,14 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
3512 + return ret;
3513 + }
3514 +
3515 +-static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
3516 +- int prot, unsigned long *pfn)
3517 ++/*
3518 ++ * Returns the positive number of pfns successfully obtained or a negative
3519 ++ * error code.
3520 ++ */
3521 ++static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
3522 ++ long npages, int prot, unsigned long *pfn,
3523 ++ struct page **pages)
3524 + {
3525 +- struct page *page[1];
3526 + struct vm_area_struct *vma;
3527 + unsigned int flags = 0;
3528 + int ret;
3529 +@@ -476,11 +511,22 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
3530 + flags |= FOLL_WRITE;
3531 +
3532 + mmap_read_lock(mm);
3533 +- ret = pin_user_pages_remote(mm, vaddr, 1, flags | FOLL_LONGTERM,
3534 +- page, NULL, NULL);
3535 +- if (ret == 1) {
3536 +- *pfn = page_to_pfn(page[0]);
3537 +- ret = 0;
3538 ++ ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM,
3539 ++ pages, NULL, NULL);
3540 ++ if (ret > 0) {
3541 ++ int i;
3542 ++
3543 ++ /*
3544 ++ * The zero page is always resident, we don't need to pin it
3545 ++ * and it falls into our invalid/reserved test so we don't
3546 ++ * unpin in put_pfn(). Unpin all zero pages in the batch here.
3547 ++ */
3548 ++ for (i = 0 ; i < ret; i++) {
3549 ++ if (unlikely(is_zero_pfn(page_to_pfn(pages[i]))))
3550 ++ unpin_user_page(pages[i]);
3551 ++ }
3552 ++
3553 ++ *pfn = page_to_pfn(pages[0]);
3554 + goto done;
3555 + }
3556 +
3557 +@@ -494,8 +540,12 @@ retry:
3558 + if (ret == -EAGAIN)
3559 + goto retry;
3560 +
3561 +- if (!ret && !is_invalid_reserved_pfn(*pfn))
3562 +- ret = -EFAULT;
3563 ++ if (!ret) {
3564 ++ if (is_invalid_reserved_pfn(*pfn))
3565 ++ ret = 1;
3566 ++ else
3567 ++ ret = -EFAULT;
3568 ++ }
3569 + }
3570 + done:
3571 + mmap_read_unlock(mm);
3572 +@@ -509,7 +559,7 @@ done:
3573 + */
3574 + static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
3575 + long npage, unsigned long *pfn_base,
3576 +- unsigned long limit)
3577 ++ unsigned long limit, struct vfio_batch *batch)
3578 + {
3579 + unsigned long pfn = 0;
3580 + long ret, pinned = 0, lock_acct = 0;
3581 +@@ -520,8 +570,9 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
3582 + if (!current->mm)
3583 + return -ENODEV;
3584 +
3585 +- ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base);
3586 +- if (ret)
3587 ++ ret = vaddr_get_pfns(current->mm, vaddr, 1, dma->prot, pfn_base,
3588 ++ batch->pages);
3589 ++ if (ret < 0)
3590 + return ret;
3591 +
3592 + pinned++;
3593 +@@ -547,8 +598,9 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
3594 + /* Lock all the consecutive pages from pfn_base */
3595 + for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
3596 + pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
3597 +- ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
3598 +- if (ret)
3599 ++ ret = vaddr_get_pfns(current->mm, vaddr, 1, dma->prot, &pfn,
3600 ++ batch->pages);
3601 ++ if (ret < 0)
3602 + break;
3603 +
3604 + if (pfn != *pfn_base + pinned ||
3605 +@@ -574,7 +626,7 @@ out:
3606 + ret = vfio_lock_acct(dma, lock_acct, false);
3607 +
3608 + unpin_out:
3609 +- if (ret) {
3610 ++ if (ret < 0) {
3611 + if (!rsvd) {
3612 + for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
3613 + put_pfn(pfn, dma->prot);
3614 +@@ -610,6 +662,7 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
3615 + static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
3616 + unsigned long *pfn_base, bool do_accounting)
3617 + {
3618 ++ struct page *pages[1];
3619 + struct mm_struct *mm;
3620 + int ret;
3621 +
3622 +@@ -617,8 +670,13 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
3623 + if (!mm)
3624 + return -ENODEV;
3625 +
3626 +- ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
3627 +- if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
3628 ++ ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages);
3629 ++ if (ret != 1)
3630 ++ goto out;
3631 ++
3632 ++ ret = 0;
3633 ++
3634 ++ if (do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
3635 + ret = vfio_lock_acct(dma, 1, true);
3636 + if (ret) {
3637 + put_pfn(*pfn_base, dma->prot);
3638 +@@ -630,6 +688,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
3639 + }
3640 + }
3641 +
3642 ++out:
3643 + mmput(mm);
3644 + return ret;
3645 + }
3646 +@@ -1263,15 +1322,19 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
3647 + {
3648 + dma_addr_t iova = dma->iova;
3649 + unsigned long vaddr = dma->vaddr;
3650 ++ struct vfio_batch batch;
3651 + size_t size = map_size;
3652 + long npage;
3653 + unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
3654 + int ret = 0;
3655 +
3656 ++ vfio_batch_init(&batch);
3657 ++
3658 + while (size) {
3659 + /* Pin a contiguous chunk of memory */
3660 + npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
3661 +- size >> PAGE_SHIFT, &pfn, limit);
3662 ++ size >> PAGE_SHIFT, &pfn, limit,
3663 ++ &batch);
3664 + if (npage <= 0) {
3665 + WARN_ON(!npage);
3666 + ret = (int)npage;
3667 +@@ -1291,6 +1354,7 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
3668 + dma->size += npage << PAGE_SHIFT;
3669 + }
3670 +
3671 ++ vfio_batch_fini(&batch);
3672 + dma->iommu_mapped = true;
3673 +
3674 + if (ret)
3675 +@@ -1449,6 +1513,7 @@ static int vfio_bus_type(struct device *dev, void *data)
3676 + static int vfio_iommu_replay(struct vfio_iommu *iommu,
3677 + struct vfio_domain *domain)
3678 + {
3679 ++ struct vfio_batch batch;
3680 + struct vfio_domain *d = NULL;
3681 + struct rb_node *n;
3682 + unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
3683 +@@ -1459,6 +1524,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
3684 + d = list_first_entry(&iommu->domain_list,
3685 + struct vfio_domain, next);
3686 +
3687 ++ vfio_batch_init(&batch);
3688 ++
3689 + n = rb_first(&iommu->dma_list);
3690 +
3691 + for (; n; n = rb_next(n)) {
3692 +@@ -1506,7 +1573,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
3693 +
3694 + npage = vfio_pin_pages_remote(dma, vaddr,
3695 + n >> PAGE_SHIFT,
3696 +- &pfn, limit);
3697 ++ &pfn, limit,
3698 ++ &batch);
3699 + if (npage <= 0) {
3700 + WARN_ON(!npage);
3701 + ret = (int)npage;
3702 +@@ -1539,6 +1607,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
3703 + dma->iommu_mapped = true;
3704 + }
3705 +
3706 ++ vfio_batch_fini(&batch);
3707 + return 0;
3708 +
3709 + unwind:
3710 +@@ -1579,6 +1648,7 @@ unwind:
3711 + }
3712 + }
3713 +
3714 ++ vfio_batch_fini(&batch);
3715 + return ret;
3716 + }
3717 +
3718 +diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
3719 +index 24c6f36177bac..a6ca4eda9a5ae 100644
3720 +--- a/fs/cifs/cifsproto.h
3721 ++++ b/fs/cifs/cifsproto.h
3722 +@@ -230,6 +230,8 @@ extern unsigned int setup_special_user_owner_ACE(struct cifs_ace *pace);
3723 + extern void dequeue_mid(struct mid_q_entry *mid, bool malformed);
3724 + extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
3725 + unsigned int to_read);
3726 ++extern ssize_t cifs_discard_from_socket(struct TCP_Server_Info *server,
3727 ++ size_t to_read);
3728 + extern int cifs_read_page_from_socket(struct TCP_Server_Info *server,
3729 + struct page *page,
3730 + unsigned int page_offset,
3731 +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
3732 +index 0496934feecb7..c279527aae92d 100644
3733 +--- a/fs/cifs/cifssmb.c
3734 ++++ b/fs/cifs/cifssmb.c
3735 +@@ -1451,9 +1451,9 @@ cifs_discard_remaining_data(struct TCP_Server_Info *server)
3736 + while (remaining > 0) {
3737 + int length;
3738 +
3739 +- length = cifs_read_from_socket(server, server->bigbuf,
3740 +- min_t(unsigned int, remaining,
3741 +- CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
3742 ++ length = cifs_discard_from_socket(server,
3743 ++ min_t(size_t, remaining,
3744 ++ CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
3745 + if (length < 0)
3746 + return length;
3747 + server->total_read += length;
3748 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
3749 +index 7f5d173760cfc..d1c3086d7ddd0 100644
3750 +--- a/fs/cifs/connect.c
3751 ++++ b/fs/cifs/connect.c
3752 +@@ -695,9 +695,6 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
3753 + int length = 0;
3754 + int total_read;
3755 +
3756 +- smb_msg->msg_control = NULL;
3757 +- smb_msg->msg_controllen = 0;
3758 +-
3759 + for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
3760 + try_to_freeze();
3761 +
3762 +@@ -748,18 +745,33 @@ int
3763 + cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
3764 + unsigned int to_read)
3765 + {
3766 +- struct msghdr smb_msg;
3767 ++ struct msghdr smb_msg = {};
3768 + struct kvec iov = {.iov_base = buf, .iov_len = to_read};
3769 + iov_iter_kvec(&smb_msg.msg_iter, READ, &iov, 1, to_read);
3770 +
3771 + return cifs_readv_from_socket(server, &smb_msg);
3772 + }
3773 +
3774 ++ssize_t
3775 ++cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read)
3776 ++{
3777 ++ struct msghdr smb_msg = {};
3778 ++
3779 ++ /*
3780 ++ * iov_iter_discard already sets smb_msg.type and count and iov_offset
3781 ++ * and cifs_readv_from_socket sets msg_control and msg_controllen
3782 ++ * so little to initialize in struct msghdr
3783 ++ */
3784 ++ iov_iter_discard(&smb_msg.msg_iter, READ, to_read);
3785 ++
3786 ++ return cifs_readv_from_socket(server, &smb_msg);
3787 ++}
3788 ++
3789 + int
3790 + cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
3791 + unsigned int page_offset, unsigned int to_read)
3792 + {
3793 +- struct msghdr smb_msg;
3794 ++ struct msghdr smb_msg = {};
3795 + struct bio_vec bv = {
3796 + .bv_page = page, .bv_len = to_read, .bv_offset = page_offset};
3797 + iov_iter_bvec(&smb_msg.msg_iter, READ, &bv, 1, to_read);
3798 +diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
3799 +index 383ae8744c337..b137006f0fd25 100644
3800 +--- a/fs/cifs/transport.c
3801 ++++ b/fs/cifs/transport.c
3802 +@@ -209,10 +209,6 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
3803 +
3804 + *sent = 0;
3805 +
3806 +- smb_msg->msg_name = NULL;
3807 +- smb_msg->msg_namelen = 0;
3808 +- smb_msg->msg_control = NULL;
3809 +- smb_msg->msg_controllen = 0;
3810 + if (server->noblocksnd)
3811 + smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
3812 + else
3813 +@@ -324,7 +320,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
3814 + sigset_t mask, oldmask;
3815 + size_t total_len = 0, sent, size;
3816 + struct socket *ssocket = server->ssocket;
3817 +- struct msghdr smb_msg;
3818 ++ struct msghdr smb_msg = {};
3819 + __be32 rfc1002_marker;
3820 +
3821 + if (cifs_rdma_enabled(server)) {
3822 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
3823 +index 0f49bf547b848..30add5a3df3df 100644
3824 +--- a/fs/ext4/extents.c
3825 ++++ b/fs/ext4/extents.c
3826 +@@ -459,6 +459,10 @@ static int __ext4_ext_check(const char *function, unsigned int line,
3827 + error_msg = "invalid eh_entries";
3828 + goto corrupted;
3829 + }
3830 ++ if (unlikely((eh->eh_entries == 0) && (depth > 0))) {
3831 ++ error_msg = "eh_entries is 0 but eh_depth is > 0";
3832 ++ goto corrupted;
3833 ++ }
3834 + if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
3835 + error_msg = "invalid extent entries";
3836 + goto corrupted;
3837 +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
3838 +index 875af329c43ec..c53c9b1322049 100644
3839 +--- a/fs/ext4/ialloc.c
3840 ++++ b/fs/ext4/ialloc.c
3841 +@@ -508,7 +508,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
3842 + goto fallback;
3843 + }
3844 +
3845 +- max_dirs = ndirs / ngroups + inodes_per_group / 16;
3846 ++ max_dirs = ndirs / ngroups + inodes_per_group*flex_size / 16;
3847 + min_inodes = avefreei - inodes_per_group*flex_size / 4;
3848 + if (min_inodes < 1)
3849 + min_inodes = 1;
3850 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
3851 +index c32d0895c3a3d..d5ca02a7766e0 100644
3852 +--- a/fs/ext4/mballoc.c
3853 ++++ b/fs/ext4/mballoc.c
3854 +@@ -4959,6 +4959,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
3855 + ext4_fsblk_t block = 0;
3856 + unsigned int inquota = 0;
3857 + unsigned int reserv_clstrs = 0;
3858 ++ int retries = 0;
3859 + u64 seq;
3860 +
3861 + might_sleep();
3862 +@@ -5061,7 +5062,8 @@ repeat:
3863 + ar->len = ac->ac_b_ex.fe_len;
3864 + }
3865 + } else {
3866 +- if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
3867 ++ if (++retries < 3 &&
3868 ++ ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
3869 + goto repeat;
3870 + /*
3871 + * If block allocation fails then the pa allocated above
3872 +diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
3873 +index c667c63f2cb00..fa8aefe6b7ec3 100644
3874 +--- a/fs/xfs/libxfs/xfs_inode_buf.c
3875 ++++ b/fs/xfs/libxfs/xfs_inode_buf.c
3876 +@@ -358,19 +358,36 @@ xfs_dinode_verify_fork(
3877 + int whichfork)
3878 + {
3879 + uint32_t di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork);
3880 ++ mode_t mode = be16_to_cpu(dip->di_mode);
3881 ++ uint32_t fork_size = XFS_DFORK_SIZE(dip, mp, whichfork);
3882 ++ uint32_t fork_format = XFS_DFORK_FORMAT(dip, whichfork);
3883 +
3884 +- switch (XFS_DFORK_FORMAT(dip, whichfork)) {
3885 ++ /*
3886 ++ * For fork types that can contain local data, check that the fork
3887 ++ * format matches the size of local data contained within the fork.
3888 ++ *
3889 ++ * For all types, check that when the size says the fork should be in extent
3890 ++ * or btree format, the inode isn't claiming it is in local format.
3891 ++ */
3892 ++ if (whichfork == XFS_DATA_FORK) {
3893 ++ if (S_ISDIR(mode) || S_ISLNK(mode)) {
3894 ++ if (be64_to_cpu(dip->di_size) <= fork_size &&
3895 ++ fork_format != XFS_DINODE_FMT_LOCAL)
3896 ++ return __this_address;
3897 ++ }
3898 ++
3899 ++ if (be64_to_cpu(dip->di_size) > fork_size &&
3900 ++ fork_format == XFS_DINODE_FMT_LOCAL)
3901 ++ return __this_address;
3902 ++ }
3903 ++
3904 ++ switch (fork_format) {
3905 + case XFS_DINODE_FMT_LOCAL:
3906 + /*
3907 +- * no local regular files yet
3908 ++ * No local regular files yet.
3909 + */
3910 +- if (whichfork == XFS_DATA_FORK) {
3911 +- if (S_ISREG(be16_to_cpu(dip->di_mode)))
3912 +- return __this_address;
3913 +- if (be64_to_cpu(dip->di_size) >
3914 +- XFS_DFORK_SIZE(dip, mp, whichfork))
3915 +- return __this_address;
3916 +- }
3917 ++ if (S_ISREG(mode) && whichfork == XFS_DATA_FORK)
3918 ++ return __this_address;
3919 + if (di_nextents)
3920 + return __this_address;
3921 + break;
3922 +diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
3923 +index 1f61e085676b3..19008838df769 100644
3924 +--- a/fs/xfs/xfs_inode.c
3925 ++++ b/fs/xfs/xfs_inode.c
3926 +@@ -802,6 +802,7 @@ xfs_ialloc(
3927 + xfs_buf_t **ialloc_context,
3928 + xfs_inode_t **ipp)
3929 + {
3930 ++ struct inode *dir = pip ? VFS_I(pip) : NULL;
3931 + struct xfs_mount *mp = tp->t_mountp;
3932 + xfs_ino_t ino;
3933 + xfs_inode_t *ip;
3934 +@@ -847,18 +848,17 @@ xfs_ialloc(
3935 + return error;
3936 + ASSERT(ip != NULL);
3937 + inode = VFS_I(ip);
3938 +- inode->i_mode = mode;
3939 + set_nlink(inode, nlink);
3940 +- inode->i_uid = current_fsuid();
3941 + inode->i_rdev = rdev;
3942 + ip->i_d.di_projid = prid;
3943 +
3944 +- if (pip && XFS_INHERIT_GID(pip)) {
3945 +- inode->i_gid = VFS_I(pip)->i_gid;
3946 +- if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
3947 +- inode->i_mode |= S_ISGID;
3948 ++ if (dir && !(dir->i_mode & S_ISGID) &&
3949 ++ (mp->m_flags & XFS_MOUNT_GRPID)) {
3950 ++ inode->i_uid = current_fsuid();
3951 ++ inode->i_gid = dir->i_gid;
3952 ++ inode->i_mode = mode;
3953 + } else {
3954 +- inode->i_gid = current_fsgid();
3955 ++ inode_init_owner(inode, dir, mode);
3956 + }
3957 +
3958 + /*
3959 +@@ -2669,14 +2669,13 @@ xfs_ifree_cluster(
3960 + }
3961 +
3962 + /*
3963 +- * This is called to return an inode to the inode free list.
3964 +- * The inode should already be truncated to 0 length and have
3965 +- * no pages associated with it. This routine also assumes that
3966 +- * the inode is already a part of the transaction.
3967 ++ * This is called to return an inode to the inode free list. The inode should
3968 ++ * already be truncated to 0 length and have no pages associated with it. This
3969 ++ * routine also assumes that the inode is already a part of the transaction.
3970 + *
3971 +- * The on-disk copy of the inode will have been added to the list
3972 +- * of unlinked inodes in the AGI. We need to remove the inode from
3973 +- * that list atomically with respect to freeing it here.
3974 ++ * The on-disk copy of the inode will have been added to the list of unlinked
3975 ++ * inodes in the AGI. We need to remove the inode from that list atomically with
3976 ++ * respect to freeing it here.
3977 + */
3978 + int
3979 + xfs_ifree(
3980 +@@ -2694,13 +2693,16 @@ xfs_ifree(
3981 + ASSERT(ip->i_d.di_nblocks == 0);
3982 +
3983 + /*
3984 +- * Pull the on-disk inode from the AGI unlinked list.
3985 ++ * Free the inode first so that we guarantee that the AGI lock is going
3986 ++ * to be taken before we remove the inode from the unlinked list. This
3987 ++ * makes the AGI lock -> unlinked list modification order the same as
3988 ++ * used in O_TMPFILE creation.
3989 + */
3990 +- error = xfs_iunlink_remove(tp, ip);
3991 ++ error = xfs_difree(tp, ip->i_ino, &xic);
3992 + if (error)
3993 + return error;
3994 +
3995 +- error = xfs_difree(tp, ip->i_ino, &xic);
3996 ++ error = xfs_iunlink_remove(tp, ip);
3997 + if (error)
3998 + return error;
3999 +
4000 +diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
4001 +index b68fca08be27c..3088d94684c1c 100644
4002 +--- a/include/linux/inetdevice.h
4003 ++++ b/include/linux/inetdevice.h
4004 +@@ -178,6 +178,15 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
4005 +
4006 + int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
4007 + int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *);
4008 ++#ifdef CONFIG_INET
4009 ++int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size);
4010 ++#else
4011 ++static inline int inet_gifconf(struct net_device *dev, char __user *buf,
4012 ++ int len, int size)
4013 ++{
4014 ++ return 0;
4015 ++}
4016 ++#endif
4017 + void devinet_init(void);
4018 + struct in_device *inetdev_by_index(struct net *, int);
4019 + __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
4020 +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
4021 +index 94871f12e5362..896e563e2c181 100644
4022 +--- a/include/linux/kvm_host.h
4023 ++++ b/include/linux/kvm_host.h
4024 +@@ -1489,6 +1489,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
4025 + void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
4026 + unsigned long start, unsigned long end);
4027 +
4028 ++void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
4029 ++
4030 + #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
4031 + int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
4032 + #else
4033 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
4034 +index 6564fb4ac49e1..ef75567efd27a 100644
4035 +--- a/include/linux/netdevice.h
4036 ++++ b/include/linux/netdevice.h
4037 +@@ -3201,14 +3201,6 @@ static inline bool dev_has_header(const struct net_device *dev)
4038 + return dev->header_ops && dev->header_ops->create;
4039 + }
4040 +
4041 +-typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
4042 +- int len, int size);
4043 +-int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
4044 +-static inline int unregister_gifconf(unsigned int family)
4045 +-{
4046 +- return register_gifconf(family, NULL);
4047 +-}
4048 +-
4049 + #ifdef CONFIG_NET_FLOW_LIMIT
4050 + #define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */
4051 + struct sd_flow_limit {
4052 +diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
4053 +index 9c1292ea47fdc..59a8caf3230a4 100644
4054 +--- a/include/linux/serial_core.h
4055 ++++ b/include/linux/serial_core.h
4056 +@@ -300,6 +300,23 @@ struct uart_state {
4057 + /* number of characters left in xmit buffer before we ask for more */
4058 + #define WAKEUP_CHARS 256
4059 +
4060 ++/**
4061 ++ * uart_xmit_advance - Advance xmit buffer and account Tx'ed chars
4062 ++ * @up: uart_port structure describing the port
4063 ++ * @chars: number of characters sent
4064 ++ *
4065 ++ * This function advances the tail of circular xmit buffer by the number of
4066 ++ * @chars transmitted and handles accounting of transmitted bytes (into
4067 ++ * @up's icount.tx).
4068 ++ */
4069 ++static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars)
4070 ++{
4071 ++ struct circ_buf *xmit = &up->state->xmit;
4072 ++
4073 ++ xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1);
4074 ++ up->icount.tx += chars;
4075 ++}
4076 ++
4077 + struct module;
4078 + struct tty_driver;
4079 +
4080 +diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
4081 +index 1a28f299a4c61..895eae18271fa 100644
4082 +--- a/include/net/bond_3ad.h
4083 ++++ b/include/net/bond_3ad.h
4084 +@@ -15,8 +15,6 @@
4085 + #define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW)
4086 + #define AD_TIMER_INTERVAL 100 /*msec*/
4087 +
4088 +-#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02}
4089 +-
4090 + #define AD_LACP_SLOW 0
4091 + #define AD_LACP_FAST 1
4092 +
4093 +diff --git a/include/net/bonding.h b/include/net/bonding.h
4094 +index 67d676059aa0d..d9cc3f5602fb2 100644
4095 +--- a/include/net/bonding.h
4096 ++++ b/include/net/bonding.h
4097 +@@ -763,6 +763,9 @@ extern struct rtnl_link_ops bond_link_ops;
4098 + /* exported from bond_sysfs_slave.c */
4099 + extern const struct sysfs_ops slave_sysfs_ops;
4100 +
4101 ++/* exported from bond_3ad.c */
4102 ++extern const u8 lacpdu_mcast_addr[];
4103 ++
4104 + static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
4105 + {
4106 + atomic_long_inc(&dev->tx_dropped);
4107 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
4108 +index fdf5fa4bf4448..0cc2a62e88f9e 100644
4109 +--- a/kernel/workqueue.c
4110 ++++ b/kernel/workqueue.c
4111 +@@ -3047,10 +3047,8 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
4112 + if (WARN_ON(!work->func))
4113 + return false;
4114 +
4115 +- if (!from_cancel) {
4116 +- lock_map_acquire(&work->lockdep_map);
4117 +- lock_map_release(&work->lockdep_map);
4118 +- }
4119 ++ lock_map_acquire(&work->lockdep_map);
4120 ++ lock_map_release(&work->lockdep_map);
4121 +
4122 + if (start_flush_work(work, &barr, from_cancel)) {
4123 + wait_for_completion(&barr.done);
4124 +diff --git a/mm/slub.c b/mm/slub.c
4125 +index b395ef0645444..b0f637519ac99 100644
4126 +--- a/mm/slub.c
4127 ++++ b/mm/slub.c
4128 +@@ -5559,7 +5559,8 @@ static char *create_unique_id(struct kmem_cache *s)
4129 + char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
4130 + char *p = name;
4131 +
4132 +- BUG_ON(!name);
4133 ++ if (!name)
4134 ++ return ERR_PTR(-ENOMEM);
4135 +
4136 + *p++ = ':';
4137 + /*
4138 +@@ -5617,6 +5618,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
4139 + * for the symlinks.
4140 + */
4141 + name = create_unique_id(s);
4142 ++ if (IS_ERR(name))
4143 ++ return PTR_ERR(name);
4144 + }
4145 +
4146 + s->kobj.kset = kset;
4147 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
4148 +index 310740cc684ad..06b80b5843819 100644
4149 +--- a/net/bridge/netfilter/ebtables.c
4150 ++++ b/net/bridge/netfilter/ebtables.c
4151 +@@ -999,8 +999,10 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
4152 + goto free_iterate;
4153 + }
4154 +
4155 +- if (repl->valid_hooks != t->valid_hooks)
4156 ++ if (repl->valid_hooks != t->valid_hooks) {
4157 ++ ret = -EINVAL;
4158 + goto free_unlock;
4159 ++ }
4160 +
4161 + if (repl->num_counters && repl->num_counters != t->private->nentries) {
4162 + ret = -EINVAL;
4163 +diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
4164 +index 54fb18b4f55e4..993420da29307 100644
4165 +--- a/net/core/dev_ioctl.c
4166 ++++ b/net/core/dev_ioctl.c
4167 +@@ -1,6 +1,7 @@
4168 + // SPDX-License-Identifier: GPL-2.0
4169 + #include <linux/kmod.h>
4170 + #include <linux/netdevice.h>
4171 ++#include <linux/inetdevice.h>
4172 + #include <linux/etherdevice.h>
4173 + #include <linux/rtnetlink.h>
4174 + #include <linux/net_tstamp.h>
4175 +@@ -25,26 +26,6 @@ static int dev_ifname(struct net *net, struct ifreq *ifr)
4176 + return netdev_get_name(net, ifr->ifr_name, ifr->ifr_ifindex);
4177 + }
4178 +
4179 +-static gifconf_func_t *gifconf_list[NPROTO];
4180 +-
4181 +-/**
4182 +- * register_gifconf - register a SIOCGIF handler
4183 +- * @family: Address family
4184 +- * @gifconf: Function handler
4185 +- *
4186 +- * Register protocol dependent address dumping routines. The handler
4187 +- * that is passed must not be freed or reused until it has been replaced
4188 +- * by another handler.
4189 +- */
4190 +-int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
4191 +-{
4192 +- if (family >= NPROTO)
4193 +- return -EINVAL;
4194 +- gifconf_list[family] = gifconf;
4195 +- return 0;
4196 +-}
4197 +-EXPORT_SYMBOL(register_gifconf);
4198 +-
4199 + /*
4200 + * Perform a SIOCGIFCONF call. This structure will change
4201 + * size eventually, and there is nothing I can do about it.
4202 +@@ -57,7 +38,6 @@ int dev_ifconf(struct net *net, struct ifconf *ifc, int size)
4203 + char __user *pos;
4204 + int len;
4205 + int total;
4206 +- int i;
4207 +
4208 + /*
4209 + * Fetch the caller's info block.
4210 +@@ -72,19 +52,15 @@ int dev_ifconf(struct net *net, struct ifconf *ifc, int size)
4211 +
4212 + total = 0;
4213 + for_each_netdev(net, dev) {
4214 +- for (i = 0; i < NPROTO; i++) {
4215 +- if (gifconf_list[i]) {
4216 +- int done;
4217 +- if (!pos)
4218 +- done = gifconf_list[i](dev, NULL, 0, size);
4219 +- else
4220 +- done = gifconf_list[i](dev, pos + total,
4221 +- len - total, size);
4222 +- if (done < 0)
4223 +- return -EFAULT;
4224 +- total += done;
4225 +- }
4226 +- }
4227 ++ int done;
4228 ++ if (!pos)
4229 ++ done = inet_gifconf(dev, NULL, 0, size);
4230 ++ else
4231 ++ done = inet_gifconf(dev, pos + total,
4232 ++ len - total, size);
4233 ++ if (done < 0)
4234 ++ return -EFAULT;
4235 ++ total += done;
4236 + }
4237 +
4238 + /*
4239 +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
4240 +index f9baa9b1c77f7..ed120828c7e21 100644
4241 +--- a/net/core/flow_dissector.c
4242 ++++ b/net/core/flow_dissector.c
4243 +@@ -1485,7 +1485,7 @@ __be32 flow_get_u32_dst(const struct flow_keys *flow)
4244 + }
4245 + EXPORT_SYMBOL(flow_get_u32_dst);
4246 +
4247 +-/* Sort the source and destination IP (and the ports if the IP are the same),
4248 ++/* Sort the source and destination IP and the ports,
4249 + * to have consistent hash within the two directions
4250 + */
4251 + static inline void __flow_hash_consistentify(struct flow_keys *keys)
4252 +@@ -1494,13 +1494,12 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
4253 +
4254 + switch (keys->control.addr_type) {
4255 + case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
4256 +- addr_diff = (__force u32)keys->addrs.v4addrs.dst -
4257 +- (__force u32)keys->addrs.v4addrs.src;
4258 +- if ((addr_diff < 0) ||
4259 +- (addr_diff == 0 &&
4260 +- ((__force u16)keys->ports.dst <
4261 +- (__force u16)keys->ports.src))) {
4262 ++ if ((__force u32)keys->addrs.v4addrs.dst <
4263 ++ (__force u32)keys->addrs.v4addrs.src)
4264 + swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
4265 ++
4266 ++ if ((__force u16)keys->ports.dst <
4267 ++ (__force u16)keys->ports.src) {
4268 + swap(keys->ports.src, keys->ports.dst);
4269 + }
4270 + break;
4271 +@@ -1508,13 +1507,13 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
4272 + addr_diff = memcmp(&keys->addrs.v6addrs.dst,
4273 + &keys->addrs.v6addrs.src,
4274 + sizeof(keys->addrs.v6addrs.dst));
4275 +- if ((addr_diff < 0) ||
4276 +- (addr_diff == 0 &&
4277 +- ((__force u16)keys->ports.dst <
4278 +- (__force u16)keys->ports.src))) {
4279 ++ if (addr_diff < 0) {
4280 + for (i = 0; i < 4; i++)
4281 + swap(keys->addrs.v6addrs.src.s6_addr32[i],
4282 + keys->addrs.v6addrs.dst.s6_addr32[i]);
4283 ++ }
4284 ++ if ((__force u16)keys->ports.dst <
4285 ++ (__force u16)keys->ports.src) {
4286 + swap(keys->ports.src, keys->ports.dst);
4287 + }
4288 + break;
4289 +diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
4290 +index 8f17538755507..88b6120878cd9 100644
4291 +--- a/net/ipv4/devinet.c
4292 ++++ b/net/ipv4/devinet.c
4293 +@@ -1244,7 +1244,7 @@ out:
4294 + return ret;
4295 + }
4296 +
4297 +-static int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size)
4298 ++int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size)
4299 + {
4300 + struct in_device *in_dev = __in_dev_get_rtnl(dev);
4301 + const struct in_ifaddr *ifa;
4302 +@@ -2766,8 +2766,6 @@ void __init devinet_init(void)
4303 + INIT_HLIST_HEAD(&inet_addr_lst[i]);
4304 +
4305 + register_pernet_subsys(&devinet_ops);
4306 +-
4307 +- register_gifconf(PF_INET, inet_gifconf);
4308 + register_netdevice_notifier(&ip_netdev_notifier);
4309 +
4310 + queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
4311 +diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
4312 +index 26245419ef4a9..65b5b05fe38d3 100644
4313 +--- a/net/netfilter/nf_conntrack_irc.c
4314 ++++ b/net/netfilter/nf_conntrack_irc.c
4315 +@@ -148,15 +148,37 @@ static int help(struct sk_buff *skb, unsigned int protoff,
4316 + data = ib_ptr;
4317 + data_limit = ib_ptr + skb->len - dataoff;
4318 +
4319 +- /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
4320 +- * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */
4321 +- while (data < data_limit - (19 + MINMATCHLEN)) {
4322 +- if (memcmp(data, "\1DCC ", 5)) {
4323 ++ /* Skip any whitespace */
4324 ++ while (data < data_limit - 10) {
4325 ++ if (*data == ' ' || *data == '\r' || *data == '\n')
4326 ++ data++;
4327 ++ else
4328 ++ break;
4329 ++ }
4330 ++
4331 ++ /* strlen("PRIVMSG x ")=10 */
4332 ++ if (data < data_limit - 10) {
4333 ++ if (strncasecmp("PRIVMSG ", data, 8))
4334 ++ goto out;
4335 ++ data += 8;
4336 ++ }
4337 ++
4338 ++ /* strlen(" :\1DCC SENT t AAAAAAAA P\1\n")=26
4339 ++ * 7+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=26
4340 ++ */
4341 ++ while (data < data_limit - (21 + MINMATCHLEN)) {
4342 ++ /* Find first " :", the start of message */
4343 ++ if (memcmp(data, " :", 2)) {
4344 + data++;
4345 + continue;
4346 + }
4347 ++ data += 2;
4348 ++
4349 ++ /* then check that place only for the DCC command */
4350 ++ if (memcmp(data, "\1DCC ", 5))
4351 ++ goto out;
4352 + data += 5;
4353 +- /* we have at least (19+MINMATCHLEN)-5 bytes valid data left */
4354 ++ /* we have at least (21+MINMATCHLEN)-(2+5) bytes valid data left */
4355 +
4356 + iph = ip_hdr(skb);
4357 + pr_debug("DCC found in master %pI4:%u %pI4:%u\n",
4358 +@@ -172,7 +194,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
4359 + pr_debug("DCC %s detected\n", dccprotos[i]);
4360 +
4361 + /* we have at least
4362 +- * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
4363 ++ * (21+MINMATCHLEN)-7-dccprotos[i].matchlen bytes valid
4364 + * data left (== 14/13 bytes) */
4365 + if (parse_dcc(data, data_limit, &dcc_ip,
4366 + &dcc_port, &addr_beg_p, &addr_end_p)) {
4367 +diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
4368 +index b83dc9bf0a5dd..78fd9122b70c7 100644
4369 +--- a/net/netfilter/nf_conntrack_sip.c
4370 ++++ b/net/netfilter/nf_conntrack_sip.c
4371 +@@ -477,7 +477,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr,
4372 + return ret;
4373 + if (ret == 0)
4374 + break;
4375 +- dataoff += *matchoff;
4376 ++ dataoff = *matchoff;
4377 + }
4378 + *in_header = 0;
4379 + }
4380 +@@ -489,7 +489,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr,
4381 + break;
4382 + if (ret == 0)
4383 + return ret;
4384 +- dataoff += *matchoff;
4385 ++ dataoff = *matchoff;
4386 + }
4387 +
4388 + if (in_header)
4389 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
4390 +index b8e7e1c5c08a8..810995d712ac7 100644
4391 +--- a/net/netfilter/nf_tables_api.c
4392 ++++ b/net/netfilter/nf_tables_api.c
4393 +@@ -2001,7 +2001,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
4394 + u8 policy, u32 flags)
4395 + {
4396 + const struct nlattr * const *nla = ctx->nla;
4397 +- struct nft_stats __percpu *stats = NULL;
4398 + struct nft_table *table = ctx->table;
4399 + struct nft_base_chain *basechain;
4400 + struct net *net = ctx->net;
4401 +@@ -2015,6 +2014,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
4402 + return -EOVERFLOW;
4403 +
4404 + if (nla[NFTA_CHAIN_HOOK]) {
4405 ++ struct nft_stats __percpu *stats = NULL;
4406 + struct nft_chain_hook hook;
4407 +
4408 + if (flags & NFT_CHAIN_BINDING)
4409 +@@ -2045,8 +2045,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
4410 + if (err < 0) {
4411 + nft_chain_release_hook(&hook);
4412 + kfree(basechain);
4413 ++ free_percpu(stats);
4414 + return err;
4415 + }
4416 ++ if (stats)
4417 ++ static_branch_inc(&nft_counters_enabled);
4418 + } else {
4419 + if (flags & NFT_CHAIN_BASE)
4420 + return -EINVAL;
4421 +@@ -2121,9 +2124,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
4422 + goto err_unregister_hook;
4423 + }
4424 +
4425 +- if (stats)
4426 +- static_branch_inc(&nft_counters_enabled);
4427 +-
4428 + table->use++;
4429 +
4430 + return 0;
4431 +diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
4432 +index 79fbf37291f38..51e3953b414c0 100644
4433 +--- a/net/netfilter/nfnetlink_osf.c
4434 ++++ b/net/netfilter/nfnetlink_osf.c
4435 +@@ -269,6 +269,7 @@ bool nf_osf_find(const struct sk_buff *skb,
4436 + struct nf_osf_hdr_ctx ctx;
4437 + const struct tcphdr *tcp;
4438 + struct tcphdr _tcph;
4439 ++ bool found = false;
4440 +
4441 + memset(&ctx, 0, sizeof(ctx));
4442 +
4443 +@@ -283,10 +284,11 @@ bool nf_osf_find(const struct sk_buff *skb,
4444 +
4445 + data->genre = f->genre;
4446 + data->version = f->version;
4447 ++ found = true;
4448 + break;
4449 + }
4450 +
4451 +- return true;
4452 ++ return found;
4453 + }
4454 + EXPORT_SYMBOL_GPL(nf_osf_find);
4455 +
4456 +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
4457 +index b8ffb7e4f696c..c410a736301bc 100644
4458 +--- a/net/sched/cls_api.c
4459 ++++ b/net/sched/cls_api.c
4460 +@@ -2124,6 +2124,7 @@ replay:
4461 + }
4462 +
4463 + if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
4464 ++ tfilter_put(tp, fh);
4465 + NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
4466 + err = -EINVAL;
4467 + goto errout;
4468 +diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
4469 +index eca525791013e..ab8835a72cee6 100644
4470 +--- a/net/sched/sch_taprio.c
4471 ++++ b/net/sched/sch_taprio.c
4472 +@@ -65,6 +65,7 @@ struct taprio_sched {
4473 + u32 flags;
4474 + enum tk_offsets tk_offset;
4475 + int clockid;
4476 ++ bool offloaded;
4477 + atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
4478 + * speeds it's sub-nanoseconds per byte
4479 + */
4480 +@@ -1267,6 +1268,8 @@ static int taprio_enable_offload(struct net_device *dev,
4481 + goto done;
4482 + }
4483 +
4484 ++ q->offloaded = true;
4485 ++
4486 + done:
4487 + taprio_offload_free(offload);
4488 +
4489 +@@ -1281,12 +1284,9 @@ static int taprio_disable_offload(struct net_device *dev,
4490 + struct tc_taprio_qopt_offload *offload;
4491 + int err;
4492 +
4493 +- if (!FULL_OFFLOAD_IS_ENABLED(q->flags))
4494 ++ if (!q->offloaded)
4495 + return 0;
4496 +
4497 +- if (!ops->ndo_setup_tc)
4498 +- return -EOPNOTSUPP;
4499 +-
4500 + offload = taprio_offload_alloc(0);
4501 + if (!offload) {
4502 + NL_SET_ERR_MSG(extack,
4503 +@@ -1302,6 +1302,8 @@ static int taprio_disable_offload(struct net_device *dev,
4504 + goto out;
4505 + }
4506 +
4507 ++ q->offloaded = false;
4508 ++
4509 + out:
4510 + taprio_offload_free(offload);
4511 +
4512 +@@ -1904,12 +1906,14 @@ start_error:
4513 +
4514 + static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
4515 + {
4516 +- struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
4517 ++ struct taprio_sched *q = qdisc_priv(sch);
4518 ++ struct net_device *dev = qdisc_dev(sch);
4519 ++ unsigned int ntx = cl - 1;
4520 +
4521 +- if (!dev_queue)
4522 ++ if (ntx >= dev->num_tx_queues)
4523 + return NULL;
4524 +
4525 +- return dev_queue->qdisc_sleeping;
4526 ++ return q->qdiscs[ntx];
4527 + }
4528 +
4529 + static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
4530 +diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
4531 +index ef2fd28999baf..bf485a2017a4e 100644
4532 +--- a/net/smc/smc_core.c
4533 ++++ b/net/smc/smc_core.c
4534 +@@ -1584,7 +1584,7 @@ static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
4535 + static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
4536 + struct smc_buf_desc *buf_desc, bool is_rmb)
4537 + {
4538 +- int i, rc = 0;
4539 ++ int i, rc = 0, cnt = 0;
4540 +
4541 + /* protect against parallel link reconfiguration */
4542 + mutex_lock(&lgr->llc_conf_mutex);
4543 +@@ -1597,9 +1597,12 @@ static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
4544 + rc = -ENOMEM;
4545 + goto out;
4546 + }
4547 ++ cnt++;
4548 + }
4549 + out:
4550 + mutex_unlock(&lgr->llc_conf_mutex);
4551 ++ if (!rc && !cnt)
4552 ++ rc = -EINVAL;
4553 + return rc;
4554 + }
4555 +
4556 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4557 +index 600ea241ead79..79b8d4258fd3b 100644
4558 +--- a/sound/pci/hda/hda_intel.c
4559 ++++ b/sound/pci/hda/hda_intel.c
4560 +@@ -2584,6 +2584,8 @@ static const struct pci_device_id azx_ids[] = {
4561 + /* 5 Series/3400 */
4562 + { PCI_DEVICE(0x8086, 0x3b56),
4563 + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
4564 ++ { PCI_DEVICE(0x8086, 0x3b57),
4565 ++ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
4566 + /* Poulsbo */
4567 + { PCI_DEVICE(0x8086, 0x811b),
4568 + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_BASE },
4569 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
4570 +index 71e11481ba41c..7551cdf3b4529 100644
4571 +--- a/sound/pci/hda/patch_hdmi.c
4572 ++++ b/sound/pci/hda/patch_hdmi.c
4573 +@@ -3839,6 +3839,7 @@ static int patch_tegra_hdmi(struct hda_codec *codec)
4574 + if (err)
4575 + return err;
4576 +
4577 ++ codec->depop_delay = 10;
4578 + codec->patch_ops.build_pcms = tegra_hdmi_build_pcms;
4579 + spec = codec->spec;
4580 + spec->chmap.ops.chmap_cea_alloc_validate_get_type =
4581 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4582 +index 78f4f684a3c72..574fe798d5125 100644
4583 +--- a/sound/pci/hda/patch_realtek.c
4584 ++++ b/sound/pci/hda/patch_realtek.c
4585 +@@ -6824,6 +6824,8 @@ enum {
4586 + ALC294_FIXUP_ASUS_GU502_HP,
4587 + ALC294_FIXUP_ASUS_GU502_PINS,
4588 + ALC294_FIXUP_ASUS_GU502_VERBS,
4589 ++ ALC294_FIXUP_ASUS_G513_PINS,
4590 ++ ALC285_FIXUP_ASUS_G533Z_PINS,
4591 + ALC285_FIXUP_HP_GPIO_LED,
4592 + ALC285_FIXUP_HP_MUTE_LED,
4593 + ALC236_FIXUP_HP_GPIO_LED,
4594 +@@ -8149,6 +8151,24 @@ static const struct hda_fixup alc269_fixups[] = {
4595 + [ALC294_FIXUP_ASUS_GU502_HP] = {
4596 + .type = HDA_FIXUP_FUNC,
4597 + .v.func = alc294_fixup_gu502_hp,
4598 ++ },
4599 ++ [ALC294_FIXUP_ASUS_G513_PINS] = {
4600 ++ .type = HDA_FIXUP_PINS,
4601 ++ .v.pins = (const struct hda_pintbl[]) {
4602 ++ { 0x19, 0x03a11050 }, /* front HP mic */
4603 ++ { 0x1a, 0x03a11c30 }, /* rear external mic */
4604 ++ { 0x21, 0x03211420 }, /* front HP out */
4605 ++ { }
4606 ++ },
4607 ++ },
4608 ++ [ALC285_FIXUP_ASUS_G533Z_PINS] = {
4609 ++ .type = HDA_FIXUP_PINS,
4610 ++ .v.pins = (const struct hda_pintbl[]) {
4611 ++ { 0x14, 0x90170120 },
4612 ++ { }
4613 ++ },
4614 ++ .chained = true,
4615 ++ .chain_id = ALC294_FIXUP_ASUS_G513_PINS,
4616 + },
4617 + [ALC294_FIXUP_ASUS_COEF_1B] = {
4618 + .type = HDA_FIXUP_VERBS,
4619 +@@ -8754,6 +8774,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4620 + SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
4621 + SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
4622 + SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
4623 ++ SND_PCI_QUIRK(0x1028, 0x087d, "Dell Precision 5530", ALC289_FIXUP_DUAL_SPK),
4624 + SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
4625 + SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
4626 + SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
4627 +@@ -8769,6 +8790,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4628 + SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
4629 + SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
4630 + SND_PCI_QUIRK(0x1028, 0x0b19, "Dell XPS 15 9520", ALC289_FIXUP_DUAL_SPK),
4631 ++ SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK),
4632 + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4633 + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4634 + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
4635 +@@ -8912,10 +8934,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4636 + SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
4637 + SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
4638 + SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
4639 ++ SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
4640 ++ SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
4641 + SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
4642 + SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
4643 + SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
4644 +- SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
4645 + SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
4646 + SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
4647 + SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
4648 +@@ -8930,14 +8953,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4649 + SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
4650 + SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
4651 + SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
4652 ++ SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
4653 + SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
4654 ++ SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
4655 + SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
4656 + SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
4657 + SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
4658 ++ SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
4659 + SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
4660 ++ SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401),
4661 + SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
4662 +- SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
4663 +- SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
4664 + SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
4665 + SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
4666 + SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
4667 +@@ -9134,6 +9159,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4668 + SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
4669 + SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK),
4670 + SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
4671 ++ SND_PCI_QUIRK(0x19e5, 0x320f, "Huawei WRT-WX9 ", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
4672 + SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
4673 + SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
4674 + SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
4675 +diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
4676 +index 953338b9e887e..02cd9f75e3d2f 100644
4677 +--- a/tools/perf/util/genelf.c
4678 ++++ b/tools/perf/util/genelf.c
4679 +@@ -251,6 +251,7 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
4680 + Elf_Data *d;
4681 + Elf_Scn *scn;
4682 + Elf_Ehdr *ehdr;
4683 ++ Elf_Phdr *phdr;
4684 + Elf_Shdr *shdr;
4685 + uint64_t eh_frame_base_offset;
4686 + char *strsym = NULL;
4687 +@@ -285,6 +286,19 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
4688 + ehdr->e_version = EV_CURRENT;
4689 + ehdr->e_shstrndx= unwinding ? 4 : 2; /* shdr index for section name */
4690 +
4691 ++ /*
4692 ++ * setup program header
4693 ++ */
4694 ++ phdr = elf_newphdr(e, 1);
4695 ++ phdr[0].p_type = PT_LOAD;
4696 ++ phdr[0].p_offset = 0;
4697 ++ phdr[0].p_vaddr = 0;
4698 ++ phdr[0].p_paddr = 0;
4699 ++ phdr[0].p_filesz = csize;
4700 ++ phdr[0].p_memsz = csize;
4701 ++ phdr[0].p_flags = PF_X | PF_R;
4702 ++ phdr[0].p_align = 8;
4703 ++
4704 + /*
4705 + * setup text section
4706 + */
4707 +diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
4708 +index d4137559be053..ac638945b4cb0 100644
4709 +--- a/tools/perf/util/genelf.h
4710 ++++ b/tools/perf/util/genelf.h
4711 +@@ -50,8 +50,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
4712 +
4713 + #if GEN_ELF_CLASS == ELFCLASS64
4714 + #define elf_newehdr elf64_newehdr
4715 ++#define elf_newphdr elf64_newphdr
4716 + #define elf_getshdr elf64_getshdr
4717 + #define Elf_Ehdr Elf64_Ehdr
4718 ++#define Elf_Phdr Elf64_Phdr
4719 + #define Elf_Shdr Elf64_Shdr
4720 + #define Elf_Sym Elf64_Sym
4721 + #define ELF_ST_TYPE(a) ELF64_ST_TYPE(a)
4722 +@@ -59,8 +61,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
4723 + #define ELF_ST_VIS(a) ELF64_ST_VISIBILITY(a)
4724 + #else
4725 + #define elf_newehdr elf32_newehdr
4726 ++#define elf_newphdr elf32_newphdr
4727 + #define elf_getshdr elf32_getshdr
4728 + #define Elf_Ehdr Elf32_Ehdr
4729 ++#define Elf_Phdr Elf32_Phdr
4730 + #define Elf_Shdr Elf32_Shdr
4731 + #define Elf_Sym Elf32_Sym
4732 + #define ELF_ST_TYPE(a) ELF32_ST_TYPE(a)
4733 +diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
4734 +index d8d79a9ec7758..3e423a9200151 100644
4735 +--- a/tools/perf/util/symbol-elf.c
4736 ++++ b/tools/perf/util/symbol-elf.c
4737 +@@ -2002,8 +2002,8 @@ static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
4738 + * unusual. One significant peculiarity is that the mapping (start -> pgoff)
4739 + * is not the same for the kernel map and the modules map. That happens because
4740 + * the data is copied adjacently whereas the original kcore has gaps. Finally,
4741 +- * kallsyms and modules files are compared with their copies to check that
4742 +- * modules have not been loaded or unloaded while the copies were taking place.
4743 ++ * kallsyms file is compared with its copy to check that modules have not been
4744 ++ * loaded or unloaded while the copies were taking place.
4745 + *
4746 + * Return: %0 on success, %-1 on failure.
4747 + */
4748 +@@ -2066,9 +2066,6 @@ int kcore_copy(const char *from_dir, const char *to_dir)
4749 + goto out_extract_close;
4750 + }
4751 +
4752 +- if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
4753 +- goto out_extract_close;
4754 +-
4755 + if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
4756 + goto out_extract_close;
4757 +
4758 +diff --git a/tools/testing/selftests/net/forwarding/sch_red.sh b/tools/testing/selftests/net/forwarding/sch_red.sh
4759 +index e714bae473fb4..81f31179ac887 100755
4760 +--- a/tools/testing/selftests/net/forwarding/sch_red.sh
4761 ++++ b/tools/testing/selftests/net/forwarding/sch_red.sh
4762 +@@ -1,3 +1,4 @@
4763 ++#!/bin/bash
4764 + # SPDX-License-Identifier: GPL-2.0
4765 +
4766 + # This test sends one stream of traffic from H1 through a TBF shaper, to a RED
4767 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
4768 +index 578235291e92e..c4cce817a4522 100644
4769 +--- a/virt/kvm/kvm_main.c
4770 ++++ b/virt/kvm/kvm_main.c
4771 +@@ -159,6 +159,10 @@ __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
4772 + {
4773 + }
4774 +
4775 ++__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
4776 ++{
4777 ++}
4778 ++
4779 + bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
4780 + {
4781 + /*
4782 +@@ -340,6 +344,12 @@ void kvm_reload_remote_mmus(struct kvm *kvm)
4783 + kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
4784 + }
4785 +
4786 ++static void kvm_flush_shadow_all(struct kvm *kvm)
4787 ++{
4788 ++ kvm_arch_flush_shadow_all(kvm);
4789 ++ kvm_arch_guest_memory_reclaimed(kvm);
4790 ++}
4791 ++
4792 + #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
4793 + static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
4794 + gfp_t gfp_flags)
4795 +@@ -489,6 +499,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
4796 + kvm_flush_remote_tlbs(kvm);
4797 +
4798 + spin_unlock(&kvm->mmu_lock);
4799 ++ kvm_arch_guest_memory_reclaimed(kvm);
4800 + srcu_read_unlock(&kvm->srcu, idx);
4801 +
4802 + return 0;
4803 +@@ -592,7 +603,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
4804 + int idx;
4805 +
4806 + idx = srcu_read_lock(&kvm->srcu);
4807 +- kvm_arch_flush_shadow_all(kvm);
4808 ++ kvm_flush_shadow_all(kvm);
4809 + srcu_read_unlock(&kvm->srcu, idx);
4810 + }
4811 +
4812 +@@ -896,7 +907,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
4813 + #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
4814 + mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
4815 + #else
4816 +- kvm_arch_flush_shadow_all(kvm);
4817 ++ kvm_flush_shadow_all(kvm);
4818 + #endif
4819 + kvm_arch_destroy_vm(kvm);
4820 + kvm_destroy_devices(kvm);
4821 +@@ -1238,6 +1249,7 @@ static int kvm_set_memslot(struct kvm *kvm,
4822 + * - kvm_is_visible_gfn (mmu_check_root)
4823 + */
4824 + kvm_arch_flush_shadow_memslot(kvm, slot);
4825 ++ kvm_arch_guest_memory_reclaimed(kvm);
4826 + }
4827 +
4828 + r = kvm_arch_prepare_memory_region(kvm, new, mem, change);